From 049ac56c081381c524b3f8cbabb8e2504ca45c49 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Tue, 18 Aug 2020 17:14:38 +0800 Subject: [PATCH 01/37] Print user-friendly error message in core.ops [part 2] (#26377) --- paddle/fluid/pybind/op_function.h | 27 +++++++++++++++++--- paddle/fluid/pybind/op_function_generator.cc | 12 ++++++--- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/pybind/op_function.h b/paddle/fluid/pybind/op_function.h index b33555759f8ff..70b321f658cd2 100644 --- a/paddle/fluid/pybind/op_function.h +++ b/paddle/fluid/pybind/op_function.h @@ -90,15 +90,36 @@ CastPyHandleToVarBaseList(const std::string& op_type, return result; } // namespace pybind -static inline void ConstructAttrMapFromPyArgs(framework::AttributeMap* attrs, +static inline void ConstructAttrMapFromPyArgs(const std::string& op_type, + int start_idx, + framework::AttributeMap* attrs, const py::args& args) { PADDLE_ENFORCE_EQ( args.size() % 2, 0, platform::errors::InvalidArgument( "The number of arguments for arributes should be even.")); for (size_t i = 0; i < args.size(); i += 2) { - auto name = args[i].cast(); - auto value = args[i + 1].cast(); + std::string name; + framework::Attribute value; + try { + name = args[i].cast(); + } catch (std::exception& e) { + PyObject* py_obj = args[i].ptr(); // get underlying PyObject + PADDLE_THROW(platform::errors::InvalidArgument( + "%s(): argument (position %d) must be str, but got " + "%s", + op_type, start_idx + i, Py_TYPE(py_obj)->tp_name)); + } + try { + value = args[i + 1].cast(); + } catch (std::exception& e) { + PyObject* py_obj = args[i + 1].ptr(); // get underlying PyObject + PADDLE_THROW(platform::errors::InvalidArgument( + "%s(): argument (position %d) must be " + "Attribute type (one of str, bool, int, int64, float, or list of " + "them), but got %s", + op_type, start_idx + i + 1, Py_TYPE(py_obj)->tp_name)); + } (*attrs)[name] = value; } } diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc index 93ba9feedf95b..89770ccc8cec1 100644 --- a/paddle/fluid/pybind/op_function_generator.cc +++ b/paddle/fluid/pybind/op_function_generator.cc @@ -146,7 +146,7 @@ R"( { %s framework::AttributeMap attrs; - ConstructAttrMapFromPyArgs(&attrs, args); + ConstructAttrMapFromPyArgs("%s", %d, &attrs, args); { py::gil_scoped_release release; auto tracer = imperative::GetCurrentTracer(); @@ -204,6 +204,7 @@ GenerateOpFunctions(const std::string& module_name) { std::string ins_initializer_with_null = ""; std::string py_arg = ""; int arg_idx = 0; + int input_args_num = 0; std::string ins_cast_str = ""; for (auto& input : op_proto->inputs()) { auto& in_name = input.name(); @@ -216,6 +217,7 @@ GenerateOpFunctions(const std::string& module_name) { paddle::string::Sprintf(ARG_TEMPLATE, in_type, TempName(in_name)); input_args += input_arg; input_args += ","; + input_args_num++; const auto in_cast_type = input.duplicable() ? 
CAST_VAR_LIST_TEMPLATE : CAST_VAR_TEMPLATE; ins_cast_str += @@ -269,6 +271,7 @@ GenerateOpFunctions(const std::string& module_name) { } input_args += out_type; input_args += out_name; + input_args_num++; if (output.dispensable()) { const auto out_template = @@ -295,6 +298,7 @@ GenerateOpFunctions(const std::string& module_name) { auto out_num_str = paddle::string::Sprintf(ARG_OUT_NUM, out_name); input_args += ARG_OUT_NUM_TYPE; input_args += out_num_str; + input_args_num++; outs_initializer += paddle::string::Sprintf( OUT_DUPLICABLE_INITIALIZER_TEMPLATE, out_name, out_num_str); } else { @@ -334,9 +338,9 @@ GenerateOpFunctions(const std::string& module_name) { // generate op funtcion body auto op_function_str = paddle::string::Sprintf( OP_FUNCTION_TEMPLATE, return_type, func_name, function_args, - ins_cast_str, outs_initializer, ins_initializer, - ins_initializer_with_null + outs_initializer_with_null, op_type, - return_str); + ins_cast_str, op_type, input_args_num, outs_initializer, + ins_initializer, ins_initializer_with_null + outs_initializer_with_null, + op_type, return_str); // generate pybind item auto bind_function_str = paddle::string::Sprintf( From a83e0f264ce30be2acfa3fa2c4f1c2b7a97b6b82 Mon Sep 17 00:00:00 2001 From: Thunderbrook <52529258+Thunderbrook@users.noreply.github.com> Date: Tue, 18 Aug 2020 18:59:30 +0800 Subject: [PATCH 02/37] fix heter proto (#26093) test=develop --- paddle/fluid/framework/fleet/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/framework/fleet/CMakeLists.txt b/paddle/fluid/framework/fleet/CMakeLists.txt index 0d62488bfe67a..3eee0a1abbaf0 100644 --- a/paddle/fluid/framework/fleet/CMakeLists.txt +++ b/paddle/fluid/framework/fleet/CMakeLists.txt @@ -19,6 +19,6 @@ else() cc_library(gloo_wrapper SRCS gloo_wrapper.cc DEPS framework_proto variable_helper scope) endif(WITH_GLOO) -cc_library(heter_wrapper SRCS heter_wrapper.cc DEPS framework_proto device_context) +cc_library(heter_wrapper SRCS heter_wrapper.cc DEPS framework_proto device_context heter_service_proto) cc_test(test_fleet SRCS test_fleet.cc DEPS fleet_wrapper gloo_wrapper fs shell) From 5fdec3ed35696fcd16594e0e6e1b319391f7a75b Mon Sep 17 00:00:00 2001 From: Kaipeng Deng Date: Tue, 18 Aug 2020 19:43:56 +0800 Subject: [PATCH 03/37] fix test_metrics accuracy. test=develop (#26358) --- python/paddle/incubate/hapi/tests/test_metrics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/paddle/incubate/hapi/tests/test_metrics.py b/python/paddle/incubate/hapi/tests/test_metrics.py index 3d25a275d5f1c..19c94b73f61a2 100644 --- a/python/paddle/incubate/hapi/tests/test_metrics.py +++ b/python/paddle/incubate/hapi/tests/test_metrics.py @@ -40,7 +40,8 @@ def accuracy(pred, label, topk=(1, )): def convert_to_one_hot(y, C): - oh = np.random.random((y.shape[0], C)).astype('float32') * .5 + oh = np.random.choice(np.arange(C), C, replace=False).astype('float32') / C + oh = np.tile(oh[np.newaxis, :], (y.shape[0], 1)) for i in range(y.shape[0]): oh[i, int(y[i])] = 1. return oh From 638bbb6153ccf47d23c74e58719a86be2ace5c13 Mon Sep 17 00:00:00 2001 From: lilong12 Date: Tue, 18 Aug 2020 19:54:08 +0800 Subject: [PATCH 04/37] Improve expand as (#26290) align expand_as op to expand. 
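For reference, a minimal usage sketch of the new paddle.expand_as API added by
this patch, mirroring the docstring example in the diff below (dygraph mode is
assumed; x is broadcast to the shape of y, so every dimension of x that gets
expanded must have size 1):

    import numpy as np
    import paddle

    paddle.disable_static()
    x = paddle.to_variable(np.array([1, 2, 3]).astype('int32'))
    y = paddle.to_variable(np.array([[1, 2, 3], [4, 5, 6]]).astype('int32'))
    out = paddle.expand_as(x, y)   # expand x to y's shape [2, 3]
    # out.numpy() -> [[1, 2, 3], [1, 2, 3]]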
--- paddle/fluid/operators/expand_as_v2_op.cc | 150 ++++++++++++ paddle/fluid/operators/expand_as_v2_op.cu | 26 +++ paddle/fluid/operators/expand_as_v2_op.h | 214 ++++++++++++++++++ python/paddle/__init__.py | 1 + .../tests/unittests/test_expand_as_v2_op.py | 121 ++++++++++ .../tests/unittests/test_expand_v2_op.py | 2 +- .../fluid/tests/unittests/test_tile_op.py | 50 ++-- python/paddle/tensor/__init__.py | 1 + python/paddle/tensor/manipulation.py | 197 ++++++++-------- 9 files changed, 642 insertions(+), 120 deletions(-) create mode 100644 paddle/fluid/operators/expand_as_v2_op.cc create mode 100644 paddle/fluid/operators/expand_as_v2_op.cu create mode 100644 paddle/fluid/operators/expand_as_v2_op.h create mode 100755 python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py diff --git a/paddle/fluid/operators/expand_as_v2_op.cc b/paddle/fluid/operators/expand_as_v2_op.cc new file mode 100644 index 0000000000000..495b640bb4399 --- /dev/null +++ b/paddle/fluid/operators/expand_as_v2_op.cc @@ -0,0 +1,150 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/expand_as_v2_op.h" +#include +#include + +namespace paddle { +namespace operators { + +using framework::Tensor; + +class ExpandAsV2Op : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAsV2"); + OP_INOUT_CHECK(ctx->HasInput("target_tensor"), "Input", "target_tensor", + "ExpandAsV2"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ExpandAsV2"); + auto x_dims = ctx->GetInputDim("X"); + auto target_tensor_dims = ctx->GetInputDim("target_tensor"); + PADDLE_ENFORCE_GE( + target_tensor_dims.size(), static_cast(x_dims.size()), + platform::errors::InvalidArgument( + "The rank of Input(target_tensor) must be greater than or equal " + "to the rank of Input(X). But received Input(X): input " + "rank %u, input shape [%s]; received Input(target_tensor): " + "input rank %u, input shape [%s].", + x_dims.size(), x_dims, target_tensor_dims.size(), + target_tensor_dims)); + PADDLE_ENFORCE_LE( + target_tensor_dims.size(), MAX_RANK_SUPPORTED, + platform::errors::InvalidArgument( + "The rank of Input(target_tensor) must not be less than or equal " + "to %d. But received: input rank %u, input shape [%s].", + MAX_RANK_SUPPORTED, x_dims.size(), x_dims)); + std::vector out_shape(target_tensor_dims.size()); + ctx->SetOutputDim("Out", framework::make_ddim(out_shape)); + } +}; + +class ExpandAsV2OpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor, default Tensor). A tensor with rank in [1, 6]." + "X is the input to be expanded."); + AddOutput("Out", + "(Tensor, default Tensor). A tensor with rank in [1, 6]." + "The rank of Output(Out) have the same with Input(X). 
" + "After expanding, size of each dimension of Output(Out) is equal " + "to size of the corresponding dimension of Input(X) multiplying " + "the corresponding value given by Attr(expand_times)."); + AddInput("target_tensor", "Expand tensor's shape for each dimension."); + AddComment(R"DOC( +Expand the input by given times number. You should set times +number for each dimension by providing tensor 'expend_tensor'. The rank of X +should be in [1, 6]. Please note that size of 'expend_tensor' must be the same +with X's rank. Following is a using case: +Input(X) is a 3-D tensor with shape [2, 3, 1]: + [ + [[1], [2], [3]], + [[4], [5], [6]] + ] +target_tensors'shape: [2, 6, 2] +Output(Out) is a 3-D tensor with shape [2, 6, 2]: + [ + [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], + [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] + ] +)DOC"); + } +}; + +class ExpandAsV2GradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAsV2Grad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "ExpandAsV2Grad"); + + auto x_dims = ctx->GetInputDim("X"); + auto x_grad_name = framework::GradVarName("X"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + } + } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType( + ctx, framework::GradVarName("Out")), + ctx.device_context()); + } +}; + +template +class ExpandAsV2GradOpMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + + protected: + void Apply(GradOpPtr op) const override { + op->SetType("expand_as_v2_grad"); + op->SetInput("X", this->Input("X")); + op->SetInput("target_tensor", this->Input("target_tensor")); + op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); + op->SetAttrMap(this->Attrs()); + } +}; + +DECLARE_NO_NEED_BUFFER_VARS_INFERER(ExpandAsV2GradNoNeedBufVarsInferer, "X"); + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(expand_as_v2, ops::ExpandAsV2Op, ops::ExpandAsV2OpMaker, + ops::ExpandAsV2GradOpMaker, + ops::ExpandAsV2GradOpMaker); +REGISTER_OPERATOR(expand_as_v2_grad, ops::ExpandAsV2GradOp, + ops::ExpandAsV2GradNoNeedBufVarsInferer); +REGISTER_OP_CPU_KERNEL( + expand_as_v2, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel); +REGISTER_OP_CPU_KERNEL( + expand_as_v2_grad, + ops::ExpandAsV2GradKernel, + ops::ExpandAsV2GradKernel, + ops::ExpandAsV2GradKernel, + ops::ExpandAsV2GradKernel); diff --git a/paddle/fluid/operators/expand_as_v2_op.cu b/paddle/fluid/operators/expand_as_v2_op.cu new file mode 100644 index 0000000000000..e315144472dd9 --- /dev/null +++ b/paddle/fluid/operators/expand_as_v2_op.cu @@ -0,0 +1,26 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +#include "paddle/fluid/operators/expand_as_v2_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + expand_as_v2, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel, + ops::ExpandAsV2Kernel); +REGISTER_OP_CUDA_KERNEL( + expand_as_v2_grad, + ops::ExpandAsV2GradKernel, + ops::ExpandAsV2GradKernel, + ops::ExpandAsV2GradKernel, + ops::ExpandAsV2GradKernel); diff --git a/paddle/fluid/operators/expand_as_v2_op.h b/paddle/fluid/operators/expand_as_v2_op.h new file mode 100644 index 0000000000000..a4c30dfe1298d --- /dev/null +++ b/paddle/fluid/operators/expand_as_v2_op.h @@ -0,0 +1,214 @@ +/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +#define MAX_RANK_SUPPORTED 6 + +#define EXPAND_AS_TEMPLATE(z, n, data) \ + case n + 1: { \ + ExpandAs(context); \ + break; \ + } +#define REP_EXPAND_AS_TEMPLATE(n) BOOST_PP_REPEAT(n, EXPAND_AS_TEMPLATE, ~) +#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED)) +#define EXPAND_AS_GRAD_CASE(n) \ + case n: { \ + ExpandAsBackward(context, reshape_dims_vec, reduce_dims_vec); \ + break; \ + } +#define EXPAND_AS_GRAD_TEMPLATE(z, n, data) \ + BOOST_PP_IF(COND(n), EXPAND_AS_GRAD_CASE(n), ) +#define REP_EXPAND_AS_GRAD_TEMPLATE(n) \ + BOOST_PP_REPEAT(n, EXPAND_AS_GRAD_TEMPLATE, ~) + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +template +using EigenVector = framework::EigenVector; +template +using EigenTensor = framework::EigenTensor; + +template +class ExpandAsV2Kernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto rank = context.Input("X")->dims().size(); + auto* target_tensor = context.Input("target_tensor"); + auto target_rank = target_tensor->dims().size(); + PADDLE_ENFORCE_GE(target_rank, rank, + platform::errors::InvalidArgument( + "The rank (%d) of the input 'target_tensor' for " + "expand_as_v2 op must be greater than or equal to " + "the rank (%d) of the input 'x'.", + target_rank, rank)); + PADDLE_ENFORCE_GE(rank, 1, platform::errors::InvalidArgument( + "The rank (%d) of the input 'x' for " + "expand_as_v2 op must be positive.", + rank)); + PADDLE_ENFORCE_LE(target_rank, MAX_RANK_SUPPORTED, + platform::errors::InvalidArgument( + "The rank (%d) of the input 'target_tensor' for " + "expand_as_v2 op 
must be less than or equal to %d.", + target_rank, MAX_RANK_SUPPORTED)); + + switch (target_rank) { REP_EXPAND_AS_TEMPLATE(MAX_RANK_SUPPORTED) } + } + + protected: + template + void ExpandAs(const framework::ExecutionContext& context) const { + auto* in0 = context.Input("X"); + auto in_dims = in0->dims(); + auto* target_tensor = context.Input("target_tensor"); + auto vec_in_dims = framework::vectorize(in_dims); + auto target_shape = framework::vectorize(target_tensor->dims()); + auto diff = target_shape.size() - vec_in_dims.size(); + vec_in_dims.insert(vec_in_dims.begin(), diff, 1); + std::vector repeat_times(vec_in_dims.size()); + for (size_t i = 0; i < vec_in_dims.size(); ++i) { + PADDLE_ENFORCE_NE(target_shape[i], 0, + platform::errors::InvalidArgument( + "The value of target shape cannot be zero.")); + if (vec_in_dims[i] != 1) { + PADDLE_ENFORCE_EQ( + vec_in_dims[i], target_shape[i], + platform::errors::InvalidArgument( + "The value (%d) of the non-singleton dimension does not match" + " the corresponding value (%d) in " + "target tensor for expand_as_v2 op.", + vec_in_dims[i], target_shape[i])); + repeat_times[i] = 1; + } else { + repeat_times[i] = target_shape[i]; + } + } + auto* out0 = context.Output("Out"); + Eigen::DSizes bcast_dims; + for (size_t i = 0; i < repeat_times.size(); ++i) { + bcast_dims[i] = repeat_times[i]; + } + + framework::DDim new_in_dims = framework::make_ddim(vec_in_dims); + framework::DDim out_dims = framework::make_ddim(target_shape); + + out0->Resize(out_dims); + auto x = EigenTensor::From(*in0, new_in_dims); + out0->mutable_data(context.GetPlace()); + auto y = EigenTensor::From(*out0, out_dims); + auto& place = + *context.template device_context().eigen_device(); + y.device(place) = x.broadcast(bcast_dims); + } +}; + +template +class ExpandAsV2GradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto* in0 = context.Input("X"); + auto* target_tensor = context.Input("target_tensor"); + auto x_dims = in0->dims(); + auto target_shape = target_tensor->dims(); + auto vec_in_dims = framework::vectorize(x_dims); + auto diff = target_shape.size() - vec_in_dims.size(); + vec_in_dims.insert(vec_in_dims.begin(), diff, 1); + std::vector repeat_times(vec_in_dims.size()); + for (size_t i = 0; i < vec_in_dims.size(); ++i) { + repeat_times[i] = target_shape[i] / vec_in_dims[i]; + } + std::vector reshape_dims_vec; + std::vector reduce_dims_vec; + for (size_t i = 0; i < repeat_times.size(); ++i) { + reduce_dims_vec.push_back(reshape_dims_vec.size()); + reshape_dims_vec.push_back(repeat_times[i]); + reshape_dims_vec.push_back(vec_in_dims[i]); + } + + int dims = reduce_dims_vec.size(); + bool just_copy = true; + for (size_t i = 0; i < repeat_times.size(); i++) { + if (repeat_times[i] != 1) { + just_copy = false; + break; + } + } + // no need reduce, just copy + if (just_copy) { + auto* in0 = context.Input(framework::GradVarName("Out")); + auto* out0 = context.Output(framework::GradVarName("X")); + out0->mutable_data(context.GetPlace()); + framework::TensorCopy(*in0, context.GetPlace(), context.device_context(), + out0); + } else { + PADDLE_ENFORCE_GE(dims, 1, + platform::errors::InvalidArgument( + "The rank of the input 'Out@GRAD' for " + "expand_as_v2_grad op must be greater than or " + "equal to 1, but the value received is %d.", + dims)); + PADDLE_ENFORCE_LE(dims, MAX_RANK_SUPPORTED, + platform::errors::InvalidArgument( + "The rank of the input 'Out@GRAD' for " + "expand_as_v2_grad op must be less 
than or equal " + "to %d, but the value received is %d.", + MAX_RANK_SUPPORTED, dims)); + switch (dims) { REP_EXPAND_AS_GRAD_TEMPLATE(MAX_RANK_SUPPORTED) } + } + } + + protected: + template + void ExpandAsBackward(const framework::ExecutionContext& context, + const std::vector& reshape_dims_vec, + const std::vector& reduce_dims_vec) const { + size_t reshape_size = reshape_dims_vec.size(); + size_t reduce_size = reduce_dims_vec.size(); + auto* in0 = context.Input(framework::GradVarName("Out")); + auto* out0 = context.Output(framework::GradVarName("X")); + out0->mutable_data(context.GetPlace()); + auto x_grad = EigenVector::Flatten(*out0); + Eigen::DSizes reshape_dims; + for (size_t i = 0; i < reshape_size; ++i) { + reshape_dims[i] = reshape_dims_vec[i]; + } + Eigen::DSizes reduce_dims; + for (size_t i = 0; i < reduce_size; ++i) { + reduce_dims[i] = reduce_dims_vec[i]; + } + auto out_grad = EigenVector::Flatten(*in0); + x_grad.device( + *context.template device_context().eigen_device()) = + out_grad.reshape(reshape_dims) + .sum(reduce_dims) + .reshape(x_grad.dimensions()); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 50567ea80dd76..d99f51b666227 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -101,6 +101,7 @@ from .tensor.manipulation import cast #DEFINE_ALIAS from .tensor.manipulation import concat #DEFINE_ALIAS from .tensor.manipulation import expand #DEFINE_ALIAS +from .tensor.manipulation import broadcast_to #DEFINE_ALIAS from .tensor.manipulation import expand_as #DEFINE_ALIAS from .tensor.manipulation import tile #DEFINE_ALIAS from .tensor.manipulation import flatten #DEFINE_ALIAS diff --git a/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py new file mode 100755 index 0000000000000..0ccb725870cde --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_expand_as_v2_op.py @@ -0,0 +1,121 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import print_function + +import unittest +import numpy as np +from op_test import OpTest +import paddle +import paddle.fluid as fluid + + +class TestExpandAsOpRank1(OpTest): + def setUp(self): + self.op_type = "expand_as_v2" + x = np.random.rand(100).astype("float64") + target_tensor = np.random.rand(2, 100).astype("float64") + self.inputs = {'X': x, 'target_tensor': target_tensor} + self.attrs = {} + bcast_dims = [2, 1] + output = np.tile(self.inputs['X'], bcast_dims) + self.outputs = {'Out': output} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestExpandAsOpRank2(OpTest): + def setUp(self): + self.op_type = "expand_as_v2" + x = np.random.rand(10, 12).astype("float64") + target_tensor = np.random.rand(10, 12).astype("float64") + self.inputs = {'X': x, 'target_tensor': target_tensor} + self.attrs = {} + bcast_dims = [1, 1] + output = np.tile(self.inputs['X'], bcast_dims) + self.outputs = {'Out': output} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestExpandAsOpRank3(OpTest): + def setUp(self): + self.op_type = "expand_as_v2" + x = np.random.rand(2, 3, 20).astype("float64") + target_tensor = np.random.rand(2, 3, 20).astype("float64") + self.inputs = {'X': x, 'target_tensor': target_tensor} + self.attrs = {} + bcast_dims = [1, 1, 1] + output = np.tile(self.inputs['X'], bcast_dims) + self.outputs = {'Out': output} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestExpandAsOpRank4(OpTest): + def setUp(self): + self.op_type = "expand_as_v2" + x = np.random.rand(1, 1, 7, 16).astype("float64") + target_tensor = np.random.rand(4, 6, 7, 16).astype("float64") + self.inputs = {'X': x, 'target_tensor': target_tensor} + self.attrs = {} + bcast_dims = [4, 6, 1, 1] + output = np.tile(self.inputs['X'], bcast_dims) + self.outputs = {'Out': output} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +# Test python API +class TestExpandAPI(unittest.TestCase): + def test_api(self): + input1 = np.random.random([12, 14]).astype("float32") + input2 = np.random.random([2, 12, 14]).astype("float32") + x = fluid.layers.data( + name='x', shape=[12, 14], append_batch_size=False, dtype="float32") + + y = fluid.layers.data( + name='target_tensor', + shape=[2, 12, 14], + append_batch_size=False, + dtype="float32") + + out_1 = paddle.expand_as(x, y=y) + + exe = fluid.Executor(place=fluid.CPUPlace()) + res_1 = exe.run(fluid.default_main_program(), + feed={"x": input1, + "target_tensor": input2}, + fetch_list=[out_1]) + assert np.array_equal(res_1[0], np.tile(input1, (2, 1, 1))) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py index 94669bc28f955..aee6ca249f535 100644 --- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py +++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py @@ -193,7 +193,7 @@ def test_errors(self): x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") self.assertRaises(TypeError, paddle.tensor.expand, x2, shape) x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") - x3.stop_gradient = True + x3.stop_gradient = False self.assertRaises(ValueError, paddle.tensor.expand, x3, shape) diff --git 
a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py index 0fc8efbd49e56..73e6ff5dbd69c 100644 --- a/python/paddle/fluid/tests/unittests/test_tile_op.py +++ b/python/paddle/fluid/tests/unittests/test_tile_op.py @@ -22,7 +22,7 @@ from paddle.fluid import compiler, Program, program_guard -# Situation 1: repeat_times is a list(without tensor) +# Situation 1: repeat_times is a list (without tensor) class TestTileOpRank1(OpTest): def setUp(self): self.op_type = "tile" @@ -81,7 +81,7 @@ def init_data(self): self.repeat_times = (3, 2, 1, 2) -# Situation 2: repeat_times is a list(with tensor) +# Situation 2: repeat_times is a list (with tensor) class TestTileOpRank1_tensor_attr(OpTest): def setUp(self): self.op_type = "tile" @@ -162,7 +162,7 @@ def setUp(self): self.op_type = "tile" self.inputs = { 'X': np.random.randint( - 10, size=(2, 4, 5)).astype("int32") + 10, size=(4, 4, 5)).astype("int32") } self.attrs = {'repeat_times': [2, 1, 4]} output = np.tile(self.inputs['X'], (2, 1, 4)) @@ -211,38 +211,30 @@ def test_errors(self): x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8") self.assertRaises(TypeError, paddle.tile, x2, repeat_times) x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool") - x3.stop_gradient = True + x3.stop_gradient = False self.assertRaises(ValueError, paddle.tile, x3, repeat_times) # Test python API class TestTileAPI(unittest.TestCase): def test_api(self): - input = np.random.random([12, 14]).astype("float32") - x = fluid.layers.data( - name='x', shape=[12, 14], append_batch_size=False, dtype="float32") - - positive_2 = fluid.layers.fill_constant([1], "int32", 2) - repeat_times = fluid.layers.data( - name="repeat_times", shape=[2], append_batch_size=False) - - out_1 = paddle.tile(x, repeat_times=[2, 3]) - out_2 = paddle.tile(x, repeat_times=[positive_2, 3]) - out_3 = paddle.tile(x, repeat_times=repeat_times) - - g0 = fluid.backward.calc_gradient(out_2, x) - - exe = fluid.Executor(place=fluid.CPUPlace()) - res_1, res_2, res_3 = exe.run(fluid.default_main_program(), - feed={ - "x": input, - "repeat_times": - np.array([1, 3]).astype("int32") - }, - fetch_list=[out_1, out_2, out_3]) - assert np.array_equal(res_1, np.tile(input, (2, 3))) - assert np.array_equal(res_2, np.tile(input, (2, 3))) - assert np.array_equal(res_3, np.tile(input, (1, 3))) + with fluid.dygraph.guard(): + np_x = np.random.random([12, 14]).astype("float32") + x = paddle.to_variable(np_x) + + positive_2 = np.array([2]).astype("int32") + positive_2 = paddle.to_variable(positive_2) + + repeat_times = np.array([2, 3]).astype("int32") + repeat_times = paddle.to_variable(repeat_times) + + out_1 = paddle.tile(x, repeat_times=[2, 3]) + out_2 = paddle.tile(x, repeat_times=[positive_2, 3]) + out_3 = paddle.tile(x, repeat_times=repeat_times) + + assert np.array_equal(out_1.numpy(), np.tile(np_x, (2, 3))) + assert np.array_equal(out_2.numpy(), np.tile(np_x, (2, 3))) + assert np.array_equal(out_3.numpy(), np.tile(np_x, (2, 3))) if __name__ == "__main__": diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index aa0d8c408899a..2ab604638afae 100644 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -74,6 +74,7 @@ from .manipulation import cast #DEFINE_ALIAS from .manipulation import concat #DEFINE_ALIAS from .manipulation import expand #DEFINE_ALIAS +from .manipulation import broadcast_to #DEFINE_ALIAS from .manipulation import expand_as #DEFINE_ALIAS from .manipulation import tile #DEFINE_ALIAS from 
.manipulation import flatten #DEFINE_ALIAS diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 9e2b7286ba677..b60ffe9210d2a 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -23,7 +23,6 @@ import numpy as np # TODO: define functions to manipulate a tensor from ..fluid.layers import cast #DEFINE_ALIAS -from ..fluid.layers import expand_as #DEFINE_ALIAS from ..fluid.layers import reshape #DEFINE_ALIAS from ..fluid.layers import scatter #DEFINE_ALIAS from ..fluid.layers import slice #DEFINE_ALIAS @@ -44,6 +43,7 @@ 'cast', 'concat', 'expand', + 'broadcast_to', 'expand_as', 'flatten', 'gather', @@ -791,80 +791,58 @@ def unbind(input, axis=0): def tile(x, repeat_times, name=None): """ - :alias_main: paddle.tile - :alias: paddle.tile,paddle.tensor.tile,paddle.tensor.manipulation.tile - Construct a new tensor by repeating ``x`` the number of times given by the parameter ``repeat_times``. - The rank of ``x`` should be less than or equal to 6, and the size of the shape of ``repeat_times`` should - be less than or equal to 6. - If the size of the parameter ``repeat_times`` is ``d``, and the rank for ``x`` is ``r``, then the number - of dimensions for the result is ``max(d, r)``. - If ``r < d``, ``x`` if first promoted to a d-dimensional tensor by inserting new axes at the beginning. - For example, a tensor ``x`` with the shape(3,) is promoted to a 2-D tensor with the shape (1, 3) if ``d`` is 2 - and a 3-D tensor with the shape(1, 1, 3) if ``d`` is 3. - If ``r > d``, ``repeat_times`` is first promoted by inserting 1's at the beginning. - For example, if the tensor ``x`` is with a shape(4, 3, 2, 2) and ``repeat_times`` is a tuple (3, 2), - ``repeat_times`` is first promoted to a tuple (1, 1, 3, 2). - The following gives an using case: - .. code-block:: text - Input(x) is a 3-D tensor of shape (2, 3, 1): - [ - [[1], [2], [3]], - [[4], [5], [6]] - ] - Attr(repeat_times): [1, 2, 2] - Output(out) is a 3-D tensor of shape (2, 6, 2): - [ - [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]], - [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]] - ] + + Construct a new Tensor by repeating ``x`` the number of times given by ``repeat_times``. + After tiling, the number of elements of the i'th dimension of the output is equal to ``x.dims[i] * repeat_times[i]``. + + Both the number of dimensions of ``x`` and the number of elements in ``repeat_times`` should be less than or equal to 6. + Args: - x (Tensor): The input tensor, its data type should be bool, float32, float64, int32. The rank of ``x`` should be in [1, 6]. - repeat_times (Tensor|tuple|list): The number of repeating times for each dimension of the input ``x``. If repeat_times is a list or tuple, the elements of - it should be integers or Tensors with shape [1]. If repeat_times is Tensor, it should be an 1-D Tensor. The size of its shape should be in [1, 6]. - name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name` . + x (Tensor): The input tensor, its data type should be bool, float32, float64, int32 or int64. + repeat_times (Tensor|tuple|list): The number of repeating times. If repeat_times is a list or tuple, all its elements + should be integers or 1-D Tensors with the data type int32. If repeat_times is a Tensor, it should be an 1-D Tensor with the data type int32. + name (str, optional): Name for the operation (optional, default is None). 
For more information, please refer to :ref:`api_guide_Name`. + Returns: - N-D Tensor. The data type is the same as ``x``. After tiling, each dimension of the output is equal to the corresponding dimension of ``x`` multiplying the corresponding value given by ``repeat_times`` . - Raises: - TypeError: The type of ``repeat_times`` must be list, tuple or Tensor. - ValueError: The elements of ``repeat_times`` cannot be negative. + N-D Tensor. The data type is the same as ``x``. + Examples: .. code-block:: python + import paddle import numpy as np + paddle.disable_static() - # example 1: - np_data_1 = np.array([1, 2, 3]).astype('int32') - data_1 = paddle..to_variable(np_data_1) - tiled_1 = paddle.tile(data_1, repeat_times=[2, 1]) + np_data = np.array([1, 2, 3]).astype('int32') + data = paddle.to_variable(np_data) + out = paddle.tile(data, repeat_times=[2, 1]) + np_out = out1.numpy() # [[1, 2, 3], [1, 2, 3]] - # example 2: + + out = paddle.tile(data, repeat_times=[2, 2]) + np_out = out.numpy() + # [[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]] + np_repeat_times = np.array([2, 1]).astype("int32") repeat_times = paddle.to_variable(np_repeat_times) - tiled_2 = paddle.tile(data_1, repeat_times=repeat_times) + out = paddle.tile(data, repeat_times=repeat_times) + np_out = out.numpy() # [[1, 2, 3], [1, 2, 3]] """ - if in_dygraph_mode(): - if isinstance(repeat_times, (list, tuple)): - repeat_times = [ - item.numpy()[0] if isinstance(item, Variable) else item - for item in repeat_times - ] - - return core.ops.tile(x, 'repeat_times', repeat_times) - - inputs = {"X": [x]} - attrs = {} check_variable_and_dtype( x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile') check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile') - if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True: + if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: raise ValueError( "When the date type is bool for the input 'x' of tile op, you " - "must set its stop_gradient to be False by " - "some_var.stop_gradient == True supporting some_var is the input.") + "must set its stop_gradient to be True by " + "some_var.stop_gradient == True supporting some_var as the input.") helper = LayerHelper('tile', input=x, **locals()) + inputs = {"X": [x]} + attrs = {} + def get_attr_repeat_times(list_repeat_times): attrs_repeat_times = [] for idx, times in enumerate(list_repeat_times): @@ -873,13 +851,13 @@ def get_attr_repeat_times(list_repeat_times): else: attrs_repeat_times.append(times) assert times > 0, ( - "Every element given in repeat_times must be positive.") + "All elements in repeat_times must be positive for tile.") return attrs_repeat_times if isinstance(repeat_times, Variable): repeat_times.stop_gradient = True inputs['RepeatTimes'] = repeat_times - attrs['repeat_times'] = [-1] * len(repeat_times.shape) + attrs['repeat_times'] = [-1] elif isinstance(repeat_times, (list, tuple)): attrs['repeat_times'] = get_attr_repeat_times(repeat_times) if utils._contain_var(repeat_times): @@ -893,67 +871,103 @@ def get_attr_repeat_times(list_repeat_times): return out +def expand_as(x, y, name=None): + """ + + Expand the input tensor ``x`` to the same shape as the input tensor ``y``. + + Both the number of dimensions of ``x`` and ``y`` must be less than or equal to 6, and the number of dimensions of ``y`` must be greather than or equal to that of ``x``. The dimension to expand must have a value of 1. + + Args: + x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64. 
+ y (Tensor): The input tensor gives the shape that ``x`` to expand to. + name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + N-D Tensor: A Tensor with the same shape as ``y``. The data type is the same as ``x``. + + Examples: + .. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + + np_data_x = np.array([1, 2, 3]).astype=('int32) + np_data_y = np.array([[1, 2, 3], [4, 5, 6]]).astype=('int32) + data_x = paddle.to_variable(np_data_x) + data_y = paddle.to_variable(np_data_y) + out = paddle.expand_as(data_x, data_y) + np_out = out.numpy() + # [[1, 2, 3], [1, 2, 3]] + """ + check_variable_and_dtype( + x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as') + check_type(y, 'y', Variable, 'expand_as') + + if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: + raise ValueError( + "When the data type of input 'x' for expand_as is bool, " + "you must set its stop_gradient to be False by " + "some_var.stop_gradient = True, supporting " + "some_var as the input 'x'.") + inputs = {"X": [x], "target_tensor": [y]} + + helper = LayerHelper('expand_as', input=x, **locals()) + dtype = helper.input_dtype(input_param_name='x') + out = helper.create_variable_for_type_inference(dtype) + helper.append_op(type='expand_as_v2', inputs=inputs, outputs={'Out': out}) + return out + + def expand(x, shape, name=None): """ - :alias_main: paddle.expand - :alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand Expand the input tensor to a given shape. - The rank of ``x`` should be less than or equal to 6, and the number of elements in ``shape`` should also be less than or equal to 6. The size of the dimension to expand must be 1. + Both the number of dimensions of ``x`` and the number of elements in ``shape`` should be less than or equal to 6. The dimension to expand must have a value 1. Args: - x (Tensor): The input Tensor with rank in [1, 6]. The data type is bool, float32, float64 or int32. - shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all elements of - it should be integers or Tensors with shape (1,). If shape is a Tensor, it should be an 1-D Tensor. - The value -1 in shape, means keeping the corresponding dimension unchanged. + x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64. + shape (list|tuple|Tensor): The result shape after expanding. The data type is int32. If shape is a list or tuple, all its elements + should be integers or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32. + The value -1 in shape means keeping the corresponding dimension unchanged. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . Returns: - Tensor: A Tensor with the given shape. The data type is the same as ``x``. - - Raises: - TypeError: The type of ``shape`` must be list, tuple or Variable. - ValueError: The elements of ``shape`` must be positive or -1. + N-D Tensor: A Tensor with the given shape. The data type is the same as ``x``. Examples: .. 
code-block:: python import numpy as np import paddle - paddle.disable_static() - # example 1: - np_data_1 = np.array([1, 2, 3]).astype=('int32) - data_1 = paddle.to_variable(np_data_1) - expanded_1 = paddle.expand(data_1, shape=[2, 3]) + paddle.disable_static() + np_data = np.array([1, 2, 3]).astype=('int32) + data = paddle.to_variable(np_data) + out = paddle.expand(data, shape=[2, 3]) + out = out.numpy() # [[1, 2, 3], [1, 2, 3]] - # example 2: np_shape = np.array([2, 3]).astype=('int32) shape = paddle.to_variable(np_shape) - expanded_2 = paddle.expand(data_1, shape=shape) + out = paddle.expand(data, shape=shape) + out = out.numpy # [[1, 2, 3], [1, 2, 3]] """ - if in_dygraph_mode(): - if isinstance(shape, (list, tuple)): - expand_shape = [ - item.numpy()[0] if isinstance(item, Variable) else item - for item in shape - ] - - return core.ops.expand_v2(x, 'shape', expand_shape) - - inputs = {"X": [x]} - attrs = {} check_variable_and_dtype( x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand') check_type(shape, 'shape', (list, tuple, Variable), 'expand') - if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True: + + inputs = {"X": [x]} + attrs = {} + if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: raise ValueError("When the data type of input 'x' for expand is bool, " "you must set its stop_gradient to be False by " - " some_var.stop_gradient = False, supporting " + "some_var.stop_gradient = True, supporting " "some_var as the input.") helper = LayerHelper('expand', input=x, **locals()) @@ -966,7 +980,7 @@ def get_attr_expand_shape(list_expand_shape): else: attrs_expand_shape.append(shape) assert shape > 0 or shape == -1, ( - "Every element in shape must be positive or -1.") + "All elements in shape of expand must be positive or -1.") return attrs_expand_shape if isinstance(shape, Variable): @@ -983,3 +997,6 @@ def get_attr_expand_shape(list_expand_shape): helper.append_op( type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs) return out + + +broadcast_to = expand From abfdffa03960a4bff0f6366b4ae3069378c2110c Mon Sep 17 00:00:00 2001 From: Sylwester Fraczek Date: Tue, 18 Aug 2020 15:44:53 +0200 Subject: [PATCH 05/37] add use_mkldnn attribute to ops in dygraph (#25773) --- .../fluid/dygraph/layer_object_helper.py | 12 ++-- python/paddle/fluid/dygraph/nn.py | 55 ++++++++++++------- python/paddle/fluid/dygraph_utils.py | 6 +- python/paddle/fluid/layers/nn.py | 7 ++- .../tests/unittests/test_imperative_basic.py | 11 ++++ 5 files changed, 61 insertions(+), 30 deletions(-) diff --git a/python/paddle/fluid/dygraph/layer_object_helper.py b/python/paddle/fluid/dygraph/layer_object_helper.py index f2e914a2137d0..a904f80639752 100644 --- a/python/paddle/fluid/dygraph/layer_object_helper.py +++ b/python/paddle/fluid/dygraph/layer_object_helper.py @@ -136,18 +136,13 @@ def get_parameter(self, name): return param # TODO: this should not be called anymore after all activation func move to Layers - def append_activation(self, - input_var, - act=None, - use_cudnn=None, - use_mkl_dnn=None): + def append_activation(self, input_var, act=None, use_cudnn=None): """Append activation Args: input_var: the input variable. The len(input_var.shape) is larger or equal than 2. 
act: activation type - use_mkl_dnn: if use mkldnn use_cudnn: if use cudnn Return the Variable of after append activation @@ -163,8 +158,9 @@ def append_activation(self, if (use_cudnn is not None) and use_cudnn: act['use_cudnn'] = use_cudnn - if (use_mkl_dnn is not None) and use_mkl_dnn: - act['use_mkldnn'] = use_mkl_dnn + use_mkldnn = core.globals()["FLAGS_use_mkldnn"] + if (use_mkldnn is not None) and use_mkldnn: + act['use_mkldnn'] = use_mkldnn act_type = act.pop('type') tmp = self.create_variable_for_type_inference(dtype=input_var.dtype) diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index e56f26f1b1b94..a7ffc2dd63ae6 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -180,6 +180,7 @@ def __init__(self, if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") self._use_cudnn = use_cudnn + self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"] self._filter_size = filter_size self._num_filters = num_filters self._param_attr = param_attr @@ -187,7 +188,8 @@ def __init__(self, self._dtype = dtype if (self._num_channels == self._groups and - num_filters % self._num_channels == 0 and not self._use_cudnn): + num_filters % self._num_channels == 0 and + not self._use_cudnn and not self._use_mkldnn): self._l_type = 'depthwise_conv2d' else: self._l_type = 'conv2d' @@ -224,14 +226,15 @@ def forward(self, input): if in_dygraph_mode() and self._l_type == 'conv2d': attrs = ('strides', self._stride, 'paddings', self._padding, 'dilations', self._dilation, 'groups', self._groups - if self._groups else 1, 'use_cudnn', self._use_cudnn) + if self._groups else 1, 'use_cudnn', self._use_cudnn, + 'use_mkldnn', self._use_mkldnn) out = core.ops.conv2d(input, self.weight, *attrs) pre_bias = out - pre_act = dygraph_utils._append_bias_in_dygraph(pre_bias, self.bias, - 1) - return dygraph_utils._append_activation_in_dygraph(pre_act, - self._act) + pre_act = dygraph_utils._append_bias_in_dygraph( + pre_bias, self.bias, 1, use_mkldnn=self._use_mkldnn) + return dygraph_utils._append_activation_in_dygraph( + pre_act, self._act, use_mkldnn=self._use_mkldnn) inputs = { 'Input': [input], 'Filter': [self.weight], @@ -242,7 +245,7 @@ def forward(self, input): 'dilations': self._dilation, 'groups': self._groups if self._groups else 1, 'use_cudnn': self._use_cudnn, - 'use_mkldnn': False, + 'use_mkldnn': self._use_mkldnn, } check_variable_and_dtype(input, 'input', @@ -267,7 +270,8 @@ def forward(self, input): inputs={'X': [pre_bias], 'Y': [self.bias]}, outputs={'Out': [pre_act]}, - attrs={'axis': 1}) + attrs={'axis': 1, + 'use_mkldnn': self._use_mkldnn}) else: pre_act = pre_bias @@ -828,6 +832,8 @@ def __init__(self, if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") + self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"] + if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. 
Received " @@ -853,8 +859,8 @@ def forward(self, input): 'global_pooling', self._global_pooling, 'strides', self._pool_stride, 'paddings', self._pool_padding, 'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode, - 'use_mkldnn', False, 'exclusive', self._exclusive, - 'data_format', self._data_format) + 'use_mkldnn', self._use_mkldnn, 'exclusive', + self._exclusive, 'data_format', self._data_format) return core.ops.pool2d(input, *attrs) check_variable_and_dtype( @@ -869,7 +875,7 @@ def forward(self, input): "paddings": self._pool_padding, "use_cudnn": self._use_cudnn, "ceil_mode": self._ceil_mode, - "use_mkldnn": False, + "use_mkldnn": self._use_mkldnn, "exclusive": self._exclusive, "data_format": self._data_format, } @@ -958,16 +964,22 @@ def __init__(self, self.bias = self.create_parameter( shape=[output_dim], attr=bias_attr, dtype=dtype, is_bias=True) + self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"] + def forward(self, input): if in_dygraph_mode(): pre_bias = _varbase_creator(dtype=input.dtype) core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False, - 'transpose_Y', False, "alpha", 1) + 'transpose_Y', False, "alpha", 1, "use_mkldnn", + self._use_mkldnn) pre_act = dygraph_utils._append_bias_in_dygraph( - pre_bias, self.bias, axis=len(input.shape) - 1) + pre_bias, + self.bias, + axis=len(input.shape) - 1, + use_mkldnn=self._use_mkldnn) - return dygraph_utils._append_activation_in_dygraph(pre_act, - self._act) + return dygraph_utils._append_activation_in_dygraph( + pre_act, self._act, use_mkldnn=self._use_mkldnn) check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], "Linear") @@ -976,6 +988,7 @@ def forward(self, input): "transpose_X": False, "transpose_Y": False, "alpha": 1, + "use_mkldnn": self._use_mkldnn, } inputs = {"X": [input], "Y": [self.weight]} @@ -990,7 +1003,10 @@ def forward(self, input): inputs={'X': [tmp], 'Y': [self.bias]}, outputs={'Out': [pre_activation]}, - attrs={'axis': len(input.shape) - 1}) + attrs={ + 'axis': len(input.shape) - 1, + 'use_mkldnn': self._use_mkldnn + }) else: pre_activation = tmp return self._helper.append_activation(pre_activation, act=self._act) @@ -1250,6 +1266,7 @@ def __init__(self, self._param_attr = param_attr self._bias_attr = bias_attr self._act = act + self._use_mkldnn = core.globals()["FLAGS_use_mkldnn"] assert bias_attr is not False, "bias_attr should not be False in batch_norm." 
@@ -1314,8 +1331,8 @@ def forward(self, input): if in_dygraph_mode(): attrs = ("momentum", self._momentum, "epsilon", self._epsilon, "is_test", not self.training, "data_layout", - self._data_layout, "use_mkldnn", False, "fuse_with_relu", - self._fuse_with_relu, "use_global_stats", + self._data_layout, "use_mkldnn", self._use_mkldnn, + "fuse_with_relu", self._fuse_with_relu, "use_global_stats", self._use_global_stats, 'trainable_statistics', self._trainable_statistics) batch_norm_out, _, _, _, _, _ = core.ops.batch_norm( @@ -1323,7 +1340,7 @@ def forward(self, input): mean_out, variance_out, *attrs) return dygraph_utils._append_activation_in_dygraph( - batch_norm_out, act=self._act) + batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn) check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm') diff --git a/python/paddle/fluid/dygraph_utils.py b/python/paddle/fluid/dygraph_utils.py index 7b559494e6c3b..a2338b874f51a 100644 --- a/python/paddle/fluid/dygraph_utils.py +++ b/python/paddle/fluid/dygraph_utils.py @@ -45,17 +45,19 @@ def _append_activation_in_dygraph(input, @dygraph_only -def _append_bias_in_dygraph(input, bias=None, axis=1): +def _append_bias_in_dygraph(input, bias=None, axis=1, use_mkldnn=False): """Append bias operation in dygraph mode. Args: input: the input variable. bias: the bias to be appended axis: the axis to perform operation + use_mkldnn: whether to use mkldnn Return the Variable after bias operation """ if bias is None: return input - return core.ops.elementwise_add(input, bias, 'axis', axis) + return core.ops.elementwise_add(input, bias, 'axis', axis, 'use_mkldnn', + use_mkldnn) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 446510121e72a..3595406a011ce 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -11414,7 +11414,12 @@ def gen_data(): """ if in_dygraph_mode(): return _elementwise_op_in_dygraph( - x, y, axis=axis, act=act, op_name='elementwise_add') + x, + y, + axis=axis, + act=act, + op_name='elementwise_add', + use_mkldnn=core.globals()["FLAGS_use_mkldnn"]) return _elementwise_op(LayerHelper('elementwise_add', **locals())) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index 8a88c2d673c4d..f83f8ef35215e 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -21,6 +21,7 @@ from paddle.fluid import Linear from test_imperative_base import new_program_scope import paddle.fluid.dygraph_utils as dygraph_utils +from paddle.fluid.dygraph.layer_object_helper import LayerObjectHelper import paddle @@ -629,6 +630,16 @@ def test_append_activation_in_dygraph2(self): res2 = fluid.layers.sigmoid(a) self.assertTrue(np.allclose(res1.numpy(), res2.numpy())) + def test_append_activation_in_dygraph3(self): + a_np = np.random.random(size=(10, 20, 30)).astype(np.float32) + helper = LayerObjectHelper(fluid.unique_name.generate("test")) + func = helper.append_activation + with fluid.dygraph.guard(): + a = fluid.dygraph.to_variable(a_np) + res1 = func(a, act="sigmoid", use_cudnn=True) + res2 = fluid.layers.sigmoid(a) + self.assertTrue(np.array_equal(res1.numpy(), res2.numpy())) + def test_append_bias_in_dygraph_exception(self): with new_program_scope(): np_inp = np.random.random(size=(10, 20, 30)).astype(np.float32) From 2101dfd2b3552defe4e6e14f7eb96321ffe41fc2 Mon Sep 17 00:00:00 2001 From: 
wangchaochaohu Date: Wed, 19 Aug 2020 07:40:44 +0800 Subject: [PATCH 06/37] =?UTF-8?q?=E3=80=90API2.0=E3=80=91add=20Chunk=20API?= =?UTF-8?q?=20(#26314)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- python/paddle/__init__.py | 1 + .../fluid/tests/unittests/test_chunk_op.py | 138 ++++++++++++++++++ python/paddle/tensor/__init__.py | 1 + python/paddle/tensor/manipulation.py | 48 ++++++ 4 files changed, 188 insertions(+) create mode 100644 python/paddle/fluid/tests/unittests/test_chunk_op.py diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index d99f51b666227..518e2c0c4d90d 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -126,6 +126,7 @@ from .tensor.manipulation import flip #DEFINE_ALIAS from .tensor.manipulation import unbind #DEFINE_ALIAS from .tensor.manipulation import roll #DEFINE_ALIAS +from .tensor.manipulation import chunk #DEFINE_ALIAS from .tensor.math import abs #DEFINE_ALIAS from .tensor.math import acos #DEFINE_ALIAS from .tensor.math import asin #DEFINE_ALIAS diff --git a/python/paddle/fluid/tests/unittests/test_chunk_op.py b/python/paddle/fluid/tests/unittests/test_chunk_op.py new file mode 100644 index 0000000000000..043b326fbd987 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_chunk_op.py @@ -0,0 +1,138 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import print_function + +import unittest +import numpy as np +from op_test import OpTest +import numpy as np +from paddle.fluid import Program, program_guard +from paddle import fluid +import paddle + + +class TestChunkOpError(unittest.TestCase): + def test_errors(self): + with program_guard(Program(), Program()): + # The type of axis in chunk_op should be int or Variable. + def test_axis_type(): + x1 = paddle.data(shape=[4], dtype='float16', name='x3') + paddle.chunk(x=x1, chunks=2, axis=3.2) + + self.assertRaises(TypeError, test_axis_type) + + # The type of axis in chunk op should be int or Variable. + def test_axis_variable_type(): + x2 = paddle.data(shape=[4], dtype='float16', name='x9') + x3 = paddle.data(shape=[1], dtype='float16', name='x10') + paddle.chunk(input=x2, chunks=2, axis=x3) + + self.assertRaises(TypeError, test_axis_variable_type) + + # The type of num_or_sections in chunk_op should be int, tuple or list. 
+ def test_chunks_type(): + x4 = paddle.data(shape=[4], dtype='float16', name='x4') + paddle.chunk(input=x4, chunks=2.1, axis=3) + + self.assertRaises(TypeError, test_chunks_type) + + def test_axis_type_tensor(): + x5 = paddle.data(shape=[4], dtype='float16', name='x6') + paddle.chunk(input=x5, chunks=2, axis=3.2) + + self.assertRaises(TypeError, test_axis_type_tensor) + + +class API_TestChunk(unittest.TestCase): + def test_out(self): + with fluid.program_guard(fluid.Program(), fluid.Program()): + data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64') + data2 = paddle.data('data2', shape=[1], dtype='int32') + x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=data2) + place = paddle.CPUPlace() + exe = paddle.static.Executor(place) + input1 = np.random.random([4, 6, 6]).astype('float64') + input2 = np.array([2]).astype('int32') + r0, r1, r2, = exe.run(feed={"data1": input1, + "data2": input2}, + fetch_list=[x0, x1, x2]) + ex_x0, ex_x1, ex_x2 = np.array_split(input1, 3, axis=2) + self.assertTrue(np.allclose(ex_x0, r0)) + self.assertTrue(np.allclose(ex_x1, r1)) + self.assertTrue(np.allclose(ex_x2, r2)) + + +class API_TestChunk1(unittest.TestCase): + def test_out(self): + with fluid.program_guard(fluid.Program(), fluid.Program()): + data1 = paddle.data('data1', shape=[4, 6, 6], dtype='float64') + x0, x1, x2 = paddle.chunk(data1, chunks=3, axis=2) + place = paddle.CPUPlace() + exe = paddle.static.Executor(place) + input1 = np.random.random([4, 6, 6]).astype('float64') + r0, r1, r2, = exe.run(feed={"data1": input1}, + fetch_list=[x0, x1, x2]) + ex_x0, ex_x1, ex_x2 = np.array_split(input1, 3, axis=2) + self.assertTrue(np.allclose(ex_x0, r0)) + self.assertTrue(np.allclose(ex_x1, r1)) + self.assertTrue(np.allclose(ex_x2, r2)) + + +class API_TestDygraphChunk(unittest.TestCase): + def test_out1(self): + with fluid.dygraph.guard(): + input_1 = np.random.random([4, 6, 6]).astype("int32") + # input is a variable which shape is [4, 6, 6] + input = fluid.dygraph.to_variable(input_1) + x0, x1, x2 = paddle.chunk(input, chunks=3, axis=1) + x0_out = x0.numpy() + x1_out = x1.numpy() + x2_out = x2.numpy() + ex_x0, ex_x1, ex_x2 = np.array_split(input_1, 3, axis=1) + self.assertTrue(np.allclose(ex_x0, x0_out)) + self.assertTrue(np.allclose(ex_x1, x1_out)) + self.assertTrue(np.allclose(ex_x2, x2_out)) + + def test_out2(self): + with fluid.dygraph.guard(): + input_1 = np.random.random([4, 6, 6]).astype("bool") + # input is a variable which shape is [4, 6, 6] + input = fluid.dygraph.to_variable(input_1) + x0, x1, x2 = paddle.chunk(input, chunks=3, axis=1) + x0_out = x0.numpy() + x1_out = x1.numpy() + x2_out = x2.numpy() + ex_x0, ex_x1, ex_x2 = np.array_split(input_1, 3, axis=1) + self.assertTrue(np.allclose(ex_x0, x0_out)) + self.assertTrue(np.allclose(ex_x1, x1_out)) + self.assertTrue(np.allclose(ex_x2, x2_out)) + + def test_axis_tensor_input(self): + with fluid.dygraph.guard(): + input_1 = np.random.random([4, 6, 6]).astype("int32") + # input is a variable which shape is [4, 6, 6] + input = fluid.dygraph.to_variable(input_1) + num1 = paddle.full(shape=[1], fill_value=1, dtype='int32') + x0, x1, x2 = paddle.chunk(input, chunks=3, axis=num1) + x0_out = x0.numpy() + x1_out = x1.numpy() + x2_out = x2.numpy() + ex_x0, ex_x1, ex_x2 = np.array_split(input_1, 3, axis=1) + self.assertTrue(np.allclose(ex_x0, x0_out)) + self.assertTrue(np.allclose(ex_x1, x1_out)) + self.assertTrue(np.allclose(ex_x2, x2_out)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/tensor/__init__.py 
b/python/paddle/tensor/__init__.py index 2ab604638afae..77d821d56b8e1 100644 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -99,6 +99,7 @@ from .manipulation import flip #DEFINE_ALIAS from .manipulation import unbind #DEFINE_ALIAS from .manipulation import roll #DEFINE_ALIAS +from .manipulation import chunk #DEFINE_ALIAS from .math import abs #DEFINE_ALIAS from .math import acos #DEFINE_ALIAS from .math import asin #DEFINE_ALIAS diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index b60ffe9210d2a..2c8157645de29 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -56,6 +56,7 @@ 'shard_index', 'slice', 'split', + 'chunk' 'squeeze', 'stack', 'strided_slice', @@ -789,6 +790,53 @@ def unbind(input, axis=0): return outs +def chunk(x, chunks, axis=0, name=None): + """ + Split the input tensor into multiple sub-Tensors. + + Args: + x (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64. + chunks(int): The number of tensor to be split along the certain axis. + axis (int|Tensor, optional): The axis along which to split, it can be a scalar with type + ``int`` or a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``. + If :math::`axis < 0`, the axis to split along is :math:`rank(x) + axis`. Default is 0. + name (str, optional): The default value is None. Normally there is no need for user to set this property. + For more information, please refer to :ref:`api_guide_Name` . + Returns: + list(Tensor): The list of segmented Tensors. + Raises: + TypeError: The data type of ``x`` must be one of bool, float16, float32, float64, int32, int64. + TypeError: ``chunks`` is not int. + TypeError: ``axis`` is not int or Tensor. the data type of ``axis`` must be int32 or int64 when it's a Tensor. + Example: + .. code-block:: python + + import numpy as np + import paddle + + paddle.disable_static() + # x is a Tensor which shape is [3, 9, 5] + x_np = np.random.random([3, 9, 5]).astype("int32") + x = paddle.to_variable(x_np) + + out0, out1, out22 = paddle.chunk(x, chunks=3, axis=1) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + + + # axis is negative, the real axis is (rank(x) + axis) which real + # value is 1. 
+ out0, out1, out2 = paddle.chunk(x, chunks=3, axis=-2) + # out0.shape [3, 3, 5] + # out1.shape [3, 3, 5] + # out2.shape [3, 3, 5] + """ + check_type(chunks, 'chunks', (int), 'chunk') + return paddle.fluid.layers.split( + input=x, num_or_sections=chunks, dim=axis, name=name) + + def tile(x, repeat_times, name=None): """ From 3f816bc8b4136d21f501e1a3c090df192046c46f Mon Sep 17 00:00:00 2001 From: cc <52520497+juncaipeng@users.noreply.github.com> Date: Wed, 19 Aug 2020 10:14:46 +0800 Subject: [PATCH 07/37] [Quantization] Conv2d_transpose and mul support channnelwise quantization (#25639) * Conv2d_transpose and mul support channnelwise quantization, test=develop * Skip collecting out threshold for output tensor of which the type is not fp32 or fp64, test=develop * Fix error in test_user_defined_quantization, test=develop * Add depthwise_conv_bn_fuse, test=develop * Add conv_transpose_bn_fuse_pass for post_training_quant, test=develop --- .../fluid/framework/ir/conv_bn_fuse_pass.cc | 4 + paddle/fluid/framework/ir/conv_bn_fuse_pass.h | 10 + paddle/fluid/operators/fake_dequantize_op.cc | 61 +++++- paddle/fluid/operators/fake_dequantize_op.cu | 45 ++++- paddle/fluid/operators/fake_dequantize_op.h | 14 +- paddle/fluid/operators/fake_quantize_op.cc | 115 +++++++++--- paddle/fluid/operators/fake_quantize_op.cu | 126 +++++++++++-- paddle/fluid/operators/fake_quantize_op.h | 13 +- .../post_training_quantization.py | 139 +++++++++----- .../slim/quantization/quantization_pass.py | 177 +++++++++++------- .../tests/test_user_defined_quantization.py | 53 +++--- .../unittests/test_fake_dequantize_op.py | 81 ++++---- .../tests/unittests/test_fake_quantize_op.py | 52 ++++- 13 files changed, 632 insertions(+), 258 deletions(-) diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc index 60e4ac8cbcfd8..9d3e0806ac79d 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc @@ -368,3 +368,7 @@ REGISTER_PASS(conv_transpose_bn_fuse_pass, paddle::framework::ir::ConvTransposeBNFusePass); REGISTER_PASS(conv_transpose_eltwiseadd_bn_fuse_pass, paddle::framework::ir::ConvTransposeEltwiseAddBNFusePass); +REGISTER_PASS(depthwise_conv_bn_fuse_pass, + paddle::framework::ir::DepthwiseConvBNFusePass); +REGISTER_PASS(depthwise_conv_eltwiseadd_bn_fuse_pass, + paddle::framework::ir::DepthwiseConvEltwiseAddBNFusePass); diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.h b/paddle/fluid/framework/ir/conv_bn_fuse_pass.h index fcdbcf299c504..57a9f69ca15af 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.h +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.h @@ -56,6 +56,16 @@ class ConvTransposeEltwiseAddBNFusePass : public ConvEltwiseAddBNFusePass { std::string conv_type() const { return "conv2d_transpose"; } }; +class DepthwiseConvBNFusePass : public ConvBNFusePass { + public: + std::string conv_type() const { return "depthwise_conv2d"; } +}; + +class DepthwiseConvEltwiseAddBNFusePass : public ConvEltwiseAddBNFusePass { + public: + std::string conv_type() const { return "depthwise_conv2d"; } +}; + } // namespace ir } // namespace framework } // namespace paddle diff --git a/paddle/fluid/operators/fake_dequantize_op.cc b/paddle/fluid/operators/fake_dequantize_op.cc index 0d2b951ee1c54..9b0328b0945ba 100644 --- a/paddle/fluid/operators/fake_dequantize_op.cc +++ b/paddle/fluid/operators/fake_dequantize_op.cc @@ -37,20 +37,49 @@ template struct ChannelDequantizeFunctor { void operator()(const 
platform::CPUDeviceContext& dev_ctx, const framework::Tensor* in, const framework::Tensor** scales, - const int scale_num, T max_range, framework::Tensor* out) { + const int scale_num, T max_range, const int quant_axis, + framework::Tensor* out) { if (scale_num == 1) { - const int channel = in->dims()[0]; + // Dequant op is before quantized op + // Dequantize the weight of quantized op + auto in_dims = in->dims(); + const int64_t channel = in_dims[quant_axis]; const T* scale_factor = scales[0]->data(); - for (int i = 0; i < channel; i++) { - T s = scale_factor[i]; - framework::Tensor one_channel_in = in->Slice(i, i + 1); - framework::Tensor one_channel_out = out->Slice(i, i + 1); - auto in_e = framework::EigenVector::Flatten(one_channel_in); - auto out_e = framework::EigenVector::Flatten(one_channel_out); - auto& dev = *dev_ctx.eigen_device(); - out_e.device(dev) = in_e * s / max_range; + if (quant_axis == 0) { + for (int64_t i = 0; i < channel; i++) { + T s = scale_factor[i]; + framework::Tensor one_channel_in = in->Slice(i, i + 1); + framework::Tensor one_channel_out = out->Slice(i, i + 1); + auto in_e = framework::EigenVector::Flatten(one_channel_in); + auto out_e = framework::EigenVector::Flatten(one_channel_out); + auto& dev = *dev_ctx.eigen_device(); + out_e.device(dev) = in_e * s / max_range; + } + } else if (quant_axis == 1) { + int64_t out_iter = 1; + for (int i = 0; i < quant_axis; i++) { + out_iter *= in_dims[i]; + } + int64_t step_i = in->numel() / out_iter; + int64_t step_j = in->numel() / (out_iter * channel); + auto* in_data = in->data(); + auto* out_data = out->mutable_data(dev_ctx.GetPlace()); + for (int64_t i = 0; i < out_iter; i++) { + for (int64_t j = 0; j < channel; j++) { + auto* cur_in = in_data + i * step_i + j * step_j; + auto* cur_out = out_data + i * step_i + j * step_j; + T s = scale_factor[j]; + for (int64_t k = 0; k < step_j; k++) { + *cur_out = (*cur_in) * s / max_range; + ++cur_in; + ++cur_out; + } + } + } } } else if (scale_num == 2) { + // Dequant op is after quantized op + // Dequantize the output tensor of quantized op int batch_size = in->dims()[0]; int channel = in->dims()[1]; const T* scale_one = scales[0]->data(); @@ -157,6 +186,18 @@ class FakeChannelWiseDequantizeMaxAbsOpMaker "Quantization bit numbers in quantization stage. " "The size of `quant_bits` should be equal to the size of `Scales`.") .SetDefault({8}); + AddAttr("quant_axis", + "(int, default 0) The axis for quantization. " + "For conv2d, depthwise_conv2d, conv2d_transpose " + "and mul, the quant_axis is equal to the cout axis.") + .SetDefault(0) + .AddCustomChecker([](const int& quant_axis) { + PADDLE_ENFORCE_EQ(quant_axis == 0 || quant_axis == 1, true, + platform::errors::InvalidArgument( + "'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); + }); AddComment(R"DOC( FakeChannelWiseDequantizeMaxAbsOp operator. 
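A minimal NumPy sketch of the single-scale (weight) dequantization path implemented by the CPU functor above and by the CUDA kernels that follow; the helper name and tensor shapes are illustrative assumptions, while the rule itself (out = in * scale / max_range, with one scale per slice along quant_axis) is the one this patch adds:

import numpy as np

def channel_wise_dequantize(x, scales, quant_bits=8, quant_axis=0):
    # One scale per channel along quant_axis; max_range = 2^(bits-1) - 1.
    assert quant_axis in [0, 1], "only quant_axis 0 or 1 is supported"
    max_range = (1 << (quant_bits - 1)) - 1
    y = x.astype('float32')
    if quant_axis == 0:      # e.g. conv2d / depthwise_conv2d weights
        for i in range(x.shape[0]):
            y[i] = x[i] * scales[i] / max_range
    else:                    # e.g. conv2d_transpose / mul weights
        for i in range(x.shape[1]):
            y[:, i] = x[:, i] * scales[i] / max_range
    return y

quant_axis=1 is needed because conv2d_transpose and mul keep their output channels on the second axis, which is why those two ops are listed under _channelwise_quant_axis1_ops later in this patch.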
diff --git a/paddle/fluid/operators/fake_dequantize_op.cu b/paddle/fluid/operators/fake_dequantize_op.cu index 02f9dc827d68c..54a92b055a39d 100644 --- a/paddle/fluid/operators/fake_dequantize_op.cu +++ b/paddle/fluid/operators/fake_dequantize_op.cu @@ -45,8 +45,9 @@ struct DequantizeFunctor { }; template -__global__ void DequantizeOneScale(const T* in, const T* scale, T max_range, - int num, int channel, T* out) { +__global__ void DequantizeOneScaleQuantAxis0(const T* in, const T* scale, + T max_range, int num, int channel, + T* out) { int tid = threadIdx.x; int channel_size = num / channel; const T* in_c = in + blockIdx.x * channel_size; @@ -56,6 +57,23 @@ __global__ void DequantizeOneScale(const T* in, const T* scale, T max_range, } } +template +__global__ void DequantizeOneScaleQuantAxis1(const T* in, const T* scale, + T max_range, const int num, + const int cin, const int cout, + T* out) { + int cout_wh_size = num / cin; + int wh_size = cout_wh_size / cout; + + T s = scale[blockIdx.x]; + const T* in_current = in + threadIdx.x * cout_wh_size + blockIdx.x * wh_size; + T* out_current = out + threadIdx.x * cout_wh_size + blockIdx.x * wh_size; + + for (int i = 0; i < wh_size; i++) { + out_current[i] = in_current[i] * s / max_range; + } +} + template __global__ void DequantizeTwoScale(const T* in, const T* scale_one, const T* scale_two, T max_range, int num, @@ -74,18 +92,29 @@ template struct ChannelDequantizeFunctor { void operator()(const platform::CUDADeviceContext& dev_ctx, const framework::Tensor* in, const framework::Tensor** scales, - const int scale_num, T max_range, framework::Tensor* out) { + const int scale_num, T max_range, const int quant_axis, + framework::Tensor* out) { + auto in_dims = in->dims(); const T* in_data = in->data(); T* out_data = out->mutable_data(dev_ctx.GetPlace()); if (scale_num == 1) { int num = in->numel(); - int channel = in->dims()[0]; const T* scale_factor = scales[0]->data(); - int block = 1024; - int grid = channel; - DequantizeOneScale<<>>( - in_data, scale_factor, max_range, num, channel, out_data); + if (quant_axis == 0) { + int grid = in_dims[0]; + int block = 1024; + DequantizeOneScaleQuantAxis0<<>>( + in_data, scale_factor, max_range, num, in_dims[0], out_data); + } else if (quant_axis == 1) { + // Dequantize weight of Cin * Cout * W * H + int grid = in_dims[1]; + int block = in_dims[0]; + DequantizeOneScaleQuantAxis1<<>>( + in_data, scale_factor, max_range, num, in_dims[0], in_dims[1], + out_data); + } } else if (scale_num == 2) { + // Not need to consider quant_axis int num = in->numel(); int batch_size = in->dims()[0]; int channel = in->dims()[1]; diff --git a/paddle/fluid/operators/fake_dequantize_op.h b/paddle/fluid/operators/fake_dequantize_op.h index 500960098f5ce..6ddb12771fd51 100644 --- a/paddle/fluid/operators/fake_dequantize_op.h +++ b/paddle/fluid/operators/fake_dequantize_op.h @@ -33,7 +33,7 @@ template struct ChannelDequantizeFunctor { void operator()(const DeviceContext& dev_ctx, const framework::Tensor* in, const framework::Tensor** scales, const int scale_num, - T max_range, framework::Tensor* out); + T max_range, const int quant_axis, framework::Tensor* out); }; template @@ -63,6 +63,7 @@ class FakeChannelWiseDequantizeMaxAbsKernel : public framework::OpKernel { auto* out = ctx.Output("Out"); auto quant_bits = ctx.Attr>("quant_bits"); + auto quant_axis = ctx.Attr("quant_axis"); int max_range = 1; auto& dev_ctx = ctx.template device_context(); @@ -70,12 +71,12 @@ class FakeChannelWiseDequantizeMaxAbsKernel : public 
framework::OpKernel { int scale_num = scales.size(); if (scale_num == 1) { PADDLE_ENFORCE_EQ( - scales[0]->numel(), in->dims()[0], + scales[0]->numel(), in->dims()[quant_axis], platform::errors::PreconditionNotMet( "The number of first scale values must be the same with " - "first dimension value of Input(X) when the `Scales` has only " - "one element, but %ld != %ld here.", - scales[0]->numel(), in->dims()[0])); + "quant_axis dimension value of Input(X) when the `Scales` has " + "only one element, but %ld != %ld here.", + scales[0]->numel(), in->dims()[quant_axis])); max_range *= (std::pow(2, quant_bits[0] - 1) - 1); } else if (scale_num == 2) { PADDLE_ENFORCE_EQ( @@ -94,7 +95,8 @@ class FakeChannelWiseDequantizeMaxAbsKernel : public framework::OpKernel { (std::pow(2, quant_bits[1] - 1) - 1); } ChannelDequantizeFunctor()( - dev_ctx, in, scales.data(), scale_num, static_cast(max_range), out); + dev_ctx, in, scales.data(), scale_num, static_cast(max_range), + quant_axis, out); } }; diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc index 358f122c8359f..04ac4a35208a5 100644 --- a/paddle/fluid/operators/fake_quantize_op.cc +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/fake_quantize_op.h" +#include #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/operators/clip_op.h" @@ -39,13 +40,41 @@ template struct FindAbsMaxFunctor; template struct FindChannelAbsMaxFunctor { - void operator()(const platform::CPUDeviceContext& ctx, const T* in, - const int num, const int channel, T* out) { - const int channel_size = num / channel; - for (int i = 0; i < channel; i++) { - auto* start = in + i * channel_size; - auto* end = in + (i + 1) * channel_size; - out[i] = std::abs(*(std::max_element(start, end, Compare()))); + void operator()(const platform::CPUDeviceContext& ctx, + const framework::Tensor& in_tensor, const int quant_axis, + T* out_abs_max) { + // At present, channelwise quantization supports conv2d, depthwise_conv2d + // conv2d_transpose and mul + PADDLE_ENFORCE_EQ( + quant_axis == 0 || quant_axis == 1, true, + platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); + auto* in_data = in_tensor.data(); + auto in_dims = in_tensor.dims(); + const int64_t channel = in_dims[quant_axis]; + if (quant_axis == 0) { + const int64_t channel_size = in_tensor.numel() / channel; + for (int64_t i = 0; i < channel; i++) { + auto* start = in_data + i * channel_size; + auto* end = in_data + (i + 1) * channel_size; + out_abs_max[i] = + std::abs(*(std::max_element(start, end, Compare()))); + } + } else if (quant_axis == 1) { + for (int64_t i = 0; i < channel; i++) { + out_abs_max[i] = 0; + } + const int64_t step_i = in_tensor.numel() / in_dims[0]; + const int64_t step_j = in_tensor.numel() / (in_dims[0] * in_dims[1]); + for (int64_t i = 0; i < in_dims[0]; i++) { + for (int64_t j = 0; j < in_dims[1]; j++) { + auto* start = in_data + i * step_i + j * step_j; + auto* end = in_data + i * step_i + (j + 1) * step_j; + T abs_max = std::abs(*(std::max_element(start, end, Compare()))); + out_abs_max[j] = std::max(out_abs_max[j], abs_max); + } + } } } }; @@ -92,26 +121,53 @@ template struct ChannelClipAndFakeQuantFunctor { void operator()(const platform::CPUDeviceContext& ctx, const framework::Tensor& in, const framework::Tensor& scale, - const int 
bin_cnt, const int channel, + const int bin_cnt, const int quant_axis, framework::Tensor* out) { + // At present, channelwise quantization supports conv2d, depthwise_conv2d + // conv2d_transpose and mul + PADDLE_ENFORCE_EQ( + quant_axis == 0 || quant_axis == 1, true, + platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); auto* scale_data = scale.data(); auto* in_data = in.data(); auto* out_data = out->mutable_data(ctx.GetPlace()); - const int channel_size = in.numel() / channel; + auto in_dims = in.dims(); + const int64_t channel = in_dims[quant_axis]; platform::Transform trans; - for (int i = 0; i < channel; i++) { - T s = scale_data[i]; - auto* start = in_data + i * channel_size; - auto* end = in_data + (i + 1) * channel_size; - trans(ctx, start, end, out_data + i * channel_size, - ClipFunctor(-s, s)); - } - for (int i = 0; i < channel; i++) { - T s = scale_data[i]; - T inv_s = inverse(s); - framework::Tensor one_channel_out = out->Slice(i, i + 1); - auto out_e = framework::EigenVector::Flatten(one_channel_out); - out_e.device(*ctx.eigen_device()) = (bin_cnt * inv_s * out_e).round(); + if (quant_axis == 0) { + const int64_t channel_size = in.numel() / channel; + for (int64_t i = 0; i < channel; i++) { + T s = scale_data[i]; + auto* start = in_data + i * channel_size; + auto* end = in_data + (i + 1) * channel_size; + trans(ctx, start, end, out_data + i * channel_size, + ClipFunctor(-s, s)); + } + for (int64_t i = 0; i < channel; i++) { + T s = scale_data[i]; + T inv_s = inverse(s); + framework::Tensor one_channel_out = out->Slice(i, i + 1); + auto out_e = framework::EigenVector::Flatten(one_channel_out); + out_e.device(*ctx.eigen_device()) = (bin_cnt * inv_s * out_e).round(); + } + } else if (quant_axis == 1) { + const int64_t step_i = in.numel() / in_dims[0]; + const int64_t step_j = in.numel() / (in_dims[0] * in_dims[1]); + for (int i = 0; i < in_dims[0]; i++) { + for (int j = 0; j < in_dims[1]; j++) { + T s = scale_data[j]; + T inv_s = inverse(s); + auto* start = in_data + i * step_i + j * step_j; + auto* end = in_data + i * step_i + (j + 1) * step_j; + auto* cur_out_data = out_data + i * step_i + j * step_j; + trans(ctx, start, end, cur_out_data, ClipFunctor(-s, s)); + for (int k = 0; k < step_j; k++) { + cur_out_data[k] = std::round(bin_cnt * inv_s * cur_out_data[k]); + } + } + } } } }; @@ -247,8 +303,9 @@ class FakeChannelWiseQuantizeAbsMaxOp : public framework::OperatorWithKernel { "FakeChannelWiseQuantizeAbsMax"); OP_INOUT_CHECK(ctx->HasOutput("OutScale"), "Output", "OutScale", "FakeChannelWiseQuantizeAbsMax"); + int quant_axis = ctx->Attrs().Get("quant_axis"); ctx->SetOutputDim("Out", ctx->GetInputDim("X")); - ctx->SetOutputDim("OutScale", {ctx->GetInputDim("X")[0]}); + ctx->SetOutputDim("OutScale", {ctx->GetInputDim("X")[quant_axis]}); ctx->ShareLoD("X", /*->*/ "Out"); } @@ -269,6 +326,18 @@ class FakeChannelWiseQuantizeAbsMaxOpMaker "(Tensor) Output of quantized low level tensor, " "but also saved as float data type."); AddOutput("OutScale", "(Tensor) Current channel wise scale"); + AddAttr("quant_axis", + "(int, default 0) The axis for quantization. 
" + "For conv2d, depthwise_conv2d, conv2d_transpose " + "and mul, the quant_axis is equal to the cout axis.") + .SetDefault(0) + .AddCustomChecker([](const int& quant_axis) { + PADDLE_ENFORCE_EQ(quant_axis == 0 || quant_axis == 1, true, + platform::errors::InvalidArgument( + "'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); + }); AddAttr("bit_length", "(int, default 8)") .SetDefault(8) .AddCustomChecker([](const int& bit_length) { diff --git a/paddle/fluid/operators/fake_quantize_op.cu b/paddle/fluid/operators/fake_quantize_op.cu index 75a55fa821f0a..6ff3c7ec632f2 100644 --- a/paddle/fluid/operators/fake_quantize_op.cu +++ b/paddle/fluid/operators/fake_quantize_op.cu @@ -75,8 +75,8 @@ struct FindAbsMaxFunctor { template struct FindAbsMaxFunctor; template -__global__ void FindChannelAbsMaxKernel(const T* in, const int n, const int c, - T* out) { +__global__ void FindChannelAbsMaxKernelQuantAxis0(const T* in, const int n, + const int c, T* out) { int tid = threadIdx.x; int channel_size = n / c; const T* in_c = in + blockIdx.x * channel_size; @@ -100,14 +100,69 @@ __global__ void FindChannelAbsMaxKernel(const T* in, const int n, const int c, } } +template +__global__ void FindChannelAbsMaxKernelQuantAxis1(const T* in, const int n, + const int cin, const int cout, + T* out) { + extern __shared__ T shared_max_data[]; + int cout_wh_size = n / cin; + int wh_size = n / (cin * cout); + + int tid = threadIdx.x; + int bid = blockIdx.x; + const T* in_current = in + tid * cout_wh_size + bid * wh_size; + shared_max_data[tid] = T(0); + for (int i = 0; i < wh_size; i++) { + T tmp = fabs(in_current[i]); + if (tmp > shared_max_data[tid]) { + shared_max_data[tid] = tmp; + } + } + __syncthreads(); + + int len = blockDim.x; + for (int i = (len + 1) / 2; i > 0; len = i, i = (i + 1) / 2) { + if (tid < i && tid + i < len && + shared_max_data[tid] < shared_max_data[tid + i]) { + shared_max_data[tid] = shared_max_data[tid + i]; + } + if (i == 1) { + i = 0; // break the loop + } + __syncthreads(); + } + if (tid == 0) { + out[bid] = shared_max_data[0]; + } +} + template struct FindChannelAbsMaxFunctor { - void operator()(const platform::CUDADeviceContext& ctx, const T* in, - const int num, const int channel, T* out) { - int block = 1024; - int grid = channel; - FindChannelAbsMaxKernel<<>>( - in, num, channel, out); + void operator()(const platform::CUDADeviceContext& ctx, + const framework::Tensor& in_tensor, const int quant_axis, + T* out_abs_max) { + PADDLE_ENFORCE_EQ( + quant_axis == 0 || quant_axis == 1, true, + platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); + const int num = in_tensor.numel(); + auto in_dims = in_tensor.dims(); + int channel = in_dims[quant_axis]; + const T* in_data = in_tensor.data(); + if (quant_axis == 0) { + int grid = channel; + int block = 1024; + FindChannelAbsMaxKernelQuantAxis0< + T><<>>( + in_data, num, channel, out_abs_max); + } else if (quant_axis == 1) { + int grid = in_dims[1]; + int block = in_dims[0]; + FindChannelAbsMaxKernelQuantAxis1< + T><<>>( + in_data, num, in_dims[0], in_dims[1], out_abs_max); + } } }; @@ -189,10 +244,12 @@ struct ClipAndFakeQuantDequantFunctor { template struct ClipAndFakeQuantDequantFunctor; +// ChannelClipAndQuantKernel for quant_axis is 0 template -__global__ void ChannelClipAndQuantKernel(const T* in, const T* scale, - const int bin_cnt, const int n, - const int c, T* out) { +__global__ void ChannelClipAndQuantKernelQuantAxis0(const T* in, const T* scale, + 
const int bin_cnt, + const int n, const int c, + T* out) { int tid = threadIdx.x; int channel_size = n / c; @@ -211,22 +268,57 @@ __global__ void ChannelClipAndQuantKernel(const T* in, const T* scale, } } +// ChannelClipAndQuantKernel for quant_axis is 1 +template +__global__ void ChannelClipAndQuantKernelQuantAxis1(const T* in, const T* scale, + const int bin_cnt, + const int n, const int cin, + const int cout, T* out) { + T s = scale[blockIdx.x % cout]; + T inv_s = inverse(s); + + int wh_size = n / (cin * cout); + const T* in_c = in + blockIdx.x * wh_size; + T* out_c = out + blockIdx.x * wh_size; + + for (int i = threadIdx.x; i < wh_size; i += blockDim.x) { + T x = in_c[i]; + T v = x > s ? s : x; + v = v < -s ? -s : v; + v = bin_cnt * inv_s * v; + out_c[i] = round(v); + } +} + template struct ChannelClipAndFakeQuantFunctor { void operator()(const platform::CUDADeviceContext& ctx, const framework::Tensor& in, const framework::Tensor& scale, - const int bin_cnt, const int channel, + const int bin_cnt, const int quant_axis, framework::Tensor* out) { - int num = in.numel(); - int block = 1024; - int grid = channel; + PADDLE_ENFORCE_EQ( + quant_axis == 0 || quant_axis == 1, true, + platform::errors::InvalidArgument("'quant_axis' should be 0 or 1, but " + "the received is %d", + quant_axis)); + int num = in.numel(); + auto in_dims = in.dims(); const T* in_data = in.data(); const T* scale_data = scale.data(); T* out_data = out->mutable_data(ctx.GetPlace()); - ChannelClipAndQuantKernel<<>>( - in_data, scale_data, bin_cnt, num, channel, out_data); + if (quant_axis == 0) { + int grid = in_dims[0]; + int block = 1024; + ChannelClipAndQuantKernelQuantAxis0<<>>( + in_data, scale_data, bin_cnt, num, in_dims[0], out_data); + } else if (quant_axis == 1) { + int grid = in_dims[0] * in_dims[1]; + int block = 1024; + ChannelClipAndQuantKernelQuantAxis1<<>>( + in_data, scale_data, bin_cnt, num, in_dims[0], in_dims[1], out_data); + } } }; diff --git a/paddle/fluid/operators/fake_quantize_op.h b/paddle/fluid/operators/fake_quantize_op.h index 4136217fb0c5f..5c6e0b1f6e26d 100644 --- a/paddle/fluid/operators/fake_quantize_op.h +++ b/paddle/fluid/operators/fake_quantize_op.h @@ -61,15 +61,15 @@ struct FindRangeAbsMaxFunctor { template struct FindChannelAbsMaxFunctor { - void operator()(const DeviceContext& ctx, const T* in, const int num, - const int channel, T* out); + void operator()(const DeviceContext& ctx, const framework::Tensor& in_tensor, + const int quant_axis, T* out_abs_max); }; template struct ChannelClipAndFakeQuantFunctor { void operator()(const DeviceContext& ctx, const framework::Tensor& in, const framework::Tensor& scale, const int bin_cnt, - const int channel, framework::Tensor* out); + const int quant_axis, framework::Tensor* out); }; template @@ -144,12 +144,13 @@ class FakeChannelWiseQuantizeAbsMaxKernel : public framework::OpKernel { int bit_length = context.Attr("bit_length"); int bin_cnt = std::pow(2, bit_length - 1) - 1; + int quant_axis = context.Attr("quant_axis"); auto& dev_ctx = context.template device_context(); - FindChannelAbsMaxFunctor()( - dev_ctx, in->data(), in->numel(), in->dims()[0], out_scale_data); + FindChannelAbsMaxFunctor()(dev_ctx, *in, quant_axis, + out_scale_data); ChannelClipAndFakeQuantFunctor()( - dev_ctx, *in, *out_scale, bin_cnt, in->dims()[0], out); + dev_ctx, *in, *out_scale, bin_cnt, quant_axis, out); } }; diff --git a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py 
b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py index 3097e1d82a9cb..244a621611060 100644 --- a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py +++ b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py @@ -29,6 +29,7 @@ from .quantization_pass import _get_op_input_var_names from .quantization_pass import _get_op_output_var_names from .quantization_pass import _get_output_name_index +from .quantization_pass import _channelwise_quant_axis1_ops __all__ = ['PostTrainingQuantization', 'WeightQuantization'] @@ -316,6 +317,7 @@ def __init__(self, self._out_scale_op_list = _out_scale_op_list self._quantized_weight_var_name = set() self._quantized_act_var_name = set() + self.weight_op_pairs = {} self._sampling_data = {} self._quantized_var_kl_threshold = {} self._quantized_var_min = {} @@ -436,6 +438,8 @@ def _optimize_fp32_model(self): graph = IrGraph(core.Graph(self._program.desc), for_test=True) graph = _remove_ctrl_vars(graph) graph = _apply_pass(self._scope, graph, 'conv_bn_fuse_pass') + graph = _apply_pass(self._scope, graph, 'depthwise_conv_bn_fuse_pass') + graph = _apply_pass(self._scope, graph, 'conv_transpose_bn_fuse_pass') self._program = graph.to_program() def _collect_target_varnames(self): @@ -446,10 +450,11 @@ def _collect_target_varnames(self): # TODO(juncaipeng), consider the name_scope of skip_quant _logger.info("Collect quantized variable names ...") - def collect_var_name(var_name_list, persistable_var_names): + def collect_var_name(var_name_list, persistable_var_names, op_type): for var_name in var_name_list: if var_name in persistable_var_names: self._quantized_weight_var_name.add(var_name) + self.weight_op_pairs[var_name] = op_type else: self._quantized_act_var_name.add(var_name) @@ -462,13 +467,15 @@ def collect_var_name(var_name_list, persistable_var_names): # For quantized ops, sample inputs and outputs if op_type in self._quantizable_op_type: collect_var_name( - _get_op_input_var_names(op), persistable_var_names) + _get_op_input_var_names(op), persistable_var_names, op_type) collect_var_name( - _get_op_output_var_names(op), persistable_var_names) + _get_op_output_var_names(op), persistable_var_names, + op_type) # For other op, only sample output scale elif op_type in self._out_scale_op_list: collect_var_name( - _get_op_output_var_names(op), persistable_var_names) + _get_op_output_var_names(op), persistable_var_names, + op_type) def _set_activation_persistable(self): ''' @@ -492,45 +499,75 @@ def _sample_threshold(self): Sample the input threshold(min, max, or abs_max) in every iterations. ''' assert self._algo in ["abs_max", "min_max"], \ - "The algo should be abs_max or min_max to sample min max value." - + "The algo should be abs_max or min_max for _sample_threshold." 
if self._algo == "abs_max": - # Only calculate abs_max value for weight for once - if self._quantized_var_abs_max == {}: - for var_name in self._quantized_weight_var_name: - var_tensor = _load_variable_data(self._scope, var_name) - abs_max_per_channel = [] - for i in range(var_tensor.shape[0]): - abs_max_per_channel.append( - float(np.max(np.abs(var_tensor[i])))) - self._quantized_var_abs_max[var_name] = abs_max_per_channel - for var_name in self._quantized_act_var_name: - var_tensor = _load_variable_data(self._scope, var_name) - abs_max_value = float(np.max(np.abs(var_tensor))) - if (var_name not in self._quantized_var_abs_max) or \ - (abs_max_value > self._quantized_var_abs_max[var_name]): - self._quantized_var_abs_max[var_name] = abs_max_value + self._sample_threshold_abs_max() elif self._algo == "min_max": - if self._quantized_var_min == {} and self._quantized_var_max == {}: - for var_name in self._quantized_weight_var_name: - var_tensor = _load_variable_data(self._scope, var_name) - min_per_channel = [] - max_per_channle = [] - for i in range(var_tensor.shape[0]): - min_per_channel.append(float(np.min(var_tensor[i]))) - max_per_channle.append(float(np.max(var_tensor[i]))) - self._quantized_var_min[var_name] = min_per_channel - self._quantized_var_max[var_name] = max_per_channle - for var_name in self._quantized_act_var_name: + self._sample_threshold_min_max() + + def _sample_threshold_abs_max(self): + assert self._algo == "abs_max", \ + "The algo should be abs_max for _sample_threshold_abs_max." + # Only calculate abs_max value for weight for once + if self._quantized_var_abs_max == {}: + for var_name in self._quantized_weight_var_name: + var_tensor = _load_variable_data(self._scope, var_name) + if self._weight_quantize_type == "abs_max": + abs_max_value = float(np.max(np.abs(var_tensor))) + elif self._weight_quantize_type == "channel_wise_abs_max": + abs_max_value = [] + if self.weight_op_pairs[ + var_name] in _channelwise_quant_axis1_ops: + for i in range(var_tensor.shape[1]): + abs_max_value.append( + float(np.max(np.abs(var_tensor[:, i])))) + else: + for i in range(var_tensor.shape[0]): + abs_max_value.append( + float(np.max(np.abs(var_tensor[i])))) + self._quantized_var_abs_max[var_name] = abs_max_value + + for var_name in self._quantized_act_var_name: + var_tensor = _load_variable_data(self._scope, var_name) + abs_max_value = float(np.max(np.abs(var_tensor))) + if (var_name not in self._quantized_var_abs_max) or \ + (abs_max_value > self._quantized_var_abs_max[var_name]): + self._quantized_var_abs_max[var_name] = abs_max_value + + def _sample_threshold_min_max(self): + assert self._algo == "min_max", \ + "The algo should be min_max for _sample_threshold_min_max." 
+ if self._quantized_var_min == {} and self._quantized_var_max == {}: + for var_name in self._quantized_weight_var_name: var_tensor = _load_variable_data(self._scope, var_name) - min_value = float(np.min(var_tensor)) - max_value = float(np.max(var_tensor)) - if (var_name not in self._quantized_var_min) or \ - (min_value < self._quantized_var_min[var_name]): - self._quantized_var_min[var_name] = min_value - if (var_name not in self._quantized_var_max) or \ - (max_value > self._quantized_var_max[var_name]): - self._quantized_var_max[var_name] = max_value + if self._weight_quantize_type == "abs_max": + min_value = float(np.min(var_tensor)) + max_value = float(np.max(var_tensor)) + elif self._weight_quantize_type == "channel_wise_abs_max": + min_value = [] + max_value = [] + if self.weight_op_pairs[ + var_name] in _channelwise_quant_axis1_ops: + for i in range(var_tensor.shape[1]): + min_value.append(float(np.min(var_tensor[:, i]))) + max_value.append(float(np.max(var_tensor[:, i]))) + else: + for i in range(var_tensor.shape[0]): + min_value.append(float(np.min(var_tensor[i]))) + max_value.append(float(np.max(var_tensor[i]))) + self._quantized_var_min[var_name] = min_value + self._quantized_var_max[var_name] = max_value + + for var_name in self._quantized_act_var_name: + var_tensor = _load_variable_data(self._scope, var_name) + min_value = float(np.min(var_tensor)) + max_value = float(np.max(var_tensor)) + if (var_name not in self._quantized_var_min) or \ + (min_value < self._quantized_var_min[var_name]): + self._quantized_var_min[var_name] = min_value + if (var_name not in self._quantized_var_max) or \ + (max_value > self._quantized_var_max[var_name]): + self._quantized_var_max[var_name] = max_value def _save_input_threhold(self): ''' @@ -554,11 +591,6 @@ def _sample_data(self, iter): applied in every iteration. ''' assert self._algo == "KL", "The algo should be KL to sample data." 
- for var_name in self._quantized_weight_var_name: - if var_name not in self._sampling_data: - var_tensor = _load_variable_data(self._scope, var_name) - self._sampling_data[var_name] = var_tensor - if self._is_use_cache_file: for var_name in self._quantized_act_var_name: var_tensor = _load_variable_data(self._scope, var_name) @@ -584,15 +616,20 @@ def _calculate_kl_threshold(self): # Abs_max threshold for weights for var_name in self._quantized_weight_var_name: - weight_data = self._sampling_data[var_name] - weight_threshold = None + weight_data = _load_variable_data(self._scope, var_name) if self._weight_quantize_type == "abs_max": - weight_threshold = np.max(np.abs(weight_data)) + weight_threshold = float(np.max(np.abs(weight_data))) elif self._weight_quantize_type == "channel_wise_abs_max": weight_threshold = [] - for i in range(weight_data.shape[0]): - abs_max_value = np.max(np.abs(weight_data[i])) - weight_threshold.append(abs_max_value) + if self.weight_op_pairs[ + var_name] in _channelwise_quant_axis1_ops: + for i in range(weight_data.shape[1]): + weight_threshold.append( + float(np.max(np.abs(weight_data[:, i])))) + else: + for i in range(weight_data.shape[0]): + weight_threshold.append( + float(np.max(np.abs(weight_data[i])))) self._quantized_var_kl_threshold[var_name] = weight_threshold # KL threshold for activations diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py index 8851bcc6440d4..0eef94896287a 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py @@ -111,6 +111,10 @@ "scale": [["X"], ["Out"]], } +_conv_ops = ['conv2d', 'depthwise_conv2d', 'conv2d_transpose'] + +_channelwise_quant_axis1_ops = ['conv2d_transpose', 'mul'] + def _get_op_input_var_names(op): """ """ @@ -185,10 +189,24 @@ def _is_input_all_not_persistable(graph, op_node): return is_input_all_not_persistable +def _check_grandchild_op_node(op_node, grandchild_op_name): + ''' + Check whether the fake_quant node has a grandchild op node named + grandchild_op_name. + ''' + for out1_var_node in op_node.outputs: + for out1_op_node in out1_var_node.outputs: + for out2_var_node in out1_op_node.outputs: + for out2_op_node in out2_var_node.outputs: + if out2_op_node.name() == grandchild_op_name: + return True + return False + + class QuantizationTransformPass(object): """ - Quantize the ops that have weights. Add quant and dequant ops for the quantized - ops's inputs. + Quantize the ops that have weights. Add quant and dequant ops for + the quantized ops's inputs. """ _supported_quantizable_op_type = [ 'conv2d', 'depthwise_conv2d', 'conv2d_transpose', 'mul', 'matmul' @@ -311,8 +329,8 @@ def __init__(self, if weight_quantize_type not in quant_type: raise ValueError( "Unknown weight_quantize_type: '%s'. It can only be " - "'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' or 'moving_average_abs_max'." - % (str(weight_quantize_type))) + "'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' " + "or 'moving_average_abs_max'." % (str(weight_quantize_type))) self._activation_quantize_type = activation_quantize_type self._weight_quantize_type = weight_quantize_type @@ -323,7 +341,6 @@ def __init__(self, for op in self._quantizable_ops: assert op in QuantizationTransformPass._supported_quantizable_op_type, \ op + " is not supported for quantization." 
- self._conv_ops = ['conv2d', 'depthwise_conv2d'] self._quantizable_grad_ops = [ '%s_grad' % (op) for op in self._quantizable_ops ] @@ -356,10 +373,12 @@ def _quant_preprocess(op_node): user_skipped = False if isinstance(self._skip_pattern, list): user_skipped = op_node.op().has_attr("op_namescope") and \ - any(pattern in op_node.op().attr("op_namescope") for pattern in self._skip_pattern) + any(pattern in op_node.op().attr("op_namescope") \ + for pattern in self._skip_pattern) elif isinstance(self._skip_pattern, str): user_skipped = op_node.op().has_attr("op_namescope") and \ - op_node.op().attr("op_namescope").find(self._skip_pattern) != -1 + op_node.op().attr("op_namescope").find( + self._skip_pattern) != -1 if user_skipped: op_node.op()._set_attr("skip_quant", True) @@ -373,15 +392,11 @@ def _transform_forward(graph, op): if var_node.name() in dequantized_vars: dequant_var_node = dequantized_vars[var_node.name()] else: - name = var_node.name() if name in processed_vars: continue - - if var_node.name() in persistable_vars: - is_weight = True - else: - is_weight = False + is_weight = True if var_node.name() in persistable_vars \ + else False # if var node is weight and weight_preprocess_func is not None, # will insert weight preprocess func @@ -415,20 +430,14 @@ def _transform_forward(graph, op): else self._activation_bits quant_type = self._weight_quantize_type if is_weight \ else self._activation_quantize_type - if quant_type == 'channel_wise_abs_max': - assert is_weight, "'channel_wise_abs_max' can only be applied on weights." - if op.name() in self._conv_ops: - quant_var_node, scale_var_node = self._insert_channel_quant_op( - graph, var_node, name, quant_bits) - dequant_var_node = self._insert_channel_dequant_op( - graph, quant_var_node, [scale_var_node], - [quant_bits]) - else: - quant_var_node, scale_var_node = self._insert_quant_op( - graph, var_node, name, quant_bits, 'abs_max') - dequant_var_node = self._insert_dequant_op( - graph, quant_var_node, scale_var_node, - quant_bits) + if quant_type == 'channel_wise_abs_max': # Weight quantization + quant_axis = 1 if op.name() in \ + _channelwise_quant_axis1_ops else 0 + quant_var_node, scale_var_node = self._insert_channel_quant_op( + graph, var_node, name, quant_bits, quant_axis) + dequant_var_node = self._insert_channel_dequant_op( + graph, quant_var_node, [scale_var_node], + [quant_bits], quant_axis) else: quant_var_node, scale_var_node = self._insert_quant_op( graph, var_node, name, quant_bits, quant_type) @@ -529,11 +538,19 @@ def _insert_quant_abs_max_op(self, graph, var_node, name, quant_bits): var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) - scale_var_node = graph.create_var_node( + scale_var_node = graph.create_persistable_node( name=self._quantized_scale_name(name), var_type=var_node.type(), shape=[1], var_dtype=var_node.dtype()) + data_type = 'float64' if var_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + _init_var_node( + scale_var_node, + np.zeros( + scale_var_node.shape(), dtype=data_type), + self._scope, + self._place) quant_op_node = graph.create_op_node( op_type='fake_quantize_abs_max', attrs={ @@ -706,7 +723,8 @@ def _insert_quant_moving_average_abs_max_op(self, graph, var_node, name, return quant_var_node, scale_out_node - def _insert_channel_quant_op(self, graph, var_node, name, quant_bits): + def _insert_channel_quant_op(self, graph, var_node, name, quant_bits, + quant_axis): """ Insert fake_channel_wise_quantize_abs_max op in the graph. 
""" @@ -717,15 +735,24 @@ def _insert_channel_quant_op(self, graph, var_node, name, quant_bits): var_type=var_node.type(), shape=var_node.shape(), var_dtype=var_node.dtype()) - scale_var_node = graph.create_var_node( + scale_var_node = graph.create_persistable_node( name=self._quantized_scale_name(name), var_type=var_node.type(), - shape=[var_node.shape()[0]], + shape=[var_node.shape()[quant_axis]], var_dtype=var_node.dtype()) + data_type = 'float64' if var_node.dtype( + ) == core.VarDesc.VarType.FP64 else 'float32' + _init_var_node( + scale_var_node, + np.zeros( + scale_var_node.shape(), dtype=data_type), + self._scope, + self._place) quant_op_node = graph.create_op_node( op_type='fake_channel_wise_quantize_abs_max', attrs={ 'bit_length': quant_bits, + 'quant_axis': quant_axis, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': var_node}, @@ -763,7 +790,7 @@ def _insert_dequant_op(self, graph, var_node, scale_var_node, quant_bits): return dequant_var_node def _insert_channel_dequant_op(self, graph, var_node, scale_var_nodes, - quant_bits): + quant_bits, quant_axis): """ Insert fake_channel_wise_dequantize_max_abs in the graph. """ @@ -778,6 +805,7 @@ def _insert_channel_dequant_op(self, graph, var_node, scale_var_nodes, op_type='fake_channel_wise_dequantize_max_abs', attrs={ 'quant_bits': quant_bits, + 'quant_axis': quant_axis, 'op_role': core.op_proto_and_checker_maker.OpRole.Forward }, inputs={'X': var_node, @@ -1036,7 +1064,6 @@ def __init__(self, self._weight_bits = weight_bits self._activation_bits = activation_bits self._weight_quantize_type = weight_quantize_type - self._conv_ops = ['conv2d', 'depthwise_conv2d', 'conv2d_transpose'] self._fake_quant_op_names = _fake_quant_op_list self._fake_dequant_op_names = _fake_dequant_op_list self._op_input_rename_map = collections.OrderedDict() @@ -1063,34 +1090,37 @@ def apply(self, graph): if input_arg_name in graph.out_node_mapping_table.keys(): input_arg_name = graph.out_node_mapping_table[ input_arg_name] - if input_arg_name in persistable_vars: - if self._weight_quantize_type == 'abs_max': - param = self._load_var(input_arg_name) - scale_v = np.max(np.abs(param)) - elif self._weight_quantize_type == 'channel_wise_abs_max': - param = self._load_var(input_arg_name) - if len(param.shape) == 4: # conv2d or depthwise_conv2d - scale_v = [] - for i in range(param.shape[0]): - scale_v.append(np.max(np.abs(param[i]))) - else: - scale_v = np.max(np.abs(param)) + if input_arg_name not in persistable_vars: + scale_v = graph._find_node_by_name( + op_node.outputs, op_node.output('OutScale')[0]) + self._quant_var_scale_map[input_arg_name] = scale_v + else: + # Obtain scale from OutScale var node + scale_v = self._load_var(op_node.output('OutScale')[0]) + assert scale_v.ndim in [ + 1, 2 + ], "the dim of scale_v should be 1 or 2" + if scale_v.ndim == 2: + scale_v = scale_v[0] + if scale_v.size == 1: + scale_v = scale_v[0] else: - scale_v = self._load_var( - op_node.output('OutScale')[0])[0] + scale_v = scale_v.tolist() self._quant_var_scale_map[input_arg_name] = scale_v - self._remove_fake_quant_and_dequant_op(graph, op_node) - # quantize weight and restore + # Quantize weight and restore param_v = self._load_var(input_arg_name) - quantized_param_v = self._quant(param_v, scale_v, - self._weight_bits) + if isinstance(scale_v, list) and \ + any(_check_grandchild_op_node(op_node, op) + for op in _channelwise_quant_axis1_ops): + quant_axis = 1 + else: + quant_axis = 0 + quantized_param_v = self._quant( + param_v, scale_v, 
self._weight_bits, quant_axis) self._restore_var(input_arg_name, quantized_param_v) - else: - scale_v = graph._find_node_by_name( - op_node.outputs, op_node.output('OutScale')[0]) - self._quant_var_scale_map[input_arg_name] = scale_v + self._remove_fake_quant_and_dequant_op(graph, op_node) - # Remove all fake dequant op +# Remove all fake dequant op ops = graph.all_op_nodes() for op_node in ops: op_name = op_node.name() @@ -1103,8 +1133,7 @@ def apply(self, graph): op_node_desc = op_node.op() if op_node_desc.has_attr("quantization_type") and \ op_node_desc.attr("quantization_type") == "qat_with_weight": - if self._weight_quantize_type == 'channel_wise_abs_max' \ - and op_node.name() in self._conv_ops: + if self._weight_quantize_type == 'channel_wise_abs_max': self._insert_post_channel_dequant_op(graph, op_node) else: self._insert_post_dequant_op(graph, op_node) @@ -1295,10 +1324,15 @@ def _is_float(self, v): return isinstance(v, float) or isinstance(v, np.float32) \ or isinstance(v, np.float64) - def _quant(self, x, scale, num_bits): + def _quant(self, x, scale, num_bits, quant_axis): + assert quant_axis in [0, 1], 'quant_axis should be 0 or 1 for now.' if isinstance(scale, list): for i, s in enumerate(scale): - x[i] = np.round(x[i] / s * ((1 << (num_bits - 1)) - 1)) + if quant_axis == 0: + x[i] = np.round(x[i] / s * ((1 << (num_bits - 1)) - 1)) + else: + x[:, i] = np.round(x[:, i] / s * ( + (1 << (num_bits - 1)) - 1)) return x else: return np.round(x / scale * ((1 << (num_bits - 1)) - 1)) @@ -1468,6 +1502,10 @@ def apply(self, graph): for op in target_ops: for output_var_name in _get_op_output_var_names(op): in_node = graph._find_node_by_name(op.outputs, output_var_name) + if in_node.dtype() not in \ + [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]: + continue + scale_node = graph.create_persistable_node( name=self._scale_name(in_node.name()), var_type=core.VarDesc.VarType.LOD_TENSOR, @@ -1570,17 +1608,26 @@ def apply(self, graph): if op_node.name() in self._teller_set: var_names = _get_op_output_var_names(op_node) for var_name in var_names: - # For compatibility, we save output threshold by two methods. + in_node = graph._find_node_by_name(op_node.outputs, + var_name) + if in_node.dtype() not in \ + [core.VarDesc.VarType.FP64, core.VarDesc.VarType.FP32]: + continue + scale_name = self._scale_name(var_name) - scale_v = np.array( - self._scope.find_var(scale_name).get_tensor())[0] - op_node.op()._set_attr("out_threshold", float(scale_v)) + scale_var = self._scope.find_var(scale_name) + assert scale_var is not None, \ + "Can not find {} variable in the scope".format(scale_name) + scale_value = np.array(scale_var.get_tensor())[0] + + # For compatibility, we save output threshold by two methods. 
+ op_node.op()._set_attr("out_threshold", float(scale_value)) argname_index = _get_output_name_index(op_node, var_name) assert argname_index is not None, \ var_name + " is not the output of the op" op_node.op()._set_attr(argname_index[0] + str(argname_index[1]) \ - + "_threshold", float(scale_v)) + + "_threshold", float(scale_value)) graph.resolve_hazard() return graph diff --git a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py index c9ea15bf6cde9..32292c8a47b50 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py +++ b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py @@ -33,34 +33,29 @@ os.environ["CPU_NUM"] = "1" -def residual_block(img, label, num=1): - def conv_bn_layer(input, - ch_out, - filter_size, - stride, - padding, - act='relu', - bias_attr=False): - tmp = fluid.layers.conv2d( - input=input, - filter_size=filter_size, - num_filters=ch_out, - stride=stride, - padding=padding, - use_cudnn=False, - act=None, - bias_attr=bias_attr) - return fluid.layers.batch_norm(input=tmp, act=act) - - hidden = img - for _ in six.moves.xrange(num): - conv = conv_bn_layer(hidden, 20, 3, 1, 1, act=None, bias_attr=True) - short = conv_bn_layer(hidden, 20, 1, 1, 0, act=None) - hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu') - fc = fluid.layers.fc(input=hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=fc, label=label) - loss = fluid.layers.mean(loss) - return loss +def conv_net(img, label): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + pool_type='max', + act="relu") + conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + pool_type='avg', + act="relu") + hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu') + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + avg_loss = fluid.layers.mean(loss) + return avg_loss def pact(x, name=None): @@ -102,7 +97,7 @@ def build_program(main, startup, is_test): img.stop_gradient = False label = fluid.layers.data( name='label', shape=[1], dtype='int64') - loss = residual_block(img, label, 1) + loss = conv_net(img, label) if not is_test: opt = fluid.optimizer.SGD(learning_rate=0.0001) opt.minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py index 0812b02b47db7..b30e0a6775ea9 100644 --- a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py +++ b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py @@ -31,45 +31,45 @@ def dequantize_max_abs(x, scale, max_range): return y -def channel_wise_quantize_max_abs(x, quant_bit=8, use_second_dim=False): +def channel_wise_quantize_max_abs(x, quant_bit=8, quant_axis=0): + assert quant_axis in [0, 1], "The quant_axis should be 0 or 1." 
scales = [] - if not use_second_dim: + y = x.copy() + max_range = math.pow(2, quant_bit - 1) - 1 + if quant_axis == 0: for i in range(x.shape[0]): - scales.append(np.max(np.abs(x[i])).astype("float32")) - y = x.copy() - max_range = math.pow(2, quant_bit - 1) - 1 - for i, scale in enumerate(scales): - y[i] = np.round(x[i] / scale * max_range) - else: - for i in range(x.shape[0]): - s = [] - for j in range(x.shape[1]): - s.append(np.max(np.abs(x[i][j])).astype("float32")) - scales.append(s) - scales = np.amax(np.array(scales), axis=0) - y = x.copy() - max_range = math.pow(2, quant_bit - 1) - 1 - for i in range(x.shape[0]): - for j, scale in enumerate(scales): - y[i][j] = np.round(x[i][j] / scale * max_range) + scale = np.max(np.abs(x[i])).astype("float32") + scales.append(scale) + y[i] = np.round(x[i] * max_range / scale) + elif quant_axis == 1: + for i in range(x.shape[1]): + scale = np.max(np.abs(x[:, i])).astype("float32") + scales.append(scale) + y[:, i] = np.round(x[:, i] * max_range / scale) return y, scales def channel_wise_dequantize_max_abs(x, scales, quant_bits, + quant_axis, activation_scale=None): - if activation_scale is None: - y = x.copy() - for i in range(x.shape[0]): - y[i] = (scales[i] / (math.pow(2, quant_bits[0] - 1) - 1)) * x[i] + assert quant_axis in [0, 1], "The quant_axis should be 0 or 1." + + if isinstance(quant_bits, list): + max_range = math.pow(2, quant_bits[0] - 1) - 1 else: - y = x.copy() + max_range = math.pow(2, quant_bits - 1) - 1 + y = x.copy() + if quant_axis == 0: for i in range(x.shape[0]): - for j in range(x.shape[1]): - y[i][j] = (scales[j] / - (math.pow(2, quant_bits[0] - 1) - 1)) * x[i][j] - y *= activation_scale / (math.pow(2, quant_bits[1] - 1) - 1) + y[i] = x[i] * scales[i] / max_range + elif quant_axis == 1: + for i in range(x.shape[1]): + y[:, i] = x[:, i] * scales[i] / max_range + + if activation_scale is not None: + y = y * activation_scale / (math.pow(2, quant_bits[1] - 1) - 1) return y @@ -83,9 +83,8 @@ def setUp(self): self.set_args() self.op_type = "fake_channel_wise_dequantize_max_abs" x = np.random.randn(4, 3, 64, 64).astype(self.data_type) - yq, scales = channel_wise_quantize_max_abs( - x, self.quant_bits[0], use_second_dim=True) - ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits, + yq, scales = channel_wise_quantize_max_abs(x, self.quant_bits[0], 1) + ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits, 1, self.activation_scale) self.inputs = { @@ -105,25 +104,39 @@ class TestFakeChannelWiseDequantizeMaxAbsOpOneScale(OpTest): def set_args(self): self.quant_bits = [8] self.data_type = "float32" + self.quant_axis = 0 def setUp(self): self.set_args() self.op_type = "fake_channel_wise_dequantize_max_abs" x = np.random.randn(4, 3, 64, 64).astype(self.data_type) - yq, scales = channel_wise_quantize_max_abs(x, self.quant_bits[0]) - ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits) + yq, scales = channel_wise_quantize_max_abs(x, self.quant_bits[0], + self.quant_axis) + ydq = channel_wise_dequantize_max_abs(yq, scales, self.quant_bits, + self.quant_axis) self.inputs = { 'X': yq, 'Scales': [("scales0", np.array(scales).astype(self.data_type))] } - self.attrs = {'quant_bits': self.quant_bits} + self.attrs = { + 'quant_bits': self.quant_bits, + 'quant_axis': self.quant_axis + } self.outputs = {'Out': ydq} def test_check_output(self): self.check_output() +class TestFakeChannelWiseDequantizeMaxAbsOpOneScale1( + TestFakeChannelWiseDequantizeMaxAbsOpOneScale): + def set_args(self): + self.quant_bits = 
[8] + self.data_type = "float32" + self.quant_axis = 1 + + class TestFakeDequantizeMaxAbsOp(OpTest): def set_args(self): self.num_bits = 8 diff --git a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py index 1c8335e3bceab..7835fd3f53ddb 100644 --- a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py +++ b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py @@ -72,28 +72,62 @@ def test_check_output(self): class TestFakeChannelWiseQuantizeOp(OpTest): def setUp(self): + self.set_arg() + assert self.quant_axis in [0, 1], "quant_axis should be 0 or 1." + self.op_type = "fake_channel_wise_quantize_abs_max" - self.attrs = {'bit_length': 8} - self.inputs = { - 'X': np.random.random((4, 3, 64, 64)).astype("float32"), - } + self.attrs = {'bit_length': 8, 'quant_axis': self.quant_axis} + scales = [] - for i in range(self.inputs['X'].shape[0]): - scales.append(np.max(np.abs(self.inputs['X'][i])).astype("float32")) outputs = self.inputs['X'].copy() - for i, scale in enumerate(scales): - outputs[i] = np.round(outputs[i] / scale * ( - (1 << (self.attrs['bit_length'] - 1)) - 1)) + bnt = (1 << (self.attrs['bit_length'] - 1)) - 1 + if self.quant_axis == 0: + for i in range(self.inputs['X'].shape[0]): + scale_v = np.max(np.abs(self.inputs['X'][i])).astype("float32") + scales.append(scale_v) + outputs[i] = np.round(outputs[i] / scale_v * bnt) + elif self.quant_axis == 1: + for i in range(self.inputs['X'].shape[1]): + scale_v = np.max(np.abs(self.inputs['X'][:, i])).astype( + "float32") + scales.append(scale_v) + outputs[:, i] = np.round(outputs[:, i] / scale_v * bnt) self.outputs = { 'Out': outputs, 'OutScale': np.array(scales).astype("float32"), } + def set_arg(self): + self.quant_axis = 0 + self.inputs = { + 'X': np.random.random((20, 15, 6, 6)).astype("float32"), + } + def test_check_output(self): self.check_output() +class TestFakeChannelWiseQuantizeOp1(TestFakeChannelWiseQuantizeOp): + def set_quant_axis(self): + self.quant_axis = 1 + self.inputs = { + 'X': np.random.random((15, 20, 5, 5)).astype("float32"), + } + + +class TestFakeChannelWiseQuantizeOp2(TestFakeChannelWiseQuantizeOp): + def set_quant_axis(self): + self.quant_axis = 0 + self.inputs = {'X': np.random.random((30, 15)).astype("float32"), } + + +class TestFakeChannelWiseQuantizeOp3(TestFakeChannelWiseQuantizeOp): + def set_quant_axis(self): + self.quant_axis = 1 + self.inputs = {'X': np.random.random((30, 15)).astype("float32"), } + + class TestFakeQuantizeRangeAbsMaxOp(OpTest): def setUp(self): self.op_type = "fake_quantize_range_abs_max" From 3ec0bcbbb8736fc5b549475714fa9f8db4d7c93b Mon Sep 17 00:00:00 2001 From: Wilber Date: Wed, 19 Aug 2020 10:22:14 +0800 Subject: [PATCH 08/37] [Bug] Fix prune for save_inference_model about transformer (#25347) --- paddle/fluid/framework/prune.cc | 17 +++++++++++++++ paddle/fluid/framework/prune_test.cc | 31 ++++++++++++++++++++++++++++ python/paddle/fluid/layers/rnn.py | 3 ++- 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/framework/prune.cc b/paddle/fluid/framework/prune.cc index 919378c929185..274b0ca0d903d 100644 --- a/paddle/fluid/framework/prune.cc +++ b/paddle/fluid/framework/prune.cc @@ -210,6 +210,23 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, should_run.push_back(true); } else { should_run.push_back(false); + // If the output of an op modifies feed vars, the op should not clip. 
+ // For example, in the transformer structure, the third parameter returned + // by beam_search op is generally assigned to a feed var. Cutting the + // assign op will cause an error. + if (parent_block_id != -1) { + bool flag = false; + for (auto& var : op_desc.outputs()) { + for (auto& argu : var.arguments()) { + if (feed_var_names.count(argu)) { + flag = true; + } + } + } + if (flag) { + should_run.back() = true; + } + } } } diff --git a/paddle/fluid/framework/prune_test.cc b/paddle/fluid/framework/prune_test.cc index eb5c241a8372a..12fa0c61f8121 100644 --- a/paddle/fluid/framework/prune_test.cc +++ b/paddle/fluid/framework/prune_test.cc @@ -185,3 +185,34 @@ TEST(Prune, recurrrent_op) { EXPECT_EQ(pruned.blocks(0).ops_size(), 2); EXPECT_EQ(pruned.blocks(1).ops_size(), 1); } + +// If the output of an op modifies feed vars, the op should not clip. +TEST(Prune, recurrrent_op_2) { + f::ProgramDesc program; + f::BlockDesc *block = program.MutableBlock(0); + f::BlockDesc *sub_block = program.AppendBlock(*block); + AddOp("one_two", {{"input", {"a"}}}, {{"output", {"b", "c"}}}, + f::AttributeMap{}, block); + + std::vector state_var_name(1, "y"); + AddOp("recurrent", {{"input", {"b", "c"}}}, {{"output", {"b1, c1"}}}, + {{"ex_states", state_var_name}, + {"states", state_var_name}, + {"sub_block", sub_block}}, + block); + + EXPECT_TRUE(sub_block != nullptr); + AddOp("rnn_memory_helper", {{"input", {"x"}}}, {{"output", {"a"}}}, + f::AttributeMap{}, sub_block); + + f::proto::ProgramDesc *pdesc = program.Proto(); + pdesc->mutable_blocks(0)->mutable_ops(1)->set_is_target(true); + + f::proto::ProgramDesc pruned; + std::set feed_var_names = {"x", "a"}; + + f::Prune(*pdesc, feed_var_names, &pruned); + EXPECT_EQ(pruned.blocks_size(), 2); + EXPECT_EQ(pruned.blocks(0).ops_size(), 2); + EXPECT_EQ(pruned.blocks(1).ops_size(), 1); +} diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py index 39d25a9c7ffd4..4ec0770aaf0a5 100644 --- a/python/paddle/fluid/layers/rnn.py +++ b/python/paddle/fluid/layers/rnn.py @@ -3102,7 +3102,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None): 'beam_search_encode') helper = LayerHelper('beam_search_decode', **locals()) sentence_ids = helper.create_variable_for_type_inference(dtype=ids.dtype) - sentence_scores = helper.create_variable_for_type_inference(dtype=ids.dtype) + sentence_scores = helper.create_variable_for_type_inference( + dtype=scores.dtype) helper.append_op( type="beam_search_decode", From ea6716a55b7d6d5b5365bf033584e37b38ca572a Mon Sep 17 00:00:00 2001 From: wanghuancoder Date: Wed, 19 Aug 2020 10:24:17 +0800 Subject: [PATCH 09/37] Add check if fluid.data() variable no feed data (#25858) * add check if fluid.data() variable no feed data, test=develop * Add testcase for feed check, test=develop --- python/paddle/fluid/executor.py | 20 +++++ .../unittests/test_executor_check_feed.py | 84 +++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 python/paddle/fluid/tests/unittests/test_executor_check_feed.py diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index f16da029e29a6..5759b94276351 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -1156,6 +1156,26 @@ def _run_impl(self, program, feed, fetch_list, feed_var_name, compiled = isinstance(program, compiler.CompiledProgram) + # Check if fluid.data() variable no feed data + if use_prune: + if compiled: + global_block = program._program.global_block() + else: + global_block = 
program.global_block() + for varname in global_block.vars: + vardesc = global_block.desc.find_var(cpt.to_bytes(varname)) + varobj = global_block.vars[varname] + + # Can not check var build by fluid.layers.data(), bucause fluid.layers.data() had not set need_check_feed + if vardesc.persistable() == False and \ + vardesc.type() == core.VarDesc.VarType.LOD_TENSOR and \ + vardesc.need_check_feed() == True and \ + varobj._stop_gradient == True and \ + varobj.is_data == True and \ + varobj.belong_to_optimizer == False and \ + varname not in feed: + raise ValueError('Need feed data for variable %s' % varname) + acp._auto_checkpoint(self, program) # For backward compatibility, run directly. diff --git a/python/paddle/fluid/tests/unittests/test_executor_check_feed.py b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py new file mode 100644 index 0000000000000..6b1e3c5a28a54 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_executor_check_feed.py @@ -0,0 +1,84 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest + +import numpy +import paddle.fluid.core as core +import paddle.fluid as fluid + + +class TestExecutor(unittest.TestCase): + def net(self): + lr = fluid.data(name="lr", shape=[1], dtype='float32') + x = fluid.data(name="x", shape=[None, 1], dtype='float32') + y = fluid.data(name="y", shape=[None, 1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + opt = fluid.optimizer.Adam(learning_rate=lr) + opt.minimize(avg_cost) + + return lr, avg_cost + + def test_program_check_feed(self): + main_program = fluid.Program() + startup_program = fluid.Program() + scope = fluid.Scope() + with fluid.program_guard(main_program, startup_program): + with fluid.scope_guard(scope): + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + lr, cost = self.net() + exe.run(startup_program) + train_data = [[1.0], [2.0], [3.0], [4.0]] + y_true = [[2.0], [4.0], [6.0], [8.0]] + a = 0 + with self.assertRaises(ValueError): + exe.run(feed={'x': train_data, + 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + use_prune=True) + + def test_compiled_program_check_feed(self): + main_program = fluid.Program() + startup_program = fluid.Program() + scope = fluid.Scope() + with fluid.program_guard(main_program, startup_program): + with fluid.scope_guard(scope): + cpu = fluid.CPUPlace() + exe = fluid.Executor(cpu) + lr, cost = self.net() + exe.run(startup_program) + compiled_prog = fluid.CompiledProgram( + main_program).with_data_parallel(loss_name=cost.name) + train_data = [[1.0], [2.0], [3.0], [4.0]] + y_true = [[2.0], [4.0], [6.0], [8.0]] + a = 0 + with self.assertRaises(ValueError): + exe.run(compiled_prog, + feed={'x': train_data, + 'lr': a}, + fetch_list=[lr, cost], + return_numpy=False, + use_prune=True) + + +if __name__ == '__main__': + 
unittest.main() From b757466b0d36497736eb31117021e9cbf3a98507 Mon Sep 17 00:00:00 2001 From: Pei Yang Date: Wed, 19 Aug 2020 10:39:18 +0800 Subject: [PATCH 10/37] fix trt dynamic ernie serialization unit test (#26228) --- paddle/fluid/framework/ir/subgraph_detector.cc | 3 ++- paddle/fluid/inference/tests/api/CMakeLists.txt | 11 +---------- .../api/trt_dynamic_shape_ernie_deserialize_test.cc | 5 ++++- .../tests/api/trt_dynamic_shape_ernie_test.cc | 2 +- 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/paddle/fluid/framework/ir/subgraph_detector.cc b/paddle/fluid/framework/ir/subgraph_detector.cc index 62c91af15da60..7979953d7be82 100644 --- a/paddle/fluid/framework/ir/subgraph_detector.cc +++ b/paddle/fluid/framework/ir/subgraph_detector.cc @@ -309,7 +309,8 @@ std::vector> SubgraphDetector::ExtractSubGraphs() { BriefNode *brief_node = itr.second; if (!Agent(brief_node->node).marked()) { - VLOG(4) << brief_node->node->id() << " node not a trt candidate."; + VLOG(4) << brief_node->node->id() << " node named " + << brief_node->node->Name() << " is not a trt candidate."; continue; } diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt index 959ba2288acc0..9a3a73f6c946d 100644 --- a/paddle/fluid/inference/tests/api/CMakeLists.txt +++ b/paddle/fluid/inference/tests/api/CMakeLists.txt @@ -471,19 +471,10 @@ if(WITH_GPU AND TENSORRT_FOUND) inference_download_and_uncompress(${TEST_TRT_ERNIE_MODEL} ${INFERENCE_URL}/tensorrt_test "ernie_model_4_unserialized.tgz") endif() - inference_analysis_test(test_trt_dynamic_shape_ernie_serialize SRCS trt_dynamic_shape_ernie_deserialize_test.cc + inference_analysis_test(test_trt_dynamic_shape_ernie_ser_deser SRCS trt_dynamic_shape_ernie_deserialize_test.cc EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} ARGS --infer_model=${TEST_TRT_ERNIE_MODEL}/ernie_model_4_unserialized) - set(TEST_TRT_ERNIE_SER_MODEL "${TRT_MODEL_INSTALL_DIR}/ernie_test/ernie_model_4_serialized/") - if (NOT EXISTS ${TEST_TRT_ERNIE_SER_MODEL}) - inference_download_and_uncompress(${TEST_TRT_ERNIE_MODEL} ${INFERENCE_URL}/tensorrt_test "ernie_model_4_serialized.tgz") - endif() - - inference_analysis_test(test_trt_dynamic_shape_ernie_deserialize SRCS trt_dynamic_shape_ernie_deserialize_test.cc - EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} - ARGS --infer_model=${TEST_TRT_ERNIE_MODEL}/ernie_model_4_serialized) - endif() set(LITE_MODEL_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/lite") diff --git a/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_deserialize_test.cc b/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_deserialize_test.cc index 6526b87436557..7e5dfa2424dbc 100644 --- a/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_deserialize_test.cc +++ b/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_deserialize_test.cc @@ -123,8 +123,11 @@ void trt_ernie(bool with_fp16, std::vector result) { config.EnableTensorRtEngine(1 << 30, 1, 5, precision, true, false); config.SetTRTDynamicShapeInfo(min_input_shape, max_input_shape, opt_input_shape); + AnalysisConfig* config_deser = new AnalysisConfig(config); + std::vector out_data; - run(config, &out_data); + run(config, &out_data); // serialize + run(*config_deser, &out_data); // deserialize for (size_t i = 0; i < out_data.size(); i++) { EXPECT_NEAR(result[i], out_data[i], 1e-6); } diff --git a/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc b/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc index babe9977cd571..c99ebcdcb5f31 100644 --- 
a/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc +++ b/paddle/fluid/inference/tests/api/trt_dynamic_shape_ernie_test.cc @@ -126,7 +126,7 @@ void trt_ernie(bool with_fp16, std::vector result) { std::vector out_data; run(config, &out_data); for (size_t i = 0; i < out_data.size(); i++) { - EXPECT_NEAR(result[i], out_data[i], 1e-6); + EXPECT_NEAR(result[i], out_data[i], 1e-5); } } From 0a461ac3d56c1cb95c376682ddf623f92c3e5b54 Mon Sep 17 00:00:00 2001 From: Leo Chen Date: Wed, 19 Aug 2020 10:48:37 +0800 Subject: [PATCH 11/37] specify numpy>=1.13 (#26400) --- python/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/requirements.txt b/python/requirements.txt index a055ad92139ce..76e6b5dedbeaf 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,6 +1,6 @@ requests>=2.20.0 -numpy>=1.12, <=1.16.4 ; python_version<"3.5" -numpy>=1.12 ; python_version>="3.5" +numpy>=1.13, <=1.16.4 ; python_version<"3.5" +numpy>=1.13 ; python_version>="3.5" protobuf>=3.1.0 gast==0.3.3 matplotlib<=2.2.4 ; python_version<"3.6" From 61800f4a1e185661b9adc993cd9ce28c7bc725f2 Mon Sep 17 00:00:00 2001 From: Qi Li Date: Wed, 19 Aug 2020 10:57:00 +0800 Subject: [PATCH 12/37] elu gelu relu logsigmoid, test=develop (#26304) * logsigmoid and LogSigmoid, test=develop * add elu gelu relu, test=develop * update to_variable to to_tensor, test=develop * address review comments, test=develop * address review comments, test=develop * change to_variable to to_tensor in test, test=develop --- python/paddle/fluid/layers/ops.py | 5 +- .../tests/unittests/test_activation_op.py | 273 ++++++++++++------ python/paddle/nn/__init__.py | 5 +- python/paddle/nn/functional/activation.py | 192 +++++++++--- python/paddle/nn/layer/activation.py | 179 ++++++++++-- 5 files changed, 503 insertions(+), 151 deletions(-) diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index ac518ac83b7a0..1a61072ace6ff 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -645,6 +645,7 @@ def thresholded_relu(x, threshold=None): _gelu_ = generate_layer_fn('gelu') +@deprecated(since="2.0.0", update_to="paddle.nn.functional.gelu") def gelu(x, approximate=False): locals_var = locals().copy() kwargs = dict() @@ -655,10 +656,6 @@ def gelu(x, approximate=False): gelu.__doc__ = """ - :alias_main: paddle.nn.functional.gelu - :alias: paddle.nn.functional.gelu,paddle.nn.functional.activation.gelu - :old_api: paddle.fluid.layers.gelu - :strong:`GeLU Activation Operator` For more details, see [Gaussian Error Linear Units](https://arxiv.org/abs/1606.08415). 
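For reference only, not part of the patch: the two GELU formulations this commit documents, the exact erf form and the tanh approximation selected by the approximate flag, can be cross-checked with a small NumPy/SciPy sketch. The helper names below are illustrative.

    import numpy as np
    from scipy.special import erf

    def gelu_exact(x):
        # 0.5 * x * (1 + erf(x / sqrt(2)))
        return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

    def gelu_tanh(x):
        # 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))
        return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * np.power(x, 3))))

    x = np.linspace(-3.0, 3.0, 13)
    # The two forms track each other closely; the gap stays within roughly 1e-3 on this range.
    print(np.max(np.abs(gelu_exact(x) - gelu_tanh(x))))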
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py index d4c260c41ef28..aef49df577fe5 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_op.py +++ b/python/paddle/fluid/tests/unittests/test_activation_op.py @@ -118,7 +118,7 @@ def setUp(self): x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = np.log(1 / (1 + np.exp(-x))) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': x} self.outputs = {'Out': out} def test_check_grad(self): @@ -127,6 +127,48 @@ def test_check_grad(self): self.check_grad(['X'], 'Out', max_relative_error=0.008) +class TestLogSigmoidAPI(unittest.TestCase): + # test paddle.nn.LogSigmoid, paddle.nn.functional.logsigmoid + def setUp(self): + self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32') + self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + else paddle.CPUPlace() + + def test_static_api(self): + with paddle.static.program_guard(paddle.static.Program()): + x = paddle.data('X', [11, 17]) + out1 = F.logsigmoid(x) + m = paddle.nn.LogSigmoid() + out2 = m(x) + exe = paddle.static.Executor(self.place) + res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2]) + out_ref = np.log(1 / (1 + np.exp(-self.x_np))) + for r in res: + self.assertEqual(np.allclose(out_ref, r), True) + + def test_dygraph_api(self): + paddle.disable_static(self.place) + x = paddle.to_tensor(self.x_np) + out1 = F.logsigmoid(x) + m = paddle.nn.LogSigmoid() + out2 = m(x) + out_ref = np.log(1 / (1 + np.exp(-self.x_np))) + for r in [out1, out2]: + self.assertEqual(np.allclose(out_ref, r.numpy()), True) + paddle.enable_static() + + def test_errors(self): + with paddle.static.program_guard(paddle.static.Program()): + # The input type must be Variable. + self.assertRaises(TypeError, F.logsigmoid, 1) + # The input dtype must be float16, float32, float64. 
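Aside, not part of the patch: the reference np.log(1 / (1 + np.exp(-x))) used for the logsigmoid tests overflows for large negative inputs. A numerically stable equivalent in plain NumPy, shown purely for illustration, is:

    import numpy as np

    x = np.array([-1000.0, -10.0, 0.0, 10.0])
    naive = np.log(1.0 / (1.0 + np.exp(-x)))  # exp(1000) overflows, so the first entry degrades to -inf
    stable = -np.logaddexp(0.0, -x)           # log(sigmoid(x)) = -log(1 + exp(-x)), computed without overflow
    print(naive)   # approximately [-inf, -10.0000454, -0.6931472, -0.0000454]
    print(stable)  # approximately [-1000., -10.0000454, -0.6931472, -0.0000454]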
+ x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32') + self.assertRaises(TypeError, F.logsigmoid, x_int32) + # support the input dtype is float16 + x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16') + F.logsigmoid(x_fp16) + + class TestTanh(TestActivation, TestParameter): def setUp(self): self.op_type = "tanh" @@ -644,7 +686,7 @@ def setUp(self): x[np.abs(x) < 0.005] = 0.02 out = np.maximum(x, 0) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': x} self.outputs = {'Out': out} def test_check_grad(self): @@ -653,18 +695,46 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') -class TestReluOpError(unittest.TestCase): +class TestReluAPI(unittest.TestCase): + # test paddle.nn.ReLU, paddle.nn.functional.relu + def setUp(self): + self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32') + self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + else paddle.CPUPlace() + + def test_static_api(self): + with paddle.static.program_guard(paddle.static.Program()): + x = paddle.data('X', [10, 12]) + out1 = F.relu(x) + m = paddle.nn.ReLU() + out2 = m(x) + exe = paddle.static.Executor(self.place) + res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2]) + out_ref = np.maximum(self.x_np, 0) + for r in res: + self.assertEqual(np.allclose(out_ref, r), True) + + def test_dygraph_api(self): + paddle.disable_static(self.place) + x = paddle.to_tensor(self.x_np) + out1 = F.relu(x) + m = paddle.nn.ReLU() + out2 = m(x) + out_ref = np.maximum(self.x_np, 0) + for r in [out1, out2]: + self.assertEqual(np.allclose(out_ref, r.numpy()), True) + paddle.enable_static() + def test_errors(self): - with program_guard(Program()): + with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. - self.assertRaises(TypeError, fluid.layers.relu, 1) + self.assertRaises(TypeError, F.relu, 1) # The input dtype must be float16, float32, float64. 
- x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32') - self.assertRaises(TypeError, fluid.layers.relu, x_int32) + x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32') + self.assertRaises(TypeError, F.relu, x_int32) # support the input dtype is float16 - x_fp16 = fluid.layers.data( - name='x_fp16', shape=[12, 10], dtype='float16') - fluid.layers.relu(x_fp16) + x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16') + F.relu(x_fp16) class TestLeakyRelu(TestActivation): @@ -717,7 +787,7 @@ def setUp(self): x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = gelu(x, approximate) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': x} self.outputs = {'Out': out} self.attrs = {"approximate": approximate} @@ -735,7 +805,7 @@ def setUp(self): x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = gelu(x, approximate) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': x} self.outputs = {'Out': out} self.attrs = {"approximate": approximate} @@ -745,6 +815,55 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') +class TestGELUAPI(unittest.TestCase): + # test paddle.nn.GELU, paddle.nn.functional.gelu + def setUp(self): + self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32') + self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + else paddle.CPUPlace() + + def test_static_api(self): + with paddle.static.program_guard(paddle.static.Program()): + x = paddle.data('X', [11, 17]) + out1 = F.gelu(x) + m = paddle.nn.GELU() + out2 = m(x) + exe = paddle.static.Executor(self.place) + res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2]) + out_ref = gelu(self.x_np, False) + for r in res: + self.assertEqual(np.allclose(out_ref, r), True) + + def test_dygraph_api(self): + paddle.disable_static(self.place) + x = paddle.to_tensor(self.x_np) + out1 = F.gelu(x) + m = paddle.nn.GELU() + out2 = m(x) + out_ref = gelu(self.x_np, False) + for r in [out1, out2]: + self.assertEqual(np.allclose(out_ref, r.numpy()), True) + + out1 = F.gelu(x, True) + m = paddle.nn.GELU(True) + out2 = m(x) + out_ref = gelu(self.x_np, True) + for r in [out1, out2]: + self.assertEqual(np.allclose(out_ref, r.numpy()), True) + paddle.enable_static() + + def test_errors(self): + with paddle.static.program_guard(paddle.static.Program()): + # The input type must be Variable. + self.assertRaises(TypeError, F.gelu, 1) + # The input dtype must be float16, float32, float64. + x_int32 = paddle.data(name='x_int32', shape=[11, 17], dtype='int32') + self.assertRaises(TypeError, F.gelu, x_int32) + # support the input dtype is float16 + x_fp16 = paddle.data(name='x_fp16', shape=[11, 17], dtype='float16') + F.gelu(x_fp16) + + class TestBRelu(TestActivation): def setUp(self): self.op_type = "brelu" @@ -894,6 +1013,11 @@ def test_errors(self): fluid.layers.soft_relu(x_fp16) +def elu(x, alpha): + out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) + return out_ref.astype(x.dtype) + + class TestELU(TestActivation): def setUp(self): self.op_type = "elu" @@ -901,7 +1025,7 @@ def setUp(self): x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype) alpha = 1. - out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1)) + out = elu(x, alpha) # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. 
alpha = 1) # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here self.inputs = {'X': x} @@ -914,16 +1038,53 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') -class TestELUOpError(unittest.TestCase): +class TestELUAPI(unittest.TestCase): + # test paddle.nn.ELU, paddle.nn.functional.elu + def setUp(self): + self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32') + self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \ + else paddle.CPUPlace() + + def test_static_api(self): + with paddle.static.program_guard(paddle.static.Program()): + x = paddle.data('X', [10, 12]) + out1 = F.elu(x) + m = paddle.nn.ELU() + out2 = m(x) + exe = paddle.static.Executor(self.place) + res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2]) + out_ref = elu(self.x_np, 1.0) + for r in res: + self.assertEqual(np.allclose(out_ref, r), True) + + def test_dygraph_api(self): + paddle.disable_static(self.place) + x = paddle.to_tensor(self.x_np) + out1 = F.elu(x) + m = paddle.nn.ELU() + out2 = m(x) + out_ref = elu(self.x_np, 1.0) + for r in [out1, out2]: + self.assertEqual(np.allclose(out_ref, r.numpy()), True) + + out1 = F.elu(x, 0.2) + m = paddle.nn.ELU(0.2) + out2 = m(x) + out_ref = elu(self.x_np, 0.2) + for r in [out1, out2]: + self.assertEqual(np.allclose(out_ref, r.numpy()), True) + paddle.enable_static() + def test_errors(self): - with program_guard(Program(), Program()): - # The input type of elu_op must be Variable. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace()) - self.assertRaises(TypeError, fluid.layers.elu, x1) - # The input dtype of elu_op must be float16 float32 or float64. - x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32") - self.assertRaises(TypeError, fluid.layers.elu, x2) + with paddle.static.program_guard(paddle.static.Program()): + # The input type must be Variable. + self.assertRaises(TypeError, F.elu, 1) + # The input dtype must be float16, float32, float64. 
+ x_int32 = paddle.data(name='x_int32', shape=[10, 12], dtype='int32') + self.assertRaises(TypeError, F.elu, x_int32) + # support the input dtype is float16 + x_fp16 = paddle.data(name='x_fp16', shape=[10, 12], dtype='float16') + F.elu(x_fp16) class TestReciprocal(TestActivation): @@ -1422,73 +1583,5 @@ def test_check_grad(self): create_test_act_fp16_class(TestSwish) create_test_act_fp16_class(TestHardSwish) - -class TestNNReluAPI(unittest.TestCase): - def setUp(self): - self.init_data() - - def init_data(self): - self.x_shape = [10, 12] - self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) - self.y = self.ref_forward(self.x) - - def ref_forward(self, x): - return np.maximum(x, 0) - - def ref_backward(self, y, dy): - y_t = y.copy() - y_t[y_t > 0] = 1 - return y_t * dy - - def check_api(self, place=fluid.CPUPlace()): - main_program = Program() - myrelu = nn.ReLU() - with fluid.program_guard(main_program): - x = fluid.data(name='x', shape=self.x_shape) - x.stop_gradient = False - y = myrelu(x) - fluid.backward.append_backward(fluid.layers.mean(y)) - exe = fluid.Executor(place) - out = exe.run(main_program, - feed={'x': self.x}, - fetch_list=[y, y.grad_name, x.grad_name]) - self.assertTrue(np.allclose(out[0], self.y)) - self.assertTrue(np.allclose(out[2], self.ref_backward(self.y, out[1]))) - - with fluid.dygraph.guard(place): - x = fluid.dygraph.to_variable(self.x) - y = myrelu(x) - self.assertTrue(np.allclose(y.numpy(), self.y)) - - def test_check_api(self): - places = [fluid.CPUPlace()] - if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) - for place in places: - self.check_api(place) - - -class TestNNFunctionalReluAPI(unittest.TestCase): - def setUp(self): - self.init_data() - - def init_data(self): - self.x_shape = [10, 12] - self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) - self.y = self.ref_forward(self.x) - - def ref_forward(self, x): - return np.maximum(x, 0) - - def test_check_api(self): - main_program = Program() - with fluid.program_guard(main_program): - x = fluid.data(name='x', shape=self.x_shape) - y = F.relu(x) - exe = fluid.Executor(fluid.CPUPlace()) - out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y]) - self.assertTrue(np.allclose(out[0], self.y)) - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index a52d45521fd1b..6249ce59e21d3 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -51,11 +51,14 @@ from .decode import gather_tree #DEFINE_ALIAS from .input import data #DEFINE_ALIAS # from .input import Input #DEFINE_ALIAS +from .layer.activation import ELU +from .layer.activation import GELU from .layer.activation import Hardshrink # from .layer.activation import PReLU #DEFINE_ALIAS -from .layer.activation import ReLU #DEFINE_ALIAS +from .layer.activation import ReLU from .layer.activation import LeakyReLU #DEFINE_ALIAS from .layer.activation import Sigmoid #DEFINE_ALIAS +from .layer.activation import LogSigmoid # from .layer.activation import Softmax #DEFINE_ALIAS from .layer.activation import LogSoftmax #DEFINE_ALIAS from .layer.activation import HSigmoid #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index 4dd770139db2a..61476a6172941 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -14,13 +14,10 @@ # TODO: define activation functions of neural network from ...fluid.layers import brelu 
#DEFINE_ALIAS -from ...fluid.layers import elu #DEFINE_ALIAS from ...fluid.layers import erf #DEFINE_ALIAS -from ...fluid.layers import gelu #DEFINE_ALIAS from ...fluid.layers import hard_sigmoid #DEFINE_ALIAS from ...fluid.layers import hard_swish #DEFINE_ALIAS from ...fluid.layers import leaky_relu #DEFINE_ALIAS -from ...fluid.layers import logsigmoid #DEFINE_ALIAS from ...fluid.layers import maxout #DEFINE_ALIAS from ...fluid.layers import relu6 #DEFINE_ALIAS from ...fluid.layers import selu #DEFINE_ALIAS @@ -69,6 +66,108 @@ import paddle +def elu(x, alpha=1.0, name=None): + """ + elu activation. + + .. math:: + + elu(x) = max(0, x) + min(0, \\alpha * (e^{x}-1)) + + Parameters: + x (Tensor): The input Tensor with data type float32, float64. + alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Returns: + A Tensor with the same data type and shape as ``x`` . + + Examples: + .. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1,6],[1,15.6]])) + out = F.elu(x, alpha=0.2) + # [[-0.12642411 6. ] + # [ 1. 15.6 ]] + """ + + if in_dygraph_mode(): + return core.ops.elu(x, 'alpha', alpha) + + check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu') + helper = LayerHelper("elu", **locals()) + out = helper.create_variable_for_type_inference(x.dtype) + helper.append_op( + type='elu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'alpha': alpha}) + return out + + +def gelu(x, approximate=False, name=None): + """ + gelu activation. + + if approximate is True + .. math:: + gelu(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3}))) + else + .. math:: + gelu(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}})) + + Parameters: + x (Tensor): The input Tensor with data type float32, float64. + approximate (bool, optional): Wether to enable approximation. Default is False. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Returns: + A Tensor with the same data type and shape as ``x`` . + + Examples: + .. code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + data = np.random.randn(2, 3).astype("float32") + x = paddle.to_tensor(data) + + out = F.gelu(x) + + data + # array([[ 0.87165993, -1.0541513 , -0.37214822], + # [ 0.15647964, 0.32496083, 0.33045998]], dtype=float32) + out + # array([[ 0.70456535, -0.15380788, -0.13207214], + # [ 0.08796856, 0.20387867, 0.2080159 ]], dtype=float32) + """ + + if in_dygraph_mode(): + return core.ops.gelu(x, 'approximate', approximate) + + check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu') + helper = LayerHelper("gelu", **locals()) + out = helper.create_variable_for_type_inference(x.dtype) + helper.append_op( + type='gelu', + inputs={'X': x}, + outputs={'Out': out}, + attrs={'approximate': approximate}) + return out + + def hardshrink(x, threshold=0.5, name=None): """ hard shrinkage activation @@ -245,11 +344,8 @@ def hsigmoid(input, return out -def relu(input, inplace=False, name=None): +def relu(x, name=None): """ - :alias_main: paddle.nn.functional.relu - :alias: paddle.nn.functional.relu,paddle.nn.functional.activation.relu - ReLU Activation. .. 
math: @@ -257,44 +353,74 @@ def relu(input, inplace=False, name=None): out = max(x, 0) Parameters: - input (Variable): The input variable. A multi-dimension Tensor with type float16, float32, or float64. - inplace (bool, optional): If inplace is True, the input and output of ``ReLU`` are the same variable. - Otherwise, the input and output of ``ReLU`` are different variables. Default: False. Note that if x is - more than one OPs' input, inplace must be False. - name (str, optional): The default value is None. Normally there is no need for user to set this property. - For more information, please refer to :ref:`api_guide_Name` . + x (Tensor): The input Tensor with data type float32, float64. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. Returns: - Output of relu operator, a Tensor with shape same as input + A Tensor with the same data type and shape as ``x`` . Examples: .. code-block:: python - import paddle.fluid as fluid - import paddle.nn.functional as functional - import numpy as np + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() - data = np.array([-2, 0, 1]).astype('float32') - with fluid.dygraph.guard(): - data = fluid.dygraph.to_variable(data) - res = functional.relu(data) # [0, 0, 1] + x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32')) + out = F.relu(x) # [0., 0., 1.] """ if in_dygraph_mode(): - if inplace: - warnings.warn( - "Inplace on ReLU is not allowed and will be discarded in dygraph mode currently." - ) - return core.ops.relu(input) - - check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], - 'relu') + return core.ops.relu(x) + check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu') helper = LayerHelper('relu', **locals()) - outs = input if inplace else helper.create_variable_for_type_inference( - input.dtype) - helper.append_op(type='relu', inputs={'X': [input]}, outputs={'Out': outs}) - return outs + out = helper.create_variable_for_type_inference(x.dtype) + helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out}) + return out + + +def logsigmoid(x, name=None): + """ + logsigmoid activation. + + .. math: + + logsigmoid(x) = \log \frac{1}{1 + e^{-x}} + + Parameters: + x (Tensor): The input Tensor with data type float32, float64. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Returns: + A Tensor with the same data type and shape as ``x`` . + + Examples: + .. 
code-block:: python + + import paddle + import paddle.nn.functional as F + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0])) + out = F.logsigmoid(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376] + """ + + if in_dygraph_mode(): + return core.ops.logsigmoid(x) + + check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], + 'logsigmoid') + helper = LayerHelper("logsigmoid", **locals()) + out = helper.create_variable_for_type_inference(x.dtype) + helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out}) + return out def softmax(x, axis=-1, name=None): diff --git a/python/paddle/nn/layer/activation.py b/python/paddle/nn/layer/activation.py index a0b24eebf5555..6b965564fc9c3 100644 --- a/python/paddle/nn/layer/activation.py +++ b/python/paddle/nn/layer/activation.py @@ -15,12 +15,15 @@ # TODO: define activation functions of neural network __all__ = [ + 'ELU', + 'GELU', 'Hardshrink', # 'PReLU', 'ReLU', 'LeakyReLU', 'Sigmoid', # 'Softmax', + 'LogSigmoid', 'LogSoftmax', 'HSigmoid' ] @@ -31,6 +34,103 @@ from .. import functional as F +class ELU(layers.Layer): + """ + ELU Activation. + + .. math:: + + ELU(x) = max(0, x) + min(0, \\alpha * (e^{x}-1)) + + Parameters: + alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Shape: + - input: Tensor with any shape. + - output: Tensor with the same shape as input. + + Examples: + .. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([[-1,6],[1,15.6]])) + m = paddle.nn.ELU(0.2) + out = m(x) + # [[-0.12642411 6. ] + # [ 1. 15.6 ]] + """ + + def __init__(self, alpha=1.0, name=None): + super(ELU, self).__init__() + self._alpha = alpha + self._name = name + + def forward(self, x): + return F.elu(x, self._alpha, self._name) + + +class GELU(layers.Layer): + """ + GELU Activation. + + If approximate is True + + .. math:: + + GELU(x) = 0.5 * x * (1 + tanh(\\sqrt{\\frac{2}{\\pi}} * (x + 0.044715x^{3}))) + + else + + .. math:: + + GELU(x) = 0.5 * x * (1 + erf(\\frac{x}{\\sqrt{2}})) + + Parameters: + approximate (bool, optional): Wether to enable approximation. Default is False. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Shape: + - input: Tensor with any shape. + - output: Tensor with the same shape as input. + + Examples: + .. 
code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + data = np.random.randn(2, 3).astype("float32") + x = paddle.to_tensor(data) + + m = paddle.nn.GELU() + out = m(x) + + data + # array([[ 0.87165993, -1.0541513 , -0.37214822], + # [ 0.15647964, 0.32496083, 0.33045998]], dtype=float32) + out + # array([[ 0.70456535, -0.15380788, -0.13207214], + # [ 0.08796856, 0.20387867, 0.2080159 ]], dtype=float32) + """ + + def __init__(self, approximate=False, name=None): + super(GELU, self).__init__() + self._approximate = approximate + self._name = name + + def forward(self, x): + return F.gelu(x, self._approximate, self._name) + + class Hardshrink(layers.Layer): """ Hardshrink Activation @@ -216,44 +316,39 @@ def forward(self, input, label, path_table=None, path_code=None): class ReLU(layers.Layer): """ - :alias_main: paddle.nn.ReLU - :alias: paddle.nn.ReLU,paddle.nn.layer.ReLU,paddle.nn.layer.activation.ReLU - ReLU Activation. .. math: - out = max(x, 0) + ReLU(x) = max(x, 0) Parameters: - inplace (bool, optional): If inplace is True, the input and output of - ``ReLU`` are the same variable. Otherwise, the input and output of - ``ReLU`` are different variables. Default False. Note that if x is - more than one OPs' input, inplace must be False. - - Returns: - None + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Shape: + - input: Tensor with any shape. + - output: Tensor with the same shape as input. Examples: .. code-block:: python - import paddle.fluid as fluid - import paddle.nn as nn - import numpy as np + import paddle + import numpy as np - data = np.array([-2, 0, 1]).astype('float32') - my_relu = nn.ReLU() - with fluid.dygraph.guard(): - data = fluid.dygraph.to_variable(data) - res = my_relu(data) # [0, 0, 1] + paddle.disable_static() + + x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32')) + m = paddle.nn.ReLU() + out = m(x) # [0., 0., 1.] """ - def __init__(self, inplace=False): + def __init__(self, name=None): super(ReLU, self).__init__() - self._inplace = inplace + self._name = name - def forward(self, input): - return F.relu(input, self._inplace) + def forward(self, x): + return F.relu(x, self._name) class LeakyReLU(layers.Layer): @@ -336,6 +431,44 @@ def forward(self, x): return F.sigmoid(x, self.name) +class LogSigmoid(layers.Layer): + """ + LogSigmoid Activation. + + .. math: + + LogSigmoid(x) = \log \frac{1}{1 + e^{-x}} + + Parameters: + x (Tensor): The input Tensor with data type float32, or float64. + name (str, optional): Name for the operation (optional, default is None). + For more information, please refer to :ref:`api_guide_Name`. + + Shape: + - input: Tensor with any shape. + - output: Tensor with the same shape as input. + + Examples: + .. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + + x = paddle.to_tensor(np.array([1.0, 2.0, 3.0, 4.0])) + m = paddle.nn.LogSigmoid() + out = m(x) # [0.7310586, 0.880797, 0.95257413, 0.98201376] + """ + + def __init__(self, name=None): + super(LogSigmoid, self).__init__() + self._name = name + + def forward(self, x): + return F.logsigmoid(x, self._name) + + class LogSoftmax(layers.Layer): """ This operator implements the log_softmax layer. 
The calculation process is as follows: From da29760d58df7a35f9e7e39e58974e87e4df407a Mon Sep 17 00:00:00 2001 From: Zhou Wei <52485244+zhouwei25@users.noreply.github.com> Date: Wed, 19 Aug 2020 10:58:51 +0800 Subject: [PATCH 13/37] add msvc log from quiet to minimal (#26383) --- paddle/scripts/paddle_build.bat | 36 ++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/paddle/scripts/paddle_build.bat b/paddle/scripts/paddle_build.bat index ad0583c08cd38..d3f3fc79c4d97 100644 --- a/paddle/scripts/paddle_build.bat +++ b/paddle/scripts/paddle_build.bat @@ -25,12 +25,13 @@ mkdir build cd /d build rem ------initialize the virtual environment------ -if not defined PYTHON_ROOT set PYTHON_ROOT=c:\Python37 +if not defined PYTHON_ROOT set PYTHON_ROOT=C:\Python37 +set PATH=%PYTHON_ROOT%;%PYTHON_ROOT%\Scripts;%PATH% + set PYTHON_EXECUTABLE=%PYTHON_ROOT%\python.exe %PYTHON_EXECUTABLE% -m pip install virtualenv -if not exist paddle_winci (%PYTHON_EXECUTABLE% -m virtualenv paddle_winci) +%PYTHON_EXECUTABLE% -m virtualenv paddle_winci call paddle_winci\Scripts\activate.bat -if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL% rem ------pre install requirement---------- where python @@ -38,8 +39,11 @@ where pip pip install --upgrade pip pip install wheel pip install gym -pip install -r %work_dir%\python\requirements.txt -if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL% +pip install -U -r %work_dir%\python\requirements.txt +if %ERRORLEVEL% NEQ 0 ( + call paddle_winci\Scripts\deactivate.bat + exit /b %ERRORLEVEL% +) rem ------initialize common variable------ if not defined CUDA_TOOLKIT_ROOT_DIR set CUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0" @@ -57,7 +61,7 @@ dir %cache_dir% set INFERENCE_DEMO_INSTALL_DIR=%cache_dir:\=/%/inference_demo if not exist %cache_dir%\tools ( - git clone https://github.com/zhouwei25/tools %cache_dir%\tools + git clone https://github.com/zhouwei25/tools.git %cache_dir%\tools if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL% ) @@ -124,7 +128,7 @@ goto:eof :cmake_error call paddle_winci\Scripts\deactivate.bat -echo cmake failed! +echo Cmake failed, will exit! exit /b 1 rem --------------------------------------------------------------------------------------------- @@ -133,39 +137,41 @@ echo ======================================== echo Step 2. Buile Paddle ... echo ======================================== call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64 -set build_times=1 +set build_times=1 :build_tp -echo BUILD THIRD_PARTY %build_times% +echo Build third_party the %build_times% time: msbuild /m /p:Configuration=Release /verbosity:quiet third_party.vcxproj -echo BUILD THIRD_PARTY RESULT %ERRORLEVEL% if %ERRORLEVEL% NEQ 0 ( set /a build_times=%build_times%+1 if %build_times% GTR 3 ( exit /b 1 ) else ( + echo Build third_party failed, will retry! goto :build_tp ) ) +echo Build third_party successfully! set build_times=1 :build_paddle -echo BUILD PADDLE %build_times% -msbuild /m /p:Configuration=Release /verbosity:quiet paddle.sln -echo BUILD PADDLE RESULT %ERRORLEVEL% +echo Build Paddle the %build_times% time: +msbuild /m /p:Configuration=Release /verbosity:minimal paddle.sln if %ERRORLEVEL% NEQ 0 ( set /a build_times=%build_times%+1 if %build_times% GTR 2 ( exit /b 1 ) else ( + echo Build Paddle failed, will retry! goto :build_paddle ) ) +echo Build Paddle successfully! goto:eof :build_error call paddle_winci\Scripts\deactivate.bat -echo build paddle failed! +echo Build Paddle failed, will exit! 
exit /b 7 rem --------------------------------------------------------------------------------------------- @@ -185,6 +191,7 @@ goto:eof :test_whl_pacakage_error call paddle_winci\Scripts\deactivate.bat +echo Pip install whl package failed, will exit! exit /b 3 rem --------------------------------------------------------------------------------------------- @@ -206,6 +213,7 @@ goto:eof :unit_test_error call paddle_winci\Scripts\deactivate.bat +echo Running unit tests failed, will exit! exit /b 8 rem --------------------------------------------------------------------------------------------- From 1fbee267d463c362662a69cbbf16f1e5c6250e80 Mon Sep 17 00:00:00 2001 From: GaoWei8 <53294385+GaoWei8@users.noreply.github.com> Date: Wed, 19 Aug 2020 10:59:42 +0800 Subject: [PATCH 14/37] remove scope in cudnn lstm (#25188) --- paddle/fluid/operators/cudnn_lstm_op.cc | 123 +++++---- paddle/fluid/operators/cudnn_lstm_op.cu.cc | 250 +++++++----------- paddle/fluid/operators/cudnn_rnn_cache.h | 151 +++++------ paddle/fluid/platform/dynload/cudnn.h | 1 + python/paddle/fluid/layers/rnn.py | 19 +- .../tests/unittests/test_lstm_cudnn_op.py | 91 ++++--- .../white_list/no_check_set_white_list.py | 1 + .../white_list/op_threshold_white_list.py | 3 +- 8 files changed, 311 insertions(+), 328 deletions(-) diff --git a/paddle/fluid/operators/cudnn_lstm_op.cc b/paddle/fluid/operators/cudnn_lstm_op.cc index 16e2ca464b5c4..7081490fd1bf0 100644 --- a/paddle/fluid/operators/cudnn_lstm_op.cc +++ b/paddle/fluid/operators/cudnn_lstm_op.cc @@ -24,34 +24,62 @@ class CudnnLSTMOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Input"), - "Input(Input) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("W"), - "Input(Weight) of LSTM should not be null."); - - PADDLE_ENFORCE(ctx->HasInput("InitH"), - "Input(init_h) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("InitC"), - "Input(init_c) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Cache"), - "Input(Cache) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("last_h"), - "Output(last_h) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("last_c"), - "Output(last_c) of LSTM should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "CudnnLSTM"); + OP_INOUT_CHECK(ctx->HasInput("W"), "Input", "W", "CudnnLSTM"); + OP_INOUT_CHECK(ctx->HasInput("InitH"), "Input", "InitH", "CudnnLSTM"); + OP_INOUT_CHECK(ctx->HasInput("InitC"), "Input", "InitC", "CudnnLSTM"); + + OP_INOUT_CHECK(ctx->HasOutput("Reserve"), "Output", "Reserve", "CudnnLSTM"); + OP_INOUT_CHECK(ctx->HasOutput("StateOut"), "Output", "StateOut", + "CudnnLSTM"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "CudnnLSTM"); + OP_INOUT_CHECK(ctx->HasOutput("LastH"), "Output", "LastH", "CudnnLSTM"); + OP_INOUT_CHECK(ctx->HasOutput("LastC"), "Output", "LastC", "CudnnLSTM"); auto in_dims = ctx->GetInputDim("Input"); - PADDLE_ENFORCE_EQ(in_dims.size(), 3, "Input(X)'s rank must be 3."); + auto init_dims = ctx->GetInputDim("InitH"); + PADDLE_ENFORCE_EQ(in_dims.size(), 3, + platform::errors::InvalidArgument( + "The rank of Input in CudnnLSTM must be 3. 
But " + "received Input's rank is %d.", + in_dims.size())); + PADDLE_ENFORCE_EQ(init_dims.size(), 3, + platform::errors::InvalidArgument( + "The rank of InitH in CudnnLSTM must be 3. But " + "received InitH's rank is %d.", + init_dims.size())); + + PADDLE_ENFORCE_EQ(in_dims[1], init_dims[1], + platform::errors::InvalidArgument( + "The in_dims[1] (Input dims) and init_dims[1] (InitH " + "dims) should be equal. But " + "received in_dims[1] is %d and init_dims[1] is %d.", + in_dims[1], init_dims[1])); + PADDLE_ENFORCE_EQ(in_dims[2], init_dims[2], + platform::errors::InvalidArgument( + "The in_dims[2] (Input dims) and init_dims[2] (InitH " + "dims) should be equal. But " + "received in_dims[2] is %d and init_dims[2] is %d.", + in_dims[2], init_dims[2])); auto out_dims = in_dims; auto hidden_size = ctx->Attrs().Get("hidden_size"); - out_dims[2] = hidden_size; + bool is_bidirec = ctx->Attrs().Get("is_bidirec"); + out_dims[2] = is_bidirec ? hidden_size * 2 : hidden_size; + auto last_dims = init_dims; + last_dims[0] = is_bidirec ? last_dims[0] * 2 : last_dims[0]; ctx->SetOutputDim("Out", out_dims); - ctx->SetOutputDim("last_h", ctx->GetInputDim("InitH")); - ctx->SetOutputDim("last_c", ctx->GetInputDim("InitC")); + ctx->SetOutputDim("LastH", last_dims); + ctx->SetOutputDim("LastC", last_dims); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + OperatorWithKernel::IndicateVarDataType(ctx, "Input"), + ctx.device_context()); } }; @@ -84,33 +112,31 @@ class CudnnLSTMOpMaker : public framework::OpProtoAndCheckerMaker { "(Tensor) the learnable hidden-hidden weights." " The shape is (N), where N is total weight size of the LSTM. " " cudnn concatenate all the weight to one Tensor"); - AddInput("Cache", - "The cache of dropout op, a RAW type variable including random " - "number generator states and some descriptors, which is used in " - "cudnn kernel.") - .AsDispensable(); + AddOutput("Reserve", + "(Tensor, a temporary output Tensor to store the reserve_data " + "of cudnn kernel.") + .AsIntermediate(); + AddOutput("StateOut", + "Share memory with State. " + "Store the global drop state when training"); AddOutput("Out", "(Tensor) the hidden state of LSTM operator. " "The shape is ( seq_len x batch_size x hidden_size) if " "is_bidirec is False" "and When is_bidirec is True, the shape will be ( seq_len x " "batch_size x hidden_size * 2) "); - AddOutput("last_h", + AddOutput("LastH", "(Tensor) the hidden state of the last step. 
" "The shape is ( num_layers x batch_size x hidden_size) if " "is_bidirec is False" "and When is_bidirec is True, the shape will be (num_layers*2 x " "batch_size x hidden_size)"); - AddOutput("last_c", + AddOutput("LastC", "(Tensor) the cell state of the last step" "The shape is ( num_layers x batch_size x hidden_size) if " "is_bidirec is False" "and When is_bidirect is True, the shape will be (num_layers*2 x " "batch_size x hidden_size*2)"); - AddAttr("max_len", - "max length of the LSTM op" - "the first dim of the Input can NOT be greater than max_len") - .SetDefault(20); AddAttr( "dropout_prob", "dropout prob of the dropout op" @@ -120,14 +146,14 @@ class CudnnLSTMOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("is_bidirec", "is_bidirec" "if it is bidirectional rnn" - "The will affect the shape of the Out, last_h, and last_c") + "The will affect the shape of the Out, LastH, and LastC") .SetDefault(false); AddAttr("input_size", "input size ot the Input Tensor").SetDefault(10); AddAttr("hidden_size", "hidden size of the LSTM").SetDefault(100); AddAttr("num_layers", "the total layer number of the LSTM") .SetDefault(1); AddAttr("is_test", "True if in test phase.").SetDefault(false); - AddAttr("seed", "seed to used if fix_seed is True").SetDefault(-1); + AddAttr("seed", "seed to used if fix_seed is True").SetDefault(0); AddComment(R"DOC( CUDNN LSTM implementation @@ -172,16 +198,10 @@ class CudnnLSTMGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Input"), - "Input(Input) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Cache"), - "Input(last_c) of LSTM should not be null."); - PADDLE_ENFORCE(ctx->HasInput("InitH"), - "Input(init_h) of LSTM should not be null."); - - PADDLE_ENFORCE(ctx->HasInput("InitC"), - "Input(init_c) of LSTM should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "CudnnLSTMGrad"); + OP_INOUT_CHECK(ctx->HasInput("W"), "Input", "W", "CudnnLSTMGrad"); + OP_INOUT_CHECK(ctx->HasInput("InitH"), "Input", "InitH", "CudnnLSTMGrad"); + OP_INOUT_CHECK(ctx->HasInput("InitC"), "Input", "InitC", "CudnnLSTMGrad"); auto SetOutGradDim = [&ctx](const std::string& name) { auto g_name = framework::GradVarName(name); @@ -195,6 +215,12 @@ class CudnnLSTMGradOp : public framework::OperatorWithKernel { SetOutGradDim("InitH"); SetOutGradDim("InitC"); } + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType( + ctx, framework::GradVarName("Out")), + ctx.device_context()); + } }; template @@ -209,13 +235,12 @@ class CudnnLSTMGradOpMaker : public framework::SingleGradOpMaker { op->SetInput("InitH", this->Input("InitH")); op->SetInput("InitC", this->Input("InitC")); op->SetInput("W", this->Input("W")); - if (this->HasInput("Cache")) { - op->SetInput("Cache", this->Input("Cache")); - } + op->SetInput("Reserve", this->Output("Reserve")); + op->SetInput("StateOut", this->Output("StateOut")); op->SetInput("Out", this->Output("Out")); op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - op->SetInput(framework::GradVarName("last_c"), this->OutputGrad("last_c")); - op->SetInput(framework::GradVarName("last_h"), this->OutputGrad("last_h")); + 
op->SetInput(framework::GradVarName("LastC"), this->OutputGrad("LastC")); + op->SetInput(framework::GradVarName("LastH"), this->OutputGrad("LastH")); op->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input")); op->SetOutput(framework::GradVarName("W"), this->InputGrad("W")); diff --git a/paddle/fluid/operators/cudnn_lstm_op.cu.cc b/paddle/fluid/operators/cudnn_lstm_op.cu.cc index 579dddee8e821..37e5e518ea2af 100644 --- a/paddle/fluid/operators/cudnn_lstm_op.cu.cc +++ b/paddle/fluid/operators/cudnn_lstm_op.cu.cc @@ -15,6 +15,7 @@ limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/cudnn_rnn_cache.h" #include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/cudnn_desc.h" namespace paddle { namespace operators { @@ -33,8 +34,10 @@ class CudnnLSTMGPUKernel : public framework::OpKernel { auto w = ctx.Input("W"); Tensor *out = ctx.Output("Out"); - Tensor *last_h = ctx.Output("last_h"); - Tensor *last_c = ctx.Output("last_c"); + Tensor *last_h = ctx.Output("LastH"); + Tensor *last_c = ctx.Output("LastC"); + Tensor *reserve = ctx.Output("Reserve"); + Tensor *state_out = ctx.Output("StateOut"); const T *x_data = x->data(); const T *init_h_data = init_h->data(); @@ -46,72 +49,56 @@ class CudnnLSTMGPUKernel : public framework::OpKernel { T *last_h_data = last_h->mutable_data(ctx.GetPlace()); T *last_c_data = last_c->mutable_data(ctx.GetPlace()); - size_t max_len = ctx.Attr("max_len"); float dropout_prob = ctx.Attr("dropout_prob"); bool is_bidirec = ctx.Attr("is_bidirec"); - int input_size = ctx.Attr("input_size"); int hidden_size = ctx.Attr("hidden_size"); int num_layers = ctx.Attr("num_layers"); bool is_test = ctx.Attr("is_test"); + int seed = ctx.Attr("seed"); auto &dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); - auto *cache_var = ctx.InputVar("Cache"); - if (!cache_var) { - // The RAW type cache variable wouldn't be created and broadcasted on - // multi-devices before the first running. - // use parent scope to make cache persistable - auto *scope = const_cast(ctx.scope().parent()); - auto cache_var_name = ctx.InputNames("Cache")[0]; - cache_var = scope->Var(cache_var_name); - } - CudnnRNNCache *cudnn_rnn_cache = nullptr; - if (cache_var->IsInitialized()) { - // const_cast is usually bad. - cudnn_rnn_cache = const_cast(cache_var) - ->GetMutable(); - } else { - // const_cast is usually bad. - cudnn_rnn_cache = const_cast(cache_var) - ->GetMutable(); - std::random_device rnd; - int seed = ctx.Attr("seed"); - if (seed == -1) { - seed = rnd(); - } - - auto input_w_numel = w->numel(); - auto batch_size = x->dims()[1]; - cudnn_rnn_cache->init(handle, ctx.GetPlace(), max_len, batch_size, - input_size, hidden_size, num_layers, dropout_prob, - is_bidirec, seed, input_w_numel); - } - auto run_seq_len = x->dims()[0]; + CudnnRNNCache *cudnn_rnn_cache = new CudnnRNNCache(); + + auto input_w_numel = w->numel(); + auto seq_len = x->dims()[0]; + auto batch_size = x->dims()[1]; + auto input_dim = x->dims()[2]; + size_t reserve_size; + bool state_initialized = state_out->IsInitialized() ? 
true : false; + cudnnDataType_t cudnn_type = platform::ToCudnnDataType( + framework::ToDataType(std::type_index(typeid(T)))); + cudnn_rnn_cache->init(handle, ctx.GetPlace(), seq_len, batch_size, + input_dim, hidden_size, num_layers, dropout_prob, + is_bidirec, seed, input_w_numel, &reserve_size, + state_out, state_initialized, cudnn_type); + + auto *reserve_data = reserve->mutable_data( + {static_cast(reserve_size)}, ctx.GetPlace()); if (is_test) { // for inference PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardInference( - handle, cudnn_rnn_cache->rnn_desc_, run_seq_len, - cudnn_rnn_cache->x_desc_, x_data, cudnn_rnn_cache->hx_desc_, - init_h_data, cudnn_rnn_cache->cx_desc_, init_c_data, - cudnn_rnn_cache->w_desc_, w_data, cudnn_rnn_cache->y_desc_, out_data, - cudnn_rnn_cache->hy_desc_, last_h_data, cudnn_rnn_cache->cy_desc_, - last_c_data, cudnn_rnn_cache->workspace_data_.data(), + handle, cudnn_rnn_cache->rnn_desc_, seq_len, cudnn_rnn_cache->x_desc_, + x_data, cudnn_rnn_cache->hx_desc_, init_h_data, + cudnn_rnn_cache->cx_desc_, init_c_data, cudnn_rnn_cache->w_desc_, + w_data, cudnn_rnn_cache->y_desc_, out_data, cudnn_rnn_cache->hy_desc_, + last_h_data, cudnn_rnn_cache->cy_desc_, last_c_data, + cudnn_rnn_cache->workspace_data_.data(), cudnn_rnn_cache->workspace_size_)); } else { // for train PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNForwardTraining( - handle, cudnn_rnn_cache->rnn_desc_, run_seq_len, - cudnn_rnn_cache->x_desc_, x_data, cudnn_rnn_cache->hx_desc_, - init_h_data, cudnn_rnn_cache->cx_desc_, init_c_data, - cudnn_rnn_cache->w_desc_, w_data, cudnn_rnn_cache->y_desc_, out_data, - cudnn_rnn_cache->hy_desc_, last_h_data, cudnn_rnn_cache->cy_desc_, - last_c_data, cudnn_rnn_cache->workspace_data_.data(), - cudnn_rnn_cache->workspace_size_, - cudnn_rnn_cache->reserve_data_.data(), - cudnn_rnn_cache->reserve_size_)); + handle, cudnn_rnn_cache->rnn_desc_, seq_len, cudnn_rnn_cache->x_desc_, + x_data, cudnn_rnn_cache->hx_desc_, init_h_data, + cudnn_rnn_cache->cx_desc_, init_c_data, cudnn_rnn_cache->w_desc_, + w_data, cudnn_rnn_cache->y_desc_, out_data, cudnn_rnn_cache->hy_desc_, + last_h_data, cudnn_rnn_cache->cy_desc_, last_c_data, + cudnn_rnn_cache->workspace_data_.data(), + cudnn_rnn_cache->workspace_size_, reserve_data, reserve_size)); } + delete cudnn_rnn_cache; } }; @@ -123,15 +110,13 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { auto *weight = ctx.Input("W"); auto *init_h = ctx.Input("InitH"); auto *init_c = ctx.Input("InitC"); - // auto * last_h = ctx.Input("last_h"); - // auto * last_c = ctx.Input("last_c"); + auto *reserve = ctx.Input("Reserve"); + auto *state_out = ctx.Input("StateOut"); + auto *out = ctx.Input("Out"); auto *out_grad = ctx.Input(framework::GradVarName("Out")); - auto *last_h_grad = ctx.Input(framework::GradVarName("last_h")); - auto *last_c_grad = ctx.Input(framework::GradVarName("last_c")); - - // auto* init_h = ctx.Input("init_h"); - // auto* init_c = ctx.Input("init_c"); + auto *last_h_grad = ctx.Input(framework::GradVarName("LastH")); + auto *last_c_grad = ctx.Input(framework::GradVarName("LastC")); auto *in_grad = ctx.Output(framework::GradVarName("Input")); auto *weight_grad = ctx.Output(framework::GradVarName("W")); @@ -140,116 +125,75 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { auto &dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); - auto *cache_var = ctx.InputVar("Cache"); - PADDLE_ENFORCE(cache_var->IsInitialized()); - CudnnRNNCache *cudnn_rnn_cache = - 
const_cast(cache_var) - ->GetMutable(); auto input_dims = input->dims(); auto init_h_dims = init_h->dims(); auto init_c_dims = init_c->dims(); - in_grad->mutable_data(ctx.GetPlace()); - weight_grad->mutable_data(ctx.GetPlace()); - math::SetConstant zero; - zero(dev_ctx, in_grad, static_cast(0.0)); - zero(dev_ctx, weight_grad, static_cast(0.0)); - - T *init_h_grad_data = NULL; - if (init_h_grad == nullptr) { - Tensor init_h_grad_temp; - init_h_grad_temp.mutable_data(init_h_dims, ctx.GetPlace()); - zero(dev_ctx, &init_h_grad_temp, static_cast(0.0)); - - init_h_grad_data = init_h_grad_temp.data(); - } else { - init_h_grad->mutable_data(init_h_dims, ctx.GetPlace()); - zero(dev_ctx, init_h_grad, static_cast(0.0)); - init_h_grad_data = init_h_grad->data(); - } - - T *init_c_grad_data = NULL; - if (init_c_grad == nullptr) { - Tensor init_c_grad_temp; - init_c_grad_temp.mutable_data(init_c_dims, ctx.GetPlace()); - zero(dev_ctx, &init_c_grad_temp, static_cast(0.0)); - init_c_grad_data = init_c_grad_temp.data(); - } else { - init_c_grad->mutable_data(init_c_dims, ctx.GetPlace()); - zero(dev_ctx, init_c_grad, static_cast(0.0)); - init_c_grad_data = init_c_grad->data(); - } + auto *weight_data = weight->data(); + auto *init_h_data = init_h->data(); + auto *init_c_data = init_c->data(); + auto *out_data = out->data(); + auto *out_grad_data = out_grad->data(); + auto *last_h_grad_data = last_h_grad->data(); + auto *last_c_grad_data = last_c_grad->data(); - const T *last_h_grad_data = NULL; - if (last_h_grad == nullptr) { - Tensor last_h_grad_temp; - last_h_grad_temp.mutable_data(init_h_dims, ctx.GetPlace()); - zero(dev_ctx, &last_h_grad_temp, static_cast(0.0)); - - last_h_grad_data = (const T *)last_h_grad_temp.data(); - } else { - last_h_grad_data = last_h_grad->data(); - } - - const T *last_c_grad_data = NULL; - if (last_c_grad == nullptr) { - Tensor last_c_grad_temp; - last_c_grad_temp.mutable_data(init_c_dims, ctx.GetPlace()); - zero(dev_ctx, &last_c_grad_temp, static_cast(0.0)); - - last_c_grad_data = (const T *)last_c_grad_temp.data(); - } else { - last_c_grad_data = last_c_grad->data(); - } + math::SetConstant zero; + weight_grad->mutable_data(ctx.GetPlace()); + zero(dev_ctx, weight_grad, static_cast(0.0)); - const T *out_grad_data = NULL; - if (out_grad == nullptr) { - Tensor out_grad_temp; - out_grad_temp.mutable_data(out->dims(), ctx.GetPlace()); - zero(dev_ctx, &out_grad_temp, static_cast(0.0)); + in_grad->mutable_data(input_dims, ctx.GetPlace()); + auto *in_grad_data = in_grad->data(); - out_grad_data = (const T *)out_grad_temp.data(); - } else { - out_grad_data = out_grad->data(); - } + init_h_grad->mutable_data(init_h_dims, ctx.GetPlace()); + auto *init_h_grad_data = init_h_grad->data(); - // zero( dev_ctx, last_h_grad, static_cast(0.0)); - // zero( dev_ctx, last_c_grad, static_cast(0.0)); + init_c_grad->mutable_data(init_c_dims, ctx.GetPlace()); + auto *init_c_grad_data = init_c_grad->data(); - auto out_data = out->data(); - // auto out_grad_data = out_grad->data(); - auto weight_data = weight->data(); - auto init_h_data = init_h->data(); - auto init_c_data = init_c->data(); - auto in_grad_data = in_grad->data(); + float dropout_prob = ctx.Attr("dropout_prob"); + bool is_bidirec = ctx.Attr("is_bidirec"); + int hidden_size = ctx.Attr("hidden_size"); + int num_layers = ctx.Attr("num_layers"); + int seed = ctx.Attr("seed"); + + CudnnRNNCache *cudnn_rnn_cache = new CudnnRNNCache(); + + auto input_w_numel = weight->numel(); + auto seq_len = input_dims[0]; + auto batch_size = input->dims()[1]; 
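+    // Note: the input tensor is time-major here, i.e. dims() = {seq_len, batch_size, input_dim},
+    // which is why the three indices around this line are read in that order.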
+ auto input_dim = input->dims()[2]; + size_t reserve_size; + cudnnDataType_t cudnn_type = platform::ToCudnnDataType( + framework::ToDataType(std::type_index(typeid(T)))); + cudnn_rnn_cache->init(handle, ctx.GetPlace(), seq_len, batch_size, + input_dim, hidden_size, num_layers, dropout_prob, + is_bidirec, seed, input_w_numel, &reserve_size, + const_cast(state_out), true, cudnn_type); auto work_data = cudnn_rnn_cache->workspace_data_.data(); - auto reserve_data = cudnn_rnn_cache->reserve_data_.data(); + const uint8_t *reserve_data = reserve->data(); - auto run_seq_len = input_dims[0]; - PADDLE_ENFORCE_LE((size_t)run_seq_len, cudnn_rnn_cache->max_length_, - "cudnn running seq_len CAN not greater max_lengh"); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardData( - handle, cudnn_rnn_cache->rnn_desc_, run_seq_len, - cudnn_rnn_cache->y_desc_, out_data, cudnn_rnn_cache->dy_desc_, - out_grad_data, cudnn_rnn_cache->dhy_desc_, last_h_grad_data, - cudnn_rnn_cache->dcy_desc_, last_c_grad_data, cudnn_rnn_cache->w_desc_, - weight_data, cudnn_rnn_cache->hx_desc_, init_h_data, - cudnn_rnn_cache->cx_desc_, init_c_data, cudnn_rnn_cache->dx_desc_, - in_grad_data, cudnn_rnn_cache->dhx_desc_, init_h_grad_data, - cudnn_rnn_cache->dcx_desc_, init_c_grad_data, work_data, - cudnn_rnn_cache->workspace_size_, reserve_data, - cudnn_rnn_cache->reserve_size_)); + handle, cudnn_rnn_cache->rnn_desc_, seq_len, cudnn_rnn_cache->y_desc_, + out_data, cudnn_rnn_cache->y_desc_, out_grad_data, + cudnn_rnn_cache->hy_desc_, last_h_grad_data, cudnn_rnn_cache->cy_desc_, + last_c_grad_data, cudnn_rnn_cache->w_desc_, weight_data, + cudnn_rnn_cache->hx_desc_, init_h_data, cudnn_rnn_cache->cx_desc_, + init_c_data, cudnn_rnn_cache->x_desc_, in_grad_data, + cudnn_rnn_cache->hx_desc_, init_h_grad_data, cudnn_rnn_cache->cx_desc_, + init_c_grad_data, work_data, cudnn_rnn_cache->workspace_size_, + const_cast(reserve_data), reserve_size)); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnRNNBackwardWeights( - handle, cudnn_rnn_cache->rnn_desc_, run_seq_len, - cudnn_rnn_cache->x_desc_, input->data(), cudnn_rnn_cache->hx_desc_, - init_h->data(), cudnn_rnn_cache->y_desc_, out->data(), + handle, cudnn_rnn_cache->rnn_desc_, seq_len, cudnn_rnn_cache->x_desc_, + input->data(), cudnn_rnn_cache->hx_desc_, init_h->data(), + cudnn_rnn_cache->y_desc_, out->data(), cudnn_rnn_cache->workspace_data_.data(), - cudnn_rnn_cache->workspace_size_, cudnn_rnn_cache->dw_desc_, - weight_grad->data(), cudnn_rnn_cache->reserve_data_.data(), - cudnn_rnn_cache->reserve_size_)); + cudnn_rnn_cache->workspace_size_, cudnn_rnn_cache->w_desc_, + weight_grad->data(), const_cast(reserve_data), + reserve_size)); + delete cudnn_rnn_cache; } }; @@ -257,5 +201,7 @@ class CudnnLSTMGPUGradKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(cudnn_lstm, ops::CudnnLSTMGPUKernel); -REGISTER_OP_CUDA_KERNEL(cudnn_lstm_grad, ops::CudnnLSTMGPUGradKernel); +REGISTER_OP_CUDA_KERNEL(cudnn_lstm, ops::CudnnLSTMGPUKernel, + ops::CudnnLSTMGPUKernel); +REGISTER_OP_CUDA_KERNEL(cudnn_lstm_grad, ops::CudnnLSTMGPUGradKernel, + ops::CudnnLSTMGPUGradKernel); diff --git a/paddle/fluid/operators/cudnn_rnn_cache.h b/paddle/fluid/operators/cudnn_rnn_cache.h index cd33338abc622..13a3e7d09b9f6 100644 --- a/paddle/fluid/operators/cudnn_rnn_cache.h +++ b/paddle/fluid/operators/cudnn_rnn_cache.h @@ -14,6 +14,7 @@ limitations under the License. 
*/ #pragma once +#include #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/cudnn_helper.h" @@ -24,16 +25,12 @@ struct CudnnRNNCache { CudnnRNNCache() { x_desc_ = NULL; y_desc_ = NULL; - dx_desc_ = NULL; - dy_desc_ = NULL; } ~CudnnRNNCache() { release(); } cudnnRNNDescriptor_t rnn_desc_; cudnnTensorDescriptor_t *x_desc_; cudnnTensorDescriptor_t *y_desc_; - cudnnTensorDescriptor_t *dx_desc_; - cudnnTensorDescriptor_t *dy_desc_; cudnnTensorDescriptor_t hx_desc_; cudnnTensorDescriptor_t cx_desc_; @@ -55,13 +52,9 @@ struct CudnnRNNCache { cudnnFilterDescriptor_t dw_desc_; size_t workspace_size_; - size_t reserve_size_; - framework::Tensor reserve_data_; framework::Tensor workspace_data_; - framework::Tensor dropout_state_; - - size_t max_length_; + size_t seq_length_; float dropout_prob_; bool is_bidirec_; @@ -72,10 +65,12 @@ struct CudnnRNNCache { int num_layers_; int seed_; - void init(cudnnHandle_t handle, const platform::Place &place, size_t max_len, + void init(cudnnHandle_t handle, const platform::Place &place, size_t seq_len, int batch_size, int input_size, int hidden_size, int num_layers, - float dropout_prob, bool is_bidirec, int seed, int weight_numel) { - max_length_ = max_len; + float dropout_prob, bool is_bidirec, int seed, int weight_numel, + size_t *reserve_size_, framework::Tensor *dropout_state_, + bool initialized, cudnnDataType_t cudnn_type) { + seq_length_ = seq_len; batch_size_ = batch_size; input_size_ = input_size; hidden_size_ = hidden_size; @@ -84,55 +79,34 @@ struct CudnnRNNCache { is_bidirec_ = is_bidirec; seed_ = seed; - x_desc_ = new cudnnTensorDescriptor_t[max_length_]; - y_desc_ = new cudnnTensorDescriptor_t[max_length_]; - dx_desc_ = new cudnnTensorDescriptor_t[max_length_]; - dy_desc_ = new cudnnTensorDescriptor_t[max_length_]; - int dim_a[3]; - int stride_a[3]; + const auto numDirections = is_bidirec_ ? 2 : 1; + auto cudnn_size = + cudnn_type == CUDNN_DATA_FLOAT ? sizeof(float) : sizeof(double); + + x_desc_ = new cudnnTensorDescriptor_t[seq_length_]; + y_desc_ = new cudnnTensorDescriptor_t[seq_length_]; + std::vector dims = {batch_size_, input_size_, 1}; + std::vector strides = {input_size_, 1, 1}; + + std::vector dims_y = {batch_size_, hidden_size_ * numDirections, 1}; + std::vector strides_y = {hidden_size_ * numDirections, 1, 1}; - for (size_t i = 0; i < max_length_; ++i) { + for (size_t i = 0; i < seq_length_; ++i) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&x_desc_[i])); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&y_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnCreateTensorDescriptor(&dx_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnCreateTensorDescriptor(&dy_desc_[i])); - dim_a[0] = batch_size_; - dim_a[1] = input_size_; - dim_a[2] = 1; - - stride_a[0] = dim_a[2] * dim_a[1]; - stride_a[1] = dim_a[2]; - stride_a[2] = 1; - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - x_desc_[i], CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - dx_desc_[i], CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); - - dim_a[0] = batch_size_; - dim_a[1] = is_bidirec_ ? 
hidden_size_ * 2 : hidden_size_; - dim_a[2] = 1; - - stride_a[0] = dim_a[2] * dim_a[1]; - stride_a[1] = dim_a[2]; - stride_a[2] = 1; PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - y_desc_[i], CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + x_desc_[i], cudnn_type, 3, dims.data(), strides.data())); + PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - dy_desc_[i], CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + y_desc_[i], cudnn_type, 3, dims_y.data(), strides_y.data())); } - dim_a[0] = num_layers_ * (is_bidirec_ ? 2 : 1); - dim_a[1] = batch_size_; - dim_a[2] = hidden_size_; - - stride_a[0] = dim_a[2] * dim_a[1]; - stride_a[1] = dim_a[2]; - stride_a[2] = 1; + std::vector dims_hx = {num_layers_ * numDirections, batch_size_, + hidden_size_}; + std::vector strides_hx = {hidden_size_ * batch_size_, hidden_size_, 1}; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&hx_desc_)); @@ -152,33 +126,44 @@ struct CudnnRNNCache { platform::dynload::cudnnCreateTensorDescriptor(&dcy_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - hx_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + hx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - cx_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + cx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - hy_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + hy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - cy_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + cy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - dhx_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + dhx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - dcx_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + dcx_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - dhy_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + dhy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( - dcy_desc_, CUDNN_DATA_FLOAT, 3, dim_a, stride_a)); + dcy_desc_, cudnn_type, 3, dims_hx.data(), strides_hx.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateDropoutDescriptor(&dropout_desc_)); size_t state_size; - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDropoutGetStatesSize(handle, &state_size)); - dropout_state_.Resize({static_cast(state_size)}); - auto *dropout_state_data = dropout_state_.mutable_data(place); - PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetDropoutDescriptor( - dropout_desc_, handle, dropout_prob_, dropout_state_data, state_size, - seed_)); + if (!initialized) { + PADDLE_ENFORCE_CUDA_SUCCESS( + platform::dynload::cudnnDropoutGetStatesSize(handle, &state_size)); + dropout_state_->Resize({static_cast(state_size)}); + uint8_t *dropout_state_data = + dropout_state_->mutable_data(place); + PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetDropoutDescriptor( + dropout_desc_, handle, dropout_prob_, dropout_state_data, state_size, + seed_)); + } else { + uint8_t *dropout_state_data = 
dropout_state_->data(); + auto dropout_state_dims = dropout_state_->dims(); + state_size = dropout_state_dims[0]; + PADDLE_ENFORCE_CUDA_SUCCESS( + platform::dynload::cudnnRestoreDropoutDescriptor( + dropout_desc_, handle, dropout_prob_, dropout_state_data, + state_size, 0)); + } PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateRNNDescriptor(&rnn_desc_)); @@ -188,12 +173,12 @@ struct CudnnRNNCache { handle, rnn_desc_, hidden_size_, num_layers_, dropout_desc_, CUDNN_LINEAR_INPUT, is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM, - CUDNN_RNN_ALGO_STANDARD, CUDNN_DATA_FLOAT)); + CUDNN_RNN_ALGO_STANDARD, cudnn_type)); #else PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetRNNDescriptor( rnn_desc_, hidden_size_, num_layers_, dropout_desc_, CUDNN_LINEAR_INPUT, is_bidirec_ ? CUDNN_BIDIRECTIONAL : CUDNN_UNIDIRECTIONAL, CUDNN_LSTM, - CUDNN_DATA_FLOAT)); + cudnn_type)); #endif PADDLE_ENFORCE_CUDA_SUCCESS( @@ -202,48 +187,42 @@ struct CudnnRNNCache { platform::dynload::cudnnCreateFilterDescriptor(&dw_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNParamsSize( - handle, rnn_desc_, x_desc_[0], &weights_size_, CUDNN_DATA_FLOAT)); + handle, rnn_desc_, x_desc_[0], &weights_size_, cudnn_type)); + + PADDLE_ENFORCE_EQ( + weights_size_, cudnn_size * weight_numel, + platform::errors::InvalidArgument( + "The cudnn lstm and setting weight size should be same.")); - PADDLE_ENFORCE_EQ(weights_size_, sizeof(float) * weight_numel, - "cudnn lstm weight size should be SAME"); int dim_w[3]; - dim_w[0] = weights_size_ / sizeof(float); + dim_w[0] = weights_size_ / cudnn_size; dim_w[1] = 1; dim_w[2] = 1; PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( - w_desc_, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dim_w)); + w_desc_, cudnn_type, CUDNN_TENSOR_NCHW, 3, dim_w)); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetFilterNdDescriptor( - dw_desc_, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 3, dim_w)); + dw_desc_, cudnn_type, CUDNN_TENSOR_NCHW, 3, dim_w)); PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnGetRNNWorkspaceSize( - handle, rnn_desc_, max_length_, x_desc_, &workspace_size_)); + handle, rnn_desc_, seq_length_, x_desc_, &workspace_size_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnGetRNNTrainingReserveSize( - handle, rnn_desc_, max_length_, x_desc_, &reserve_size_)); - - reserve_data_.Resize({static_cast(reserve_size_)}); - reserve_data_.mutable_data(place); + handle, rnn_desc_, seq_length_, x_desc_, reserve_size_)); workspace_data_.Resize({static_cast(workspace_size_)}); workspace_data_.mutable_data(place); } void release() { - for (size_t i = 0; i < max_length_; ++i) { + for (size_t i = 0; i < seq_length_; ++i) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(x_desc_[i])); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(y_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDestroyTensorDescriptor(dx_desc_[i])); - PADDLE_ENFORCE_CUDA_SUCCESS( - platform::dynload::cudnnDestroyTensorDescriptor(dy_desc_[i])); } delete[] x_desc_; delete[] y_desc_; - delete[] dx_desc_; - delete[] dy_desc_; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(hx_desc_)); diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 0eb28f0c0c356..ebeb14e940e5f 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -100,6 +100,7 @@ extern void EnforceCUDNNLoaded(const char* 
fn_name); __macro(cudnnCreateDropoutDescriptor); \ __macro(cudnnDropoutGetStatesSize); \ __macro(cudnnSetDropoutDescriptor); \ + __macro(cudnnRestoreDropoutDescriptor); \ __macro(cudnnCreateRNNDescriptor); \ __macro(cudnnGetRNNParamsSize); \ __macro(cudnnGetRNNWorkspaceSize); \ diff --git a/python/paddle/fluid/layers/rnn.py b/python/paddle/fluid/layers/rnn.py index 4ec0770aaf0a5..bc1368b562d7b 100644 --- a/python/paddle/fluid/layers/rnn.py +++ b/python/paddle/fluid/layers/rnn.py @@ -2213,9 +2213,9 @@ def lstm(input, input ( :ref:`api_guide_Variable_en` ): LSTM input tensor, 3-D Tensor of shape :math:`[batch\_size, seq\_len, input\_dim]` . Data type is float32 or float64 init_h( :ref:`api_guide_Variable_en` ): The initial hidden state of the LSTM, 3-D Tensor of shape :math:`[num\_layers, batch\_size, hidden\_size]` . If is_bidirec = True, shape should be :math:`[num\_layers*2, batch\_size, hidden\_size]` . Data type is float32 or float64. + max_len (int): This parameter has no effect and will be discarded. init_c( :ref:`api_guide_Variable_en` ): The initial cell state of the LSTM, 3-D Tensor of shape :math:`[num\_layers, batch\_size, hidden\_size]` . If is_bidirec = True, shape should be :math:`[num\_layers*2, batch\_size, hidden\_size]` . Data type is float32 or float64. - max_len (int): max length of LSTM. the first dim of input tensor CAN NOT greater than max_len. hidden_size (int): hidden size of the LSTM. num_layers (int): total layers number of the LSTM. dropout_prob(float, optional): dropout prob, dropout ONLY work between rnn layers, NOT between time steps @@ -2256,7 +2256,6 @@ def lstm(input, data = fluid.data(name='x', shape=[None, 100], dtype='int64') emb = fluid.embedding(input=data, size=[vocab_size, emb_dim], is_sparse=True) batch_size = 20 - max_len = 100 dropout_prob = 0.2 input_size = 100 hidden_size = 150 @@ -2309,9 +2308,11 @@ def lstm(input, out = helper.create_variable_for_type_inference(dtype) last_h = helper.create_variable_for_type_inference(dtype) last_c = helper.create_variable_for_type_inference(dtype) - - cache = helper.create_variable( - persistable=True, type=core.VarDesc.VarType.RAW, stop_gradient=True) + reserve = helper.create_variable_for_type_inference( + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + state_out = helper.create_variable_for_type_inference( + dtype=core.VarDesc.VarType.UINT8, stop_gradient=True) + state_out.persistable = True helper.append_op( type='cudnn_lstm', @@ -2320,15 +2321,15 @@ def lstm(input, 'InitH': init_h, 'InitC': init_c, 'W': weight, - 'Cache': cache, }, outputs={ 'Out': out, - 'last_h': last_h, - 'last_c': last_c, + 'LastH': last_h, + 'LastC': last_c, + 'Reserve': reserve, + 'StateOut': state_out, }, attrs={ - 'max_len': max_len, 'is_bidirec': is_bidirec, 'input_size': input_size, 'hidden_size': hidden_size, diff --git a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py index d4189eca03697..90430bbce4d18 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py @@ -20,15 +20,14 @@ import paddle.fluid.core as core from op_test import OpTest import paddle.fluid as fluid +import paddle.fluid.layers as layers SIGMOID_THRESHOLD_MIN = -40.0 SIGMOID_THRESHOLD_MAX = 13.0 EXP_MAX_INPUT = 40.0 -def lstm_naive( - input, - w, ): +def lstm_naive(input, w): seq_len, batch_size, hidden_size = input.shape offset = 0 @@ -86,8 +85,8 @@ def tanh(x): return (2. / (1. + np.exp(y))) - 1. 
output = [] - pre_h = np.zeros((batch_size, hidden_size), dtype=input.dtype) - pre_c = np.zeros((batch_size, hidden_size), dtype=input.dtype) + pre_h = np.zeros((1, batch_size, hidden_size), dtype=input.dtype) + pre_c = np.zeros((1, batch_size, hidden_size), dtype=input.dtype) for i in range(seq_len): emb_1 = input[i] @@ -110,7 +109,6 @@ def tanh(x): output = np.concatenate(output, -1) output = output.reshape((batch_size, -1, hidden_size)) - output = output.transpose((1, 0, 2)) return output, pre_h, pre_c @@ -119,11 +117,12 @@ def tanh(x): @unittest.skipIf(not core.is_compiled_with_cuda(), "core is not compiled with CUDA") class TestCUDNNLstmOp(OpTest): + # TODO(GaoWei8):when input dtype is fp64, precision threshold should be removed. def setUp(self): self.op_type = "cudnn_lstm" - self.dtype = np.float32 + self.dtype = np.float64 - num_steps = 20 + seq_length = 20 batch_size = 5 hidden_size = 20 @@ -133,33 +132,24 @@ def setUp(self): weight_size += hidden_size * 8 input = np.random.uniform( - low=-0.1, high=0.1, size=(num_steps, batch_size, + low=-0.1, high=0.1, size=(seq_length, batch_size, hidden_size)).astype(self.dtype) flat_w = np.random.uniform( low=-0.1, high=0.1, size=(weight_size)).astype(self.dtype) output, last_hidden, last_cell = lstm_naive(input, flat_w) - init_h = np.zeros((batch_size, hidden_size), dtype=np.float32) - init_c = np.zeros((batch_size, hidden_size), dtype=np.float32) - scope = core.Scope() - program = fluid.Program() - block = program.global_block() - - cache_temp = block.create_var( - name="Cache", - persistable=True, - type=core.VarDesc.VarType.RAW, - stop_gradient=True) + init_h = np.zeros((1, batch_size, hidden_size), dtype=np.float64) + init_c = np.zeros((1, batch_size, hidden_size), dtype=np.float64) + state_out = np.ndarray((300)).astype("uint8") + self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'W': OpTest.np_dtype_to_fluid_dtype(flat_w), - 'InitH': OpTest.np_dtype_to_fluid_dtype(init_h), - 'InitC': OpTest.np_dtype_to_fluid_dtype(init_c), + 'Input': input, + 'W': flat_w, + 'InitH': init_h, + 'InitC': init_c } - self.cache_name_list = ['Cache'] self.attrs = { - 'max_len': num_steps, 'dropout_prob': 0.0, 'is_bidirec': False, 'input_size': hidden_size, @@ -168,22 +158,61 @@ def setUp(self): } self.outputs = { 'Out': output, - "last_h": last_hidden, - 'last_c': last_cell + "LastH": last_hidden, + 'LastC': last_cell, + 'Reserve': np.ndarray((400)).astype("uint8"), + 'StateOut': state_out } def test_output_with_place(self): # depend on the scope structure place = core.CUDAPlace(0) - self.check_output_with_place(place, atol=1e-5, check_dygraph=False) + self.check_output_with_place( + place, no_check_set=['Reserve', 'StateOut']) def test_grad_with_place(self): # depend on the scope structure place = core.CUDAPlace(0) self.check_grad_with_place( place, - set(['Input', 'W', 'InitH', 'InitC']), ['Out', 'last_h', 'last_c'], - check_dygraph=False) + set(['Input', 'W', 'InitH', 'InitC']), ['Out', 'LastH', 'LastC'], + max_relative_error=1e-4) + + +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestCUDNNlstmAPI(unittest.TestCase): + def test_lstm(self): + seq_len = 20 + batch_size = 5 + hidden_size = 20 + dropout_prob = 0.0 + num_layers = 1 + input = fluid.data( + name='input', + shape=[seq_len, batch_size, hidden_size], + dtype='float64') + init_h = layers.fill_constant([num_layers, batch_size, hidden_size], + 'float64', 0.0) + init_c = layers.fill_constant([num_layers, batch_size, hidden_size], + 
'float64', 0.0) + rnn_out, last_h, last_c = layers.lstm(input, init_h, init_c, seq_len, + hidden_size, num_layers, + dropout_prob) + exe = fluid.Executor(fluid.CUDAPlace(0)) + exe.run(fluid.default_startup_program()) + input_i = np.random.uniform( + low=-0.1, high=0.1, size=(seq_len, batch_size, + hidden_size)).astype("float64") + out = exe.run(fluid.default_main_program(), + feed={'input': input_i}, + fetch_list=[rnn_out, last_h, last_c, 'cudnn_lstm_0.w_0']) + + output, last_hidden, last_cell = lstm_naive(input_i, out[3]) + + self.assertTrue(np.allclose(output, out[0], atol=1e-5)) + self.assertTrue(np.allclose(last_hidden, out[1], atol=1e-5)) + self.assertTrue(np.allclose(last_cell, out[2], atol=1e-5)) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py b/python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py index b8258f3153a80..0de0eeb464ad7 100644 --- a/python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py @@ -26,4 +26,5 @@ 'cross_entropy2', 'seed', 'amp_check_finite_and_scale', + 'cudnn_lstm', ] diff --git a/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py index ce6868b5c70ae..5300ab935a340 100644 --- a/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py +++ b/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py @@ -41,7 +41,8 @@ 'unpool', \ 'yolov3_loss', \ 'inverse', \ - 'bilateral_slice' + 'bilateral_slice',\ + 'cudnn_lstm' ] NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST = ['bilinear_interp'] From f3ea615673f13323eec89ccb6a50c759c9909201 Mon Sep 17 00:00:00 2001 From: liym27 <33742067+liym27@users.noreply.github.com> Date: Wed, 19 Aug 2020 11:31:29 +0800 Subject: [PATCH 15/37] [Dy2Stat] Fix bug: unwrap func in dy2Stat. (#26279) --- .../dygraph/dygraph_to_static/origin_info.py | 14 +------ .../dygraph_to_static/program_translator.py | 40 +++++++++++-------- .../fluid/dygraph/dygraph_to_static/utils.py | 16 ++++++++ 3 files changed, 41 insertions(+), 29 deletions(-) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py index aeece9513b577..13f38b0726c27 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/origin_info.py @@ -18,8 +18,8 @@ import inspect import gast - from paddle.fluid import core +from paddle.fluid.dygraph.dygraph_to_static.utils import unwrap from paddle.fluid.framework import Program # NOTE(liym27): Please use `getattr(ast_node, ORIGI_INFO)` instead of . operation to get the original information of ast node. @@ -197,18 +197,6 @@ def attach_origin_info(ast_node, func): return ast_node -# NOTE: inspect.unwrap() exits in PY3 but not in PY2. -def unwrap(func): - def _is_wrapped(f): - return hasattr(f, '__wrapped__') - - unwrapped_f = func - while (_is_wrapped(unwrapped_f)): - unwrapped_f = unwrapped_f.__wrapped__ - - return unwrapped_f - - def ast_walk(transformed_node, static_node): """ Recursively yield all descendant nodes in the trees starting at transformed_node and static_node (including itself) in parallel. 
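# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the patch): how the relocated unwrap()
# helper behaves. `my_decorator` and `foo` below are made-up example names;
# the sketch assumes a decorator built with functools.wraps, which records
# the original function on __wrapped__ in Python 3.
# ---------------------------------------------------------------------------
import functools
import inspect


def my_decorator(fn):
    @functools.wraps(fn)  # sets wrapper.__wrapped__ = fn
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)

    return wrapper


@my_decorator
def foo(x):
    return x + 1


def unwrap(func):
    # Same loop as the helper moved into dygraph_to_static/utils.py:
    # follow the __wrapped__ chain until the undecorated function is reached.
    while hasattr(func, '__wrapped__'):
        func = func.__wrapped__
    return func


# unwrap(foo) is the original foo, so inspect.getsource() reads the real
# function body instead of the decorator's wrapper, which is what the
# dygraph-to-static code generation needs.
assert unwrap(foo) is foo.__wrapped__
print(inspect.getsource(unwrap(foo)))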
diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py index 88562dd40a63b..ceacba25375c6 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/program_translator.py @@ -13,32 +13,38 @@ # limitations under the License. from __future__ import print_function -import gast + +import collections import inspect -import warnings import textwrap import threading -import collections +import warnings + +import gast import numpy as np -from paddle.fluid import core, scope_guard -from paddle.fluid import framework +from paddle.fluid import core from paddle.fluid import executor +from paddle.fluid import framework +from paddle.fluid import scope_guard from paddle.fluid import unique_name +from paddle.fluid.data_feeder import check_type from paddle.fluid.dygraph import layers -from paddle.fluid.layers.utils import flatten -from paddle.fluid.layers.utils import pack_sequence_as +from paddle.fluid.dygraph.base import param_guard from paddle.fluid.dygraph.base import switch_to_static_graph from paddle.fluid.dygraph.dygraph_to_static.ast_transformer import DygraphToStaticAst +from paddle.fluid.dygraph.dygraph_to_static.error import ERROR_DATA +from paddle.fluid.dygraph.dygraph_to_static.error import attach_error_data +from paddle.fluid.dygraph.dygraph_to_static.origin_info import attach_origin_info +from paddle.fluid.dygraph.dygraph_to_static.origin_info import create_and_update_origin_info_map +from paddle.fluid.dygraph.dygraph_to_static.origin_info import update_op_callstack_with_origin_info +from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from +from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_func from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code from paddle.fluid.dygraph.dygraph_to_static.utils import func_to_source_code -from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_func +from paddle.fluid.dygraph.dygraph_to_static.utils import unwrap +from paddle.fluid.layers.utils import flatten +from paddle.fluid.layers.utils import pack_sequence_as from paddle.fluid.wrapped_decorator import signature_safe_contextmanager -from paddle.fluid.dygraph.base import param_guard -from paddle.fluid.data_feeder import check_type -from paddle.fluid.dygraph.dygraph_to_static.partial_program import partial_program_from -from paddle.fluid.dygraph.dygraph_to_static.origin_info import attach_origin_info, create_and_update_origin_info_map -from paddle.fluid.dygraph.dygraph_to_static.origin_info import update_op_callstack_with_origin_info -from paddle.fluid.dygraph.dygraph_to_static.error import attach_error_data, ERROR_DATA __all__ = ['ProgramTranslator', 'convert_to_static'] @@ -89,7 +95,7 @@ def foo(x, y): """ # Note: In Python2, it will raise OSError when inspect function # with decorator directly and function.__wrapped__ holds the actual function. 
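        # unwrap() (added below) follows the whole __wrapped__ chain, whereas
        # the old getattr() call only peeled off a single decorator layer.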
- func = getattr(func, '__wrapped__', func) + func = unwrap(func) source_code = func_to_source_code(func) # TODO(liym27): @@ -669,7 +675,9 @@ def func(x): dygraph_func ), "Input dygraph_func is not a callable in ProgramTranslator.get_code" # Gets AST from dygraph function - raw_code = inspect.getsource(dygraph_func) + + unwrap_func = unwrap(dygraph_func) + raw_code = inspect.getsource(unwrap_func) code = textwrap.dedent(raw_code) root = gast.parse(code) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/utils.py b/python/paddle/fluid/dygraph/dygraph_to_static/utils.py index def201cedc242..58cad1cfa4272 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/utils.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/utils.py @@ -1052,3 +1052,19 @@ def _parse_multi_target_assign(self, node): value_node = target return new_nodes + + +# NOTE: inspect.unwrap() exits in PY3 but not in PY2. +def unwrap(func): + """ + Returns the object wrapped by decorators. + """ + + def _is_wrapped(f): + return hasattr(f, '__wrapped__') + + unwrapped_f = func + while (_is_wrapped(unwrapped_f)): + unwrapped_f = unwrapped_f.__wrapped__ + + return unwrapped_f From 56890dc7292ccca3d96a3d17e753bf21e007fb77 Mon Sep 17 00:00:00 2001 From: ceci3 Date: Wed, 19 Aug 2020 11:34:09 +0800 Subject: [PATCH 16/37] Add SyncBatchNorm (#26032) * add SyncBatchNorm,test=develop --- paddle/fluid/pybind/op_function_generator.cc | 4 + python/paddle/fluid/dygraph/nn.py | 216 +++++++++++++++++- .../fluid/tests/unittests/CMakeLists.txt | 1 + .../parallel_dygraph_sync_batch_norm.py | 108 +++++++++ .../fluid/tests/unittests/test_layers.py | 18 ++ .../test_parallel_dygraph_sync_batch_norm.py | 40 ++++ .../unittests/test_sync_batch_norm_op.py | 18 ++ python/paddle/nn/__init__.py | 1 + python/paddle/nn/layer/__init__.py | 1 + python/paddle/nn/layer/norm.py | 4 +- 10 files changed, 409 insertions(+), 2 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py create mode 100644 python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc index 89770ccc8cec1..a770275bf08c0 100644 --- a/paddle/fluid/pybind/op_function_generator.cc +++ b/paddle/fluid/pybind/op_function_generator.cc @@ -57,6 +57,9 @@ std::map> op_outs_map = { {"batch_norm", {"Y", "MeanOut", "VarianceOut", "SavedMean", "SavedVariance", "ReserveSpace"}}, + {"sync_batch_norm", + {"Y", "MeanOut", "VarianceOut", "SavedMean", "SavedVariance", + "ReserveSpace"}}, }; // NOTE(zhiqiu): Commonly, the outputs in auto-generated OP function are @@ -76,6 +79,7 @@ std::map> op_passing_outs_map = { {"ParamOut", "Moment1Out", "Moment2Out", "Beta1PowOut", "Beta2PowOut"}}, {"momentum", {"ParamOut", "VelocityOut"}}, {"batch_norm", {"MeanOut", "VarianceOut"}}, + {"sync_batch_norm", {"MeanOut", "VarianceOut"}}, {"accuracy", {"Correct", "Total"}}, {"fill_constant", {"Out"}}, {"matmul", {"Out"}}, diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index a7ffc2dd63ae6..45744841fc5be 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -35,7 +35,7 @@ 'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding', 'GRUUnit', 'InstanceNorm', 'LayerNorm', 'NCE', 'PRelu', 'BilinearTensorProduct', 'Conv2DTranspose', 'Conv3DTranspose', 'GroupNorm', - 'SpectralNorm', 'TreeConv', 'Flatten' + 'SpectralNorm', 'TreeConv', 'Flatten', 'SyncBatchNorm' ] @@ -3202,6 
+3202,220 @@ def forward(self, nodes_vector, edge_set): return self._helper.append_activation(pre_activation, act=self._act) +class SyncBatchNorm(layers.Layer): + """ + This interface is used to construct a callable object of the ``SyncBatchNorm`` class. + It implements the function of the Cross-GPU Synchronized Batch Normalization Layer, and can + be used as a normalizer function for other operations, such as conv2d and fully connected + operations. + The data is normalized by the mean and variance of the channel based on whole mini-batch + , which including data in all gpus. + Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing + Internal Covariate Shift `_ + for more details. + + When model in training mode, the :math:`\\mu_{\\beta}` + and :math:`\\sigma_{\\beta}^{2}` are the statistics of whole mini-batch data in all gpus. + Calculated as follows: + + .. math:: + + \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\ + \ mini-batch\ mean \\\\ + \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\ + \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\ + + - :math:`x` : whole mini-batch data in all gpus + - :math:`m` : the size of the whole mini-batch data + + When model in evaluation mode, the :math:`\\mu_{\\beta}` + and :math:`\\sigma_{\\beta}^{2}` are global statistics (moving_mean and moving_variance, + which usually got from the pre-trained model). Global statistics calculated as follows: + + .. math:: + moving\_mean = moving\_mean * momentum + \mu_{\beta} * (1. - momentum) \quad &// global mean \\ + moving\_variance = moving\_variance * momentum + \sigma_{\beta}^{2} * (1. - momentum) \quad &// global variance \\ + + The formula of normalization is as follows: + + .. math:: + + \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ + \\sigma_{\\beta}^{2} + \\eps}} \\qquad &//\ normalize \\\\ + y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift + + - :math:`\\eps` : add a smaller value to the variance to prevent division by zero + - :math:`\\gamma` : trainable scale parameter vector + - :math:`\\beta` : trainable shift parameter vector + + Parameters: + num_features(int): Indicate the number of channels of the input ``Tensor``. + epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5. + momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9. + weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale` + of this layer. If it is set to None or one attribute of ParamAttr, this layerr + will create ParamAttr as param_attr. If the Initializer of the param_attr + is not set, the parameter is initialized with Xavier. If it is set to False, + this layer will not have trainable scale parameter. Default: None. + bias_attr(ParamAttr|bool, optional): The parameter attribute for the bias of this layer. + If it is set to None or one attribute of ParamAttr, this layer + will create ParamAttr as bias_attr. If the Initializer of the bias_attr + is not set, the bias is initialized zero. If it is set to False, this layer will not + have trainable bias parameter. Default: None. + track_running_stats(bool, optional): Whether to compute global stats, which including running mean and + running variance. Default: True. + + Returns: + None + + Examples: + .. 
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + + x = np.array([[[[0.3, 0.4], [0.3, 0.07]], [[0.83, 0.37], [0.18, 0.93]]]]).astype('float32') + paddle.disable_static() + x = paddle.to_tensor(x) + if paddle.fluid.is_compiled_with_cuda(): + sync_batch_norm = nn.SyncBatchNorm(2) + hidden1 = sync_batch_norm(x) + print(hidden1.numpy()) + # [[[[0.26824948, 1.0936325],[0.26824948, -1.6301316]],[[ 0.8095662, -0.665287],[-1.2744656, 1.1301866 ]]]] + """ + + def __init__(self, + num_features, + epsilon=1e-05, + momentum=0.9, + track_running_stats=True, + weight_attr=None, + bias_attr=None, + data_format='NCHW', + name=None): + super(SyncBatchNorm, self).__init__() + self._weight_attr = weight_attr + self._bias_attr = bias_attr + self._num_features = num_features + self._data_layout = data_format + self._momentum = momentum + self._epsilon = epsilon + self._track_running_stats = track_running_stats + + if self._track_running_stats == False: + logging.warn( + "moving mean and moving variance will be calculated whether `track_running_stats` is set to `True` or `False`, we will fix it in the next version." + ) + + param_shape = [self._num_features] + + # create parameter + if weight_attr == False: + self.weight = self.create_parameter( + attr=None, shape=param_shape, default_initializer=Constant(1.0)) + self.weight.stop_gradient = True + else: + self.weight = self.create_parameter( + attr=self._weight_attr, + shape=param_shape, + default_initializer=Constant(1.0)) + self.weight.stop_gradient = self._weight_attr != None and self._weight_attr.learning_rate == 0. + + if bias_attr == False: + self.bias = self.create_parameter( + attr=None, + shape=param_shape, + default_initializer=Constant(0.0), + is_bias=True) + self.bias.stop_gradient = True + else: + self.bias = self.create_parameter( + attr=self._bias_attr, shape=param_shape, is_bias=True) + self.bias.stop_gradient = self._weight_attr != None and self._weight_attr.learning_rate == 0. 
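+        # Running statistics: _mean/_variance are created as non-trainable
+        # parameters and are updated in place by the sync_batch_norm op
+        # through the MeanOut/VarianceOut outputs wired up in forward() below.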
+ + self._mean = self.create_parameter( + attr=ParamAttr( + name=None, + initializer=Constant(0.0), + trainable=False, + do_model_average=True), + shape=param_shape, + dtype=self._dtype) + self._mean.stop_gradient = True + + self._variance = self.create_parameter( + attr=ParamAttr( + name=None, + initializer=Constant(1.0), + trainable=False, + do_model_average=True), + shape=param_shape, + dtype=self._dtype) + self._variance.stop_gradient = True + + def forward(self, x): + # create output + # mean and mean_out share the same memory + mean_out = self._mean + # variance and variance out share the same memory + variance_out = self._variance + + ### train mode: use mini-batch stats, eval mode: use global stats + if in_dygraph_mode(): + attrs = ("momentum", self._momentum, "epsilon", self._epsilon, + "is_test", not self.training, "data_layout", + self._data_layout, "use_mkldnn", False, "fuse_with_relu", + False, "use_global_stats", not self.training, + 'trainable_statistics', False) + sync_batch_norm_out, _, _, _, _, _ = core.ops.sync_batch_norm( + x, self.weight, self.bias, self._mean, self._variance, mean_out, + variance_out, *attrs) + + return sync_batch_norm_out + + check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'], + 'BatchNorm') + + attrs = { + "momentum": self._momentum, + "epsilon": self._epsilon, + "is_test": not self.training, + "data_layout": self._data_layout, + "use_mkldnn": False, + "fuse_with_relu": False, + "use_global_stats": not self.training, + "trainable_statistics": False, + } + + inputs = { + "X": [x], + "Scale": [self.weight], + "Bias": [self.bias], + "Mean": [self._mean], + "Variance": [self._variance] + } + + saved_mean = self._helper.create_variable_for_type_inference( + dtype=self._dtype, stop_gradient=True) + saved_variance = self._helper.create_variable_for_type_inference( + dtype=self._dtype, stop_gradient=True) + sync_batch_norm_out = self._helper.create_variable_for_type_inference( + self._dtype) + + outputs = { + "Y": [sync_batch_norm_out], + "MeanOut": [mean_out], + "VarianceOut": [variance_out], + "SavedMean": [saved_mean], + "SavedVariance": [saved_variance] + } + + self._helper.append_op( + type="sync_batch_norm", inputs=inputs, outputs=outputs, attrs=attrs) + return sync_batch_norm_out + + class Flatten(layers.Layer): """ :alias_main: paddle.nn.Flatten diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 05f5d174f8774..126b4465eae48 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -106,6 +106,7 @@ if (NOT ${WITH_GPU}) list(REMOVE_ITEM TEST_OPS test_parallel_dygraph_se_resnext) LIST(REMOVE_ITEM TEST_OPS test_parallel_dygraph_sparse_embedding) LIST(REMOVE_ITEM TEST_OPS test_parallel_dygraph_transformer) + LIST(REMOVE_ITEM TEST_OPS test_parallel_dygraph_sync_batch_norm) LIST(REMOVE_ITEM TEST_OPS test_imperative_auto_mixed_precision) elseif(${CUDNN_VERSION} VERSION_LESS 7100) LIST(REMOVE_ITEM TEST_OPS test_conv2d_fusion_op) diff --git a/python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py b/python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py new file mode 100644 index 0000000000000..5e2059592b517 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/parallel_dygraph_sync_batch_norm.py @@ -0,0 +1,108 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +import contextlib +import unittest +import numpy as np +import six +import pickle + +import paddle +import paddle.fluid as fluid +import paddle.fluid.dygraph as dygraph +from paddle.fluid import core +from paddle.fluid.optimizer import SGDOptimizer +from paddle.nn import Conv2D, Pool2D, Linear, SyncBatchNorm +from paddle.fluid.dygraph.base import to_variable + +from test_dist_base import runtime_main, TestParallelDyGraphRunnerBase + + +class TestLayer(fluid.dygraph.Layer): + def __init__(self, + num_channels, + num_filters, + filter_size, + stride=1, + groups=1, + act=None): + super(TestLayer, self).__init__() + + self._conv = Conv2D( + num_channels=num_channels, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=False) + + self._sync_batch_norm = SyncBatchNorm(num_filters) + + self._conv2 = Conv2D( + num_channels=num_filters, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) // 2, + groups=groups, + act=None, + bias_attr=False) + + self._sync_batch_norm2 = SyncBatchNorm( + num_filters, + weight_attr=False, + bias_attr=False, + track_running_stats=False) + + def forward(self, inputs): + y = self._conv(inputs) + y = self._sync_batch_norm(y) + y = self._conv2(y) + y = self._sync_batch_norm2(y) + + return y + + +class TestSyncBatchNorm(TestParallelDyGraphRunnerBase): + def get_model(self): + model = TestLayer(3, 64, 7) + train_reader = paddle.batch( + paddle.dataset.flowers.test(use_xmap=False), + batch_size=32, + drop_last=True) + opt = fluid.optimizer.Adam( + learning_rate=1e-3, parameter_list=model.parameters()) + return model, train_reader, opt + + def run_one_loop(self, model, opt, data): + batch_size = len(data) + dy_x_data = np.array([x[0].reshape(3, 224, 224) + for x in data]).astype('float32') + img = to_variable(dy_x_data) + img.stop_gradient = False + + out = model(img) + + out = fluid.layers.mean(out) + + return out + + +if __name__ == "__main__": + runtime_main(TestSyncBatchNorm) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 9da70e85f01c0..91186b2e95ae0 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -283,6 +283,24 @@ def test_layer_norm(self): with self.assertRaises(ValueError): lm(base.to_variable(inp)) + def test_SyncBatchNorm(self): + if core.is_compiled_with_cuda(): + with self.static_graph(): + t = layers.data(name='t', shape=[-1, 3, 5, 5], dtype='float32') + my_sync_bn = nn.SyncBatchNorm(3) + ret = my_sync_bn(t) + static_ret = self.get_static_graph_result( + feed={'t': np.ones( + [3, 3, 5, 5], dtype='float32')}, + fetch_list=[ret])[0] + + with self.dynamic_graph(): + t = np.ones([3, 3, 5, 5], dtype='float32') + my_syncbn = paddle.nn.SyncBatchNorm(3) + dy_ret = 
my_syncbn(base.to_variable(t)) + dy_ret_value = dy_ret.numpy() + self.assertTrue(np.array_equal(static_ret, static_ret)) + def test_relu(self): with self.static_graph(): t = layers.data(name='t', shape=[3, 3], dtype='float32') diff --git a/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py new file mode 100644 index 0000000000000..5c34b35fc83a3 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_dygraph_sync_batch_norm.py @@ -0,0 +1,40 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import unittest +from test_dist_base import TestDistBase +import paddle.fluid as fluid + +import os +flag_name = os.path.splitext(__file__)[0] + + +class TestParallelDygraphMnist(TestDistBase): + def _setup_config(self): + self._sync_mode = False + self._nccl2_mode = True + self._dygraph = False #True + + def test_mnist(self): + if fluid.core.is_compiled_with_cuda(): + self.check_with_place( + "parallel_dygraph_sync_batch_norm.py", + delta=1e-5, + check_error_log=True, + log_name=flag_name) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py index 8fd118c019303..806b6b90e7e2d 100644 --- a/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_sync_batch_norm_op.py @@ -25,6 +25,7 @@ import paddle.fluid.core as core import paddle.fluid as fluid from paddle.fluid import compiler +from paddle.fluid import Program, program_guard from op_test import OpTest, _set_use_system_allocator @@ -202,5 +203,22 @@ def setUp(self): self.atol = 1e-2 +class TestDygraphSyncBatchNormAPIError(unittest.TestCase): + def test_errors(self): + if not core.is_compiled_with_cuda(): + return + + with program_guard(Program(), Program()): + my_sync_batch_norm = fluid.dygraph.SyncBatchNorm(10) + x1 = fluid.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CUDAPlace(0)) + self.assertRaises(TypeError, my_sync_batch_norm, x1) + + # the input dtype of SyncBatchNorm must be float16 or float32 or float64 + # float16 only can be set on GPU place + x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32") + self.assertRaises(TypeError, my_sync_batch_norm, x2) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 6249ce59e21d3..7b6dcdf7f67de 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -92,6 +92,7 @@ from .layer.loss import KLDivLoss #DEFINE_ALIAS from .layer.loss import MarginRankingLoss #DEFINE_ALIAS from .layer.norm import BatchNorm #DEFINE_ALIAS +from .layer.norm import SyncBatchNorm #DEFINE_ALIAS from .layer.norm import GroupNorm #DEFINE_ALIAS from .layer.norm import LayerNorm #DEFINE_ALIAS from 
.layer.norm import SpectralNorm #DEFINE_ALIAS diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py index 9fb8ea78a16ab..f64252da5428a 100644 --- a/python/paddle/nn/layer/__init__.py +++ b/python/paddle/nn/layer/__init__.py @@ -65,6 +65,7 @@ from .loss import KLDivLoss #DEFINE_ALIAS from .loss import MarginRankingLoss #DEFINE_ALIAS from .norm import BatchNorm #DEFINE_ALIAS +from .norm import SyncBatchNorm #DEFINE_ALIAS from .norm import GroupNorm #DEFINE_ALIAS from .norm import LayerNorm #DEFINE_ALIAS from .norm import SpectralNorm #DEFINE_ALIAS diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py index 1beba62c1809f..1d00f9c7b8b02 100644 --- a/python/paddle/nn/layer/norm.py +++ b/python/paddle/nn/layer/norm.py @@ -20,7 +20,9 @@ from ...fluid.dygraph import GroupNorm #DEFINE_ALIAS from ...fluid.dygraph import LayerNorm #DEFINE_ALIAS from ...fluid.dygraph import SpectralNorm #DEFINE_ALIAS +from ...fluid.dygraph import SyncBatchNorm #DEFINE_ALIAS __all__ = [ - 'BatchNorm', 'GroupNorm', 'LayerNorm', 'SpectralNorm', 'InstanceNorm' + 'BatchNorm', 'GroupNorm', 'LayerNorm', 'SpectralNorm', 'InstanceNorm', + 'SyncBatchNorm' ] From c45481d7be71e90c194c615b61db411463d43349 Mon Sep 17 00:00:00 2001 From: Kaipeng Deng Date: Wed, 19 Aug 2020 11:40:49 +0800 Subject: [PATCH 17/37] add Sampler/SequenceSampler/RandomSampler (#26375) * add Sampler/SequenceSampler/RandomSampler. test=develop --- python/paddle/fluid/dataloader/__init__.py | 6 +- .../paddle/fluid/dataloader/batch_sampler.py | 79 +++--- python/paddle/fluid/dataloader/sampler.py | 232 ++++++++++++++++++ .../tests/unittests/test_batch_sampler.py | 78 +++++- python/paddle/io/__init__.py | 6 +- 5 files changed, 349 insertions(+), 52 deletions(-) create mode 100644 python/paddle/fluid/dataloader/sampler.py diff --git a/python/paddle/fluid/dataloader/__init__.py b/python/paddle/fluid/dataloader/__init__.py index 2f15811e4f360..597f1f217483c 100644 --- a/python/paddle/fluid/dataloader/__init__.py +++ b/python/paddle/fluid/dataloader/__init__.py @@ -23,6 +23,10 @@ from . import dataloader_iter from .dataloader_iter import * +from . import sampler +from .sampler import * + __all__ = dataset.__all__ \ + batch_sampler.__all__ \ - + dataloader_iter.__all__ + + dataloader_iter.__all__ \ + + sampler.__all__ diff --git a/python/paddle/fluid/dataloader/batch_sampler.py b/python/paddle/fluid/dataloader/batch_sampler.py index 811468c523b2f..8043237c0d97d 100644 --- a/python/paddle/fluid/dataloader/batch_sampler.py +++ b/python/paddle/fluid/dataloader/batch_sampler.py @@ -16,12 +16,13 @@ from __future__ import division import numpy as np +from .sampler import Sampler, SequenceSampler from .dataset import Dataset, IterableDataset __all__ = ["BatchSampler"] -class BatchSampler(object): +class BatchSampler(Sampler): """ A base implement of batch sampler used by `paddle.io.DataLoader` which yield mini-batch indices(a list/tuple with length as @@ -41,10 +42,11 @@ class BatchSampler(object): implement or other python object which implemented :code:`__len__` for BatchSampler to get indices as the range of :attr:`dataset` length. Default None. - indices (list|tuple): a substitution parameter for - :attr:`dataset` either :attr:`dataset` or - :attr:`indices` should be set, give the whole - indices to sampler from directly. Default None. + sampler (Sampler): this could be a :code:`paddle.io.Dataset` + instance which implemented :code:`__iter__` to yield + sample indices. 
:attr:`sampler` and :attr:`dataset` + can not be set in the same time. If :attr:`sampler` + is set, :attr:`shuffle` should not be set. Default None. shuffle(bool): whether to shuffle indices order before genrating batch indices. Default False. batch_size(int): sample indice number in a mini-batch indices. @@ -58,16 +60,7 @@ class BatchSampler(object): .. code-block:: python - from paddle.io import BatchSampler, Dataset - - # init with indices - bs = BatchSampler(indices=list(range(100)), - shuffle=True, - batch_size=8, - drop_last=True) - - for batch_indices in bs: - print(batch_indices) + from paddle.io import RandomSampler, BatchSampler, Dataset # init with dataset class RandomDataset(Dataset): @@ -90,34 +83,42 @@ def __len__(self): for batch_indices in bs: print(batch_indices) + # init with sampler + sampler = RandomSampler(RandomDataset(100)) + bs = BatchSampler(sampler=sampler, + shuffle=True, + batch_size=8, + drop_last=True) + + for batch_indices in bs: + print(batch_indices) + + see `paddle.io.DataLoader` """ def __init__(self, dataset=None, - indices=None, + sampler=None, shuffle=False, batch_size=1, drop_last=False): if dataset is None: - assert indices is not None, \ - "either dataset or indices should be set" - assert isinstance(indices, list) or isinstance(indices, tuple), \ - "indices should be a list or tuple, but got {}".format(type(indices)) - self.indices = indices - self.sampler_iter = None + assert sampler is not None, \ + "either dataset or sampler should be set" + assert isinstance(sampler, Sampler), \ + "sampler should be a paddle.io.Sampler, but got {}".format(type(sampler)) + assert not shuffle, "shuffle should be False when sampler is set" + self.sampler = sampler else: - if isinstance(dataset, IterableDataset): - self.sampler_iter = iter( - _InfiniteIterableSampler(dataset, batch_size)) - else: - self.sampler_iter = None - assert isinstance(dataset, Dataset), \ - "dataset should be an instance of paddle.io.Dataset" - assert indices is None, \ - "should not set both dataset and indices" - self.indices = list(range(len(dataset))) + assert isinstance(dataset, Dataset), \ + "dataset should be a paddle.io.Dataset" + assert not isinstance(dataset, IterableDataset), \ + "dataset should not be a paddle.io.IterableDataset" + assert sampler is None, \ + "should not set both dataset and sampler" + self.sampler = SequenceSampler(dataset) assert isinstance(batch_size, int) and batch_size > 0, \ "batch_size should be a positive integer, but got {}".format(batch_size) @@ -130,15 +131,8 @@ def __init__(self, self.drop_last = drop_last def __iter__(self): - if self.sampler_iter: - yield next(self.sampler_iter) - - if self.shuffle: - np.random.shuffle(self.indices) - _iter = iter(self.indices) - batch_indices = [] - for idx in _iter: + for idx in self.sampler: batch_indices.append(idx) if len(batch_indices) == self.batch_size: yield batch_indices @@ -147,10 +141,7 @@ def __iter__(self): yield batch_indices def __len__(self): - if self.sampler_iter: - raise RuntimeError("'{}' should not be called for IterableDataset". - format('__len__')) - num_samples = len(self.indices) + num_samples = len(self.sampler) num_samples += int(not self.drop_last) * (self.batch_size - 1) return num_samples // self.batch_size diff --git a/python/paddle/fluid/dataloader/sampler.py b/python/paddle/fluid/dataloader/sampler.py new file mode 100644 index 0000000000000..d2f3231cc6b12 --- /dev/null +++ b/python/paddle/fluid/dataloader/sampler.py @@ -0,0 +1,232 @@ +# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +from __future__ import division + +import numpy as np + +__all__ = ["Sampler", "SequenceSampler", "RandomSampler"] + + +class Sampler(object): + """ + An abstract class to encapsulate methods and behaviors of samplers. + + All sampler used by :code:`paddle.io.BatchSampler` should be a subclass + of :code:`paddle.io.Sampler`, BatchSampler subclasses should + implement following methods: + + :code:`__iter__`: return sample index iterably, which iterate over indices + of dataset elements + + :code:`__len__`: the number of sample in :attr:`data_source` + + + Args: + data_source(Dataset, optional): this could be an instance of + :code:`paddle.io.Dataset` other Python object which + implemented :code:`__len__` for Sampler to get indices + as the range of :attr:`dataset` length. Default None. + + Returns: + Sampler: an iterable object for sample indices iterating + + Examples: + + .. code-block:: python + + from paddle.io import Dataset, Sampler + + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + class MySampler(Sampler): + def __init__(self, data_source): + self.data_source = data_source + + def __iter__(self): + return iter(range(len(self.data_source))) + + def __len__(self): + return len(self.data_source) + + sampler = MySampler(data_source=RandomDataset(100)) + + for index in sampler: + print(index) + + see `paddle.io.BatchSampler` + see `paddle.io.DataLoader` + + """ + + def __init__(self, data_source=None): + self.data_source = data_source + + def __iter__(self): + raise NotImplementedError + + # Not define __len__ method in this base class here for __len__ + # is not needed in same sence, e.g. paddle.io.IterableDataset + + +class SequenceSampler(Sampler): + """ + Iterate samples sequentially, yield :code:`0, 1, 2, ..., len(data_source) -1` + generally, + + Args: + data_source(Dataset): dataset to sample, this could be an + instance of :code:`paddle.io.Dataset` other Python + object which implemented :code:`__len__`. + + Returns: + Sampler: a Sampler yield sample index sequentially + + Examples: + + .. 
code-block:: python + + from paddle.io import Dataset, SequenceSampler + + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + sampler = SequenceSampler(data_source=RandomDataset(100)) + + for index in sampler: + print(index) + + see `paddle.io.Sampler` + """ + + def __init__(self, data_source): + self.data_source = data_source + + def __iter__(self): + return iter(range(len(self.data_source))) + + def __len__(self): + return len(self.data_source) + + +class RandomSampler(Sampler): + """ + Iterate samples randomly, yield shuffled indices, if :attr:`replacement=False`, + yield shuffled indices of the whole data souce, if :attr:`replacement=True`, + :attr:`num_samples` can set to specify the sample number to draw. + + Args: + data_source(Dataset): dataset to sample, this could be an + instance of :code:`paddle.io.Dataset` other Python + object which implemented :code:`__len__`. + replacement(bool): If False, sample the whole dataset, If False, + set :attr:`num_samples` for how many sample to draw. Default False. + num_samples(int): set sample number to draw if :attr:`replacement` + is True. Default None. + generator(Generator): specify a generator to sample the data source. Default None + + Returns: + Sampler: a Sampler yield sample index randomly + + Examples: + + .. code-block:: python + + from paddle.io import Dataset, RandomSampler + + class RandomDataset(Dataset): + def __init__(self, num_samples): + self.num_samples = num_samples + + def __getitem__(self, idx): + image = np.random.random([784]).astype('float32') + label = np.random.randint(0, 9, (1, )).astype('int64') + return image, label + + def __len__(self): + return self.num_samples + + sampler = RandomSampler(data_souce=RandomDataset(100)) + + for index in sampler: + print(index) + + see `paddle.io.Sampler` + """ + + def __init__(self, + data_source, + replacement=False, + num_samples=None, + generator=None): + self.data_source = data_source + self.replacement = replacement + self._num_samples = num_samples + self.generator = generator + + if not isinstance(self.replacement, bool): + raise TypeError("expect boolean value for replacement, but got " + "replacement={}".format(self.replacement)) + + if self._num_samples is not None and not replacement: + raise ValueError( + "num_samples should not be specified while replacement is False") + + if not isinstance(self.num_samples, int) or self.num_samples <= 0: + raise ValueError("num_samples should be a positive integer, " + "but got num_samples={}".format(self.num_samples)) + + @property + def num_samples(self): + if self._num_samples is None: + return len(self.data_source) + return self._num_samples + + def __iter__(self): + n = len(self.data_source) + if self.generator: + for index in self.generator: + yield index + else: + if self.replacement: + for index in np.random.choice( + np.arange(n), self.num_samples, replace=True).tolist(): + yield index + else: + for index in np.random.choice( + np.arange(n), n, replace=False).tolist(): + yield index + + def __len__(self): + return self.num_samples diff --git a/python/paddle/fluid/tests/unittests/test_batch_sampler.py b/python/paddle/fluid/tests/unittests/test_batch_sampler.py index 7d90bbd0357bc..2e2a6144fd011 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_sampler.py 
+++ b/python/paddle/fluid/tests/unittests/test_batch_sampler.py @@ -17,7 +17,7 @@ import unittest import paddle.fluid as fluid -from paddle.io import BatchSampler, Dataset +from paddle.io import BatchSampler, Dataset, Sampler, SequenceSampler, RandomSampler class RandomDataset(Dataset): @@ -35,6 +35,60 @@ def __len__(self): return self.sample_num +class TestSampler(unittest.TestCase): + def test_main(self): + dataset = RandomDataset(100, 10) + sampler = Sampler(dataset) + try: + iter(sampler) + self.assertTrue(False) + except NotImplementedError: + pass + + +class TestSequenceSampler(unittest.TestCase): + def test_main(self): + dataset = RandomDataset(100, 10) + sampler = SequenceSampler(dataset) + assert len(sampler) == 100 + + for i, index in enumerate(iter(sampler)): + assert i == index + + +class TestRandomSampler(unittest.TestCase): + def test_main(self): + dataset = RandomDataset(100, 10) + sampler = RandomSampler(dataset) + assert len(sampler) == 100 + + rets = [] + for i in iter(sampler): + rets.append(i) + assert tuple(sorted(rets)) == tuple(range(0, 100)) + + def test_with_num_samples(self): + dataset = RandomDataset(100, 10) + sampler = RandomSampler(dataset, num_samples=50, replacement=True) + assert len(sampler) == 50 + + rets = [] + for i in iter(sampler): + rets.append(i) + assert i >= 0 and i < 100 + + def test_with_generator(self): + dataset = RandomDataset(100, 10) + generator = iter(range(0, 60)) + sampler = RandomSampler(dataset, generator=generator) + assert len(sampler) == 100 + + rets = [] + for i in iter(sampler): + rets.append(i) + assert tuple(sorted(rets)) == tuple(range(0, 60)) + + class TestBatchSampler(unittest.TestCase): def setUp(self): self.num_samples = 1000 @@ -86,16 +140,18 @@ def setUp(self): self.drop_last = True -class TestBatchSamplerWithIndices(TestBatchSampler): +class TestBatchSamplerWithSampler(TestBatchSampler): def init_batch_sampler(self): + dataset = RandomDataset(1000, 10) + sampler = SequenceSampler(dataset) bs = BatchSampler( - indices=list(range(self.num_samples)), + sampler=sampler, batch_size=self.batch_size, drop_last=self.drop_last) return bs -class TestBatchSamplerWithIndicesAndDataSource(unittest.TestCase): +class TestBatchSamplerWithSamplerDropLast(unittest.TestCase): def setUp(self): self.num_samples = 1000 self.num_classes = 10 @@ -103,12 +159,22 @@ def setUp(self): self.shuffle = False self.drop_last = True + +class TestBatchSamplerWithSamplerShuffle(unittest.TestCase): + def setUp(self): + self.num_samples = 1000 + self.num_classes = 10 + self.batch_size = 32 + self.shuffle = True + self.drop_last = True + def test_main(self): try: dataset = RandomDataset(self.num_samples, self.num_classes) + sampler = RandomSampler(dataset) bs = BatchSampler( - dataset=dataset, - indices=list(range(self.num_samples)), + sampler=sampler, + shuffle=self.shuffle, batch_size=self.batch_size, drop_last=self.drop_last) self.assertTrue(False) diff --git a/python/paddle/io/__init__.py b/python/paddle/io/__init__.py index 875f3ff2e9155..89bbd5916578b 100644 --- a/python/paddle/io/__init__.py +++ b/python/paddle/io/__init__.py @@ -20,6 +20,9 @@ # 'Transform', 'DataLoader', 'get_worker_info', + 'Sampler', + 'SequenceSampler', + 'RandomSampler', 'load', 'save', 'load_program_state', @@ -38,7 +41,8 @@ ] from ..fluid.io import DataLoader -from ..fluid.dataloader import Dataset, IterableDataset, BatchSampler, get_worker_info +from ..fluid.dataloader import Dataset, IterableDataset, BatchSampler, get_worker_info, \ + Sampler, SequenceSampler, RandomSampler 
from ..fluid.io import load, save, load_program_state, set_program_state, \ load_inference_model, save_inference_model, batch from ..reader import shuffle, buffered, cache, chain, firstn, compose, map_readers, xmap_readers From b7a86e92a843ff53d7be9bad0737a7169d77977d Mon Sep 17 00:00:00 2001 From: Zhaolong Xing Date: Wed, 19 Aug 2020 13:04:18 +0800 Subject: [PATCH 18/37] fix dy shape bug in trt7.1 (#26273) test=develop --- paddle/fluid/inference/tensorrt/engine.h | 7 ++++++- .../tensorrt/plugin/emb_eltwise_layernorm_plugin.cu | 12 +++++++++++- .../tensorrt/plugin/emb_eltwise_layernorm_plugin.h | 7 ++++++- .../inference/tensorrt/plugin/prelu_op_plugin.cu | 6 ++++++ .../inference/tensorrt/plugin/prelu_op_plugin.h | 5 ++++- .../tensorrt/plugin/skip_layernorm_op_plugin.h | 5 ++++- 6 files changed, 37 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h index fdd71b0d88400..1a3413657ce6f 100644 --- a/paddle/fluid/inference/tensorrt/engine.h +++ b/paddle/fluid/inference/tensorrt/engine.h @@ -83,7 +83,12 @@ nvinfer1::Dims Vec2TRT_Dims(const std::vector& shape, std::string input, } else if (shape.size() == 3UL) { return nvinfer1::Dims3(shape[0], shape[1], shape[2]); } - return nvinfer1::Dims4(shape[0], shape[1], 1, 1); + nvinfer1::Dims dims; + dims.nbDims = shape.size(); + for (size_t i = 0; i < shape.size(); i++) { + dims.d[i] = shape[i]; + } + return dims; } } } // NOLINT diff --git a/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.cu index e7f9381e97137..5e43be90de3db 100644 --- a/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.cu @@ -76,6 +76,16 @@ nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions( return ret; } +template +void EmbEltwiseLayernormPluginDynamic::terminate() { + for (auto ptr : embs_gpu_) { + if (ptr) cudaFree(ptr); + } + + if (bias_gpu_) cudaFree(bias_gpu_); + if (scale_gpu_) cudaFree(scale_gpu_); +} + template bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, @@ -153,7 +163,7 @@ int EmbEltwiseLayernormPluginDynamic::enqueue( int64_t *emb_ptr_gpu_d = emb_ptr_tensor.mutable_data(platform::CUDAPlace(device_id)); - std::vector in_ptr, emb_ptr; + std::vector in_ptr, emb_ptr; for (int i = 0; i < input_num; i++) { in_ptr.push_back(reinterpret_cast(inputs[i])); emb_ptr.push_back(reinterpret_cast(embs_gpu_[i])); diff --git a/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h b/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h index 8ac611cd7c62f..5babd87db0602 100644 --- a/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h +++ b/paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h @@ -81,9 +81,13 @@ class EmbEltwiseLayernormPluginDynamic : public DynamicPluginTensorRT { } nvinfer1::IPluginV2DynamicExt* clone() const override { - return new EmbEltwiseLayernormPluginDynamic( + auto ptr = new EmbEltwiseLayernormPluginDynamic( embs_, bias_, scale_, emb_sizes_, bias_size_, scale_size_, hidden_size_, eps_); + ptr->embs_gpu_ = embs_gpu_; + ptr->bias_gpu_ = bias_gpu_; + ptr->scale_gpu_ = scale_gpu_; + return ptr; } const char* getPluginType() const override { @@ -111,6 +115,7 @@ class EmbEltwiseLayernormPluginDynamic : public DynamicPluginTensorRT { return 
sum_num; } + void terminate() override; void serialize(void* buffer) const override { // SerializeValue(&buffer, with_fp16_); SerializeValue(&buffer, emb_sizes_); diff --git a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu index f1e11b6fba1f1..860f1039d5e10 100644 --- a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu +++ b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu @@ -80,6 +80,12 @@ int PReluPlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_GE(6000) +void PReluPluginDynamic::terminate() { + if (p_gpu_weight_) { + cudaFree(p_gpu_weight_); + } +} + int PReluPluginDynamic::initialize() { cudaMalloc(&p_gpu_weight_, sizeof(float) * weight_.size()); cudaMemcpy(p_gpu_weight_, weight_.data(), weight_.size() * sizeof(float), diff --git a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h index 4756ca2e02257..3126366c5fdd8 100644 --- a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h +++ b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h @@ -102,12 +102,15 @@ class PReluPluginDynamic : public DynamicPluginTensorRT { } ~PReluPluginDynamic() { cudaFree(p_gpu_weight_); } nvinfer1::IPluginV2DynamicExt* clone() const override { - return new PReluPluginDynamic(weight_.data(), weight_.size(), mode_); + auto ptr = new PReluPluginDynamic(weight_.data(), weight_.size(), mode_); + ptr->p_gpu_weight_ = p_gpu_weight_; + return ptr; } const char* getPluginType() const override { return "prelu_plugin"; } int getNbOutputs() const override { return 1; } int initialize() override; + void terminate() override; size_t getSerializationSize() const override; void serialize(void* buffer) const override; diff --git a/paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h index 8fe1edc4bf032..24cd8e0368182 100644 --- a/paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h +++ b/paddle/fluid/inference/tensorrt/plugin/skip_layernorm_op_plugin.h @@ -51,8 +51,11 @@ class SkipLayerNormPluginDynamic : public DynamicPluginTensorRT { } nvinfer1::IPluginV2DynamicExt* clone() const override { - return new SkipLayerNormPluginDynamic( + auto ptr = new SkipLayerNormPluginDynamic( bias_.data(), scale_.data(), bias_size_, scale_size_, eps_, ban_fp16_); + ptr->bias_gpu_ = bias_gpu_; + ptr->scale_gpu_ = bias_gpu_; + return ptr; } const char* getPluginType() const override { return "skip_layernorm_plugin"; } From db272ca5ef85c0230b100510ce4b1b1fde57ae4c Mon Sep 17 00:00:00 2001 From: liym27 <33742067+liym27@users.noreply.github.com> Date: Wed, 19 Aug 2020 13:09:09 +0800 Subject: [PATCH 19/37] rename file to according to reviews. (#26236) --- python/paddle/fluid/dygraph/dygraph_to_static/utils.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/utils.py b/python/paddle/fluid/dygraph/dygraph_to_static/utils.py index 58cad1cfa4272..6636bf7c4b405 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/utils.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/utils.py @@ -369,13 +369,14 @@ def ast_to_func(ast_root, dyfunc, delete_on_exit=True): function, the other inner functions are invisible for the decorated function. 
""" - def remove_file(filepath): + def remove_if_exit(filepath): if os.path.exists(filepath): os.remove(filepath) source = ast_to_source_code(ast_root) import_fluid = "import paddle.fluid as fluid\n" source = import_fluid + source + if six.PY2: source = source.encode('utf-8') f = tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) @@ -387,8 +388,8 @@ def remove_file(filepath): f.write(source) if delete_on_exit: - atexit.register(lambda: remove_file(f.name)) - atexit.register(lambda: remove_file(f.name[:-3] + ".pyc")) + atexit.register(lambda: remove_if_exit(f.name)) + atexit.register(lambda: remove_if_exit(f.name[:-3] + ".pyc")) module = imp.load_source(module_name, f.name) func_name = dyfunc.__name__ From 1b03ab3899a3a0f9f2675a838fb99a20a302c14d Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Wed, 19 Aug 2020 14:11:38 +0800 Subject: [PATCH 20/37] set opencv-python <=4.2.0.32 (#26415) --- paddle/scripts/paddle_build.sh | 3 ++- python/requirements.txt | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 4bd93da72ef68..921455d52eccc 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -580,7 +580,8 @@ function generate_api_spec() { cd ${PADDLE_ROOT}/build/.check_api_workspace virtualenv .${spec_kind}_env source .${spec_kind}_env/bin/activate - pip install ${PADDLE_ROOT}/build/python/dist/*whl + pip install -r ${PADDLE_ROOT}/python/requirements.txt + pip --no-cache-dir install ${PADDLE_ROOT}/build/python/dist/*whl spec_path=${PADDLE_ROOT}/paddle/fluid/API_${spec_kind}.spec python ${PADDLE_ROOT}/tools/print_signatures.py paddle > $spec_path diff --git a/python/requirements.txt b/python/requirements.txt index 76e6b5dedbeaf..28c84c1d630a6 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,3 +1,4 @@ +opencv-python<=4.2.0.32 requests>=2.20.0 numpy>=1.13, <=1.16.4 ; python_version<"3.5" numpy>=1.13 ; python_version>="3.5" From 029390b1d2503b5b8656cb3ca5c82324ce64e656 Mon Sep 17 00:00:00 2001 From: tianshuo78520a <707759223@qq.com> Date: Wed, 19 Aug 2020 14:37:43 +0800 Subject: [PATCH 21/37] fix ci bug (#26276) --- paddle/scripts/paddle_build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 921455d52eccc..96c3fbba42efe 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -570,6 +570,7 @@ function generate_upstream_develop_api_spec() { } function generate_api_spec() { + set -e spec_kind=$2 if [ "$spec_kind" != "PR" ] && [ "$spec_kind" != "DEV" ]; then echo "Not supported $2" From 6cd67a816071ee0f29bf64af648535b3f88ae61d Mon Sep 17 00:00:00 2001 From: liym27 <33742067+liym27@users.noreply.github.com> Date: Wed, 19 Aug 2020 17:23:03 +0800 Subject: [PATCH 22/37] [API 2.0] Fix api sum:(1)input->x;(2)dim->axis;(3)keep_dim->keepdim (#26337) * 1.Fix api sum:(1) input->sum; (2)dim->axis; (3)keep_dim->keepdim. * 2. fix bug when len(axis) == len(x.shape). 
--- .../fluid/tests/unittests/test_reduce_op.py | 41 +++++-- python/paddle/incubate/complex/tensor/math.py | 4 +- python/paddle/tensor/math.py | 102 ++++++++++-------- 3 files changed, 89 insertions(+), 58 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py index 0531da2b06ec3..6a6ce8d329cb9 100644 --- a/python/paddle/fluid/tests/unittests/test_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py @@ -580,10 +580,10 @@ def test_dtype4(): class API_TestSumOp(unittest.TestCase): - def test_1(self): + def test_static(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.data("data", shape=[10, 10], dtype="float32") - result_sum = paddle.sum(input=data, dim=1, dtype="float64") + result_sum = paddle.sum(x=data, axis=1, dtype="float64") place = fluid.CPUPlace() exe = fluid.Executor(place) input_data = np.random.rand(10, 10).astype(np.float32) @@ -593,7 +593,7 @@ def test_1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.data("data", shape=[10, 10], dtype="int32") - result_sum = paddle.sum(input=data, dim=1, dtype="int64") + result_sum = paddle.sum(x=data, axis=1, dtype="int64") place = fluid.CPUPlace() exe = fluid.Executor(place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int32) @@ -603,7 +603,7 @@ def test_1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.data("data", shape=[10, 10], dtype="int32") - result_sum = paddle.sum(input=data, dim=1) + result_sum = paddle.sum(x=data, axis=1) place = fluid.CPUPlace() exe = fluid.Executor(place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int32) @@ -612,20 +612,41 @@ def test_1(self): with fluid.program_guard(fluid.Program(), fluid.Program()): data = fluid.data("data", shape=[10, 10], dtype="int32") - result_sum = paddle.sum(input=data, dim=1) + result_sum = paddle.sum(x=data, axis=1) place = fluid.CPUPlace() exe = fluid.Executor(place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int32) res, = exe.run(feed={"data": input_data}, fetch_list=[result_sum]) self.assertEqual((res == np.sum(input_data, axis=1)).all(), True) + with fluid.program_guard(fluid.Program(), fluid.Program()): + input_data = np.random.randint(10, size=(5, 5, 5)).astype(np.int32) + data = fluid.data("data", shape=[5, 5, 5], dtype="int32") + sum1 = paddle.sum(x=data, axis=[0, 1]) + sum2 = paddle.sum(x=data, axis=()) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + res1, res2 = exe.run(feed={"data": input_data}, + fetch_list=[sum1, sum2]) + + self.assertEqual((res1 == np.sum(input_data, axis=(0, 1))).all(), True) + self.assertEqual( + (res2 == np.sum(input_data, axis=(0, 1, 2))).all(), True) + + def test_dygraph(self): + np_x = np.random.random([2, 3, 4]).astype('int32') with fluid.dygraph.guard(): - np_x = np.array([10, 10]).astype('float64') x = fluid.dygraph.to_variable(np_x) - z = paddle.sum(x, dim=0) - np_z = z.numpy() - z_expected = np.array(np.sum(np_x, axis=0)) - self.assertEqual((np_z == z_expected).all(), True) + out0 = paddle.sum(x).numpy() + out1 = paddle.sum(x, axis=0).numpy() + out2 = paddle.sum(x, axis=(0, 1)).numpy() + out3 = paddle.sum(x, axis=(0, 1, 2)).numpy() + + self.assertTrue((out0 == np.sum(np_x, axis=(0, 1, 2))).all()) + self.assertTrue((out1 == np.sum(np_x, axis=0)).all()) + self.assertTrue((out2 == np.sum(np_x, axis=(0, 1))).all()) + self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all()) class 
API_TestReduceMeanOp(unittest.TestCase): diff --git a/python/paddle/incubate/complex/tensor/math.py b/python/paddle/incubate/complex/tensor/math.py index 231cbd9182812..465e4887a1f8a 100644 --- a/python/paddle/incubate/complex/tensor/math.py +++ b/python/paddle/incubate/complex/tensor/math.py @@ -330,8 +330,8 @@ def sum(input, dim=None, keep_dim=False, name=None): """ complex_variable_exists([input], "sum") - real = math.sum(input.real, dim=dim, keep_dim=keep_dim, name=name) - imag = math.sum(input.imag, dim=dim, keep_dim=keep_dim, name=name) + real = math.sum(input.real, axis=dim, keepdim=keep_dim, name=name) + imag = math.sum(input.imag, axis=dim, keepdim=keep_dim, name=name) return ComplexVariable(real, imag) diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 14084b8295bbc..50f6757452648 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -532,75 +532,84 @@ def minimum(x, y, axis=-1, name=None): }) + """\n""" + str(func.__doc__) -def sum(input, dim=None, dtype=None, keep_dim=False, name=None): +def sum(x, axis=None, dtype=None, keepdim=False, name=None): """ - :alias_main: paddle.sum - :alias: paddle.sum,paddle.tensor.sum,paddle.tensor.math.sum - Computes the sum of tensor elements over the given dimension. Args: - input (Variable): The input variable which is a Tensor, the data type is float32, - float64, int32, int64. - dim (list|int, optional): The dimensions along which the sum is performed. If - :attr:`None`, sum all elements of :attr:`input` and return a + x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64. + axis (int|list|tuple, optional): The dimensions along which the sum is performed. If + :attr:`None`, sum all elements of :attr:`x` and return a Tensor variable with a single element, otherwise must be in the - range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`, - the dimension to reduce is :math:`rank + dim[i]`. - dtype(str, optional): The dtype of output tensor. The default value is None, the dtype - of output is the same as input tensor. - keep_dim (bool, optional): Whether to reserve the reduced dimension in the - output Tensor. The result tensor will have one fewer dimension - than the :attr:`input` unless :attr:`keep_dim` is true, default + range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`, + the dimension to reduce is :math:`rank + axis[i]`. + dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype + of output is the same as input Tensor `x`. + keepdim (bool, optional): Whether to reserve the reduced dimension in the + output Tensor. The result Tensor will have one fewer dimension + than the :attr:`x` unless :attr:`keepdim` is true, default value is False. - name(str, optional): The default value is None. Normally there is no need for + name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` Returns: - Variable: Tensor, results of summation operation on the specified dim of input tensor, - it's data type is the same as input's Tensor. + Tensor: Results of summation operation on the specified axis of input Tensor `x`, + it's data type is the same as `x`. Raises: - ValueError, the :attr:`dtype` must be float64 or int64. + ValueError: The :attr:`dtype` must be float64 or int64. + TypeError: The type of :attr:`axis` must be int, list or tuple. Examples: .. 
code-block:: python + import numpy as np import paddle - import paddle.fluid as fluid + paddle.disable_static() + # x is a Tensor variable with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] # Each example is followed by the corresponding output tensor. - x = fluid.data(name='x', shape=[2, 4], dtype='float32') + x_data = np.array([[0.2, 0.3, 0.5, 0.9],[0.1, 0.2, 0.6, 0.7]]).astype('float32') + x = paddle.to_variable(x_data) out1 = paddle.sum(x) # [3.5] - out2 = paddle.sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6] - out3 = paddle.sum(x, dim=-1) # [1.9, 1.6] - out4 = paddle.sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]] + out2 = paddle.sum(x, axis=0) # [0.3, 0.5, 1.1, 1.6] + out3 = paddle.sum(x, axis=-1) # [1.9, 1.6] + out4 = paddle.sum(x, axis=1, keepdim=True) # [[1.9], [1.6]] # y is a Tensor variable with shape [2, 2, 2] and elements as below: # [[[1, 2], [3, 4]], # [[5, 6], [7, 8]]] # Each example is followed by the corresponding output tensor. - y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32') - out5 = paddle.sum(y, dim=[1, 2]) # [10, 26] - out6 = paddle.sum(y, dim=[0, 1]) # [16, 20] - + y_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype('float32') + y = paddle.to_variable(y_data) + out5 = paddle.sum(y, axis=[1, 2]) # [10, 26] + out6 = paddle.sum(y, axis=[0, 1]) # [16, 20] """ - if dim is not None and not isinstance(dim, list): - dim = [dim] + if axis is not None and not isinstance(axis, (list, tuple)): + axis = [axis] + + if not axis: + reduce_all_flag = True + else: + if len(axis) == len(x.shape): + reduce_all_flag = True + else: + reduce_all_flag = False + attrs = { - 'dim': dim if dim != None and dim != [] else [0], - 'keep_dim': keep_dim, - 'reduce_all': True if dim == None or dim == [] else False, + 'dim': axis if axis != None and axis != [] and axis != () else [0], + 'keep_dim': keepdim, + 'reduce_all': reduce_all_flag } dtype_flag = False if dtype is not None: if dtype in ['float64', 'int64']: - if (convert_dtype(input.dtype) == "float32" and dtype == "float64") or \ - (convert_dtype(input.dtype) == "int32" and dtype == "int64"): + if (convert_dtype(x.dtype) == "float32" and dtype == "float64") or \ + (convert_dtype(x.dtype) == "int32" and dtype == "int64"): attrs.update({ - 'in_dtype': input.dtype, + 'in_dtype': x.dtype, 'out_dtype': convert_np_dtype_to_dtype_(dtype) }) dtype_flag = True @@ -610,27 +619,28 @@ def sum(input, dim=None, dtype=None, keep_dim=False, name=None): format(dtype)) if in_dygraph_mode(): - reduce_all = True if dim == None or dim == [] else False - dim = dim if dim != None and dim != [] else [0] + axis = axis if axis != None and axis != [] else [0] if dtype_flag: - return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim, - 'reduce_all', reduce_all, 'in_dtype', - input.dtype, 'out_dtype', + return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim, + 'reduce_all', reduce_all_flag, 'in_dtype', + x.dtype, 'out_dtype', convert_np_dtype_to_dtype_(dtype)) else: - return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim, - 'reduce_all', reduce_all) + return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim, + 'reduce_all', reduce_all_flag) check_variable_and_dtype( - input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum') + x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sum') + check_type(axis, 'axis', (int, list, tuple, type(None)), 'sum') + helper = LayerHelper('sum', **locals()) if dtype_flag: out = helper.create_variable_for_type_inference( 
dtype=convert_np_dtype_to_dtype_(dtype)) else: - out = helper.create_variable_for_type_inference(dtype=input.dtype) + out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type='reduce_sum', - inputs={'X': input}, + inputs={'X': x}, outputs={'Out': out}, attrs=attrs) return out From 6cbeafb6c06da95f45c73d684423a127de95908a Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Wed, 19 Aug 2020 18:26:43 +0800 Subject: [PATCH 23/37] add zero norm, inf norm support for p_norm op (#26364) * add zero norm, inf norm support for p_norm op * fix the invalid argument check, fix the dtype problem in test case. --- paddle/fluid/operators/p_norm_op.cc | 79 +++++++----- paddle/fluid/operators/p_norm_op.cu | 118 ++++++++++++++++-- paddle/fluid/operators/p_norm_op.h | 34 +++-- .../fluid/tests/unittests/test_norm_all.py | 79 +++++++++++- 4 files changed, 257 insertions(+), 53 deletions(-) diff --git a/paddle/fluid/operators/p_norm_op.cc b/paddle/fluid/operators/p_norm_op.cc index 057a7a38e3f40..aa39821051eed 100644 --- a/paddle/fluid/operators/p_norm_op.cc +++ b/paddle/fluid/operators/p_norm_op.cc @@ -25,34 +25,49 @@ class PnormOpMaker : public framework::OpProtoAndCheckerMaker { void Make() override { AddInput("X", "(Tensor) A tensor of rank >= axis."); AddAttr("porder", - "The porder is the p order vector norm to calculate.") + "(float, default 2) The porder is the p order vector norm " + "to calculate. Available for porder=0, inf, -inf and any " + "real number.") .SetDefault(2.0f); AddAttr("axis", - "The axis on which to apply normalization. If axis < 0, " + "The axis on which to apply norm operation. If axis < 0, " "the dimension to pnorm is rank(X) + axis. -1 is " "the last dimension.") .SetDefault(-1); AddAttr("epsilon", - "(float, default 1e-10) The epsilon value is used " + "(float, default 1e-12) The epsilon value is used " "to avoid division by zero.") .SetDefault(1.0e-12f); AddAttr( "keepdim", - "(bool, default false) Whether to keep the dimensions as the input") + "(bool, default false) Whether to keep the dimensions as the input.") .SetDefault(false); - AddOutput( - "Out", - "(Tensor) Output tensor for the `(sum(x.pow(p)) + epsion).pow(1/p)`"); + AddOutput("Out", "(Tensor) Output result tensor of p-norm"); AddComment(R"DOC( +Pnorm Operator. +Given a tensor X, compute Lp-norm of X. -Given a tensor, apply 2-normalization along the provided axis. +When p = 0, defining $0^0 = 0$, the zero-norm of X is simply the number of non-zero elements of X. +$$ +||X||_{0} = \lim_{p \rightarrow 0} \sum_i |x_i|^p +$$ + +When p = inf, the inf-norm of X is the maximum element of X. +$$ +||X||_\infty = \max_i |x_i| +$$ + +When p = -inf, the negative-inf-norm of X is the minimum element of X. +$$ +||X||_{-\infty} = \min_i |x_i| +$$ +Otherwise, the p-norm of X follows the formula, $$ -pnorm = \(\sum_i {abs\(x_i\)^p} \)^{1/p} +||X||_{p} = (\sum_i |x_i|^p)^{1/p} $$ +where, $\sum_i $ is calculated along the `axis` dimension. -where, $\sum_i{x_i^p}$ is calculated along the `axis` dimension. 
- )DOC"); } }; @@ -63,31 +78,33 @@ class PnormOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override { OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "p_norm"); OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "p_norm"); - auto porder = ctx->Attrs().Get("porder"); - PADDLE_ENFORCE_NE(porder, INFINITY, - platform::errors::Unimplemented( - "The input porder of p_norm is not support for " - "porder == 0, INFINITY, -INFINITY now.")); - PADDLE_ENFORCE_NE(porder, -INFINITY, - platform::errors::Unimplemented( - "The input porder of p_norm is not support for " - "porder == 0, INFINITY, -INFINITY now.")); - PADDLE_ENFORCE_GT(porder, 0.0f, - platform::errors::InvalidArgument( - "The input porder of p_norm is not support for " - "porder <= 0, But received porder=%f.", - porder)); - auto xdim = ctx->GetInputDim("X"); + auto x_dim = ctx->GetInputDim("X"); + auto x_rank = x_dim.size(); int axis = ctx->Attrs().Get("axis"); bool keepdim = ctx->Attrs().Get("keepdim"); - if (axis < 0) axis = xdim.size() + axis; + + PADDLE_ENFORCE_GE(axis, -x_rank, + platform::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], R is " + "the rank of Input(X). But received axis: %d, R: %d. " + "Current Input(X)'s shape is=[%s].", + axis, x_rank, x_dim)); + PADDLE_ENFORCE_LT(axis, x_rank, + platform::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], R is " + "the rank of Input(X). But received axis: %d, R: %d. " + "Current Input(X)'s shape is=[%s].", + axis, x_rank, x_dim)); + + if (axis < 0) axis = x_dim.size() + axis; std::vector reduce_dims; - for (int i = 0; i < xdim.size(); ++i) { - if (i != axis) reduce_dims.emplace_back(xdim[i]); + for (int i = 0; i < x_dim.size(); ++i) { + if (i != axis) reduce_dims.emplace_back(x_dim[i]); } - xdim[axis] = 1; + x_dim[axis] = 1; + if (keepdim) { - ctx->SetOutputDim("Out", xdim); + ctx->SetOutputDim("Out", x_dim); } else { ctx->SetOutputDim("Out", framework::make_ddim(reduce_dims)); } diff --git a/paddle/fluid/operators/p_norm_op.cu b/paddle/fluid/operators/p_norm_op.cu index d9ac98ff880bc..63f2a1c56c125 100644 --- a/paddle/fluid/operators/p_norm_op.cu +++ b/paddle/fluid/operators/p_norm_op.cu @@ -49,20 +49,70 @@ __global__ void Pnorm(const T* x, const int pre, for (int i = blockIdx.x; i < num; i += gridDim.x) { int base = (i / post) * post * axis_n + (i % post); - T sum = 0.0; - __shared__ T norm; for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { const T x_ij = x[base + j * post]; sum += inline_pow(inline_abs(x_ij), porder_t); } T reduce_result = BlockReduce(temp_storage).Sum(sum); + if (threadIdx.x == 0) out_norm[i] = inline_pow(reduce_result, porder_inv); + } +} - if (threadIdx.x == 0) { - norm = inline_pow(reduce_result, porder_inv); - out_norm[i] = norm; +template +__global__ void ZeorNorm(const T* x, const int pre, + const int axis_n, // dim in axis + const int post, T* out_norm) { + typedef cub::BlockReduce BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + int num = pre * post; + for (int i = blockIdx.x; i < num; i += gridDim.x) { + int base = (i / post) * post * axis_n + (i % post); + T sum = 0.0; + for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { + const T x_ij = x[base + j * post]; + sum += static_cast(x_ij != 0); } - __syncthreads(); + T reduce_result = BlockReduce(temp_storage).Sum(sum); + if (threadIdx.x == 0) out_norm[i] = reduce_result; + } +} + +template +__global__ void InfNorm(const T* x, const int pre, + const int 
axis_n, // dim in axis + const int post, T* out_norm) { + typedef cub::BlockReduce BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + int num = pre * post; + for (int i = blockIdx.x; i < num; i += gridDim.x) { + int base = (i / post) * post * axis_n + (i % post); + T cur_max = inline_abs(x[base]); + for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { + T x_ij_abs = inline_abs(x[base + j * post]); + if (cur_max < x_ij_abs) cur_max = x_ij_abs; + } + T reduce_result = BlockReduce(temp_storage).Reduce(cur_max, cub::Max()); + if (threadIdx.x == 0) out_norm[i] = reduce_result; + } +} + +template +__global__ void NegInfNorm(const T* x, const int pre, + const int axis_n, // dim in axis + const int post, T* out_norm) { + typedef cub::BlockReduce BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + int num = pre * post; + for (int i = blockIdx.x; i < num; i += gridDim.x) { + int base = (i / post) * post * axis_n + (i % post); + T cur_min = inline_abs(x[base]); + for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { + T x_ij_abs = inline_abs(x[base + j * post]); + if (cur_min > x_ij_abs) cur_min = x_ij_abs; + } + T reduce_result = BlockReduce(temp_storage).Reduce(cur_min, cub::Min()); + if (threadIdx.x == 0) out_norm[i] = reduce_result; } } @@ -89,8 +139,19 @@ class PnormCUDAKernel : public framework::OpKernel { int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid = std::min(max_blocks, pre * post); - Pnorm<<>>(x, pre, n, post, - porder, norm); + if (porder == 0) { + ZeorNorm<<>>(x, pre, n, post, + norm); + } else if (porder == INFINITY) { + InfNorm<<>>(x, pre, n, post, + norm); + } else if (porder == -INFINITY) { + NegInfNorm<<>>(x, pre, n, + post, norm); + } else { + Pnorm<<>>(x, pre, n, post, + porder, norm); + } } }; @@ -112,7 +173,6 @@ __global__ void PnormGradient(const T* x, const T* x_norm, const T* y_grad, pnorm_i = x_norm[i]; yout_i = y_grad[i]; } - __syncthreads(); for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { @@ -125,6 +185,33 @@ __global__ void PnormGradient(const T* x, const T* x_norm, const T* y_grad, } } +template +__global__ void InfNormGradient(const T* x, const T* x_norm, const T* y_grad, + const int pre, const int axis_n, const int post, + T* x_grad) { + int num = pre * post; + for (int i = blockIdx.x; i < num; i += gridDim.x) { + __shared__ T pnorm_i; + __shared__ T yout_i; + auto base = (i / post) * post * axis_n + (i % post); + if (threadIdx.x == 0) { + pnorm_i = x_norm[i]; + yout_i = y_grad[i]; + } + __syncthreads(); + + for (int j = threadIdx.x; j < axis_n; j += blockDim.x) { + int index = base + j * post; + const T x_ij = inline_abs(x[index]); + if (x_ij == pnorm_i) { + x_grad[index] = inline_sign(x[index]) * yout_i; + } else { + x_grad[index] = static_cast(0); + } + } + } +} + template class PnormGradCUDAKernel : public framework::OpKernel { public: @@ -153,8 +240,17 @@ class PnormGradCUDAKernel : public framework::OpKernel { int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid = std::min(max_blocks, pre * post); - PnormGradient<<>>( - x, x_norm, norm_dy, porder, pre, n, post, eps, dx); + if (porder == 0) { + math::SetConstant set_zero; + auto& dev_ctx = ctx.template device_context(); + set_zero(dev_ctx, out_dx, static_cast(0)); + } else if (porder == INFINITY || porder == -INFINITY) { + InfNormGradient<<>>( + x, x_norm, norm_dy, pre, n, post, dx); + } else { + 
PnormGradient<<>>( + x, x_norm, norm_dy, porder, pre, n, post, eps, dx); + } } }; diff --git a/paddle/fluid/operators/p_norm_op.h b/paddle/fluid/operators/p_norm_op.h index c5bdfe352723b..7620d1421e897 100644 --- a/paddle/fluid/operators/p_norm_op.h +++ b/paddle/fluid/operators/p_norm_op.h @@ -58,10 +58,20 @@ class PnormKernel : public framework::OpKernel { auto x = x_e.reshape(shape); auto norm = norm_e.reshape(norm_shape); + // p=0 means number of non-zero elements of (x) + // p=inf means the maximum of |x| + // p=-inf means the minimum of |x| + // otherwise, Lp-norm = pow(sum(pow(|x|, p)), 1/p) Eigen::DSizes rdim(1); - auto xp = (x.abs()).pow(porder); - auto sum = xp.sum(rdim); - norm.device(*place) = sum.pow(1.0f / porder); + if (porder == 0) { + norm.device(*place) = (x != x.constant(0)).template cast().sum(rdim); + } else if (porder == INFINITY) { + norm.device(*place) = x.abs().maximum(rdim); + } else if (porder == -INFINITY) { + norm.device(*place) = x.abs().minimum(rdim); + } else { + norm.device(*place) = x.abs().pow(porder).sum(rdim).pow(1.0f / porder); + } } }; @@ -102,10 +112,20 @@ class PnormGradKernel : public framework::OpKernel { Eigen::DSizes rdim(1); Eigen::DSizes bcast(1, n, 1); - dx.device(*place) = (x.abs()).pow(porder - 1.0f); - dx.device(*place) = - dx / ((norm.broadcast(bcast)).pow(porder - 1.0f) + x.constant(eps)); - dx.device(*place) = dx * norm_dy.broadcast(bcast) * x.sign(); + if (porder == 0) { + math::SetConstant set_zero; + auto& dev_ctx = ctx.template device_context(); + set_zero(dev_ctx, out_dx, static_cast(0)); + } else if (porder == INFINITY || porder == -INFINITY) { + dx.device(*place) = + (x.abs() == norm.broadcast(bcast)).template cast() * x.sign() * + norm_dy.broadcast(bcast); + } else { + dx.device(*place) = + (x.abs()).pow(porder - 1.0f) / + ((norm.broadcast(bcast)).pow(porder - 1.0f) + x.constant(eps)); + dx.device(*place) = dx * norm_dy.broadcast(bcast) * x.sign(); + } } }; } // namespace operators diff --git a/python/paddle/fluid/tests/unittests/test_norm_all.py b/python/paddle/fluid/tests/unittests/test_norm_all.py index e6b7a3e7603f5..0d083038c6131 100644 --- a/python/paddle/fluid/tests/unittests/test_norm_all.py +++ b/python/paddle/fluid/tests/unittests/test_norm_all.py @@ -23,16 +23,16 @@ def p_norm(x, axis, porder, keepdims=False): if axis is None: axis = -1 - xp = np.power(np.abs(x), porder) - s = np.sum(xp, axis=axis, keepdims=keepdims) - r = np.power(s, 1.0 / porder) + r = np.linalg.norm( + x, ord=porder, axis=axis, keepdims=keepdims).astype(x.dtype) return r def frobenius_norm(x, axis=None, keepdims=False): if isinstance(axis, list): axis = tuple(axis) if axis is None: axis = (-2, -1) - r = np.linalg.norm(x, ord='fro', axis=axis, keepdims=keepdims) + r = np.linalg.norm( + x, ord='fro', axis=axis, keepdims=keepdims).astype(x.dtype) return r @@ -89,6 +89,7 @@ def setUp(self): 'porder': float(self.porder) } self.outputs = {'Out': norm} + self.gradient = self.calc_gradient() def test_check_output(self): self.check_output() @@ -104,6 +105,34 @@ def init_test_case(self): self.keepdim = False self.dtype = "float64" + def calc_gradient(self): + self.attrs = { + 'epsilon': self.epsilon, + 'axis': self.axis, + 'keepdim': self.keepdim, + 'porder': float(self.porder) + } + x = self.inputs["X"] + porder = self.attrs["porder"] + axis = self.attrs["axis"] + if porder == 0: + grad = np.zeros(x.shape).astype(x.dtype) + elif porder in [float("inf"), float("-inf")]: + norm = p_norm(x, axis=axis, porder=porder, keepdims=True) + x_abs = np.abs(x) + grad = 
np.sign(x) + grad[x_abs != norm] = 0.0 + else: + norm = p_norm(x, axis=axis, porder=porder, keepdims=True) + grad = np.power(norm, 1 - porder) * np.power( + np.abs(x), porder - 1) * np.sign(x) + + numel = 1 + for s in x.shape: + numel *= s + numel /= x.shape[axis] + return [grad.astype(x.dtype) * 1 / numel] + class TestPnormOp2(TestPnormOp): def init_test_case(self): @@ -118,6 +147,45 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') +class TestPnormOp3(TestPnormOp): + def init_test_case(self): + self.shape = [3, 20, 3] + self.axis = 2 + self.epsilon = 1e-12 + self.porder = np.inf + self.keepdim = True + self.dtype = "float32" + + def test_check_grad(self): + self.check_grad(['X'], 'Out', user_defined_grads=self.gradient) + + +class TestPnormOp4(TestPnormOp): + def init_test_case(self): + self.shape = [3, 20, 3] + self.axis = 2 + self.epsilon = 1e-12 + self.porder = -np.inf + self.keepdim = True + self.dtype = "float32" + + def test_check_grad(self): + self.check_grad(['X'], 'Out', user_defined_grads=self.gradient) + + +class TestPnormOp5(TestPnormOp): + def init_test_case(self): + self.shape = [3, 20, 3] + self.axis = 2 + self.epsilon = 1e-12 + self.porder = 0 + self.keepdim = True + self.dtype = "float32" + + def test_check_grad(self): + self.check_grad(['X'], 'Out', user_defined_grads=self.gradient) + + def run_out(self, p, axis, shape_x, shape_y, dtype): with fluid.program_guard(fluid.Program()): data1 = fluid.data(name="X", shape=shape_x, dtype=dtype) @@ -170,6 +238,9 @@ def test_basic(self): run_fro(self, p='fro', axis=[0, 1], shape_x=[3, 3, 4], dtype="float64") run_pnorm(self, p=2, axis=None, shape_x=[3, 4], dtype="float32") run_pnorm(self, p=2, axis=1, shape_x=[3, 4], dtype="float64") + run_pnorm(self, p=np.inf, axis=1, shape_x=[3, 4], dtype="float32") + run_pnorm(self, p=-np.inf, axis=1, shape_x=[3, 4], dtype="float64") + run_pnorm(self, p=0, axis=1, shape_x=[3, 4], dtype="float64") def test_name(self): with fluid.program_guard(fluid.Program()): From eeeef957c7607f31f14df274cc6478ec9f964628 Mon Sep 17 00:00:00 2001 From: Chengmo Date: Wed, 19 Aug 2020 19:28:42 +0800 Subject: [PATCH 24/37] Fix ps gpu (#26218) * support ps-gpu --- .../distributed/parameter_prefetch.cc | 71 +++++--- .../distributed_lookup_table_op.cc | 51 +----- .../distributed_lookup_table_op.cu.cc | 22 +++ .../distributed_lookup_table_op.h | 45 ++++++ .../operators/distributed_ops/recv_save_op.cc | 2 +- paddle/fluid/operators/strided_memcpy.h | 3 - .../distribute_transpiler/__init__.py | 14 +- .../tests/unittests/dist_fleet_ctr_ps_gpu.py | 152 ++++++++++++++++++ .../tests/unittests/test_dist_fleet_base.py | 17 ++ .../tests/unittests/test_dist_fleet_ctr.py | 35 ++++ 10 files changed, 335 insertions(+), 77 deletions(-) create mode 100644 paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cu.cc create mode 100644 paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.h create mode 100644 python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py diff --git a/paddle/fluid/operators/distributed/parameter_prefetch.cc b/paddle/fluid/operators/distributed/parameter_prefetch.cc index 5a67b358ddabb..a9378d61c3ca3 100644 --- a/paddle/fluid/operators/distributed/parameter_prefetch.cc +++ b/paddle/fluid/operators/distributed/parameter_prefetch.cc @@ -110,7 +110,7 @@ void prefetch_core( int pservers = context.Attr("pserver_num"); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto &actual_ctx = *pool.Get(context.GetPlace()); + auto &actual_ctx = 
*pool.Get(platform::CPUPlace()); std::unique_ptr local_scope = scope.NewTmpScope(); @@ -144,7 +144,6 @@ void prefetch_core( VLOG(3) << "don't send no-initialied variable: " << out_var_names[i]; } } - for (size_t i = 0; i < rets.size(); i++) { PADDLE_ENFORCE_NE(rets[i]->Wait(), 0U, platform::errors::ExecutionTimeout( "internal error in RPCClient")); @@ -167,6 +166,7 @@ void prefetch_core( for (int64_t i = 0; i < dims[0]; ++i) { auto origin_id = ids_in_this_section[i]; std::vector vecs(row_numel); + std::copy_n(out_var_data + i * row_numel, row_numel, vecs.begin()); (*recved_vec_map)[origin_id] = vecs; } @@ -213,18 +213,18 @@ void prefetchs(const std::vector &id_var_names, const auto place = scope.FindVar(id_var_names[0])->Get().place(); - if (!platform::is_cpu_place(place)) { - PADDLE_THROW("multi prefetch only support CPU currently"); - } - + std::vector> ids_group; std::vector ids_union; + std::vector ids_lods; TableAndEndpoints tables; for (auto &id_name : id_var_names) { - auto *in_var = scope.FindVar(id_name); - auto &id_tensor = in_var->Get(); - std::copy_n(id_tensor.data(), id_tensor.numel(), - back_inserter(ids_union)); + auto &id_tensor = scope.FindVar(id_name)->Get(); + std::vector ids; + TensorToVector(id_tensor, context.device_context(), &ids); + ids_union.insert(ids_union.end(), ids.begin(), ids.end()); + ids_group.push_back(ids); + ids_lods.push_back(id_tensor.lod()); } std::unordered_set s(ids_union.begin(), ids_union.end()); @@ -258,25 +258,48 @@ void prefetchs(const std::vector &id_var_names, } for (size_t i = 0; i < out_var_names.size(); i++) { - auto *in_var = scope.FindVar(id_var_names[i]); - auto &id_tensor = in_var->Get(); - auto ids_size = id_tensor.dims()[0]; - const auto *id_data = id_tensor.data(); - + std::vector ids = ids_group[i]; + auto ids_size = ids.size(); auto *out_t = scope.FindVar(out_var_names[i])->GetMutable(); - out_t->set_lod(id_tensor.lod()); - out_t->Resize(framework::make_ddim({ids_size, vec_dim_1})); + out_t->set_lod(ids_lods[i]); + out_t->Resize( + framework::make_ddim({static_cast(ids_size), vec_dim_1})); auto *out_d = out_t->mutable_data(place); - for (auto idx = 0; idx < static_cast(ids_size); idx++) { - const auto &id = id_data[idx]; - if (padding_idx != distributed::kNoPadding && id == padding_idx) { - memset(out_d + idx * vec_dim_1, 0, sizeof(float) * vec_dim_1); - } else { - std::copy_n(recved_vec_map[id].begin(), vec_dim_1, - out_d + idx * vec_dim_1); + if (platform::is_cpu_place(out_t->place())) { + for (auto idx = 0; idx < static_cast(ids_size); idx++) { + const auto &id = ids[idx]; + if (padding_idx != distributed::kNoPadding && id == padding_idx) { + memset(out_d + idx * vec_dim_1, 0, sizeof(float) * vec_dim_1); + } else { + std::copy_n(recved_vec_map[id].begin(), vec_dim_1, + out_d + idx * vec_dim_1); + } + } + } else { +#ifdef PADDLE_WITH_CUDA + for (auto idx = 0; idx < static_cast(ids_size); idx++) { + const auto &id = ids[idx]; + auto stream = context.cuda_device_context().stream(); + if (padding_idx != distributed::kNoPadding && id == padding_idx) { + platform::GpuMemsetAsync(out_d + idx * vec_dim_1, 0, + sizeof(float) * vec_dim_1, stream); + } else { + auto &cpu_place = + BOOST_GET_CONST(platform::CPUPlace, + paddle::platform::CPUDeviceContext().GetPlace()); + auto &gpu_place = + BOOST_GET_CONST(platform::CUDAPlace, out_t->place()); + memory::Copy(gpu_place, out_d + idx * vec_dim_1, cpu_place, + &recved_vec_map[id][0], sizeof(float) * vec_dim_1, + stream); + } } +#else + PADDLE_ENFORCE(true, platform::errors::PermissionDenied( + 
"Paddle is not compiled with GPU!")); +#endif } } } diff --git a/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc index 3037a63b0d7b4..8c093d1258598 100644 --- a/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc +++ b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc @@ -1,11 +1,8 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -17,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/distributed/parameter_prefetch.h" +#include "paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.h" #include "paddle/fluid/operators/math/math_function.h" namespace paddle { @@ -75,47 +73,6 @@ class DistributedLookupTableOp : public framework::OperatorWithKernel { } }; -template -class DistributedLookupTableKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &context) const override { - auto ids_vars = context.MultiInputVar("Ids"); - auto emb_vars = context.MultiOutput("Embeddings"); - - auto id_names = context.InputNames("Ids"); - auto embedding_name = context.InputNames("W").front(); - auto out_names = context.OutputNames("Outputs"); - auto lookup_tables = context.Attr>("table_names"); - auto endpoints = context.Attr>("endpoints"); - auto is_distributed = context.Attr("is_distributed"); - - auto lookup_table_version = - context.Attr("lookup_table_version"); - - operators::distributed::prefetchs(id_names, out_names, embedding_name, - is_distributed, lookup_tables, endpoints, - context, context.scope()); - - if (lookup_table_version == "lookup_table_v2") { - auto &scope = context.scope(); - auto emb_dim = - scope.FindVar(embedding_name)->Get().dims()[1]; - - for (size_t i = 0; i < id_names.size(); ++i) { - auto *id_var = scope.FindVar(id_names[i]); - auto *out_var = scope.FindVar(out_names[i]); - auto *id_tensor = id_var->GetMutable(); - auto *out_tensor = out_var->GetMutable(); - - auto id_dims = id_tensor->dims(); - out_tensor->Resize(framework::make_ddim( - {static_cast(id_dims[0]), static_cast(id_dims[1]), - static_cast(emb_dim)})); - } - } - } -}; - class DistributedLookupTableOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { @@ -170,15 +127,12 @@ class DistributedLookupTableOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Lookup Tablel Prefetch Operator. - This operator is used to perform lookup on parameter W, then concatenated into a sparse tensor. - The type of Ids(Input) is SelectedRows, the rows of Ids contains the ids to be looked up in W; if the Id is not in the sparse table, this operator will return a random value and set the value into the table for the next looking up. 
- )DOC"); } }; @@ -191,4 +145,5 @@ REGISTER_OPERATOR(distributed_lookup_table, ops::DistributedLookupTableOp, ops::DistributedLookupTableOpMaker); REGISTER_OP_CPU_KERNEL(distributed_lookup_table, - ops::DistributedLookupTableKernel); + ops::DistributedLookupTableKernel< + paddle::platform::CPUDeviceContext, float>); diff --git a/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cu.cc b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cu.cc new file mode 100644 index 0000000000000..54c894415096e --- /dev/null +++ b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cu.cc @@ -0,0 +1,22 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.h" + +namespace ops = paddle::operators; +namespace plat = paddle::platform; + +REGISTER_OP_CUDA_KERNEL( + distributed_lookup_table, + ops::DistributedLookupTableKernel); diff --git a/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.h b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.h new file mode 100644 index 0000000000000..a71451c78a870 --- /dev/null +++ b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.h @@ -0,0 +1,45 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#pragma once +#include +#include +#include +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/distributed/parameter_prefetch.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +template +class DistributedLookupTableKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto ids_vars = context.MultiInputVar("Ids"); + auto emb_vars = context.MultiOutput("Embeddings"); + + auto id_names = context.InputNames("Ids"); + auto embedding_name = context.InputNames("W").front(); + auto out_names = context.OutputNames("Outputs"); + auto lookup_tables = context.Attr>("table_names"); + auto endpoints = context.Attr>("endpoints"); + auto is_distributed = context.Attr("is_distributed"); + + operators::distributed::prefetchs(id_names, out_names, embedding_name, + is_distributed, lookup_tables, endpoints, + context, context.scope()); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed_ops/recv_save_op.cc b/paddle/fluid/operators/distributed_ops/recv_save_op.cc index ccc30d1ea082a..d194fcda36a47 100644 --- a/paddle/fluid/operators/distributed_ops/recv_save_op.cc +++ b/paddle/fluid/operators/distributed_ops/recv_save_op.cc @@ -44,7 +44,7 @@ class RecvSaveOp : public framework::OperatorWithKernel { const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( framework::proto::VarType::Type(ctx.Attr("dtype")), - ctx.GetPlace()); + platform::CPUPlace()); } }; diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h index 7528422fdc09b..f20bada8ab288 100644 --- a/paddle/fluid/operators/strided_memcpy.h +++ b/paddle/fluid/operators/strided_memcpy.h @@ -1,11 +1,8 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
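The distributed_lookup_table change above follows a common fluid layout: the device-agnostic kernel body moves into a shared header, and the .cc / new .cu.cc files only register that template for their respective places. A minimal sketch of the pattern, using a hypothetical my_lookup op (the op name, the file names, and the float-only instantiation are illustrative assumptions, not part of this patch):

// --- my_lookup_op.h: shared, device-agnostic kernel body ---
#pragma once
#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

// Everything the kernel needs is read from the ExecutionContext, so the
// same template serves whichever place the op is registered for.
template <typename DeviceContext, typename T>
class MyLookupKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // shared lookup/prefetch logic goes here, as in
    // DistributedLookupTableKernel above
  }
};

}  // namespace operators
}  // namespace paddle

// --- my_lookup_op.cc: CPU registration only ---
namespace ops = paddle::operators;
REGISTER_OP_CPU_KERNEL(
    my_lookup,
    ops::MyLookupKernel<paddle::platform::CPUDeviceContext, float>);

// --- my_lookup_op.cu.cc: CUDA registration only ---
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
    my_lookup, ops::MyLookupKernel<plat::CUDADeviceContext, float>);

With this split, nvcc only has to compile the thin .cu.cc that carries the registration macro, while the actual logic stays in one header. recv_save_op, by contrast, stays pinned to CPUPlace in GetExpectedKernelType, which is why the fleet save paths in the next hunks fall back to a CPU executor.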
diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py index d2c7397c85f8d..d555e9876a08c 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/distribute_transpiler/__init__.py @@ -393,6 +393,12 @@ def save_inference_model(self, "in fleet.save_inference_model() function, executor must be as Executor type" ) + # Todo(MrChengmo): support recv&save GPU-Kernel for ps-gpu model save + if not isinstance(executor.place, fluid.CPUPlace): + save_executor = Executor(fluid.CPUPlace()) + else: + save_executor = executor + if main_program is not None: if isinstance(main_program, CompiledProgram): raise TypeError( @@ -670,6 +676,11 @@ def save_persistables(self, executor, dirname, main_program=None, **kwargs): raise TypeError( "in fleet.save_persistables() function, executor must be as Executor type" ) + # Todo(MrChengmo): support recv&save GPU-Kernel for ps-gpu model save + if not isinstance(executor.place, fluid.CPUPlace): + save_executor = Executor(fluid.CPUPlace()) + else: + save_executor = executor if main_program is None: main_program = self.main_program @@ -679,7 +690,8 @@ def save_persistables(self, executor, dirname, main_program=None, **kwargs): "in fleet.save_persistables() function, main_program must be as Program type, CompiledProgram is not allowed" ) - self._save_distributed_persistables(executor, dirname, main_program) + self._save_distributed_persistables(save_executor, dirname, + main_program) @staticmethod def __exclude_vars(exclude_var_names=[]): diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py new file mode 100644 index 0000000000000..03d0fa447daf3 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr_ps_gpu.py @@ -0,0 +1,152 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Distribute CTR model for test fleet api +""" + +from __future__ import print_function + +import shutil +import tempfile +import time + +import paddle +import paddle.fluid as fluid +import os +import numpy as np + +import ctr_dataset_reader +from test_dist_fleet_base import runtime_main, FleetDistRunnerBase +from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader +from paddle.distributed.fleet.base.util_factory import fleet_util + +# Fix seed for test +fluid.default_startup_program().random_seed = 1 +fluid.default_main_program().random_seed = 1 + + +class TestDistGpuPsCTR2x2(TestDistCTR2x2): + """ + For test CTR model, using Fleet api & PS-GPU + """ + + def check_model_right(self, dirname): + model_filename = os.path.join(dirname, "__model__") + + with open(model_filename, "rb") as f: + program_desc_str = f.read() + + program = fluid.Program.parse_from_string(program_desc_str) + with open(os.path.join(dirname, "__model__.proto"), "w") as wn: + wn.write(str(program)) + + def do_pyreader_training(self, fleet): + """ + do training using dataset, using fetch handler to catch variable + Args: + fleet(Fleet api): the fleet object of Parameter Server, define distribute training role + """ + device_id = int(os.getenv("FLAGS_selected_gpus", "0")) + place = fluid.CUDAPlace(device_id) + exe = fluid.Executor(place) + fleet.init_worker() + exe.run(fleet.startup_program) + + batch_size = 4 + train_reader = paddle.batch(fake_ctr_reader(), batch_size=batch_size) + self.reader.decorate_sample_list_generator(train_reader) + + for epoch_id in range(1): + self.reader.start() + try: + pass_start = time.time() + while True: + loss_val = exe.run(program=fleet.main_program, + fetch_list=[self.avg_cost.name]) + loss_val = np.mean(loss_val) + reduce_output = fleet_util.all_reduce( + np.array(loss_val), mode="sum") + loss_all_trainer = fleet_util.all_gather(float(loss_val)) + loss_val = float(reduce_output) / len(loss_all_trainer) + message = "TRAIN ---> pass: {} loss: {}\n".format(epoch_id, + loss_val) + fleet_util.print_on_rank(message, 0) + + pass_time = time.time() - pass_start + except fluid.core.EOFException: + self.reader.reset() + + model_dir = tempfile.mkdtemp() + fleet.save_inference_model( + exe, model_dir, [feed.name for feed in self.feeds], self.avg_cost) + self.check_model_right(model_dir) + if fleet.is_first_worker(): + fleet.save_persistables(executor=exe, dirname=model_dir) + shutil.rmtree(model_dir) + fleet.stop_worker() + + def do_dataset_training(self, fleet): + dnn_input_dim, lr_input_dim, train_file_path = ctr_dataset_reader.prepare_data( + ) + + device_id = int(os.getenv("FLAGS_selected_gpus", "0")) + place = fluid.CUDAPlace(device_id) + exe = fluid.Executor(place) + + fleet.init_worker() + exe.run(fleet.startup_program) + + thread_num = 2 + batch_size = 128 + filelist = [] + for _ in range(thread_num): + filelist.append(train_file_path) + + # config dataset + dataset = paddle.fleet.DatasetFactory().create_dataset() + dataset.set_batch_size(batch_size) + dataset.set_use_var(self.feeds) + pipe_command = 'python ctr_dataset_reader.py' + dataset.set_pipe_command(pipe_command) + + dataset.set_filelist(filelist) + dataset.set_thread(thread_num) + + for epoch_id in range(1): + pass_start = time.time() + dataset.set_filelist(filelist) + exe.train_from_dataset( + program=fleet.main_program, + dataset=dataset, + fetch_list=[self.avg_cost], + fetch_info=["cost"], + print_period=2, + debug=int(os.getenv("Debug", "0"))) + pass_time = time.time() - pass_start + + if os.getenv("SAVE_MODEL") == "1": 
+ model_dir = tempfile.mkdtemp() + fleet.save_inference_model(exe, model_dir, + [feed.name for feed in self.feeds], + self.avg_cost) + self.check_model_right(model_dir) + if fleet.is_first_worker(): + fleet.save_persistables(executor=exe, dirname=model_dir) + shutil.rmtree(model_dir) + + fleet.stop_worker() + + +if __name__ == "__main__": + runtime_main(TestDistGpuPsCTR2x2) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py index f72850f949715..8c700ff3217b5 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py @@ -278,6 +278,23 @@ def _run_cluster(self, model, envs): tr0_ret = tr0.returncode tr1_ret = tr0.returncode + if tr0_ret != 0: + print( + "========================Error tr0_err begin===========================" + ) + os.system("cat {}".format(tempfile.gettempdir() + "/tr0_err.log")) + print( + "========================Error tr0_err end===========================" + ) + + if tr1_ret != 0: + print( + "========================Error tr1_err begin===========================" + ) + os.system("cat {}".format(tempfile.gettempdir() + "/tr1_err.log")) + print( + "========================Error tr1_err end===========================" + ) self.assertEqual(tr0_ret, 0, "something wrong in tr0, please check") self.assertEqual(tr1_ret, 0, "something wrong in tr1, please check") diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py index 18629c4f996a6..e16e91192e1fe 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py @@ -156,5 +156,40 @@ def test_dist_train(self): "dist_fleet_ctr.py", delta=1e-5, check_error_log=True) +class TestDistCtrPsGpuPyreaderAsync2x2(TestFleetBase): + def _setup_config(self): + self._mode = "async" + self._reader = "pyreader" + + def check_with_place(self, + model_file, + delta=1e-3, + check_error_log=False, + need_envs={}): + required_envs = { + "PATH": os.getenv("PATH", ""), + "PYTHONPATH": os.getenv("PYTHONPATH", ""), + "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""), + "FLAGS_rpc_deadline": "30000", # 5sec to fail fast + "http_proxy": "", + "FLAGS_communicator_send_queue_size": "2", + "FLAGS_communicator_max_merge_var_num": "2", + "CPU_NUM": "2", + "SAVE_MODEL": "1" + } + + required_envs.update(need_envs) + + if check_error_log: + required_envs["GLOG_v"] = "3" + required_envs["GLOG_logtostderr"] = "1" + + tr0_losses, tr1_losses = self._run_cluster(model_file, required_envs) + + def test_dist_train(self): + self.check_with_place( + "dist_fleet_ctr_ps_gpu.py", delta=1e-5, check_error_log=True) + + if __name__ == "__main__": unittest.main() From a39bd3acc9df3741246f4a94878347b0c66ccd38 Mon Sep 17 00:00:00 2001 From: lijianshe02 <48898730+lijianshe02@users.noreply.github.com> Date: Wed, 19 Aug 2020 19:59:11 +0800 Subject: [PATCH 25/37] reduce the execution time of affine_channel op unittests (#26393) * reduce the execution time of affine_channel op unittests --- .../paddle/fluid/tests/unittests/test_affine_channel_op.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/test_affine_channel_op.py b/python/paddle/fluid/tests/unittests/test_affine_channel_op.py index c524fb6930d97..6157314b1f060 100644 --- a/python/paddle/fluid/tests/unittests/test_affine_channel_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_affine_channel_op.py @@ -63,7 +63,7 @@ def test_check_grad_stopgrad_dscale_dbias(self): self.check_grad(['X'], 'Out', no_grad_set=set(['Scale', 'Bias'])) def init_test_case(self): - self.shape = [2, 100, 12, 12] + self.shape = [2, 100, 3, 3] self.C = 100 self.layout = 'NCHW' @@ -102,7 +102,7 @@ def test_bias_type(): class TestAffineChannelNHWC(TestAffineChannelOp): def init_test_case(self): - self.shape = [2, 12, 12, 100] + self.shape = [2, 3, 3, 100] self.C = 100 self.layout = 'NHWC' @@ -115,7 +115,7 @@ def test_check_grad_stopgrad_dscale_dbias(self): class TestAffineChannel2D(TestAffineChannelOp): def init_test_case(self): - self.shape = [8, 100] + self.shape = [2, 100] self.C = 100 self.layout = 'NCHW' From bcf03273f6b266b5c770ff01d27031223a9af3ca Mon Sep 17 00:00:00 2001 From: littletomatodonkey <2120160898@bit.edu.cn> Date: Wed, 19 Aug 2020 20:34:09 +0800 Subject: [PATCH 26/37] add pad func (#26106) * add pad func * add pad * test=develop, add pad op and apis * restore pad2d * test=develop, fix paddl declare * fix pad interface * test=develop, fix pad * test=develop, add all pad api and cos_sim * test=develop, remove padding default value * test=develop, rename var to tensor * test=develop, add more tests * test=develop, rename tovar to totensor * test=develop, fix init * test=develop, add more test * test=develop, add more tests --- paddle/fluid/operators/pad3d_op.cc | 912 ++++++++++++++++++ paddle/fluid/operators/pad3d_op.cu | 788 +++++++++++++++ .../unittests/test_cosine_similarity_api.py | 121 +++ .../fluid/tests/unittests/test_pad3d_op.py | 670 +++++++++++++ python/paddle/nn/__init__.py | 10 + python/paddle/nn/functional/__init__.py | 1 + python/paddle/nn/functional/common.py | 246 ++++- python/paddle/nn/layer/__init__.py | 10 + python/paddle/nn/layer/common.py | 646 ++++++++++++- python/paddle/tensor/__init__.py | 1 + 10 files changed, 3390 insertions(+), 15 deletions(-) create mode 100644 paddle/fluid/operators/pad3d_op.cc create mode 100644 paddle/fluid/operators/pad3d_op.cu create mode 100644 python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py create mode 100644 python/paddle/fluid/tests/unittests/test_pad3d_op.py diff --git a/paddle/fluid/operators/pad3d_op.cc b/paddle/fluid/operators/pad3d_op.cc new file mode 100644 index 0000000000000..1d41b823b6551 --- /dev/null +++ b/paddle/fluid/operators/pad3d_op.cc @@ -0,0 +1,912 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using framework::Tensor; + +template +void ConstPad3DFuncNCDHW(const T* in_data, T* out_data, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, const int out_d, + const int out_h, const int out_w, const T value) { + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + out_data[out_d * out_height * out_width + out_h * out_width + out_w] = + (in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth || + in_h >= in_height || in_w >= in_width) + ? value + : in_data[in_d * in_height * in_width + in_h * in_width + in_w]; +} + +template +void ConstPad3DFuncNDHWC(const T* in_data, T* out_data, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, const int out_h, + const int out_w, const T value) { + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + const int out_index = + (out_d * out_height * out_width + out_h * out_width + out_w) * channels; + if (in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth || + in_h >= in_height || in_w >= in_width) { + for (int c = 0; c < channels; ++c) { + out_data[out_index + c] = value; + } + } else { + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + out_data[out_index + c] = in_data[in_index + c]; + } + } +} + +template +void ReflectPad3DFuncNCDHW(const T* in_data, T* out_data, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const int out_d, const int out_h, const int out_w, + const T value) { + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = std::max(in_d, -in_d); // reflect by 0 + in_d = std::min(in_d, 2 * in_depth - in_d - 2); // reflect by in_depth + in_h = std::max(in_h, -in_h); // reflect by 0 + in_h = std::min(in_h, 2 * in_height - in_h - 2); // reflect by in_height + in_w = std::max(in_w, -in_w); // reflect by 0 + in_w = std::min(in_w, 2 * in_width - in_w - 2); // reflect by in_width + + out_data[out_d * out_height * out_width + out_h * out_width + out_w] = + in_data[in_d * in_height * in_width + in_h * in_width + in_w]; +} + +template +void ReflectPad3DFuncNDHWC(const T* in_data, T* out_data, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, const int out_h, + const int out_w, const T value) { + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = std::max(in_d, -in_d); + in_d = std::min(in_d, 2 * in_depth - in_d - 2); + in_h = std::max(in_h, -in_h); + in_h = std::min(in_h, 2 * in_height - in_h - 2); + in_w = std::max(in_w, -in_w); + in_w = std::min(in_w, 2 * in_width - in_w - 2); + + const int out_index = + (out_d * out_height * out_width + out_h * out_width + 
out_w) * channels; + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + out_data[out_index + c] = in_data[in_index + c]; + } +} + +template +void ReplicatePad3DFuncNCDHW(const T* in_data, T* out_data, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const int out_d, const int out_h, const int out_w, + const T value) { + int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0)); + int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0)); + int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0)); + + out_data[out_d * out_height * out_width + out_h * out_width + out_w] = + in_data[in_d * in_height * in_width + in_h * in_width + in_w]; +} + +template +void ReplicatePad3DFuncNDHWC(const T* in_data, T* out_data, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, + const int out_h, const int out_w, const T value) { + int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0)); + int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0)); + int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0)); + + const int out_index = + (out_d * out_height * out_width + out_h * out_width + out_w) * channels; + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + out_data[out_index + c] = in_data[in_index + c]; + } +} + +template +void CircularPad3DFuncNCDHW(const T* in_data, T* out_data, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const int out_d, const int out_h, const int out_w, + const T value) { + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + + out_data[out_d * out_height * out_width + out_h * out_width + out_w] = + in_data[in_d * in_height * in_width + in_h * in_width + in_w]; +} + +template +void CircularPad3DFuncNDHWC(const T* in_data, T* out_data, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, + const int out_h, const int out_w, const T value) { + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + + const int out_index = + (out_d * out_height * out_width + out_h * out_width + out_w) * channels; + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + out_data[out_index + c] = in_data[in_index + c]; + } +} + +template +void Pad3DNCDHW(const T* in_data, const int num, const int channels, + const int in_depth, const int in_height, const int in_width, + const int out_depth, const int out_height, const int out_width, + const int pad_front, const int pad_top, const int 
pad_left, + T value, T* out_data, + void (*pad_func)(const T*, T*, const int, const int, const int, + const int, const int, const int, const int, + const int, const int, const int, const int, + const int, const T)) { + for (int n = 0; n < num; ++n) { + for (int c = 0; c < channels; ++c) { + for (int out_d = 0; out_d < out_depth; ++out_d) { + for (int out_h = 0; out_h < out_height; ++out_h) { + for (int out_w = 0; out_w < out_width; ++out_w) { + pad_func(in_data, out_data, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, + pad_left, out_d, out_h, out_w, value); + } + } + } + in_data += in_depth * in_height * in_width; + out_data += out_depth * out_height * out_width; + } + } +} + +template +void Pad3DNDHWC(const T* in_data, const int num, const int channels, + const int in_depth, const int in_height, const int in_width, + const int out_depth, const int out_height, const int out_width, + const int pad_front, const int pad_top, const int pad_left, + T value, T* out_data, + void (*pad_func)(const T*, T*, const int, const int, const int, + const int, const int, const int, const int, + const int, const int, const int, const int, + const int, const int, const T)) { + for (int n = 0; n < num; ++n) { + for (int out_d = 0; out_d < out_depth; ++out_d) { + for (int out_h = 0; out_h < out_height; ++out_h) { + for (int out_w = 0; out_w < out_width; ++out_w) { + pad_func(in_data, out_data, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, + pad_left, out_d, out_h, out_w, value); + } + } + } + in_data += in_depth * in_height * in_width * channels; + out_data += out_depth * out_height * out_width * channels; + } +} + +template +void ConstPad3DGradNCDHW(T* d_in_data, const T* d_out_data, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, const int out_d, + const int out_h, const int out_w) { + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + if (!(in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth || + in_h >= in_height || in_w >= in_width)) { + d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] = + d_out_data[out_d * out_height * out_width + out_h * out_width + out_w]; + } +} + +template +void ConstPad3DGradNDHWC(T* d_in_data, const T* d_out_data, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, const int out_h, + const int out_w) { + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + const int out_index = + (out_d * out_height * out_width + out_h * out_width + out_w) * channels; + if (!(in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth || + in_h >= in_height || in_w >= in_width)) { + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + d_in_data[in_index + c] = d_out_data[out_index + c]; + } + } +} + +template +void ReflectPad3DGradNCDHW(T* d_in_data, const T* d_out_data, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, const int out_h, + const int out_w) { + int 
in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = std::max(in_d, -in_d); // reflect by 0 + in_d = std::min(in_d, 2 * in_depth - in_d - 2); // reflect by in_depth + in_h = std::max(in_h, -in_h); // reflect by 0 + in_h = std::min(in_h, 2 * in_height - in_h - 2); // reflect by in_height + in_w = std::max(in_w, -in_w); // reflect by 0 + in_w = std::min(in_w, 2 * in_width - in_w - 2); // reflect by in_width + + d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] += + d_out_data[out_d * out_height * out_width + out_h * out_width + out_w]; +} + +template +void ReflectPad3DGradNDHWC(T* d_in_data, const T* d_out_data, + const int channels, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const int out_d, const int out_h, const int out_w) { + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = std::max(in_d, -in_d); + in_d = std::min(in_d, 2 * in_depth - in_d - 2); + in_h = std::max(in_h, -in_h); + in_h = std::min(in_h, 2 * in_height - in_h - 2); + in_w = std::max(in_w, -in_w); + in_w = std::min(in_w, 2 * in_width - in_w - 2); + + const int out_index = + (out_d * out_height * out_width + out_h * out_width + out_w) * channels; + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + d_in_data[in_index + c] += d_out_data[out_index + c]; + } +} + +template +void ReplicatePad3DGradNCDHW(T* d_in_data, const T* d_out_data, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, + const int out_h, const int out_w) { + int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0)); + int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0)); + int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0)); + + d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] += + d_out_data[out_d * out_height * out_width + out_h * out_width + out_w]; +} + +template +void ReplicatePad3DGradNDHWC(T* d_in_data, const T* d_out_data, + const int channels, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const int out_d, const int out_h, + const int out_w) { + int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0)); + int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0)); + int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0)); + + const int out_index = + (out_d * out_height * out_width + out_h * out_width + out_w) * channels; + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + d_in_data[in_index + c] += d_out_data[out_index + c]; + } +} + +template +void CircularPad3DGradNCDHW(T* d_in_data, const T* d_out_data, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const int out_d, + const int out_h, const int out_w) { + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + 
in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + d_in_data[in_d * in_height * in_width + in_h * in_width + in_w] += + d_out_data[out_d * out_height * out_width + out_h * out_width + out_w]; +} + +template +void CircularPad3DGradNDHWC(T* d_in_data, const T* d_out_data, + const int channels, const int in_depth, + const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const int out_d, const int out_h, const int out_w) { + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + + const int out_index = + (out_d * out_height * out_width + out_h * out_width + out_w) * channels; + const int in_index = + (in_d * in_height * in_width + in_h * in_width + in_w) * channels; + for (int c = 0; c < channels; ++c) { + d_in_data[in_index + c] += d_out_data[out_index + c]; + } +} + +template +void Pad3DGradNCDHW(T* d_in_data, const int num, const int channels, + const int in_depth, const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, const int pad_top, + const int pad_left, const T* d_out_data, + void (*pad_func)(T*, const T*, const int, const int, + const int, const int, const int, const int, + const int, const int, const int, const int, + const int, const int)) { + for (int n = 0; n < num; ++n) { + for (int c = 0; c < channels; ++c) { + for (int out_d = 0; out_d < out_depth; ++out_d) { + for (int out_h = 0; out_h < out_height; ++out_h) { + for (int out_w = 0; out_w < out_width; ++out_w) { + pad_func(d_in_data, d_out_data, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, + pad_left, out_d, out_h, out_w); + } + } + } + d_in_data += in_depth * in_height * in_width; + d_out_data += out_depth * out_height * out_width; + } + } +} + +template +void Pad3DGradNDHWC(T* d_in_data, const int num, const int channels, + const int in_depth, const int in_height, const int in_width, + const int out_depth, const int out_height, + const int out_width, const int pad_front, const int pad_top, + const int pad_left, const T* d_out_data, + void (*pad_func)(T*, const T*, const int, const int, + const int, const int, const int, const int, + const int, const int, const int, const int, + const int, const int, const int)) { + for (int n = 0; n < num; ++n) { + for (int out_d = 0; out_d < out_depth; ++out_d) { + for (int out_h = 0; out_h < out_height; ++out_h) { + for (int out_w = 0; out_w < out_width; ++out_w) { + pad_func(d_in_data, d_out_data, channels, in_depth, in_height, + in_width, out_depth, out_height, out_width, pad_front, + pad_top, pad_left, out_d, out_h, out_w); + } + } + } + d_in_data += in_depth * in_height * in_width * channels; + d_out_data += out_depth * out_height * out_width * channels; + } +} + +static inline std::vector GetPaddings( + const framework::ExecutionContext& context) { + std::vector paddings(6); + auto* paddings_t = context.Input("Paddings"); + if (paddings_t) { + auto paddings_data = paddings_t->data(); + std::memcpy(paddings.data(), paddings_data, paddings.size() * sizeof(int)); + } else { + auto pads = context.Attr>("paddings"); + std::copy(pads.begin(), pads.end(), paddings.data()); + } + return paddings; +} + +template +class Pad3dCPUKernel : public framework::OpKernel { + 
public: + void Compute(const framework::ExecutionContext& context) const override { + std::vector pads = GetPaddings(context); + auto mode = context.Attr("mode"); + auto data_format = context.Attr("data_format"); + T value = static_cast(context.Attr("value")); + + auto* x = context.Input("X"); + auto in_dims = x->dims(); + const T* in_data = x->data(); + + auto* out = context.Output("Out"); + if (data_format == "NCDHW") { + out->Resize({in_dims[0], in_dims[1], in_dims[2] + pads[4] + pads[5], + in_dims[3] + pads[2] + pads[3], + in_dims[4] + pads[0] + pads[1]}); + } else { + out->Resize({in_dims[0], in_dims[1] + pads[4] + pads[5], + in_dims[2] + pads[2] + pads[3], + in_dims[3] + pads[0] + pads[1], in_dims[4]}); + } + auto out_dims = out->dims(); + T* out_data = out->mutable_data(context.GetPlace()); + + int channels = in_dims[1]; + int in_depth = in_dims[2]; + int in_height = in_dims[3]; + int in_width = in_dims[4]; + int out_depth = out_dims[2]; + int out_height = out_dims[3]; + int out_width = out_dims[4]; + if (data_format == "NDHWC") { + channels = in_dims[4]; + in_depth = in_dims[1]; + in_height = in_dims[2]; + in_width = in_dims[3]; + out_depth = out_dims[1]; + out_height = out_dims[2]; + out_width = out_dims[3]; + } + + if (mode == "reflect") { + PADDLE_ENFORCE_GT(in_depth, pads[4], + platform::errors::InvalidArgument( + "The depth of Input(X)'s dimension should be " + "greater than pad_front" + " in reflect mode" + ", but received depth(%d) and pad_front(%d).", + in_depth, pads[4])); + PADDLE_ENFORCE_GT(in_depth, pads[5], + platform::errors::InvalidArgument( + "The depth of Input(X)'s dimension should be " + "greater than pad_back" + " in reflect mode" + ", but received depth(%d) and pad_back(%d).", + in_depth, pads[5])); + + PADDLE_ENFORCE_GT(in_height, pads[2], + platform::errors::InvalidArgument( + "The height of Input(X)'s dimension should be " + "greater than pad_top" + " in reflect mode" + ", but received depth(%d) and pad_top(%d).", + in_height, pads[2])); + PADDLE_ENFORCE_GT(in_height, pads[3], + platform::errors::InvalidArgument( + "The height of Input(X)'s dimension should be " + "greater than pad_bottom" + " in reflect mode" + ", but received depth(%d) and pad_bottom(%d).", + in_height, pads[3])); + + PADDLE_ENFORCE_GT(in_width, pads[0], + platform::errors::InvalidArgument( + "The width of Input(X)'s dimension should be " + "greater than pad_left" + " in reflect mode" + ", but received depth(%d) and pad_left(%d).", + in_width, pads[0])); + PADDLE_ENFORCE_GT(in_width, pads[1], + platform::errors::InvalidArgument( + "The width of Input(X)'s dimension should be " + "greater than pad_right" + " in reflect mode" + ", but received depth(%d) and pad_right(%d).", + in_width, pads[1])); + } + + const int pad_left = pads[0]; + const int pad_top = pads[2]; + const int pad_front = pads[4]; + const int num = in_dims[0]; + if (data_format == "NCDHW") { + std::map + func_map; + + func_map["reflect"] = ReflectPad3DFuncNCDHW; + func_map["replicate"] = ReplicatePad3DFuncNCDHW; + func_map["circular"] = CircularPad3DFuncNCDHW; + func_map["constant"] = ConstPad3DFuncNCDHW; + Pad3DNCDHW(in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + value, out_data, func_map[mode]); + } else { + std::map + func_map; + + func_map["reflect"] = ReflectPad3DFuncNDHWC; + func_map["replicate"] = ReplicatePad3DFuncNDHWC; + func_map["circular"] = CircularPad3DFuncNDHWC; + func_map["constant"] = ConstPad3DFuncNDHWC; + Pad3DNDHWC(in_data, num, 
channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + value, out_data, func_map[mode]); + } + } +}; + +template +class Pad3dGradCPUKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + std::vector pads = GetPaddings(context); + auto mode = context.Attr("mode"); + auto data_format = context.Attr("data_format"); + auto* d_out = context.Input(framework::GradVarName("Out")); + auto* d_in = context.Output(framework::GradVarName("X")); + auto d_in_dims = d_in->dims(); + auto d_out_dims = d_out->dims(); + const T* d_out_data = d_out->data(); + T* d_in_data = d_in->mutable_data(context.GetPlace()); + math::SetConstant set_zero; + set_zero(context.template device_context(), + d_in, static_cast(0)); + const int pad_left = pads[0]; + const int pad_top = pads[2]; + const int pad_front = pads[4]; + const int num = d_in_dims[0]; + if (data_format == "NCDHW") { + const int channels = d_in_dims[1]; + const int in_depth = d_in_dims[2]; + const int in_height = d_in_dims[3]; + const int in_width = d_in_dims[4]; + const int out_depth = d_out_dims[2]; + const int out_height = d_out_dims[3]; + const int out_width = d_out_dims[4]; + + std::map + func_map; + + func_map["reflect"] = ReflectPad3DGradNCDHW; + func_map["replicate"] = ReplicatePad3DGradNCDHW; + func_map["circular"] = CircularPad3DGradNCDHW; + func_map["constant"] = ConstPad3DGradNCDHW; + + Pad3DGradNCDHW(d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, + pad_left, d_out_data, func_map[mode]); + } else { + const int channels = d_in_dims[4]; + const int in_depth = d_in_dims[1]; + const int in_height = d_in_dims[2]; + const int in_width = d_in_dims[3]; + const int out_depth = d_out_dims[1]; + const int out_height = d_out_dims[2]; + const int out_width = d_out_dims[3]; + + std::map + func_map; + + func_map["reflect"] = ReflectPad3DGradNDHWC; + func_map["replicate"] = ReplicatePad3DGradNDHWC; + func_map["circular"] = CircularPad3DGradNDHWC; + func_map["constant"] = ConstPad3DGradNDHWC; + + Pad3DGradNDHWC(d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, + pad_left, d_out_data, func_map[mode]); + } + } +}; + +class Pad3dOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Pad3d"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Pad3d"); + + auto x_dim = ctx->GetInputDim("X"); + PADDLE_ENFORCE_EQ(x_dim.size(), 5, + platform::errors::InvalidArgument( + "The size of Input(X)'s dimension should be equal to " + "5, but received %d. 
", + x_dim.size())); + + std::vector out_dims(x_dim.size()); + auto data_format = ctx->Attrs().Get("data_format"); + out_dims[0] = x_dim[0]; + if (ctx->HasInput("Paddings")) { + auto paddings_dim = ctx->GetInputDim("Paddings"); + PADDLE_ENFORCE_EQ(paddings_dim.size(), 1, + platform::errors::InvalidArgument( + "Size of Input(Paddings)'s dimension should be " + "equal to 1, but received %d.", + paddings_dim.size())); + if (ctx->IsRuntime()) { + PADDLE_ENFORCE_EQ(paddings_dim[0], 6, + platform::errors::InvalidArgument( + "Shape of Input(Paddings) should be equal to " + "[6], but received [%d].", + paddings_dim[0])); + } + out_dims[1] = x_dim[1]; + out_dims[2] = x_dim[2]; + out_dims[3] = x_dim[3]; + } else { + auto paddings = ctx->Attrs().Get>("paddings"); + PADDLE_ENFORCE_EQ( + paddings.size(), 6, + platform::errors::InvalidArgument( + "Size of paddings should be equal to 4, but received %d.", + static_cast(paddings.size()))); + if (data_format == "NCDHW") { + out_dims[1] = x_dim[1]; // channel + out_dims[2] = ((!ctx->IsRuntime()) && (x_dim[2] < 0)) + ? x_dim[2] + : (x_dim[2] + paddings[4] + paddings[5]); // depth + + out_dims[3] = ((!ctx->IsRuntime()) && (x_dim[3] < 0)) + ? x_dim[3] + : (x_dim[3] + paddings[2] + paddings[3]); // height + + out_dims[4] = ((!ctx->IsRuntime()) && (x_dim[4] < 0)) + ? x_dim[4] + : (x_dim[4] + paddings[0] + paddings[1]); // width + } else { // NDHWC + out_dims[4] = x_dim[4]; // channel + + out_dims[1] = ((!ctx->IsRuntime()) && (x_dim[1] < 0)) + ? x_dim[1] + : (x_dim[1] + paddings[4] + paddings[5]); // depth + out_dims[2] = ((!ctx->IsRuntime()) && (x_dim[2] < 0)) + ? x_dim[2] + : (x_dim[2] + paddings[2] + paddings[3]); // height + out_dims[3] = ((!ctx->IsRuntime()) && (x_dim[3] < 0)) + ? x_dim[3] + : (x_dim[3] + paddings[0] + paddings[1]); // width + } + } + + ctx->SetOutputDim("Out", framework::make_ddim(out_dims)); + ctx->ShareLoD("X", /*->*/ "Out"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace()); + } +}; + +class Pad3dOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "The input of pad3d op. " + "The input should be a 5-D tensor with formate NCDHW or NDHWC."); + AddOutput("Out", + "The output of pad3d op. " + "A tensor with the same shape as X."); + AddInput("Paddings", + "A 1-D tensor to describe the padding rules." + "paddings=[0, 1, 2, 3, 4, 5] means " + "padding 0 column to left, 1 column to right, " + "2 row to top, 3 row to bottom, 4 depth to front " + "and 5 depth to back. Size of paddings must be 6.") + .AsDispensable(); + AddAttr>( + "paddings", + "(vector) " + "A list to describe the padding rules." + "paddings=[0, 1, 2, 3, 4, 5] means " + "padding 0 column to left, 1 column to right, " + "2 row to top, 3 row to bottom, 4 depth to front " + "and 5 depth to back. Size of paddings must be 6."); + AddAttr("value", + "(float, default 0.0) " + "The value to fill the padded areas in constant mode.") + .SetDefault(0.0f); + AddAttr( + "mode", + "(string, default constant) " + "Four modes: constant(default), reflect, replicate, circular.") + .SetDefault("constant"); + AddAttr( + "data_format", + "(string, default NCDHW) Only used in " + "An optional string from: \"NDHWC\", \"NCDHW\". " + "Defaults to \"NDHWC\". Specify the data format of the input data.") + .SetDefault("NCDHW"); + AddComment(R"DOC( +Pad3d Operator. 
+Pad 3-d images according to 'paddings' and 'mode'. +If mode is 'reflect', paddings[0] and paddings[1] must be no greater +than width-1. The height and depth dimension have the same condition. + +Given that X is a channel of image from input: + +X = [[[[[1, 2, 3], + [4, 5, 6]]]]] + +Case 0: + +paddings = [2, 2, 1, 1, 0, 0], +mode = 'constant' +pad_value = 0 + +Out = [[[[[0. 0. 0. 0. 0. 0. 0.] + [0. 0. 1. 2. 3. 0. 0.] + [0. 0. 4. 5. 6. 0. 0.] + [0. 0. 0. 0. 0. 0. 0.]]]]] + +Case 1: + +paddings = [2, 2, 1, 1, 0, 0], +mode = 'reflect' + +Out = [[[[[6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.] + [6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.]]]]] + +Case 2: + +paddings = [2, 2, 1, 1, 0, 0], +mode = 'replicate' + +Out = [[[[[1. 1. 1. 2. 3. 3. 3.] + [1. 1. 1. 2. 3. 3. 3.] + [4. 4. 4. 5. 6. 6. 6.] + [4. 4. 4. 5. 6. 6. 6.]]]]] + +Case 3: + +paddings = [2, 2, 1, 1, 0, 0], +mode = 'circular' + +Out = [[[[[5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.] + [5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.]]]]] + +)DOC"); + } +}; + +class Pad3dOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Pad3d@Grad"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + framework::GradVarName("Out"), "Pad3d@Grad"); + + auto x_dims = ctx->GetInputDim("X"); + auto x_grad_name = framework::GradVarName("X"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType( + ctx, framework::GradVarName("Out")), + ctx.GetPlace()); + } +}; + +template +class Pad3dOpGradMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + + protected: + void Apply(GradOpPtr bind) const override { + bind->SetInput("X", this->Input("X")); + if (this->HasInput("Paddings")) { + bind->SetInput("Paddings", this->Input("Paddings")); + } + bind->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + bind->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); + bind->SetAttrMap(this->Attrs()); + bind->SetType("pad3d_grad"); + } +}; + +DECLARE_NO_NEED_BUFFER_VARS_INFERER(Pad3dOpGradNoNeedBufferVarsInferer, "X"); + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(pad3d, ops::Pad3dOp, ops::Pad3dOpMaker, + ops::Pad3dOpGradMaker, + ops::Pad3dOpGradMaker); +REGISTER_OPERATOR(pad3d_grad, ops::Pad3dOpGrad, + ops::Pad3dOpGradNoNeedBufferVarsInferer); +REGISTER_OP_CPU_KERNEL(pad3d, ops::Pad3dCPUKernel, + ops::Pad3dCPUKernel, ops::Pad3dCPUKernel, + ops::Pad3dCPUKernel); +REGISTER_OP_CPU_KERNEL(pad3d_grad, ops::Pad3dGradCPUKernel, + ops::Pad3dGradCPUKernel); diff --git a/paddle/fluid/operators/pad3d_op.cu b/paddle/fluid/operators/pad3d_op.cu new file mode 100644 index 0000000000000..672a75389ccf1 --- /dev/null +++ b/paddle/fluid/operators/pad3d_op.cu @@ -0,0 +1,788 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/gpu_info.h" + +namespace paddle { +namespace operators { + +using platform::PADDLE_CUDA_NUM_THREADS; + +using framework::Tensor; + +template +__global__ void Pad3DConstNCDHW(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T value, T* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int nc = index / out_width; + + const int out_w = index % out_width; + const int out_h = nc % out_height; + nc /= out_height; + const int out_d = nc % out_depth; + nc /= out_depth; + + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + out_data[index] = + (in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth || + in_h >= in_height || in_w >= in_width) + ? value + : in_data[nc * in_depth * in_height * in_width + + in_d * in_height * in_width + in_h * in_width + in_w]; + } +} + +template +__global__ void Pad3DConstNDHWC(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T value, T* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int n = index / channels; + const int c = index % channels; + const int out_w = n % out_width; + n /= out_width; + const int out_h = n % out_height; + n /= out_height; + const int out_d = n % out_depth; + n /= out_depth; + const int in_d = out_d - pad_front; + const int in_h = out_h - pad_top; + const int in_w = out_w - pad_left; + + out_data[index] = + (in_d < 0 || in_h < 0 || in_w < 0 || in_d >= in_depth || + in_h >= in_height || in_w >= in_width) + ? 
value + : in_data[n * in_depth * in_height * in_width * channels + + in_d * in_height * in_width * channels + + in_h * in_width * channels + in_w * channels + c]; + } +} + +template +__global__ void Pad3DReflectNCDHW(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int nc = index / out_width; + + const int out_w = index % out_width; + const int out_h = nc % out_height; + nc /= out_height; + const int out_d = nc % out_depth; + nc /= out_depth; + + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = max(in_d, -in_d); // reflect by 0 + in_d = min(in_d, 2 * in_depth - in_d - 2); // reflect by in_depth + in_h = max(in_h, -in_h); // reflect by 0 + in_h = min(in_h, 2 * in_height - in_h - 2); // reflect by in_height + in_w = max(in_w, -in_w); // reflect by 0 + in_w = min(in_w, 2 * in_width - in_w - 2); // reflect by in_width + out_data[index] = + in_data[(nc * in_depth * in_height + in_d * in_height + in_h) * + in_width + + in_w]; + } +} + +template +__global__ void Pad3DReflectNDHWC(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int n = index / channels; + const int c = index % channels; + const int out_w = n % out_width; + n /= out_width; + const int out_h = n % out_height; + n /= out_height; + const int out_d = n % out_depth; + n /= out_depth; + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = max(in_d, -in_d); + in_d = min(in_d, 2 * in_depth - in_d - 2); + in_h = max(in_h, -in_h); + in_h = min(in_h, 2 * in_height - in_h - 2); + in_w = max(in_w, -in_w); + in_w = min(in_w, 2 * in_width - in_w - 2); + + out_data[index] = in_data[n * in_depth * in_height * in_width * channels + + in_d * in_height * in_width * channels + + in_h * in_width * channels + in_w * channels + c]; + } +} + +template +__global__ void Pad3DReplicateNCDHW(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int nc = index / out_width; + + const int out_w = index % out_width; + const int out_h = nc % out_height; + nc /= out_height; + const int out_d = nc % out_depth; + nc /= out_depth; + + int in_d = min(in_depth - 1, max(out_d - pad_front, 0)); + int in_h = min(in_height - 1, max(out_h - pad_top, 0)); + int in_w = min(in_width - 1, max(out_w - pad_left, 0)); + + out_data[index] = + in_data[(nc * in_depth * in_height + in_d * in_height + in_h) * + in_width + + in_w]; + } +} + +template +__global__ void Pad3DReplicateNDHWC(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T* out_data) { + 
CUDA_KERNEL_LOOP(index, nthreads) { + int n = index / channels; + const int c = index % channels; + const int out_w = n % out_width; + n /= out_width; + const int out_h = n % out_height; + n /= out_height; + const int out_d = n % out_depth; + n /= out_depth; + + int in_d = min(in_depth - 1, max(out_d - pad_front, 0)); + int in_h = min(in_height - 1, max(out_h - pad_top, 0)); + int in_w = min(in_width - 1, max(out_w - pad_left, 0)); + + out_data[index] = in_data[n * in_depth * in_height * in_width * channels + + in_d * in_height * in_width * channels + + in_h * in_width * channels + in_w * channels + c]; + } +} + +template +__global__ void Pad3DCircularNCDHW(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int nc = index / out_width; + + const int out_w = index % out_width; + const int out_h = nc % out_height; + nc /= out_height; + const int out_d = nc % out_depth; + nc /= out_depth; + + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + + out_data[index] = + in_data[(nc * in_depth * in_height + in_d * in_height + in_h) * + in_width + + in_w]; + } +} + +template +__global__ void Pad3DCircularNDHWC(const int nthreads, const T* in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, T* out_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int n = index / channels; + const int c = index % channels; + const int out_w = n % out_width; + n /= out_width; + const int out_h = n % out_height; + n /= out_height; + const int out_d = n % out_depth; + n /= out_depth; + + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + + out_data[index] = in_data[n * in_depth * in_height * in_width * channels + + in_d * in_height * in_width * channels + + in_h * in_width * channels + in_w * channels + c]; + } +} + +template +__global__ void Pad3DGradConstNCDHW(const int in_size, T* d_in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const T* d_out_data) { + CUDA_KERNEL_LOOP(in_index, in_size) { + const int in_w = in_index % in_width; + + int nc = in_index / in_width; + const int in_h = nc % in_height; + + nc /= in_height; + const int in_d = nc % in_depth; + + nc /= in_depth; + + const int out_d = in_d + pad_front; + const int out_h = in_h + pad_top; + const int out_w = in_w + pad_left; + d_in_data[in_index] = + d_out_data[nc * out_depth * out_height * out_width + + out_d * out_height * out_width + out_h * out_width + out_w]; + } +} + +template +__global__ void Pad3DGradConstNDHWC(const int in_size, T* d_in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int 
out_width, + const int pad_front, const int pad_top, + const int pad_left, const T* d_out_data) { + CUDA_KERNEL_LOOP(in_index, in_size) { + const int c = in_index % channels; + int n = in_index / channels; + + const int in_w = n % in_width; + n /= in_width; + + const int in_h = n % in_height; + n /= in_height; + + const int in_d = n % in_depth; + n /= in_depth; + + const int out_d = in_d + pad_front; + const int out_h = in_h + pad_top; + const int out_w = in_w + pad_left; + + d_in_data[in_index] = + d_out_data[n * out_depth * out_height * out_width * channels + + out_d * out_height * out_width * channels + + out_h * out_width * channels + out_w * channels + c]; + } +} + +template +__global__ void Pad3DGradReflectNCDHW(const int out_size, T* d_in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const T* d_out_data) { + CUDA_KERNEL_LOOP(out_index, out_size) { + int nc = out_index / out_width; + const int out_w = out_index % out_width; + const int out_h = nc % out_height; + nc /= out_height; + const int out_d = nc % out_depth; + nc /= out_depth; + + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = max(in_d, -in_d); + in_h = max(in_h, -in_h); + in_w = max(in_w, -in_w); + + in_d = min(in_d, 2 * in_depth - in_d - 2); + in_h = min(in_h, 2 * in_height - in_h - 2); + in_w = min(in_w, 2 * in_width - in_w - 2); + + platform::CudaAtomicAdd( + &d_in_data[nc * in_depth * in_height * in_width + + in_d * in_height * in_width + in_h * in_width + in_w], + d_out_data[out_index]); + } +} + +template +__global__ void Pad3DGradReflectNDHWC(const int out_size, T* d_in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, const int out_width, + const int pad_front, const int pad_top, + const int pad_left, const T* d_out_data) { + CUDA_KERNEL_LOOP(out_index, out_size) { + const int c = out_index % channels; + int n = out_index / channels; + const int out_w = n % out_width; + n /= out_width; + const int out_h = n % out_height; + n /= out_height; + const int out_d = n % out_depth; + n /= out_depth; + + int in_d = out_d - pad_front; + int in_h = out_h - pad_top; + int in_w = out_w - pad_left; + + in_d = max(in_d, -in_d); + in_h = max(in_h, -in_h); + in_w = max(in_w, -in_w); + + in_d = min(in_d, in_depth * 2 - in_d - 2); + in_h = min(in_h, in_height * 2 - in_h - 2); + in_w = min(in_w, in_width * 2 - in_w - 2); + platform::CudaAtomicAdd( + &d_in_data[n * in_depth * in_height * in_width * channels + + in_d * in_height * in_width * channels + + in_h * in_width * channels + in_w * channels + c], + d_out_data[out_index]); + } +} + +template +__global__ void Pad3DGradReplicateNCDHW( + const int out_size, T* d_in_data, const int num, const int channels, + const int in_depth, const int in_height, const int in_width, + const int out_depth, const int out_height, const int out_width, + const int pad_front, const int pad_top, const int pad_left, + const T* d_out_data) { + CUDA_KERNEL_LOOP(out_index, out_size) { + int nc = out_index / out_width; + const int out_w = out_index % out_width; + const int out_h = nc % out_height; + nc /= out_height; + const int out_d = nc % out_depth; + nc /= out_depth; + + const int in_d = min(in_depth - 1, max(out_d - pad_front, 0)); + const int in_h = min(in_height 
- 1, max(out_h - pad_top, 0)); + const int in_w = min(in_width - 1, max(out_w - pad_left, 0)); + + platform::CudaAtomicAdd( + &d_in_data[nc * in_depth * in_height * in_width + + in_d * in_height * in_width + in_h * in_width + in_w], + d_out_data[out_index]); + } +} + +template +__global__ void Pad3DGradReplicateNDHWC( + const int out_size, T* d_in_data, const int num, const int channels, + const int in_depth, const int in_height, const int in_width, + const int out_depth, const int out_height, const int out_width, + const int pad_front, const int pad_top, const int pad_left, + const T* d_out_data) { + CUDA_KERNEL_LOOP(out_index, out_size) { + const int c = out_index % channels; + int n = out_index / channels; + const int out_w = n % out_width; + n /= out_width; + const int out_h = n % out_height; + n /= out_height; + const int out_d = n % out_depth; + n /= out_depth; + + const int in_d = min(in_depth - 1, max(out_d - pad_front, 0)); + const int in_h = min(in_height - 1, max(out_h - pad_top, 0)); + const int in_w = min(in_width - 1, max(out_w - pad_left, 0)); + + platform::CudaAtomicAdd( + &d_in_data[n * in_depth * in_height * in_width * channels + + in_d * in_height * in_width * channels + + in_h * in_width * channels + in_w * channels + c], + d_out_data[out_index]); + } +} + +template +__global__ void Pad3DGradCircularNCDHW(const int out_size, T* d_in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const T* d_out_data) { + CUDA_KERNEL_LOOP(out_index, out_size) { + int nc = out_index / out_width; + const int out_w = out_index % out_width; + const int out_h = nc % out_height; + nc /= out_height; + const int out_d = nc % out_depth; + nc /= out_depth; + + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + + platform::CudaAtomicAdd( + &d_in_data[nc * in_depth * in_height * in_width + + in_d * in_height * in_width + in_h * in_width + in_w], + d_out_data[out_index]); + } +} + +template +__global__ void Pad3DGradCircularNDHWC(const int out_size, T* d_in_data, + const int num, const int channels, + const int in_depth, const int in_height, + const int in_width, const int out_depth, + const int out_height, + const int out_width, const int pad_front, + const int pad_top, const int pad_left, + const T* d_out_data) { + CUDA_KERNEL_LOOP(out_index, out_size) { + const int c = out_index % channels; + int n = out_index / channels; + const int out_w = n % out_width; + n /= out_width; + const int out_h = n % out_height; + n /= out_height; + const int out_d = n % out_depth; + n /= out_depth; + + int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth; + int in_h = ((out_h - pad_top) % in_height + in_height) % in_height; + int in_w = ((out_w - pad_left) % in_width + in_width) % in_width; + + platform::CudaAtomicAdd( + &d_in_data[n * in_depth * in_height * in_width * channels + + in_d * in_height * in_width * channels + + in_h * in_width * channels + in_w * channels + c], + d_out_data[out_index]); + } +} + +static inline std::vector GetPaddings( + const framework::ExecutionContext& context) { + std::vector paddings(6); + auto* paddings_data = context.Input("Paddings"); + if (paddings_data) { + Tensor pads; + framework::TensorCopySync(*paddings_data, 
platform::CPUPlace(), &pads); + auto pads_data = pads.data(); + std::memcpy(paddings.data(), pads_data, paddings.size() * sizeof(int)); + } else { + auto pads = context.Attr>("paddings"); + std::copy(pads.begin(), pads.end(), paddings.data()); + } + return paddings; +} + +template +class Pad3dCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + std::vector pads = GetPaddings(context); + auto mode = context.Attr("mode"); + auto data_format = context.Attr("data_format"); + T value = static_cast(context.Attr("value")); + + auto* x = context.Input("X"); + auto in_dims = x->dims(); + const T* in_data = x->data(); + auto* out = context.Output("Out"); + auto out_dims = out->dims(); + if (data_format == "NCDHW") { + out_dims[0] = in_dims[0]; + out_dims[1] = in_dims[1]; + out_dims[2] = in_dims[2] + pads[4] + pads[5]; + out_dims[3] = in_dims[3] + pads[2] + pads[3]; + out_dims[4] = in_dims[4] + pads[0] + pads[1]; + } else { + out_dims[0] = in_dims[0]; + out_dims[1] = in_dims[1] + pads[4] + pads[5]; + out_dims[2] = in_dims[2] + pads[2] + pads[3]; + out_dims[3] = in_dims[3] + pads[0] + pads[1]; + out_dims[4] = in_dims[4]; + } + T* out_data = out->mutable_data(out_dims, context.GetPlace()); + + int channels = in_dims[1]; + int in_depth = in_dims[2]; + int in_height = in_dims[3]; + int in_width = in_dims[4]; + int out_depth = out_dims[2]; + int out_height = out_dims[3]; + int out_width = out_dims[4]; + if (data_format == "NDHWC") { + channels = in_dims[4]; + in_depth = in_dims[1]; + in_height = in_dims[2]; + in_width = in_dims[3]; + out_depth = out_dims[1]; + out_height = out_dims[2]; + out_width = out_dims[3]; + } + + if (mode == "reflect") { + PADDLE_ENFORCE_GT(in_depth, pads[4], + platform::errors::InvalidArgument( + "The depth of Input(X)'s dimension should be " + "greater than pad_front" + " in reflect mode" + ", but received depth(%d) and pad_front(%d).", + in_depth, pads[4])); + PADDLE_ENFORCE_GT(in_depth, pads[5], + platform::errors::InvalidArgument( + "The depth of Input(X)'s dimension should be " + "greater than pad_back" + " in reflect mode" + ", but received depth(%d) and pad_back(%d).", + in_depth, pads[5])); + + PADDLE_ENFORCE_GT(in_height, pads[2], + platform::errors::InvalidArgument( + "The height of Input(X)'s dimension should be " + "greater than pad_top" + " in reflect mode" + ", but received depth(%d) and pad_top(%d).", + in_height, pads[2])); + PADDLE_ENFORCE_GT(in_height, pads[3], + platform::errors::InvalidArgument( + "The height of Input(X)'s dimension should be " + "greater than pad_bottom" + " in reflect mode" + ", but received depth(%d) and pad_bottom(%d).", + in_height, pads[3])); + + PADDLE_ENFORCE_GT(in_width, pads[0], + platform::errors::InvalidArgument( + "The width of Input(X)'s dimension should be " + "greater than pad_left" + " in reflect mode" + ", but received depth(%d) and pad_left(%d).", + in_width, pads[0])); + PADDLE_ENFORCE_GT(in_width, pads[1], + platform::errors::InvalidArgument( + "The width of Input(X)'s dimension should be " + "greater than pad_right" + " in reflect mode" + ", but received depth(%d) and pad_right(%d).", + in_width, pads[1])); + } + + const int pad_left = pads[0]; + const int pad_top = pads[2]; + const int pad_front = pads[4]; + const int num = in_dims[0]; + + auto stream = context.cuda_device_context().stream(); + int block = PADDLE_CUDA_NUM_THREADS; + const int out_size = out->numel(); + int grid = (out_size + block - 1) / block; + + if (data_format == "NCDHW") { + 
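+      // Forward pass: launch one thread per output element (the grid covers
+      // out_size). Each mode-specific kernel maps its output coordinate back
+      // to the source input coordinate, so no atomics are needed here; the
+      // reflect/replicate/circular gradient kernels above instead accumulate
+      // with CudaAtomicAdd, since several output positions can map onto the
+      // same input element.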
if (mode == "reflect") { + Pad3DReflectNCDHW<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + out_data); + } else if (mode == "replicate") { + Pad3DReplicateNCDHW<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + out_data); + } else if (mode == "circular") { + Pad3DCircularNCDHW<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + out_data); + } else { + Pad3DConstNCDHW<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + value, out_data); + } + } else { + if (mode == "reflect") { + Pad3DReflectNDHWC<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + out_data); + } else if (mode == "replicate") { + Pad3DReplicateNDHWC<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + out_data); + } else if (mode == "circular") { + Pad3DCircularNDHWC<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + out_data); + } else { + Pad3DConstNDHWC<<>>( + out_size, in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + value, out_data); + } + } + } +}; + +template +class Pad3dGradCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + std::vector pads = GetPaddings(context); + auto mode = context.Attr("mode"); + auto data_format = context.Attr("data_format"); + auto* d_out = context.Input(framework::GradVarName("Out")); + auto* d_in = context.Output(framework::GradVarName("X")); + auto d_in_dims = d_in->dims(); + auto d_out_dims = d_out->dims(); + const T* d_out_data = d_out->data(); + T* d_in_data = d_in->mutable_data(context.GetPlace()); + + math::SetConstant set_zero; + set_zero(context.template device_context(), + d_in, static_cast(0)); + + const int pad_left = pads[0]; + const int pad_top = pads[2]; + const int pad_front = pads[4]; + + const int num = d_in_dims[0]; + + auto stream = context.cuda_device_context().stream(); + int block = PADDLE_CUDA_NUM_THREADS; + const int out_size = d_out->numel(); + const int in_size = d_in->numel(); + int grid = (out_size + block - 1) / block; + + if (data_format == "NCDHW") { + const int channels = d_in_dims[1]; + const int in_depth = d_in_dims[2]; + const int in_height = d_in_dims[3]; + const int in_width = d_in_dims[4]; + const int out_depth = d_out_dims[2]; + const int out_height = d_out_dims[3]; + const int out_width = d_out_dims[4]; + + if (mode == "reflect") { + Pad3DGradReflectNCDHW<<>>( + out_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + d_out_data); + } else if (mode == "replicate") { + Pad3DGradReplicateNCDHW<<>>( + out_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + d_out_data); + } else if (mode == "circular") { + Pad3DGradCircularNCDHW<<>>( + out_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, 
pad_front, pad_top, pad_left, + d_out_data); + } else { + grid = (in_size + block - 1) / block; + Pad3DGradConstNCDHW<<>>( + in_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + d_out_data); + } + } else { + const int channels = d_in_dims[4]; + const int in_depth = d_in_dims[1]; + const int in_height = d_in_dims[2]; + const int in_width = d_in_dims[3]; + const int out_depth = d_out_dims[1]; + const int out_height = d_out_dims[2]; + const int out_width = d_out_dims[3]; + if (mode == "reflect") { + Pad3DGradReflectNDHWC<<>>( + out_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + d_out_data); + } else if (mode == "replicate") { + Pad3DGradReplicateNDHWC<<>>( + out_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + d_out_data); + } else if (mode == "circular") { + Pad3DGradCircularNDHWC<<>>( + out_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + d_out_data); + } else { + grid = (in_size + block - 1) / block; + Pad3DGradConstNDHWC<<>>( + in_size, d_in_data, num, channels, in_depth, in_height, in_width, + out_depth, out_height, out_width, pad_front, pad_top, pad_left, + d_out_data); + } + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +namespace plat = paddle::platform; + +REGISTER_OP_CUDA_KERNEL(pad3d, ops::Pad3dCUDAKernel, + ops::Pad3dCUDAKernel, + ops::Pad3dCUDAKernel, ops::Pad3dCUDAKernel, + ops::Pad3dCUDAKernel); +REGISTER_OP_CUDA_KERNEL(pad3d_grad, ops::Pad3dGradCUDAKernel, + ops::Pad3dGradCUDAKernel, + ops::Pad3dGradCUDAKernel); diff --git a/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py new file mode 100644 index 0000000000000..a0c26b512955e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_cosine_similarity_api.py @@ -0,0 +1,121 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
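As a minimal sketch of the per-axis index arithmetic implemented by the reflect, replicate and circular kernels registered above (illustrative Python only, not part of the patch; `map_index` is a hypothetical helper name):

    def map_index(out_idx, pad, size, mode):
        # Map one output coordinate back to an input coordinate along a single
        # axis, mirroring the per-axis arithmetic in the CUDA kernels.
        i = out_idx - pad
        if mode == "reflect":
            i = max(i, -i)                # mirror across the low edge
            i = min(i, 2 * size - i - 2)  # mirror across the high edge
        elif mode == "replicate":
            i = min(size - 1, max(i, 0))  # clamp to the valid range
        elif mode == "circular":
            i = (i % size + size) % size  # wrap around
        else:  # "constant": out-of-range positions are filled with `value`
            return i if 0 <= i < size else None
        return i

    # Reflect padding of a length-3 axis with pad=2 reads indices 2,1,0,1,2,1,0,
    # matching the 'reflect' case shown in the F.pad docstring below.
    print([map_index(o, 2, 3, "reflect") for o in range(7)])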
+ +import unittest +import numpy as np +from op_test import OpTest +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddle.fluid.core as core + +from paddle.fluid import Program, program_guard, Executor, default_main_program + + +class TestCosineSimilarityAPI(unittest.TestCase): + def setUp(self): + self.places = [paddle.CPUPlace()] + if core.is_compiled_with_cuda(): + self.places.append(paddle.CUDAPlace(0)) + + def _get_numpy_out(self, x1, x2, dim=1, eps=1e-8): + w12 = np.sum(x1 * x2, axis=dim) + w1 = np.sum(x1 * x1, axis=dim) + w2 = np.sum(x2 * x2, axis=dim) + n12 = np.sqrt(np.clip(w1 * w2, eps * eps, None)) + cos_sim = w12 / n12 + return cos_sim + + def check_static_result(self, place): + paddle.enable_static() + + with program_guard(Program(), Program()): + shape = [10, 15] + dim = 1 + eps = 1e-8 + np.random.seed(0) + np_x1 = np.random.rand(*shape).astype(np.float32) + np_x2 = np.random.rand(*shape).astype(np.float32) + + x1 = paddle.data(name="x1", shape=shape) + x2 = paddle.data(name="x2", shape=shape) + result = F.cosine_similarity(x1, x2, dim=dim, eps=eps) + exe = Executor(place) + fetches = exe.run(default_main_program(), + feed={"x1": np_x1, + "x2": np_x2}, + fetch_list=[result]) + + np_out = self._get_numpy_out(np_x1, np_x2, dim=dim, eps=eps) + self.assertTrue(np.allclose(fetches[0], np_out)) + + def test_static(self): + for place in self.places: + self.check_static_result(place=place) + + def test_dygraph_1(self): + paddle.disable_static() + + shape = [10, 15] + dim = 1 + eps = 1e-8 + np.random.seed(1) + np_x1 = np.random.rand(*shape).astype(np.float32) + np_x2 = np.random.rand(*shape).astype(np.float32) + np_out = self._get_numpy_out(np_x1, np_x2, dim=dim, eps=eps) + + tesnor_x1 = paddle.to_variable(np_x1) + tesnor_x2 = paddle.to_variable(np_x2) + y = F.cosine_similarity(tesnor_x1, tesnor_x2, dim=dim, eps=eps) + + self.assertTrue(np.allclose(y.numpy(), np_out)) + + def test_dygraph_2(self): + paddle.disable_static() + + shape = [12, 13] + dim = 0 + eps = 1e-6 + np.random.seed(1) + np_x1 = np.random.rand(*shape).astype(np.float32) + np_x2 = np.random.rand(*shape).astype(np.float32) + np_out = self._get_numpy_out(np_x1, np_x2, dim=dim, eps=eps) + + tesnor_x1 = paddle.to_variable(np_x1) + tesnor_x2 = paddle.to_variable(np_x2) + y = F.cosine_similarity(tesnor_x1, tesnor_x2, dim=dim, eps=eps) + + self.assertTrue(np.allclose(y.numpy(), np_out)) + + def test_dygraph_3(self): + paddle.disable_static() + + shape1 = [10, 12, 10] + shape2 = [10, 1, 10] + dim = 2 + eps = 1e-6 + np.random.seed(1) + np_x1 = np.random.rand(*shape1).astype(np.float32) + np_x2 = np.random.rand(*shape2).astype(np.float32) + np_out = self._get_numpy_out(np_x1, np_x2, dim=dim, eps=eps) + + tesnor_x1 = paddle.to_variable(np_x1) + tesnor_x2 = paddle.to_variable(np_x2) + y = F.cosine_similarity(tesnor_x1, tesnor_x2, dim=dim, eps=eps) + + self.assertTrue(np.allclose(y.numpy(), np_out)) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_pad3d_op.py b/python/paddle/fluid/tests/unittests/test_pad3d_op.py new file mode 100644 index 0000000000000..68589e6d8182f --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_pad3d_op.py @@ -0,0 +1,670 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest +import paddle +import paddle.nn as nn +import paddle.nn.functional as F +import paddle.fluid.core as core + +from paddle.fluid import Program, program_guard, Executor, default_main_program + + +class TestPad3dOp(OpTest): + def setUp(self): + paddle.enable_static() + self.value = 0.0 + self.variable_paddings = False + self.initTestCase() + self.op_type = "pad3d" + self.inputs = {'X': np.random.random(self.shape).astype("float64")} + self.attrs = {} + if self.variable_paddings: + self.attrs['paddings'] = [] + self.inputs['Paddings'] = np.array(self.paddings).flatten().astype( + "int32") + else: + self.attrs['paddings'] = np.array(self.paddings).flatten().astype( + "int32") + self.attrs['value'] = self.value + self.attrs['mode'] = self.mode + self.attrs['data_format'] = self.data_format + if self.data_format == "NCDHW": + paddings = [ + (0, 0), + (0, 0), + (self.paddings[4], self.paddings[5]), + (self.paddings[2], self.paddings[3]), + (self.paddings[0], self.paddings[1]), + ] + else: + paddings = [ + (0, 0), + (self.paddings[4], self.paddings[5]), + (self.paddings[2], self.paddings[3]), + (self.paddings[0], self.paddings[1]), + (0, 0), + ] + if self.mode == "constant": + out = np.pad(self.inputs['X'], + paddings, + mode=self.mode, + constant_values=self.value) + elif self.mode == "reflect": + out = np.pad(self.inputs['X'], paddings, mode=self.mode) + elif self.mode == "replicate": + out = np.pad(self.inputs['X'], paddings, mode="edge") + elif self.mode == "circular": + out = np.pad(self.inputs['X'], paddings, mode="wrap") + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X'], 'Out') + + def initTestCase(self): + self.shape = (2, 3, 4, 5, 6) + self.paddings = [0, 0, 0, 0, 0, 0] + self.mode = "constant" + self.data_format = "NCDHW" + self.pad_value = 0.0 + + +class TestCase1(TestPad3dOp): + def initTestCase(self): + self.shape = (2, 3, 4, 5, 6) + self.paddings = [0, 1, 2, 3, 4, 5] + self.mode = "constant" + self.data_format = "NCDHW" + self.value = 1.0 + + +class TestCase2(TestPad3dOp): + def initTestCase(self): + self.shape = (2, 3, 4, 5, 6) + self.paddings = [1, 1, 1, 1, 1, 1] + self.mode = "constant" + self.data_format = "NDHWC" + self.value = 1.0 + + +class TestCase3(TestPad3dOp): + def initTestCase(self): + self.shape = (2, 3, 4, 5, 6) + self.paddings = [0, 1, 1, 0, 2, 3] + self.mode = "reflect" + self.data_format = "NCDHW" + + +class TestCase4(TestPad3dOp): + def initTestCase(self): + self.shape = (4, 4, 4, 4, 4) + self.paddings = [0, 1, 2, 1, 2, 3] + self.mode = "reflect" + self.data_format = "NDHWC" + + +class TestCase5(TestPad3dOp): + def initTestCase(self): + self.shape = (2, 3, 4, 5, 6) + self.paddings = [0, 1, 2, 3, 2, 1] + self.mode = "replicate" + self.data_format = "NCDHW" + + +class TestCase6(TestPad3dOp): + def initTestCase(self): + self.shape = (4, 4, 4, 4, 4) + self.paddings = [5, 4, 2, 1, 2, 3] + self.mode = "replicate" + self.data_format = "NDHWC" + + +class TestCase7(TestPad3dOp): + def initTestCase(self): 
+ self.shape = (2, 3, 4, 5, 6) + self.paddings = [0, 1, 2, 3, 2, 1] + self.mode = "circular" + self.data_format = "NCDHW" + + +class TestCase8(TestPad3dOp): + def initTestCase(self): + self.shape = (4, 4, 4, 4, 4) + self.paddings = [0, 1, 2, 1, 2, 3] + self.mode = "circular" + self.data_format = "NDHWC" + + +class TestPadAPI(unittest.TestCase): + def setUp(self): + self.places = [paddle.CPUPlace()] + if core.is_compiled_with_cuda(): + self.places.append(paddle.CUDAPlace(0)) + + def check_static_result_1(self, place): + paddle.enable_static() + with program_guard(Program(), Program()): + input_shape = (1, 2, 3, 4, 5) + pad = [1, 2, 1, 1, 3, 4] + mode = "constant" + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + x = paddle.data(name="x", shape=input_shape) + result = F.pad(x=x, pad=pad, value=value, mode=mode) + exe = Executor(place) + fetches = exe.run(default_main_program(), + feed={"x": input_data}, + fetch_list=[result]) + + np_out = self._get_numpy_out(input_data, pad, mode, value) + self.assertTrue(np.allclose(fetches[0], np_out)) + + def check_static_result_2(self, place): + paddle.enable_static() + with program_guard(Program(), Program()): + input_shape = (2, 3, 4, 5, 6) + pad = [1, 2, 1, 1, 1, 2] + mode = "reflect" + input_data = np.random.rand(*input_shape).astype(np.float32) + x = paddle.data(name="x", shape=input_shape) + result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") + result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") + exe = Executor(place) + fetches = exe.run(default_main_program(), + feed={"x": input_data}, + fetch_list=[result1, result2]) + + np_out1 = self._get_numpy_out( + input_data, pad, mode, data_format="NCDHW") + np_out2 = self._get_numpy_out( + input_data, pad, mode, data_format="NDHWC") + self.assertTrue(np.allclose(fetches[0], np_out1)) + self.assertTrue(np.allclose(fetches[1], np_out2)) + + def check_static_result_3(self, place): + paddle.enable_static() + with program_guard(Program(), Program()): + input_shape = (2, 3, 4, 5, 6) + pad = [1, 2, 1, 1, 3, 4] + mode = "replicate" + input_data = np.random.rand(*input_shape).astype(np.float32) + x = paddle.data(name="x", shape=input_shape) + result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") + result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") + exe = Executor(place) + fetches = exe.run(default_main_program(), + feed={"x": input_data}, + fetch_list=[result1, result2]) + + np_out1 = self._get_numpy_out( + input_data, pad, mode, data_format="NCDHW") + np_out2 = self._get_numpy_out( + input_data, pad, mode, data_format="NDHWC") + self.assertTrue(np.allclose(fetches[0], np_out1)) + self.assertTrue(np.allclose(fetches[1], np_out2)) + + def check_static_result_4(self, place): + paddle.enable_static() + with program_guard(Program(), Program()): + input_shape = (2, 3, 4, 5, 6) + pad = [1, 2, 1, 1, 3, 4] + mode = "circular" + input_data = np.random.rand(*input_shape).astype(np.float32) + x = paddle.data(name="x", shape=input_shape) + result1 = F.pad(x=x, pad=pad, mode=mode, data_format="NCDHW") + result2 = F.pad(x=x, pad=pad, mode=mode, data_format="NDHWC") + exe = Executor(place) + fetches = exe.run(default_main_program(), + feed={"x": input_data}, + fetch_list=[result1, result2]) + + np_out1 = self._get_numpy_out( + input_data, pad, mode, data_format="NCDHW") + np_out2 = self._get_numpy_out( + input_data, pad, mode, data_format="NDHWC") + self.assertTrue(np.allclose(fetches[0], np_out1)) + self.assertTrue(np.allclose(fetches[1], np_out2)) + + def 
_get_numpy_out(self, + input_data, + pad, + mode, + value=0, + data_format="NCDHW"): + if data_format == "NCDHW": + pad = [ + (0, 0), + (0, 0), + (pad[4], pad[5]), + (pad[2], pad[3]), + (pad[0], pad[1]), + ] + elif data_format == "NDHWC": + pad = [ + (0, 0), + (pad[4], pad[5]), + (pad[2], pad[3]), + (pad[0], pad[1]), + (0, 0), + ] + elif data_format == "NCHW": + pad = [ + (0, 0), + (0, 0), + (pad[2], pad[3]), + (pad[0], pad[1]), + ] + elif data_format == "NHWC": + pad = [ + (0, 0), + (pad[2], pad[3]), + (pad[0], pad[1]), + (0, 0), + ] + elif data_format == "NCL": + pad = [ + (0, 0), + (0, 0), + (pad[0], pad[1]), + ] + elif data_format == "NLC": + pad = [ + (0, 0), + (pad[0], pad[1]), + (0, 0), + ] + + if mode == "constant": + out = np.pad(input_data, pad, mode=mode, constant_values=value) + elif mode == "reflect": + out = np.pad(input_data, pad, mode=mode) + elif mode == "replicate": + out = np.pad(input_data, pad, mode="edge") + elif mode == "circular": + out = np.pad(input_data, pad, mode="wrap") + + return out + + def test_static(self): + for place in self.places: + self.check_static_result_1(place=place) + self.check_static_result_2(place=place) + self.check_static_result_3(place=place) + self.check_static_result_4(place=place) + + def test_dygraph_1(self): + paddle.disable_static() + + input_shape = (1, 2, 3, 4, 5) + pad = [1, 2, 1, 1, 3, 4] + mode = "constant" + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCDHW") + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NDHWC") + tensor_data = paddle.to_tensor(input_data) + + y1 = F.pad(tensor_data, + pad=pad, + mode=mode, + value=value, + data_format="NCDHW") + y2 = F.pad(tensor_data, + pad=pad, + mode=mode, + value=value, + data_format="NDHWC") + + self.assertTrue(np.allclose(y1.numpy(), np_out1)) + self.assertTrue(np.allclose(y2.numpy(), np_out2)) + + def test_dygraph_2(self): + paddle.disable_static() + + input_shape = (2, 3, 4, 5) + pad = [1, 1, 3, 4] + mode = "constant" + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCHW") + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NHWC") + + tensor_data = paddle.to_tensor(input_data) + tensor_pad = paddle.to_tensor(pad, dtype="int32") + + y1 = F.pad(tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NCHW") + y2 = F.pad(tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NHWC") + + self.assertTrue(np.allclose(y1.numpy(), np_out1)) + self.assertTrue(np.allclose(y2.numpy(), np_out2)) + + def test_dygraph_2(self): + paddle.disable_static() + + input_shape = (2, 3, 4, 5) + pad = [1, 1, 3, 4] + mode = "constant" + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCHW") + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NHWC") + tensor_data = paddle.to_tensor(input_data) + tensor_pad = paddle.to_tensor(pad, dtype="int32") + + y1 = F.pad(tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NCHW") + y2 = F.pad(tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NHWC") + + self.assertTrue(np.allclose(y1.numpy(), np_out1)) + self.assertTrue(np.allclose(y2.numpy(), np_out2)) + + def test_dygraph_3(self): + 
paddle.disable_static() + + input_shape = (3, 4, 5) + pad = [3, 4] + mode = "constant" + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + np_out1 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NCL") + np_out2 = self._get_numpy_out( + input_data, pad, mode, value, data_format="NLC") + tensor_data = paddle.to_tensor(input_data) + tensor_pad = paddle.to_tensor(pad, dtype="int32") + + y1 = F.pad(tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NCL") + y2 = F.pad(tensor_data, + pad=tensor_pad, + mode=mode, + value=value, + data_format="NLC") + + self.assertTrue(np.allclose(y1.numpy(), np_out1)) + self.assertTrue(np.allclose(y2.numpy(), np_out2)) + + +class TestPad1dAPI(unittest.TestCase): + def _get_numpy_out(self, + input_data, + pad, + mode, + value=0.0, + data_format="NCL"): + if data_format == "NCL": + pad = [ + (0, 0), + (0, 0), + (pad[0], pad[1]), + ] + else: + pad = [ + (0, 0), + (pad[0], pad[1]), + (0, 0), + ] + + if mode == "constant": + out = np.pad(input_data, pad, mode=mode, constant_values=value) + elif mode == "reflect": + out = np.pad(input_data, pad, mode=mode) + elif mode == "replicate": + out = np.pad(input_data, pad, mode="edge") + + return out + + def setUp(self): + self.places = [paddle.CPUPlace()] + if core.is_compiled_with_cuda(): + self.places.append(paddle.CUDAPlace(0)) + + def test_class(self): + paddle.disable_static() + for place in self.places: + input_shape = (3, 4, 5) + pad = [1, 2] + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + + pad_reflection = nn.ReflectionPad1d(padding=pad) + pad_replication = nn.ReplicationPad1d(padding=pad) + pad_constant = nn.ConstantPad1d(padding=pad, value=value) + + data = paddle.to_tensor(input_data) + + output = pad_reflection(data) + np_out = self._get_numpy_out( + input_data, pad, "reflect", data_format="NCL") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + output = pad_replication(data) + np_out = self._get_numpy_out( + input_data, pad, "replicate", data_format="NCL") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + output = pad_constant(data) + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCL") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + +class TestPad2dAPI(unittest.TestCase): + def _get_numpy_out(self, + input_data, + pad, + mode, + value=0.0, + data_format="NCHW"): + if data_format == "NCHW": + pad = [ + (0, 0), + (0, 0), + (pad[2], pad[3]), + (pad[0], pad[1]), + ] + else: + pad = [ + (0, 0), + (pad[2], pad[3]), + (pad[0], pad[1]), + (0, 0), + ] + + if mode == "constant": + out = np.pad(input_data, pad, mode=mode, constant_values=value) + elif mode == "reflect": + out = np.pad(input_data, pad, mode=mode) + elif mode == "replicate": + out = np.pad(input_data, pad, mode="edge") + + return out + + def setUp(self): + self.places = [paddle.CPUPlace()] + if core.is_compiled_with_cuda(): + self.places.append(paddle.CUDAPlace(0)) + + def test_class(self): + paddle.disable_static() + for place in self.places: + input_shape = (3, 4, 5, 6) + pad = [1, 2, 2, 1] + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + + pad_reflection = nn.ReflectionPad2d(padding=pad) + pad_replication = nn.ReplicationPad2d(padding=pad) + pad_constant = nn.ConstantPad2d(padding=pad, value=value) + pad_zero = nn.ZeroPad2d(padding=pad) + + data = paddle.to_tensor(input_data) + + output = pad_reflection(data) + np_out = self._get_numpy_out( + input_data, pad, 
"reflect", data_format="NCHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + output = pad_replication(data) + np_out = self._get_numpy_out( + input_data, pad, "replicate", data_format="NCHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + output = pad_constant(data) + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + output = pad_zero(data) + np_out = self._get_numpy_out( + input_data, pad, "constant", value=0, data_format="NCHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + +class TestPad3dAPI(unittest.TestCase): + def _get_numpy_out(self, + input_data, + pad, + mode, + value=0.0, + data_format="NCDHW"): + if data_format == "NCDHW": + pad = [ + (0, 0), + (0, 0), + (pad[4], pad[5]), + (pad[2], pad[3]), + (pad[0], pad[1]), + ] + else: + pad = [ + (0, 0), + (pad[4], pad[5]), + (pad[2], pad[3]), + (pad[0], pad[1]), + (0, 0), + ] + + if mode == "constant": + out = np.pad(input_data, pad, mode=mode, constant_values=value) + elif mode == "reflect": + out = np.pad(input_data, pad, mode=mode) + elif mode == "replicate": + out = np.pad(input_data, pad, mode="edge") + + return out + + def setUp(self): + self.places = [paddle.CPUPlace()] + if core.is_compiled_with_cuda(): + self.places.append(paddle.CUDAPlace(0)) + + def test_class(self): + paddle.disable_static() + for place in self.places: + input_shape = (3, 4, 5, 6, 7) + pad = [1, 2, 2, 1, 1, 0] + value = 100 + input_data = np.random.rand(*input_shape).astype(np.float32) + + pad_replication = nn.ReplicationPad3d(padding=pad) + pad_constant = nn.ConstantPad3d(padding=pad, value=value) + + data = paddle.to_tensor(input_data) + + output = pad_replication(data) + np_out = self._get_numpy_out( + input_data, pad, "replicate", data_format="NCDHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + output = pad_constant(data) + np_out = self._get_numpy_out( + input_data, pad, "constant", value=value, data_format="NCDHW") + self.assertTrue(np.allclose(output.numpy(), np_out)) + + +class TestPad3dOpError(unittest.TestCase): + def test_errors(self): + def test_variable(): + input_shape = (1, 2, 3, 4, 5) + data = np.random.rand(*input_shape).astype(np.float32) + F.pad(x=data, paddings=[1, 1, 1, 1, 1, 1]) + + def test_reflect_1(): + input_shape = (1, 2, 3, 4, 5) + data = np.random.rand(*input_shape).astype(np.float32) + x = paddle.data(name="x", shape=input_shape) + y = F.pad(x, pad=[5, 6, 1, 1, 1, 1], value=1, mode='reflect') + place = paddle.CPUPlace() + exe = Executor(place) + outputs = exe.run(feed={'x': data}, fetch_list=[y.name]) + + def test_reflect_2(): + input_shape = (1, 2, 3, 4, 5) + data = np.random.rand(*input_shape).astype(np.float32) + x = paddle.data(name="x", shape=input_shape) + y = F.pad(x, pad=[1, 1, 4, 3, 1, 1], value=1, mode='reflect') + place = paddle.CPUPlace() + exe = Executor(place) + outputs = exe.run(feed={'x': data}, fetch_list=[y.name]) + + def test_reflect_3(): + input_shape = (1, 2, 3, 4, 5) + data = np.random.rand(*input_shape).astype(np.float32) + x = paddle.data(name="x", shape=input_shape) + y = F.pad(x, pad=[1, 1, 1, 1, 2, 3], value=1, mode='reflect') + place = paddle.CPUPlace() + exe = Executor(place) + outputs = exe.run(feed={'x': data}, fetch_list=[y.name]) + + self.assertRaises(TypeError, test_variable) + + self.assertRaises(Exception, test_reflect_1) + + self.assertRaises(Exception, test_reflect_2) + + self.assertRaises(Exception, test_reflect_3) + + +if __name__ == 
'__main__': + unittest.main() diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 7b6dcdf7f67de..289bf40dce5a7 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -65,6 +65,16 @@ from .layer.common import BilinearTensorProduct #DEFINE_ALIAS from .layer.common import Pool2D #DEFINE_ALIAS from .layer.common import Pad2D #DEFINE_ALIAS +from .layer.common import ReflectionPad1d #DEFINE_ALIAS +from .layer.common import ReplicationPad1d #DEFINE_ALIAS +from .layer.common import ConstantPad1d #DEFINE_ALIAS +from .layer.common import ReflectionPad2d #DEFINE_ALIAS +from .layer.common import ReplicationPad2d #DEFINE_ALIAS +from .layer.common import ConstantPad2d #DEFINE_ALIAS +from .layer.common import ZeroPad2d #DEFINE_ALIAS +from .layer.common import ReplicationPad3d #DEFINE_ALIAS +from .layer.common import ConstantPad3d #DEFINE_ALIAS +from .layer.common import CosineSimilarity #DEFINE_ALIAS from .layer.common import Embedding #DEFINE_ALIAS from .layer.common import Linear #DEFINE_ALIAS from .layer.common import Flatten #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index bc71b8bdf06d2..e587466e76483 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -58,6 +58,7 @@ from .common import pad #DEFINE_ALIAS from .common import pad_constant_like #DEFINE_ALIAS from .common import pad2d #DEFINE_ALIAS +from .common import cosine_similarity #DEFINE_ALIAS from .common import unfold #DEFINE_ALIAS # from .common import bilinear_tensor_product #DEFINE_ALIAS from .common import assign #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index fe41cb6e64c34..e90db0b67d78f 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -13,17 +13,24 @@ # limitations under the License. import warnings +import paddle.fluid.core as core +from ...fluid.framework import in_dygraph_mode, core from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.layers.tensor import Variable, fill_constant +from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat # TODO: define the common functions to build a neural network from ...fluid.layers import dropout #DEFINE_ALIAS from ...fluid.layers import label_smooth #DEFINE_ALIAS from ...fluid import one_hot #DEFINE_ALIAS -from ...fluid.layers import pad #DEFINE_ALIAS from ...fluid.layers import pad2d #DEFINE_ALIAS from ...fluid.layers import unfold #DEFINE_ALIAS from ...fluid.layers import assign #DEFINE_ALIAS +from ...fluid.layers import squeeze #DEFINE_ALIAS +from ...fluid.layers import unsqueeze #DEFINE_ALIAS +from ...fluid.layers import elementwise_mul #DEFINE_ALIAS +from ...tensor import clamp #DEFINE_ALIAS +from ...tensor import sum #DEFINE_ALIAS +from ...tensor import sqrt #DEFINE_ALIAS #from ...fluid.layers import fc #DEFINE_ALIAS from ...fluid.layers import pad_constant_like #DEFINE_ALIAS @@ -40,7 +47,8 @@ 'unfold', # 'bilinear_tensor_product', 'assign', - 'interpolate' + 'interpolate', + 'cosine_similarity', ] @@ -446,3 +454,235 @@ def _is_list_or_turple_(data): outputs={"Out": out}, attrs=attrs) return out + + +def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None): + """ + Pad tensor according to 'pad' and 'mode'. + If mode is 'reflect', pad[0] and pad[1] must be no greater + than width-1. The height and depth dimension has the same condition. 
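    For instance (a minimal sketch, consistent with the code examples further down):

        import numpy as np
        import paddle
        import paddle.nn.functional as F

        paddle.disable_static()
        x = paddle.to_tensor(np.ones((1, 1, 2, 3), dtype=np.float32))  # NCHW: H=2, W=3
        # pad = [pad_left, pad_right, pad_top, pad_bottom]: the last axis (W) is
        # padded first, then H, so the result is 2+1+1 high and 3+1+2 wide.
        y = F.pad(x, pad=[1, 2, 1, 1], mode='constant', value=0)
        print(y.shape)  # [1, 1, 4, 6]
        # In 'reflect' mode the same input only admits pads up to W-1 = 2 and
        # H-1 = 1 per side; e.g. pad=[3, 3, 1, 1] with mode='reflect' is rejected.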
+ + Parameters: + x (Tensor): The input tensor with data type float32/double/int32/int64_t. + pad (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. 1. If input dimension is 3, then the pad has the form (pad_left, + pad_right). 2. If the input dimension is 4, then the pad has the form (pad_left, pad_right, + pad_top, pad_bottom). 3. If the input dimension is 5, then the pad has the form + (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back). + + mode (str): Four modes: 'constant' (default), 'reflect', 'replicate', 'circular'. + When in 'constant' mode, this op uses a constant value to pad the input tensor. + When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor. + When in 'replicate' mode, uses input boundaries to pad the input tensor. + When in 'circular' mode, uses circular input to pad the input tensor. + Default is 'constant' + value (float32): The value to fill the padded areas in 'constant' mode . Default is 0.0 + data_format (str): An string from: "NCL", "NLC", NHWC", "NCHW", "NCDHW", "NDHWC". Specify the data format of + the input data. + Default is "NCHW" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: a Tensor padded according to pad and mode and data type is same as input. + Return Type: Tensor + + Examples: + .. code-block:: text + + x = [[[[[1., 2., 3.], + [4., 5., 6.]]]]] + + Case 0: + pad = [2, 2, 1, 1, 0, 0], + mode = 'constant' + value = 0 + Out = [[[[[0. 0. 0. 0. 0. 0. 0.] + [0. 0. 1. 2. 3. 0. 0.] + [0. 0. 4. 5. 6. 0. 0.] + [0. 0. 0. 0. 0. 0. 0.]]]]] + + Case 1: + pad = [2, 2, 1, 1, 0, 0], + mode = 'reflect' + Out = [[[[[6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.] + [6. 5. 4. 5. 6. 5. 4.] + [3. 2. 1. 2. 3. 2. 1.]]]]] + + Case 2: + pad = [2, 2, 1, 1, 0, 0], + mode = 'replicate' + Out = [[[[[1. 1. 1. 2. 3. 3. 3.] + [1. 1. 1. 2. 3. 3. 3.] + [4. 4. 4. 5. 6. 6. 6.] + [4. 4. 4. 5. 6. 6. 6.]]]]] + + Case 3: + pad = [2, 2, 1, 1, 0, 0], + mode = 'circular' + Out = [[[[[5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.] + [5. 6. 4. 5. 6. 4. 5.] + [2. 3. 1. 2. 3. 1. 2.]]]]] + + Code Examples: + .. code-block:: python + import numpy as np + import paddle + import paddle.nn.functional as F + + paddle.disable_static() + + # example 1 + x_shape = (1, 1, 3) + x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1 + tensor_x = paddle.to_tensor(x) + y = F.pad(tensor_x, pad=[2, 3], value=1, mode='constant') + print(y.numpy()) + # [[[1. 1. 1. 2. 3. 1. 1. 1.]]] + + # example 2 + x_shape = (1, 1, 2, 3) + x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1 + tensor_x = paddle.to_tensor(x) + y = F.pad(tensor_x, pad=[1, 2, 1, 1], value=1, mode='circular') + print(y.numpy()) + # [[[[6. 4. 5. 6. 4. 5.] + # [3. 1. 2. 3. 1. 2.] + # [6. 4. 5. 6. 4. 5.] + # [3. 1. 2. 3. 1. 
2.]]]] + """ + assert mode in ['reflect', 'replicate', 'constant', 'circular'], \ + "mode should be one of constant, reflect, replicate, circular, but got {}.".format(mode) + + data_format = data_format.upper() + assert data_format in ["NCL", "NCHW", "NCDHW", "NLC", "NHWC", "NDHWC"], \ + "data_format should be in one of [NCL, NCHW, NCDHW, NLC, NHWC, NDHWC], " \ + "but got {}".format(data_format) + + x_dim = len(x.shape) + + original_data_format = data_format + unsqueezed_dim = [] + + if isinstance(pad, Variable): + if data_format in ["NCL", "NCHW", "NCDHW"]: + data_format = "NCDHW" + if x_dim == 3: + pad = concat([zeros((4, ), dtype="int32"), pad], axis=0) + unsqueezed_dim = [3, 4] + x = unsqueeze(x, axes=unsqueezed_dim) + elif x_dim == 4: + pad = concat([pad, zeros((2, ), dtype="int32")], axis=0) + unsqueezed_dim = [2] + x = unsqueeze(x, axes=unsqueezed_dim) + elif data_format in ["NLC", "NHWC", "NDHWC"]: + data_format = "NDHWC" + if x_dim == 3: + pad = concat([zeros((4, ), dtype="int32"), pad], axis=0) + unsqueezed_dim = [2, 3] + x = unsqueeze(x, axes=unsqueezed_dim) + elif x_dim == 4: + pad = concat([pad, zeros((2, ), dtype="int32")], axis=0) + unsqueezed_dim = [1] + x = unsqueeze(x, axes=unsqueezed_dim) + else: + if data_format in ["NCL", "NCHW", "NCDHW"]: + data_format = "NCDHW" + if x_dim == 3: + pad = [0, 0, 0, 0] + pad + unsqueezed_dim = [3, 4] + x = unsqueeze(x, axes=unsqueezed_dim) + elif x_dim == 4: + pad = pad + [0, 0] + unsqueezed_dim = [2] + x = unsqueeze(x, axes=unsqueezed_dim) + elif data_format in ["NLC", "NHWC", "NDHWC"]: + data_format = "NDHWC" + if x_dim == 3: + pad = [0, 0, 0, 0] + pad + unsqueezed_dim = [2, 3] + x = unsqueeze(x, axes=unsqueezed_dim) + elif x_dim == 4: + pad = pad + [0, 0] + unsqueezed_dim = [1] + x = unsqueeze(x, axes=unsqueezed_dim) + + if in_dygraph_mode(): + if isinstance(pad, Variable): + pad = pad.numpy() + out = core.ops.pad3d(x, "paddings", pad, "mode", mode, "value", value, + "data_format", data_format, "name", name) + else: + attrs = {'mode': mode, 'value': value, 'data_format': data_format} + inputs = {'X': [x]} + if isinstance(pad, Variable): + inputs['Paddings'] = [pad] + attrs['paddings'] = [] + else: + attrs['paddings'] = pad + + helper = LayerHelper('pad3d', **locals()) + + dtype = helper.input_dtype(input_param_name='input') + out = helper.create_variable_for_type_inference(dtype) + helper.append_op( + type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs) + + if len(unsqueezed_dim) != 0: + out = squeeze(out, axes=unsqueezed_dim) + + return out + + +def cosine_similarity(x1, x2, dim=1, eps=1e-8): + """ + Compute cosine similarity between x1 and x2 along dim. + + Parameters: + x1 (Tensor): First input. float32/double. + x2 (Tensor): Second input. float32/double. + dim (int): Dimension of vectors to compute cosine similarity. Default is 1. + eps(float): Small value to avoid division by zero. Default is 1e-8. + + Returns: a Tensor representing cosine similarity between x1 and x2 along dim. + Return Type: Tensor + + Examples: + .. code-block:: text + Case 0: + x1 = [[0.8024077 0.9927354 0.27238318 0.8344984 ] + [0.48949873 0.5797396 0.65444374 0.66510963] + [0.1031398 0.9614342 0.08365563 0.6796464 ] + [0.10760343 0.7461209 0.7726148 0.5801006 ]] + x2 = [[0.62913156 0.1536727 0.9847992 0.04591406] + [0.9098952 0.15715368 0.8671125 0.3156102 ] + [0.4427798 0.54136837 0.5276275 0.32394758] + [0.3769419 0.8535014 0.48041078 0.9256797 ]] + dim = 1 + eps = 1e-8 + Out: [0.5275037 0.8368967 0.75037485 0.9245899] + + Code Examples: + .. 
code-block:: python + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + np.random.seed(0) + x1 = np.random.rand(2,3) + x2 = np.random.rand(2,3) + x1 = paddle.to_tensor(x1) + x2 = paddle.to_tensor(x2) + result = paddle.nn.functional.cosine_similarity(x1, x2, dim=0) + print(result.numpy()) + # [0.99806249 0.9817672 0.94987036] + + """ + w12 = sum(elementwise_mul(x1, x2), dim=dim) + w1 = sum(elementwise_mul(x1, x1), dim=dim) + w2 = sum(elementwise_mul(x2, x2), dim=dim) + n12 = sqrt(clamp(w1 * w2, min=eps * eps)) + cos_sim = w12 / n12 + return cos_sim diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py index f64252da5428a..ee057be40f1d2 100644 --- a/python/paddle/nn/layer/__init__.py +++ b/python/paddle/nn/layer/__init__.py @@ -38,6 +38,16 @@ from .common import BilinearTensorProduct #DEFINE_ALIAS from .common import Pool2D #DEFINE_ALIAS from .common import Pad2D #DEFINE_ALIAS +from .common import ReflectionPad1d #DEFINE_ALIAS +from .common import ReplicationPad1d #DEFINE_ALIAS +from .common import ConstantPad1d #DEFINE_ALIAS +from .common import ReflectionPad2d #DEFINE_ALIAS +from .common import ReplicationPad2d #DEFINE_ALIAS +from .common import ConstantPad2d #DEFINE_ALIAS +from .common import ZeroPad2d #DEFINE_ALIAS +from .common import ReplicationPad3d #DEFINE_ALIAS +from .common import ConstantPad3d #DEFINE_ALIAS +from .common import CosineSimilarity #DEFINE_ALIAS from .common import Embedding #DEFINE_ALIAS from .common import Linear #DEFINE_ALIAS from .common import Flatten #DEFINE_ALIAS diff --git a/python/paddle/nn/layer/common.py b/python/paddle/nn/layer/common.py index 45259bea49d42..c4823298f2035 100644 --- a/python/paddle/nn/layer/common.py +++ b/python/paddle/nn/layer/common.py @@ -22,8 +22,22 @@ from .. import functional as F __all__ = [ - 'BilinearTensorProduct', 'Pool2D', 'Embedding', 'Linear', 'UpSample', - 'Pad2D' + 'BilinearTensorProduct', + 'Pool2D', + 'Embedding', + 'Linear', + 'UpSample', + 'Pad2D', + 'ReflectionPad1d', + 'ReplicationPad1d', + 'ConstantPad1d', + 'ReflectionPad2d', + 'ReplicationPad2d', + 'ConstantPad2d', + 'ZeroPad2d', + 'ConstantPad3d', + 'ReplicationPad3d', + 'CosineSimilarity', ] @@ -258,12 +272,10 @@ class Pad2D(layers.Layer): """ :alias_main: paddle.nn.Pad2D :alias: paddle.nn.Pad2D,paddle.nn.layer.Pad2D,paddle.nn.layer.common.Pad2D - This interface is used to construct a callable object of the ``Pad2D`` class. The Pad2D layer pads the input tensor boundaries according to 'paddings' and 'mode'. If mode is 'reflect', paddings[0] and paddings[1] must be no greater than height-1. And the width dimension has the same condition. - Parameters: paddings (int | List[int32]): The padding size. If padding is a int, uses the same padding in all boundaries, if padding is a List, it must contain four integers, @@ -278,16 +290,12 @@ class Pad2D(layers.Layer): data_format (str): An string from: "NHWC", "NCHW". Specify the data format of the input data. Default is "NCHW" - Returns: None - Examples: .. 
code-block:: text - Input = [[[[1., 2., 3.], [4., 5., 6.]]]] - Case 0: paddings = [0, 1, 2, 3], mode = 'constant' @@ -295,24 +303,20 @@ class Pad2D(layers.Layer): Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.], [0., 0., 4., 5., 6., 0., 0., 0.], [0., 0., 0., 0., 0., 0., 0., 0.]]]] - Case 1: paddings = [0, 1, 2, 1], mode = 'reflect' Out = [[[[3., 2., 1., 2., 3., 2.], [6., 5., 4., 5., 6., 5.], [3., 2., 1., 2., 3., 2.]]]] - Case 2: paddings = [0, 1, 2, 1], mode = 'edge' Out = [[[[1., 1., 1., 2., 3., 3.], [4., 4., 4., 5., 6., 6.], [4., 4., 4., 5., 6., 6.]]]] - Code Examples: .. code-block:: python - import paddle.fluid as fluid import paddle.nn as nn import numpy as np @@ -342,3 +346,621 @@ def forward(self, input): mode=self._mode, pad_value=self._pad_value, data_format=self._data_format) + + +class ReflectionPad1d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ReflectionPad1d`` class. + Uses reflection of the input boundaries to pad the input tensor. + + Parameters: + padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right). + data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data. + Default is "NCL" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[1., 2., 3.], + [4., 5., 6.]]] + padding = [1, 2], + Out = [[[2. 1. 2. 3. 2. 1.] + [5. 4. 5. 6. 5. 4.]]] + + Code Examples: + .. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReflectionPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[2. 1. 2. 3. 2. 1.] + # [5. 4. 5. 6. 5. 4.]]] + """ + + def __init__(self, padding, data_format="NCL", name=None): + super(ReflectionPad1d, self).__init__() + self._mode = "reflect" + self._data_format = data_format + self._pad = padding + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + data_format=self._data_format, + name=self._name) + + +class ReplicationPad1d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ReplicationPad1d`` class. + Uses input boundaries to pad the input tensor. + + Parameters: + padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right). + data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data. + Default is "NCL" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[1., 2., 3.], + [4., 5., 6.]]] + padding = [1, 2], + Out = [[[2. 1. 2. 3. 2. 1.] + [5. 4. 5. 6. 5. 4.]]] + + Code Examples: + .. 
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[1. 1. 2. 3. 3. 3.] + # [1. 4. 5. 6. 6. 6.]]] + """ + + def __init__(self, padding, data_format="NCL", name=None): + super(ReplicationPad1d, self).__init__() + self._mode = "replicate" + self._data_format = data_format + self._pad = padding + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + data_format=self._data_format, + name=self._name) + + +class ConstantPad1d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ConstantPad1d`` class. + Uses a constant value to pad the input tensor. + + Parameters: + padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right). + value (float32): The value to fill the padded areas. Default is 0.0 + data_format (str): An string from: "NCL", "NLC". Specify the data format of the input data. + Default is "NCL" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[1., 2., 3.], + [4., 5., 6.]]] + padding = [1, 2], + value = 0.0 + Out = [[[0. 1. 2. 3. 0. 0.] + [0. 4. 5. 6. 0. 0.]]] + + Code Examples: + .. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 2, 3) + pad = [1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad1d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[0. 1. 2. 3. 0. 0.] + # [0. 4. 5. 6. 0. 0.]]] + """ + + def __init__(self, padding, value=0.0, data_format="NCL", name=None): + super(ConstantPad1d, self).__init__() + self._mode = "constant" + self._data_format = data_format + self._pad = padding + self._value = value + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + value=self._value, + data_format=self._data_format, + name=self._name) + + +class ConstantPad2d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ConstantPad2d`` class. + Uses a constant value to pad the input tensor. + + Parameters: + padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). + value (float32): The value to fill the padded areas. Default is 0.0 + data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. + Default is "NCHW" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[[1., 2., 3.], + [4., 5., 6.]]]] + padding = [1, 1, 0, 0] + value = 0.0 + Out = [[[[0. 1. 2. 3. 0.] + [0. 4. 5. 6. 0.]]]] + + Code Examples: + .. 
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]] + """ + + def __init__(self, padding, value=0.0, data_format="NCHW", name=None): + super(ConstantPad2d, self).__init__() + self._mode = "constant" + self._data_format = data_format + self._pad = padding + self._value = value + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + value=self._value, + data_format=self._data_format, + name=self._name) + + +class ZeroPad2d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ZeroPad2d`` class. + Uses 0 to pad the input tensor. + + Parameters: + padding (Variable | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). + data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. + Default is "NCHW" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[[1., 2., 3.], + [4., 5., 6.]]]] + padding = [1, 1, 0, 0] + Out = [[[[0. 1. 2. 3. 0.] + [0. 4. 5. 6. 0.]]]] + + Code Examples: + .. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ZeroPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]] + """ + + def __init__(self, padding, data_format="NCHW", name=None): + super(ZeroPad2d, self).__init__() + self._mode = "constant" + self._data_format = data_format + self._pad = padding + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + data_format=self._data_format, + name=self._name) + + +class ReplicationPad2d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ReplicationPad2d`` class. + Uses input boundaries to pad the input tensor. + + Parameters: + padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). + data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. + Default is "NCHW" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[[1., 2., 3.], + [4., 5., 6.]]]] + padding = [1, 1, 0, 0] + Out = [[[[1. 1. 2. 3. 3.] + [4. 4. 5. 6. 6.]]]] + + Code Examples: + .. 
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 2, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[1. 1. 2. 3.] + # [1. 1. 2. 3.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.]]]] + """ + + def __init__(self, padding, data_format="NCHW", name=None): + super(ReplicationPad2d, self).__init__() + self._mode = "replicate" + self._data_format = data_format + self._pad = padding + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + data_format=self._data_format, + name=self._name) + + +class ReflectionPad2d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ReflectionPad2d`` class. + Uses reflection of the input boundaries to pad the input tensor. + + Parameters: + padding (Variable | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom). + data_format (str): An string from: "NCHW", "NHWC". Specify the data format of the input data. + Default is "NCHW" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[[1., 2., 3.], + [4., 5., 6.]]]] + padding = [1, 1, 0, 0] + Out = [[[[2. 1. 2. 3. 2.] + [5. 4. 5. 6. 5.]]]] + + Code Examples: + .. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 4, 3) + pad = [1, 0, 1, 2] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReflectionPad2d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[ 5. 4. 5. 6.] + # [ 2. 1. 2. 3.] + # [ 5. 4. 5. 6.] + # [ 8. 7. 8. 9.] + # [11. 10. 11. 12.] + # [ 8. 7. 8. 9.] + # [ 5. 4. 5. 6.]]]] + """ + + def __init__(self, padding, data_format="NCHW", name=None): + super(ReflectionPad2d, self).__init__() + self._mode = "reflect" + self._data_format = data_format + self._pad = padding + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + data_format=self._data_format, + name=self._name) + + +class ConstantPad3d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ConstantPad3d`` class. + Uses a constant value to pad the input tensor. + + Parameters: + padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back). + value (float32): The value to fill the padded areas. Default is 0.0 + data_format (str): An string from: "NCDHW", "NDHWC". Specify the data format of the input data. + Default is "NCDHW" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[[[1., 2., 3.], + [4., 5., 6.]]]]] + padding = [1, 2, 0, 0, 0, 0] + value = 0.0 + Out = [[[[[0. 1. 2. 3. 0. 0.] + [0. 4. 5. 6. 0. 
0.]]]]] + + Code Examples: + .. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 1, 2, 3) + pad = [1, 0, 1, 2, 0, 0] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ConstantPad3d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[[0. 0. 0. 0.] + # [0. 1. 2. 3.] + # [0. 4. 5. 6.] + # [0. 0. 0. 0.] + # [0. 0. 0. 0.]]]]] + """ + + def __init__(self, padding, value=0.0, data_format="NCDHW", name=None): + super(ConstantPad3d, self).__init__() + self._mode = "constant" + self._data_format = data_format + self._pad = padding + self._value = value + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + value=self._value, + data_format=self._data_format, + name=self._name) + + +class ReplicationPad3d(layers.Layer): + """ + This interface is used to construct a callable object of the ``ReplicationPad3d`` class. + Uses input boundaries to pad the input tensor. + + Parameters: + padding (Tensor | List[int32]): The padding size with data type int32. [len(padding)/2] dimensions + of input will be padded. The pad has the form (pad_left, pad_right, pad_top, pad_bottom, pad_front, pad_back). + data_format (str): An string from: "NCDHW", "NDHWC". Specify the data format of the input data. + Default is "NCDHW" + name (str, optional) : The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + None + + Examples: + .. code-block:: text + + x = [[[[[1., 2., 3.], + [4., 5., 6.]]]]] + padding = [1, 2, 0, 0, 0, 0] + Out = [[[[[1. 1. 2. 3. 3. 3.] + [4. 4. 5. 6. 6. 6.]]]]] + + Code Examples: + .. code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + input_shape = (1, 1, 1, 2, 3) + pad = [1, 0, 1, 2, 0, 0] + data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) + 1 + my_pad = nn.ReplicationPad3d(padding=pad) + data = paddle.to_tensor(data) + result = my_pad(data) + print(result.numpy()) + # [[[[[1. 1. 2. 3.] + # [1. 1. 2. 3.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.] + # [4. 4. 5. 6.]]]]] + """ + + def __init__(self, padding, data_format="NCDHW", name=None): + super(ReplicationPad3d, self).__init__() + self._mode = "replicate" + self._data_format = data_format + self._pad = padding + self._name = name + + def forward(self, x): + return F.pad(x, + pad=self._pad, + mode=self._mode, + data_format=self._data_format, + name=self._name) + + +class CosineSimilarity(layers.Layer): + """ + This interface is used to compute cosine similarity between x1 and x2 along dim. + + Parameters: + dim (int): Dimension of vectors to compute cosine similarity. Default is 1. + eps(float): Small value to avoid division by zero. Default is 1e-8. + Returns: + None + + Examples: + .. code-block:: text + + Case 0: + x1 = [[0.8024077 0.9927354 0.27238318 0.8344984 ] + [0.48949873 0.5797396 0.65444374 0.66510963] + [0.1031398 0.9614342 0.08365563 0.6796464 ] + [0.10760343 0.7461209 0.7726148 0.5801006 ]] + x2 = [[0.62913156 0.1536727 0.9847992 0.04591406] + [0.9098952 0.15715368 0.8671125 0.3156102 ] + [0.4427798 0.54136837 0.5276275 0.32394758] + [0.3769419 0.8535014 0.48041078 0.9256797 ]] + dim = 1 + eps = 1e-8 + Out: [0.5275037 0.8368967 0.75037485 0.9245899] + + Code Examples: + .. 
code-block:: python + + import paddle + import paddle.nn as nn + import numpy as np + paddle.disable_static() + + np.random.seed(0) + x1 = np.random.rand(2,3) + x2 = np.random.rand(2,3) + x1 = paddle.to_tensor(x1) + x2 = paddle.to_tensor(x2) + + cos_sim_func = nn.CosineSimilarity(dim=0) + result = cos_sim_func(x1, x2) + print(result.numpy()) + # [0.99806249 0.9817672 0.94987036] + """ + + def __init__(self, dim=1, eps=1e-8): + super(CosineSimilarity, self).__init__() + self._dim = dim + self._eps = eps + + def forward(self, x1, x2): + return F.cosine_similarity(x1, x2, dim=self._dim, eps=self._eps) diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index 77d821d56b8e1..8833bf5735c9e 100644 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -111,6 +111,7 @@ from .math import elementwise_add #DEFINE_ALIAS from .math import elementwise_div #DEFINE_ALIAS from .math import elementwise_floordiv #DEFINE_ALIAS +from .math import elementwise_mul #DEFINE_ALIAS from .math import elementwise_mod #DEFINE_ALIAS from .math import elementwise_pow #DEFINE_ALIAS from .math import elementwise_sub #DEFINE_ALIAS From a7cd61fdd1c960e0da2d9818d9c10be00aee29fd Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Wed, 19 Aug 2020 20:48:13 +0800 Subject: [PATCH 27/37] fix DataParallel code samples, test=document_fix (#26423) --- python/paddle/fluid/dygraph/parallel.py | 106 +++++++++++++----------- 1 file changed, 57 insertions(+), 49 deletions(-) diff --git a/python/paddle/fluid/dygraph/parallel.py b/python/paddle/fluid/dygraph/parallel.py index 24e7f64cebf60..54d2cda4ca685 100644 --- a/python/paddle/fluid/dygraph/parallel.py +++ b/python/paddle/fluid/dygraph/parallel.py @@ -242,41 +242,38 @@ class DataParallel(layers.Layer): Examples: .. code-block:: python - import numpy as np - import paddle.fluid as fluid - import paddle.fluid.dygraph as dygraph - from paddle.fluid.optimizer import AdamOptimizer - from paddle.fluid.dygraph.nn import Linear - from paddle.fluid.dygraph.base import to_variable + import numpy as np + import paddle.fluid as fluid - place = place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) - with fluid.dygraph.guard(place=place): + place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) + with fluid.dygraph.guard(place): - # prepare the data parallel context - strategy=dygraph.prepare_context() + # prepare the data parallel context + strategy = fluid.dygraph.prepare_context() - linear = Linear(1, 10, act="softmax") - adam = fluid.optimizer.AdamOptimizer() + linear = fluid.dygraph.Linear(1, 10, act="softmax") + adam = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, parameter_list=linear.parameters()) - # make the module become the data parallelism module - linear = dygraph.DataParallel(linear, strategy) + # make the module become the data parallelism module + linear = fluid.dygraph.DataParallel(linear, strategy) - x_data = np.random.random(size=[10, 1]).astype(np.float32) - data = to_variable(x_data) + x_data = np.random.random(size=[10, 1]).astype(np.float32) + data = fluid.dygraph.to_variable(x_data) - hidden = linear(data) - avg_loss = fluid.layers.mean(hidden) + hidden = linear(data) + avg_loss = fluid.layers.mean(hidden) - # scale the loss according to the number of trainers. - avg_loss = linear.scale_loss(avg_loss) + # scale the loss according to the number of trainers. + avg_loss = linear.scale_loss(avg_loss) - avg_loss.backward() + avg_loss.backward() - # collect the gradients of trainers. 
- linear.apply_collective_grads() + # collect the gradients of trainers. + linear.apply_collective_grads() - adam.minimize(avg_loss) - linear.clear_gradients() + adam.minimize(avg_loss) + linear.clear_gradients() """ def __init__(self, layers, strategy): @@ -306,20 +303,23 @@ def scale_loss(self, loss): import numpy as np import paddle.fluid as fluid - import paddle.fluid.dygraph as dygraph - from paddle.fluid.optimizer import AdamOptimizer - from paddle.fluid.dygraph.nn import Linear - from paddle.fluid.dygraph.base import to_variable - - place = place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) - with fluid.dygraph.guard(place=place): - strategy=dygraph.prepare_context() - linear = Linear(1, 10, act="softmax") - adam = fluid.optimizer.AdamOptimizer() - linear = dygraph.DataParallel(linear, strategy) + + place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) + with fluid.dygraph.guard(place): + + # prepare the data parallel context + strategy = fluid.dygraph.prepare_context() + + linear = fluid.dygraph.Linear(1, 10, act="softmax") + adam = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, parameter_list=linear.parameters()) + + # make the module become the data parallelism module + linear = fluid.dygraph.DataParallel(linear, strategy) x_data = np.random.random(size=[10, 1]).astype(np.float32) - data = to_variable(x_data) + data = fluid.dygraph.to_variable(x_data) + hidden = linear(data) avg_loss = fluid.layers.mean(hidden) @@ -327,6 +327,8 @@ def scale_loss(self, loss): avg_loss = linear.scale_loss(avg_loss) avg_loss.backward() + + # collect the gradients of trainers. linear.apply_collective_grads() adam.minimize(avg_loss) @@ -390,23 +392,29 @@ def apply_collective_grads(self): import numpy as np import paddle.fluid as fluid - import paddle.fluid.dygraph as dygraph - from paddle.fluid.optimizer import AdamOptimizer - from paddle.fluid.dygraph.nn import Linear - from paddle.fluid.dygraph.base import to_variable - - place = place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) - with fluid.dygraph.guard(place=place): - strategy=dygraph.prepare_context() - linear = Linear(1, 10, act="softmax") - adam = fluid.optimizer.AdamOptimizer() - linear = dygraph.DataParallel(linear, strategy) + + place = fluid.CUDAPlace(fluid.dygraph.ParallelEnv().dev_id) + with fluid.dygraph.guard(place): + + # prepare the data parallel context + strategy = fluid.dygraph.prepare_context() + + linear = fluid.dygraph.Linear(1, 10, act="softmax") + adam = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, parameter_list=linear.parameters()) + + # make the module become the data parallelism module + linear = fluid.dygraph.DataParallel(linear, strategy) x_data = np.random.random(size=[10, 1]).astype(np.float32) - data = to_variable(x_data) + data = fluid.dygraph.to_variable(x_data) + hidden = linear(data) avg_loss = fluid.layers.mean(hidden) + + # scale the loss according to the number of trainers. avg_loss = linear.scale_loss(avg_loss) + avg_loss.backward() # collect the gradients of trainers. 
From 7e71ae92bd5b80ef77cdb8214196cfd53d2c43cb Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Thu, 20 Aug 2020 09:48:36 +0800 Subject: [PATCH 28/37] fix test_cosine_similarity_api failed (#26467) --- python/paddle/nn/functional/common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index e90db0b67d78f..cc5ff8de5e99d 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -680,9 +680,9 @@ def cosine_similarity(x1, x2, dim=1, eps=1e-8): # [0.99806249 0.9817672 0.94987036] """ - w12 = sum(elementwise_mul(x1, x2), dim=dim) - w1 = sum(elementwise_mul(x1, x1), dim=dim) - w2 = sum(elementwise_mul(x2, x2), dim=dim) + w12 = sum(elementwise_mul(x1, x2), axis=dim) + w1 = sum(elementwise_mul(x1, x1), axis=dim) + w2 = sum(elementwise_mul(x2, x2), axis=dim) n12 = sqrt(clamp(w1 * w2, min=eps * eps)) cos_sim = w12 / n12 return cos_sim From e3612de8d7d4e64fc30aa121e75641fb040cf722 Mon Sep 17 00:00:00 2001 From: YUNSHEN XIE <1084314248@qq.com> Date: Thu, 20 Aug 2020 10:25:35 +0800 Subject: [PATCH 29/37] add failed unittests retry (#26342) --- paddle/scripts/paddle_build.sh | 97 ++++++++++++++++++++++++++++++---- 1 file changed, 88 insertions(+), 9 deletions(-) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 96c3fbba42efe..4468ebad7c830 100755 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -936,17 +936,96 @@ set +x card_test "$multiple_card_tests" 2 # run cases with two GPUs card_test "$exclusive_tests" # run cases exclusively, in this cases would be run with 4/8 GPUs collect_failed_tests - if [ -n "${failed_test_lists}" ];then - failed_test_lists_ult=`echo "${failed_test_lists}" |grep -Po '[^ ].*$'` - echo "========================================" - echo "Summary Failed Tests... 
" - echo "========================================" - echo "The following tests FAILED: " - echo "${failed_test_lists_ult}" - fi rm -f $tmp_dir/* + exec_times=0 + retry_unittests_record='' + retry_time=3 + exec_time_array=('first' 'second' 'third') + if [ -n "$failed_test_lists" ];then + while ( [ $exec_times -lt $retry_time ] && [ -n "${failed_test_lists}" ] ) + do + + retry_unittests_record="$retry_unittests_record$failed_test_lists" + failed_test_lists_ult=`echo "${failed_test_lists}" |grep -Po '[^ ].*$'` + read retry_unittests <<< $(echo "$failed_test_lists" | grep -oEi "\-.+\(\w+\)" | sed 's/(.\+)//' | sed 's/- //' ) + echo "=========================================" + echo "This is the ${exec_time_array[$exec_times]} time to re-run" + echo "=========================================" + echo "The following unittest will be re-run:" + echo "${failed_test_lists_ult}" + + for line in ${retry_unittests[@]} ; + do + + one_card_tests=$single_card_tests'|'$single_card_tests_1 + + read tmp_one_tmp <<< "$( echo $one_card_tests | grep -oEi $line )" + read tmp_mul_tmp <<< "$( echo $multiple_card_tests | grep -oEi $line )" + read exclusive_tmp <<< "$( echo $exclusive_tests | grep -oEi $line )" + + if [[ "$tmp_one_tmp" != "" ]]; then + if [[ "$one_card_retry" == "" ]]; then + one_card_retry="^$line$" + else + one_card_retry="$one_card_retry|^$line$" + fi + elif [[ "$tmp_mul_tmp" != "" ]]; then + if [[ "$multiple_card_retry" == "" ]]; then + multiple_card_retry="^$line$" + else + multiple_card_retry="$multiple_card_retry|^$line$" + fi + else + if [[ "$exclusive_retry" == "" ]];then + exclusive_retry="^$line$" + else + exclusive_retry="$exclusive_retry|^$line$" + fi + fi + + done + + if [[ "$one_card_retry" != "" ]]; then + card_test "$one_card_retry" 1 + fi + + if [[ "$multiple_card_retry" != "" ]]; then + card_test "$multiple_card_retry" 2 + fi + + if [[ "$exclusive_retry" != "" ]]; then + card_test "$exclusive_retry" + fi + + exec_times=$[$exec_times+1] + failed_test_lists='' + collect_failed_tests + rm -f $tmp_dir/* + one_card_retry='' + multiple_card_retry='' + exclusive_retry='' + retry_unittests='' + done + fi + + + if [[ "$EXIT_CODE" != "0" ]]; then - exit 8; + if [[ "$failed_test_lists" == "" ]]; then + echo "========================================" + echo "There are failed tests, which have been successful after re-run:" + echo "========================================" + echo "The following tests have been re-ran:" + echo "${retry_unittests_record}" + else + failed_test_lists_ult=`echo "${failed_test_lists}" |grep -Po '[^ ].*$'` + echo "========================================" + echo "Summary Failed Tests... 
" + echo "========================================" + echo "The following tests FAILED: " + echo "${failed_test_lists_ult}" + exit 8; + fi fi set -ex fi From e52e7944cf005d7d3e19bef504bd4d74d89ff161 Mon Sep 17 00:00:00 2001 From: Guanghua Yu <742925032@qq.com> Date: Thu, 20 Aug 2020 10:48:08 +0800 Subject: [PATCH 30/37] add paddle.nn.SmoothL1Loss,test=develop (#26398) --- .../tests/unittests/test_smooth_l1_loss.py | 181 ++++++++++++++++++ python/paddle/nn/__init__.py | 1 + python/paddle/nn/functional/__init__.py | 1 + python/paddle/nn/functional/loss.py | 79 ++++++++ python/paddle/nn/layer/__init__.py | 1 + python/paddle/nn/layer/loss.py | 81 +++++++- 6 files changed, 342 insertions(+), 2 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py diff --git a/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py new file mode 100644 index 0000000000000..9a97f57aaae5f --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_smooth_l1_loss.py @@ -0,0 +1,181 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import paddle +import paddle.fluid as fluid +import numpy as np +import unittest + + +def smooth_l1_loss_forward(val, delta): + abs_val = abs(val) + if abs_val <= delta: + return 0.5 * val * val + else: + return delta * (abs_val - 0.5 * delta) + + +def smooth_l1_loss_np(input, label, reduction='mean', delta=1.0): + diff = input - label + out = np.vectorize(smooth_l1_loss_forward)(diff, delta) + if reduction == 'sum': + return np.sum(out) + elif reduction == 'mean': + return np.mean(out) + elif reduction == 'none': + return out + + +class SmoothL1Loss(unittest.TestCase): + def setUp(self): + np.random.seed(123) + + def test_smooth_l1_loss_mean(self): + input_np = np.random.random([100, 200]).astype(np.float32) + label_np = np.random.random([100, 200]).astype(np.float32) + prog = fluid.Program() + startup_prog = fluid.Program() + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + with fluid.program_guard(prog, startup_prog): + input = fluid.data(name='input', shape=[100, 200], dtype='float32') + label = fluid.data(name='label', shape=[100, 200], dtype='float32') + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss() + ret = smooth_l1_loss(input, label) + + exe = fluid.Executor(place) + static_ret = exe.run(prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret]) + self.assertIsNotNone(static_ret) + with fluid.dygraph.guard(): + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss() + dy_ret = smooth_l1_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np)) + dy_ret_value = dy_ret.numpy() + self.assertIsNotNone(dy_ret_value) + expected = smooth_l1_loss_np(input_np, label_np, reduction='mean') + self.assertTrue(np.allclose(static_ret, dy_ret_value)) + self.assertTrue(np.allclose(static_ret, expected)) + 
self.assertTrue(np.allclose(dy_ret_value, expected)) + + def test_smooth_l1_loss_sum(self): + input_np = np.random.random([100, 200]).astype(np.float32) + label_np = np.random.random([100, 200]).astype(np.float32) + prog = fluid.Program() + startup_prog = fluid.Program() + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + with fluid.program_guard(prog, startup_prog): + input = fluid.data(name='input', shape=[100, 200], dtype='float32') + label = fluid.data(name='label', shape=[100, 200], dtype='float32') + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum') + ret = smooth_l1_loss(input, label) + + exe = fluid.Executor(place) + static_ret = exe.run(prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret]) + self.assertIsNotNone(static_ret) + with fluid.dygraph.guard(): + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum') + dy_ret = smooth_l1_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np)) + dy_ret_value = dy_ret.numpy() + self.assertIsNotNone(dy_ret_value) + expected = smooth_l1_loss_np(input_np, label_np, reduction='sum') + self.assertTrue(np.allclose(static_ret, dy_ret_value)) + self.assertTrue(np.allclose(static_ret, expected)) + self.assertTrue(np.allclose(dy_ret_value, expected)) + + def test_smooth_l1_loss_none(self): + input_np = np.random.random([100, 200]).astype(np.float32) + label_np = np.random.random([100, 200]).astype(np.float32) + prog = fluid.Program() + startup_prog = fluid.Program() + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + with fluid.program_guard(prog, startup_prog): + input = fluid.data(name='input', shape=[100, 200], dtype='float32') + label = fluid.data(name='label', shape=[100, 200], dtype='float32') + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none') + ret = smooth_l1_loss(input, label) + + exe = fluid.Executor(place) + static_ret = exe.run(prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret]) + self.assertIsNotNone(static_ret) + with fluid.dygraph.guard(): + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none') + dy_ret = smooth_l1_loss( + fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np)) + dy_ret_value = dy_ret.numpy() + self.assertIsNotNone(dy_ret_value) + expected = smooth_l1_loss_np(input_np, label_np, reduction='none') + self.assertTrue(np.allclose(static_ret, dy_ret_value)) + self.assertTrue(np.allclose(static_ret, expected)) + self.assertTrue(np.allclose(dy_ret_value, expected)) + + def test_smooth_l1_loss_delta(self): + input_np = np.random.random([100, 200]).astype(np.float32) + label_np = np.random.random([100, 200]).astype(np.float32) + delta = np.random.rand() + prog = fluid.Program() + startup_prog = fluid.Program() + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + with fluid.program_guard(prog, startup_prog): + input = fluid.data(name='input', shape=[100, 200], dtype='float32') + label = fluid.data(name='label', shape=[100, 200], dtype='float32') + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta) + ret = smooth_l1_loss(input, label) + + exe = fluid.Executor(place) + static_ret = exe.run(prog, + feed={ + 'input': input_np, + 'label': label_np, + }, + fetch_list=[ret]) + self.assertIsNotNone(static_ret) + with fluid.dygraph.guard(): + smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta) + dy_ret = smooth_l1_loss( + 
fluid.dygraph.to_variable(input_np), + fluid.dygraph.to_variable(label_np)) + dy_ret_value = dy_ret.numpy() + self.assertIsNotNone(dy_ret_value) + expected = smooth_l1_loss_np(input_np, label_np, delta=delta) + self.assertTrue(np.allclose(static_ret, dy_ret_value)) + self.assertTrue(np.allclose(static_ret, expected)) + self.assertTrue(np.allclose(dy_ret_value, expected)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index 289bf40dce5a7..cc8658f9d73a4 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -101,6 +101,7 @@ from .layer.loss import BCELoss #DEFINE_ALIAS from .layer.loss import KLDivLoss #DEFINE_ALIAS from .layer.loss import MarginRankingLoss #DEFINE_ALIAS +from .layer.loss import SmoothL1Loss #DEFINE_ALIAS from .layer.norm import BatchNorm #DEFINE_ALIAS from .layer.norm import SyncBatchNorm #DEFINE_ALIAS from .layer.norm import GroupNorm #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index e587466e76483..bd1d334f663d8 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -140,6 +140,7 @@ from .loss import sigmoid_cross_entropy_with_logits #DEFINE_ALIAS from .loss import sigmoid_focal_loss #DEFINE_ALIAS from .loss import smooth_l1 #DEFINE_ALIAS +from .loss import smooth_l1_loss #DEFINE_ALIAS from .loss import softmax_with_cross_entropy #DEFINE_ALIAS from .loss import square_error_cost #DEFINE_ALIAS from .loss import ssd_loss #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index f8bc0b1b54e96..44dcd5458b7b2 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -65,6 +65,7 @@ 'sigmoid_cross_entropy_with_logits', 'sigmoid_focal_loss', 'smooth_l1', + 'smooth_l1_loss', 'softmax_with_cross_entropy', 'square_error_cost', 'ssd_loss', @@ -72,6 +73,84 @@ ] +def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None): + """ + This operator calculates smooth_l1_loss. Creates a criterion that uses a squared + term if the absolute element-wise error falls below 1 and an L1 term otherwise. + In some cases it can prevent exploding gradients and it is more robust and less + sensitivity to outliers. Also known as the Huber loss: + + .. math:: + + loss(x,y)=\\frac{1}{n}\\sum_{i}z_i + + + where z_i is given by: + + .. math:: + + \\mathop{z_i}=\\left\\{\\begin{array}{rcl} + 0.5(x_i - y_i)^2 & & {if |x_i - y_i| < delta} \\\\ + delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise} + \\end{array} \\right. + + Parameters: + input (Tensor): Input tensor, the data type is float32 or float64. Shape is + (N, C), where C is number of classes, and if shape is more than 2D, this + is (N, C, D1, D2,..., Dk), k >= 1. + label (Tensor): Label tensor, the data type is float32 or float64. The shape of label + is the same as the shape of input. + reduction (str, optional): Indicate how to average the loss by batch_size, + the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; + If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. + If :attr:`reduction` is ``'none'``, the unreduced loss is returned. + Default is ``'mean'``. + delta (float, optional): Specifies the hyperparameter delta to be used. + The value determines how large the errors need to be to use L1. Errors + smaller than delta are minimized with L2. 
Parameter is ignored for + negative/zero values. Default = 1.0 + name (str, optional): Name for the operation (optional, default is + None). For more information, please refer to :ref:`api_guide_Name`. + + Returns: + The tensor variable storing the smooth_l1_loss of input and label. + + Return type: Tensor. + + Examples: + .. code-block:: python + + import paddle + import numpy as np + + paddle.disable_static() + input_data = np.random.rand(3,3).astype("float32") + label_data = np.random.rand(3,3).astype("float32") + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + output = paddle.nn.functioanl.smooth_l1_loss(input, label) + print(output.numpy()) + """ + fluid.data_feeder.check_variable_and_dtype( + input, 'input', ['float32', 'float64'], 'smooth_l1_loss') + fluid.data_feeder.check_variable_and_dtype( + label, 'label', ['float32', 'float64'], 'smooth_l1_loss') + + out = huber_loss(input=input, label=label, delta=delta) + + if reduction not in ['sum', 'mean', 'none']: + raise ValueError( + "The value of 'reduction' in smooth_l1_loss should be 'sum', 'mean' or" + " 'none', but received %s, which is not allowed." % reduction) + if reduction == 'none': + return out + elif reduction == 'mean': + return fluid.layers.reduce_mean(out) + elif reduction == 'sum': + return fluid.layers.reduce_sum(out) + + def margin_ranking_loss(input, other, label, diff --git a/python/paddle/nn/layer/__init__.py b/python/paddle/nn/layer/__init__.py index ee057be40f1d2..84d13c2211bd7 100644 --- a/python/paddle/nn/layer/__init__.py +++ b/python/paddle/nn/layer/__init__.py @@ -74,6 +74,7 @@ from .loss import BCELoss #DEFINE_ALIAS from .loss import KLDivLoss #DEFINE_ALIAS from .loss import MarginRankingLoss #DEFINE_ALIAS +from .loss import SmoothL1Loss #DEFINE_ALIAS from .norm import BatchNorm #DEFINE_ALIAS from .norm import SyncBatchNorm #DEFINE_ALIAS from .norm import GroupNorm #DEFINE_ALIAS diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index 5067264ee792d..8503cdce96726 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -27,7 +27,8 @@ 'NLLLoss', 'BCELoss', 'KLDivLoss', - 'MarginRankingLoss' + 'MarginRankingLoss', + 'SmoothL1Loss', ] @@ -700,7 +701,7 @@ class MarginRankingLoss(fluid.dygraph.Layer): def __init__(self, margin=0.0, reduction='mean', name=None): if reduction not in ['sum', 'mean', 'none']: raise ValueError( - "The value of 'reduction' in L1Loss should be 'sum', 'mean' or 'none', but " + "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but " "received %s, which is not allowed." % reduction) super(MarginRankingLoss, self).__init__() self.margin = margin @@ -711,3 +712,79 @@ def forward(self, input, other, label): out = paddle.nn.functional.margin_ranking_loss( input, other, label, self.margin, self.reduction, self.name) return out + + +class SmoothL1Loss(fluid.dygraph.Layer): + """ + This operator calculates smooth_l1_loss. Creates a criterion that uses a squared + term if the absolute element-wise error falls below 1 and an L1 term otherwise. + In some cases it can prevent exploding gradients and it is more robust and less + sensitivity to outliers. Also known as the Huber loss: + + .. math:: + + loss(x,y)=\\frac{1}{n}\\sum_{i}z_i + + where z_i is given by: + + .. math:: + + \\mathop{z_i}=\\left\\{\\begin{array}{rcl} + 0.5(x_i - y_i)^2 & & {if |x_i - y_i| < delta} \\\\ + delta * |x_i - y_i| - 0.5 * delta^2 & & {otherwise} + \\end{array} \\right. 
+ + Parameters: + reduction (str, optional): Indicate how to average the loss by batch_size, + the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; + If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. + If :attr:`reduction` is ``'none'``, the unreduced loss is returned. + Default is ``'mean'``. + delta (float, optional): Specifies the hyperparameter delta to be used. + The value determines how large the errors need to be to use L1. Errors + smaller than delta are minimized with L2. Parameter is ignored for + negative/zero values. Default = 1.0 + name (str, optional): Name for the operation (optional, default is + None). For more information, please refer to :ref:`api_guide_Name`. + + Call Parameters: + input (Tensor): Input tensor, the data type is float32 or float64. Shape is + (N, C), where C is number of classes, and if shape is more than 2D, this + is (N, C, D1, D2,..., Dk), k >= 1. + label (Tensor): Label tensor, the data type is float32 or float64. The shape of label + is the same as the shape of input. + + Returns: + The tensor variable storing the smooth_l1_loss of input and label. + + Return type: Tensor. + + Examples: + .. code-block:: python + + import paddle + import numpy as np + paddle.disable_static() + input_data = np.random.rand(3,3).astype("float32") + label_data = np.random.rand(3,3).astype("float32") + input = paddle.to_tensor(input_data) + label = paddle.to_tensor(label_data) + loss = paddle.nn.SmoothL1Loss() + output = loss(input, label) + print(output.numpy()) + """ + + def __init__(self, reduction='mean', delta=1.0, name=None): + super(SmoothL1Loss, self).__init__() + self.reduction = reduction + self.delta = delta + self.name = name + + def forward(self, input, label): + return F.smooth_l1_loss( + input, + label, + reduction=self.reduction, + delta=self.delta, + name=self.name) From 6914a12f8205ac8ed7c4673f9a2191ad965c6059 Mon Sep 17 00:00:00 2001 From: Zhang Ting Date: Thu, 20 Aug 2020 10:50:36 +0800 Subject: [PATCH 31/37] rename the inputs of allclose (#26360) * rename input * add unittest, test=develop * use paddle.data instead of fluid.data, test=develop --- paddle/fluid/operators/allclose_op.cc | 15 +-- .../fluid/tests/unittests/test_allclose_op.py | 54 ++++++++++ python/paddle/tensor/logic.py | 100 +++++++++--------- 3 files changed, 113 insertions(+), 56 deletions(-) diff --git a/paddle/fluid/operators/allclose_op.cc b/paddle/fluid/operators/allclose_op.cc index 911757007266c..736483c3304ac 100644 --- a/paddle/fluid/operators/allclose_op.cc +++ b/paddle/fluid/operators/allclose_op.cc @@ -22,9 +22,11 @@ namespace operators { class AllcloseOpMaker : public framework::OpProtoAndCheckerMaker { public: void Make() override { - AddInput("Input", "The first input tensor to compare."); - AddInput("Other", "The second input tensor to compare."); - AddOutput("Out", "The output tensor of allclose op."); + AddInput("Input", + "The input tensor, it's data type should be float32, float64."); + AddInput("Other", + "The input tensor, it's data type should be float32, float64."); + AddOutput("Out", "The output tensor, it's data type is bool."); AddAttr("rtol", "The relative tolerance. 
Default: :math:`1e-5` .") .SetDefault(1e-5); @@ -36,11 +38,12 @@ class AllcloseOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(false); AddComment(R"DOC( -This operator checks if all :math:`input` and :math:`other` satisfy the condition: +This operator checks if all :math:`x` and :math:`y` satisfy the condition: -:math:`\left| input - other \right| \leq atol + rtol \times \left| other \right|` +.. math:: + \left| x - y \right| \leq atol + rtol \times \left| y \right| -elementwise, for all elements of :math:`input` and :math:`other`. The behaviour of this +elementwise, for all elements of :math:`x` and :math:`y`. The behaviour of this operator is analogous to :math:`numpy.allclose`, namely that it returns :math:`True` if two tensors are elementwise equal within a tolerance. )DOC"); diff --git a/python/paddle/fluid/tests/unittests/test_allclose_op.py b/python/paddle/fluid/tests/unittests/test_allclose_op.py index 5b5ed2641880a..dc50e569f8043 100644 --- a/python/paddle/fluid/tests/unittests/test_allclose_op.py +++ b/python/paddle/fluid/tests/unittests/test_allclose_op.py @@ -15,6 +15,7 @@ import unittest import numpy as np from op_test import OpTest +import paddle class TestAllcloseOp(OpTest): @@ -76,5 +77,58 @@ def set_args(self): self.equal_nan = True +class TestAllcloseDygraph(unittest.TestCase): + def test_api_case(self): + paddle.disable_static() + x_data = np.random.rand(10, 10) + y_data = np.random.rand(10, 10) + x = paddle.to_tensor(x_data) + y = paddle.to_tensor(y_data) + out = paddle.allclose(x, y, rtol=1e-05, atol=1e-08) + expected_out = np.allclose(x_data, y_data, rtol=1e-05, atol=1e-08) + self.assertTrue((out.numpy() == expected_out).all(), True) + paddle.enable_static() + + +class TestAllcloseError(unittest.TestCase): + def test_input_dtype(self): + def test_x_dtype(): + with paddle.static.program_guard(paddle.static.Program(), + paddle.static.Program()): + x = paddle.data(name='x', shape=[10, 10], dtype='float16') + y = paddle.data(name='y', shape=[10, 10], dtype='float64') + result = paddle.allclose(x, y) + + self.assertRaises(TypeError, test_x_dtype) + + def test_y_dtype(): + with paddle.static.program_guard(paddle.static.Program(), + paddle.static.Program()): + x = paddle.data(name='x', shape=[10, 10], dtype='float64') + y = paddle.data(name='y', shape=[10, 10], dtype='int32') + result = paddle.allclose(x, y) + + self.assertRaises(TypeError, test_y_dtype) + + def test_attr(self): + x = paddle.data(name='x', shape=[10, 10], dtype='float64') + y = paddle.data(name='y', shape=[10, 10], dtype='float64') + + def test_rtol(): + result = paddle.allclose(x, y, rtol=True) + + self.assertRaises(TypeError, test_rtol) + + def test_atol(): + result = paddle.allclose(x, y, rtol=True) + + self.assertRaises(TypeError, test_atol) + + def test_equal_nan(): + result = paddle.allclose(x, y, equal_nan=1) + + self.assertRaises(TypeError, test_equal_nan) + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 18dbeb0c46e8a..ec080235239ee 100644 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -13,9 +13,11 @@ # limitations under the License. from ..fluid.layer_helper import LayerHelper -from ..fluid.data_feeder import check_type +from ..fluid.data_feeder import check_type, check_variable_and_dtype from ..fluid.layers.layer_function_generator import templatedoc from .. 
import fluid +from ..fluid.framework import in_dygraph_mode +from paddle.common_ops_import import * # TODO: define logic functions of a tensor from ..fluid.layers import is_empty #DEFINE_ALIAS @@ -91,7 +93,7 @@ def equal_all(x, y, name=None): @templatedoc() -def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): +def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): """ :alias_main: paddle.allclose :alias: paddle.allclose,paddle.tensor.allclose,paddle.tensor.logic.allclose @@ -99,67 +101,65 @@ def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): ${comment} Args: - input(inputtype):{input_comment}. - other(othertype):{other_comment}. - rtol(rtoltype,optional):{rtol_comment}. - atol(atoltype,optional):{atol_comment}. - equal_nan(equalnantype,optional):{equal_nan_comment}. - name(STR, optional): The default value is None. - Normally there is no need for user to set this property. - For more information, please refer to :ref:`api_guide_Name`. + x(Tensor): ${input_comment}. + y(Tensor): ${other_comment}. + rtol(rtoltype, optional): ${rtol_comment}. + atol(atoltype, optional): ${atol_comment}. + equal_nan(equalnantype, optional): ${equal_nan_comment}. + name (str, optional): Name for the operation. For more information, please + refer to :ref:`api_guide_Name`. Default: None. Returns: - ${out_comment}. + Tensor: ${out_comment}. + + Raises: + TypeError: The data type of ``x`` must be one of float32, float64. + TypeError: The data type of ``y`` must be one of float32, float64. + TypeError: The type of ``rtol`` must be float. + TypeError: The type of ``atol`` must be float. + TypeError: The type of ``equal_nan`` must be bool. - Return Type: - ${out_type} - Examples: .. code-block:: python import paddle - import paddle.fluid as fluid import numpy as np - use_cuda = fluid.core.is_compiled_with_cuda() - - a = fluid.data(name="a", shape=[2], dtype='float32') - b = fluid.data(name="b", shape=[2], dtype='float32') + paddle.disable_static() - result = paddle.allclose(a, b, rtol=1e-05, atol=1e-08, + np_x = np.array([10000., 1e-07]).astype("float32") + np_y = np.array([10000.1, 1e-08]).astype("float32") + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name="ignore_nan") - result_nan = paddle.allclose(a, b, rtol=1e-05, atol=1e-08, + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=True, name="equal_nan") - - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - x = np.array([10000., 1e-07]).astype("float32") - y = np.array([10000.1, 1e-08]).astype("float32") - result_v, result_nan_v = exe.run( - feed={'a': x, 'b': y}, - fetch_list=[result, result_nan]) - print(result_v, result_nan_v) - # Output: (array([False]), array([False])) - - x = np.array([10000., 1e-08]).astype("float32") - y = np.array([10000.1, 1e-09]).astype("float32") - result_v, result_nan_v = exe.run( - feed={'a': x, 'b': y}, - fetch_list=[result, result_nan]) - print(result_v, result_nan_v) - # Output: (array([ True]), array([ True])) - - x = np.array([1.0, float('nan')]).astype("float32") - y = np.array([1.0, float('nan')]).astype("float32") - result_v, result_nan_v = exe.run( - feed={'a': x, 'b': y}, - fetch_list=[result, result_nan]) - print(result_v, result_nan_v) - # Output: (array([False]), array([ True])) + np_result2 = result2.numpy() + # 
[False] + + np_x = np.array([1.0, float('nan')]).astype("float32") + np_y = np.array([1.0, float('nan')]).astype("float32") + x = paddle.to_tensor(np_x) + y = paddle.to_tensor(np_y) + result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=False, name="ignore_nan") + np_result1 = result1.numpy() + # [False] + result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, + equal_nan=True, name="equal_nan") + np_result2 = result2.numpy() + # [True] """ + if in_dygraph_mode(): + return core.ops.allclose(x, y, 'rtol', rtol, 'atol', atol, 'equal_nan', + equal_nan) + + check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose') + check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose') check_type(rtol, 'rtol', float, 'allclose') check_type(atol, 'atol', float, 'allclose') check_type(equal_nan, 'equal_nan', bool, 'allclose') @@ -167,7 +167,7 @@ def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): helper = LayerHelper("allclose", **locals()) out = helper.create_variable_for_type_inference(dtype='bool') - inputs = {'Input': input, 'Other': other} + inputs = {'Input': x, 'Other': y} outputs = {'Out': out} attrs = {'rtol': rtol, 'atol': atol, 'equal_nan': equal_nan} helper.append_op( From 8c48c7daecbe8419cb34770f0b178c7e13ade123 Mon Sep 17 00:00:00 2001 From: yukavio <67678385+yukavio@users.noreply.github.com> Date: Thu, 20 Aug 2020 10:58:36 +0800 Subject: [PATCH 32/37] Add new one hot function in nn.functional (#26183) * add input.py file * write input.py * fix init file * add unit tests * fix dygraph and input shape * Revert "fix dygraph and input shape" This reverts commit 89ad8664124c1872640b68c0a4418c377ad6aa6f. * fixed pylint * fix deprecated * fix old op * fix old op * set check_dygraph=True * Revert "set check_dygraph=True" This reverts commit a8e93e33281f84e5f6fbc2b4bf05ea3e63611519. * test commit * fix doc and change test file name --- python/paddle/fluid/input.py | 2 + python/paddle/fluid/layers/nn.py | 2 + .../unittests/test_nn_functional_hot_op.py | 207 ++++++++++++++++++ python/paddle/nn/functional/__init__.py | 1 + python/paddle/nn/functional/input.py | 110 ++++++++++ 5 files changed, 322 insertions(+) create mode 100644 python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py create mode 100644 python/paddle/nn/functional/input.py diff --git a/python/paddle/fluid/input.py b/python/paddle/fluid/input.py index 347927509e6d5..15a3022f932f4 100644 --- a/python/paddle/fluid/input.py +++ b/python/paddle/fluid/input.py @@ -17,10 +17,12 @@ from .framework import Variable, in_dygraph_mode from .layer_helper import LayerHelper from .data_feeder import check_variable_and_dtype, check_dtype +from ..utils import deprecated __all__ = ['one_hot', 'embedding'] +@deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot') def one_hot(input, depth, allow_out_of_range=False): """ :alias_main: paddle.nn.functional.one_hot diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 3595406a011ce..41531f67f3c9c 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -35,6 +35,7 @@ from .. import unique_name from functools import reduce from .. 
import core +from ...utils import deprecated from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype import paddle from paddle.utils import deprecated @@ -5800,6 +5801,7 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): return loss +@deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot') def one_hot(input, depth, allow_out_of_range=False): """ diff --git a/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py b/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py new file mode 100644 index 0000000000000..339f689998f81 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_nn_functional_hot_op.py @@ -0,0 +1,207 @@ +# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import unittest +import numpy as np +import math +from op_test import OpTest +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.nn.functional as functional +import paddle.fluid.framework as framework +from paddle.fluid.framework import Program, program_guard + + +class TestOneHotOp(OpTest): + def setUp(self): + self.op_type = 'one_hot_v2' + depth = 10 + depth_np = np.array(10).astype('int32') + dimension = 12 + x_lod = [[4, 1, 3, 3]] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] + x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) + + out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32') + + for i in range(np.product(x.shape)): + out[i, x[i]] = 1.0 + + self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np} + self.attrs = {'dtype': int(core.VarDesc.VarType.FP32)} + self.outputs = {'Out': (out, x_lod)} + + def test_check_output(self): + self.check_output(check_dygraph=False) + + +class TestOneHotOp_attr(OpTest): + def setUp(self): + self.op_type = 'one_hot_v2' + depth = 10 + dimension = 12 + x_lod = [[4, 1, 3, 3]] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] + x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) + + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, + depth)).astype('float32') + + for i in range(np.product(x.shape)): + out[i, 0, x[i]] = 1.0 + + self.inputs = {'X': (x, x_lod)} + self.attrs = {'dtype': int(core.VarDesc.VarType.FP32), 'depth': depth} + self.outputs = {'Out': (out, x_lod)} + + def test_check_output(self): + self.check_output(check_dygraph=False) + + +class TestOneHotOp_default_dtype(OpTest): + def setUp(self): + self.op_type = 'one_hot_v2' + depth = 10 + depth_np = np.array(10).astype('int32') + dimension = 12 + x_lod = [[4, 1, 3, 3]] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] + x = np.array(x).astype('int32').reshape([sum(x_lod[0])]) + + out = np.zeros(shape=(np.product(x.shape), depth)).astype('float32') + + for i in range(np.product(x.shape)): + out[i, x[i]] = 1.0 + + self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np} + self.attrs = {} + self.outputs = 
{'Out': (out, x_lod)} + + def test_check_output(self): + self.check_output(check_dygraph=False) + + +class TestOneHotOp_default_dtype_attr(OpTest): + def setUp(self): + self.op_type = 'one_hot_v2' + depth = 10 + dimension = 12 + x_lod = [[4, 1, 3, 3]] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] + x = np.array(x).astype('int32').reshape([sum(x_lod[0]), 1]) + + out = np.zeros(shape=(np.product(x.shape[:-1]), 1, + depth)).astype('float32') + + for i in range(np.product(x.shape)): + out[i, 0, x[i]] = 1.0 + + self.inputs = {'X': (x, x_lod)} + self.attrs = {'depth': depth} + self.outputs = {'Out': (out, x_lod)} + + def test_check_output(self): + self.check_output(check_dygraph=False) + + +class TestOneHotOp_exception(unittest.TestCase): + def setUp(self): + self.op_type = 'one_hot_v2' + self.depth = 10 + self.place = core.CPUPlace() + self.dimension = 12 + self.x = core.LoDTensor() + x_lod = [[4, 1, 3, 3]] + data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))] + data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1]) + self.x.set(data, self.place) + self.x.set_recursive_sequence_lengths(x_lod) + + def test_check_output(self): + program = Program() + with program_guard(program): + x = fluid.layers.data( + name='x', shape=[self.dimension], dtype='float32', lod_level=1) + block = program.current_block() + one_hot_out = block.create_var( + name="one_hot_out", + type=core.VarDesc.VarType.LOD_TENSOR, + dtype='float32') + block.append_op( + type='one_hot', + inputs={'X': x}, + attrs={'depth': self.depth}, + outputs={'Out': one_hot_out}) + exe = fluid.Executor(self.place) + + def run(): + exe.run(feed={'x': self.x}, + fetch_list=[one_hot_out], + return_numpy=False) + + self.assertRaises(core.EnforceNotMet, run) + + +class TestOneHotOpApi(unittest.TestCase): + def test_api(self): + num_classes = 10 + self._run(num_classes) + + def test_api_with_depthTensor(self): + num_classes = fluid.layers.assign(input=np.array([10], dtype=np.int32)) + self._run(num_classes) + + def test_api_with_dygraph(self): + num_classes = 10 + label = np.array( + [np.random.randint(0, num_classes - 1) + for i in range(6)]).reshape([6, 1]) + with fluid.dygraph.guard(): + one_hot_label = functional.one_hot( + x=fluid.dygraph.to_variable(label), num_classes=num_classes) + + def _run(self, num_classes): + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + one_hot_label = functional.one_hot(x=label, num_classes=num_classes) + + place = fluid.CPUPlace() + label_data = np.array([np.random.randint(0, 10 - 1) + for i in range(6)]).reshape([6, 1]) + + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + ret = exe.run(feed={'label': label_data, }, + fetch_list=[one_hot_label], + return_numpy=False) + + +class BadInputTestOnehotV2(unittest.TestCase): + def test_error(self): + with fluid.program_guard(fluid.Program()): + + def test_bad_x(): + label = fluid.layers.data( + name="label", + shape=[4], + append_batch_size=False, + dtype="float32") + one_hot_label = functional.one_hot(x=label, num_classes=4) + + self.assertRaises(TypeError, test_bad_x) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/nn/functional/__init__.py b/python/paddle/nn/functional/__init__.py index bd1d334f663d8..855935f662099 100644 --- a/python/paddle/nn/functional/__init__.py +++ b/python/paddle/nn/functional/__init__.py @@ -198,3 +198,4 @@ from .vision import space_to_depth #DEFINE_ALIAS from .vision import yolo_box #DEFINE_ALIAS from .vision import yolov3_loss 
#DEFINE_ALIAS +from .input import one_hot #DEFINE_ALIAS diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py new file mode 100644 index 0000000000000..e51da1bd74456 --- /dev/null +++ b/python/paddle/nn/functional/input.py @@ -0,0 +1,110 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import warnings +from ...fluid.framework import Variable, in_dygraph_mode +from ...fluid.layer_helper import LayerHelper +from ...fluid.layers import core +from ...fluid.data_feeder import check_variable_and_dtype, check_dtype + +__all__ = ['one_hot'] + + +def one_hot(x, num_classes, name=None): + """ + + The operator converts each id in the input 'x' to an one-hot vector with a + num_classes length. The value in the vector dimension corresponding to the id + is 1, and the value in the remaining dimension is 0. + + The shape of output Tensor is generated by appending num_classes dimension + behind the last dimension of the 'x' shape. + + .. code-block:: text + + Example 1: + + input: + x.shape = [4] + x.data = [1, 1, 3, 0] + num_classes = 4 + + output: + Out.shape = [4, 4] + Out.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 1.], + [1., 0., 0., 0.]] + + Example 2: + + input: + x.shape = [4] + x.data = [1, 1, 5, 0] + num_classes = 4 + + output: Throw an exception for Illegal value + The second dimension in X is 5, which is greater than num_classes, + so it throws an exception. + + + Args: + x(Tensor): Tensor with shape :math:`[N_1, N_2, ..., N_k]` , + which contains at least one dimension. The data type is int32 or int64. + num_classes(int): An integer defining the num_classes of the one hot dimension. If input 'x' + is word id, num_classes is generally the dictionary size. + + Returns: + Tensor: The one-hot representations of 'x'. A Tensor with type float32. + + Examples: + .. code-block:: python + + import paddle.fluid as fluid + # Correspond to the first example above, where label.shape is 4 and one_hot_label.shape is [4, 4]. 
+ label = fluid.data(name="label", shape=[4, 1], dtype="int64") + # label.shape = [4] + # label.data = [1, 1, 3, 0] + one_hot_label = fluid.one_hot(x=label, num_classes=4) + # one_hot_label.shape = [4, 4] + # one_hot_label.data = [[0., 1., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 1.], + [1., 0., 0., 0.]] + """ + + if in_dygraph_mode(): + return core.ops.one_hot_v2(x, 'depth', num_classes, + 'allow_out_of_range', False) + else: + check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2') + helper = LayerHelper("one_hot_v2", **locals()) + + one_hot_out = helper.create_variable_for_type_inference(dtype='float32') + if not isinstance(num_classes, Variable): + # user attribute + inputs = {'X': x} + attrs = {'depth': num_classes, 'allow_out_of_range': False} + else: + num_classes.stop_gradient = True + inputs = {'X': x, 'depth_tensor': num_classes} + attrs = {'allow_out_of_range': False} + helper.append_op( + type="one_hot_v2", + inputs=inputs, + attrs=attrs, + outputs={'Out': one_hot_out}, + stop_gradient=True) + return one_hot_out From 7c1ff38e013991aeaeb46ffbbf4d55535727afe3 Mon Sep 17 00:00:00 2001 From: Chen Weihang Date: Thu, 20 Aug 2020 11:47:03 +0800 Subject: [PATCH 33/37] Enhance add_parameter check for dygraph layer (#26188) * enhance add parameter check for layer * add unittest for coverage * remove uninit test case * enrich unittest ccase * trigger ci check --- python/paddle/fluid/dygraph/layers.py | 43 ++++++++++++++----- .../fluid/tests/unittests/test_base_layer.py | 25 +++++++++++ 2 files changed, 57 insertions(+), 11 deletions(-) diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 7b2434e1a213a..1ef719b9da187 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -553,7 +553,10 @@ def register_buffer(self, name, variable, persistable=True): "The name of buffer should be a string, but received {}.". format(type(name).__name__)) elif '.' in name: - raise KeyError("The name of buffer can not contain \".\"") + raise KeyError( + "The name of buffer can not contain `.`, " + "because when you access the newly added buffer in the " + "form of `self.**.**`, it will cause AttributeError.") elif name == '': raise KeyError("The name of buffer can not be empty.") elif hasattr(self, name) and name not in self._buffers: @@ -736,20 +739,38 @@ def add_parameter(self, name, parameter): Returns: Parameter: the parameter passed in. """ - if parameter is None: - self._parameters[name] = None - elif not isinstance(parameter, framework.Parameter): + if '_parameters' not in self.__dict__: + raise RuntimeError( + "super(YourLayer, self).__init__() should be called firstly.") + elif not isinstance(name, six.string_types): raise TypeError( - "parameter assignment requires Parameter or None, but got '{}'" - .format(type(parameter).__name__)) + "The name of parameter should be a string, but received {}.". + format(type(name).__name__)) + elif '.' 
in name: + raise KeyError( + "The name of parameter can not contain `.`, " + "because when you access the newly added parameter in the " + "form of `self.**.**`, it will cause AttributeError.") + elif name == '': + raise KeyError("The name of parameter can not be empty.") + elif hasattr(self, name) and name not in self._parameters: + raise KeyError("The parameter '{}' already exists.".format(name)) + elif parameter is not None and not isinstance(parameter, + framework.Parameter): + raise TypeError( + "The parameter to be added should be a Parameter, but received {}.". + format(type(parameter).__name__)) + else: + if parameter is None: + self._parameters[name] = None - if len(self._loaddict_holder) > 0: - assert parameter.name in self._loaddict_holder, "Parameter not found, Can't not find [ {} ] in stat_dict".format( - parameter.name) + if len(self._loaddict_holder) > 0: + assert parameter.name in self._loaddict_holder, "Parameter not found, Can't not find [ {} ] in state_dict".format( + parameter.name) - parameter.set_value(self._loaddict_holder[parameter.name]) + parameter.set_value(self._loaddict_holder[parameter.name]) - self._parameters[name] = parameter + self._parameters[name] = parameter return parameter def __getattr__(self, name): diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py index bc666c0de5be0..875f6211a7fbd 100644 --- a/python/paddle/fluid/tests/unittests/test_base_layer.py +++ b/python/paddle/fluid/tests/unittests/test_base_layer.py @@ -86,6 +86,31 @@ def test_three_level(self): ret = l() self.assertTrue(np.allclose(ret.numpy(), 0.8 * np.ones([2, 2]))) + def test_add_parameter_with_error(self): + with fluid.dygraph.guard(): + net = fluid.Layer() + param = net.create_parameter(shape=[1]) + + with self.assertRaises(TypeError): + net.add_parameter(10, param) + + with self.assertRaises(KeyError): + net.add_parameter("param.name", param) + + with self.assertRaises(KeyError): + net.add_parameter("", param) + + with self.assertRaises(KeyError): + net.test_param = 10 + net.add_parameter("test_param", param) + + with self.assertRaises(TypeError): + net.add_parameter("no_param", 10) + + load_param = net.create_parameter(shape=[1]) + net._loaddict_holder[load_param.name] = load_param + net.add_parameter("load_param", load_param) + class BufferLayer(fluid.Layer): def __init__(self): From a57d63a0f21dff476270213491126b81ee122975 Mon Sep 17 00:00:00 2001 From: WuHaobo <14162525+WuHaobo@users.noreply.github.com> Date: Thu, 20 Aug 2020 13:37:15 +0800 Subject: [PATCH 34/37] Fix erf (#26426) --- python/paddle/fluid/layers/ops.py | 59 ++++--------------- .../fluid/tests/unittests/test_erf_op.py | 7 +++ 2 files changed, 17 insertions(+), 49 deletions(-) diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index 1a61072ace6ff..1c9aa97e29e00 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -730,7 +730,7 @@ def gelu(x, approximate=False): _erf_ = generate_layer_fn('erf') -def erf(x): +def erf(x, name=None): locals_var = locals().copy() kwargs = dict() for name, val in locals_var.items(): @@ -740,10 +740,6 @@ def erf(x): erf.__doc__ = """ - :alias_main: paddle.erf - :alias: paddle.erf,paddle.tensor.erf,paddle.tensor.math.erf,paddle.nn.functional.erf,paddle.nn.functional.activation.erf - :old_api: paddle.fluid.layers.erf - :strong:`Erf Operator` For more details, see [Error function](https://en.wikipedia.org/wiki/Error_function). 
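# The erf wrapper above follows the generate_layer_fn pattern used across
# fluid.layers.ops: collect the visible arguments with locals(), drop the
# None values, and forward the rest as keyword arguments to the generated
# op function. A minimal standalone sketch of that forwarding pattern
# (_generated_erf_ is an illustrative stand-in, not Paddle's real symbol):

def _generated_erf_(**kwargs):
    # stand-in for the callable returned by generate_layer_fn('erf')
    return kwargs

def erf_like(x, name=None):
    locals_var = locals().copy()
    kwargs = {k: v for k, v in locals_var.items() if v is not None}
    return _generated_erf_(**kwargs)

# erf_like("x_tensor") forwards only {'x': 'x_tensor'}; a non-None name,
# as allowed by the new signature, is forwarded as well.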
@@ -753,57 +749,22 @@ def erf(x): Args: - x(Variable): The input of Erf op, Tensor or LoDTensor, dtype: float32 or float64. + x (Tensor): The input tensor, it's data type should be float32, float64. Returns: - Variable: The output of Erf op, Tensor or LoDTensor, dtype: float32 or float64, the same as the input, shape: the same as the input. + Tensor: The output of Erf op, dtype: float32 or float64, the same as the input, shape: the same as the input. Examples: .. code-block:: python - # declarative mode import numpy as np - from paddle import fluid - - x = fluid.data(name="x", shape=(-1, 3), dtype="float32") - y = fluid.layers.erf(x) - - place = fluid.CPUPlace() - exe = fluid.Executor(place) - start = fluid.default_startup_program() - main = fluid.default_main_program() - - data = np.random.randn(2, 3).astype("float32") - exe.run(start) - - y_np, = exe.run(main, feed={"x": data}, fetch_list=[y]) - - data - # array([[ 0.4643714 , -1.1509596 , 1.2538221 ], - # [ 0.34369683, 0.27478245, 1.1805398 ]], dtype=float32) - y_np - # array([[ 0.48863927, -0.8964121 , 0.9237998 ], - # [ 0.37307587, 0.30242872, 0.9049887 ]], dtype=float32) - - .. code-block:: python - - # imperative mode - import numpy as np - from paddle import fluid - import paddle.fluid.dygraph as dg - - data = np.random.randn(2, 3).astype("float32") - place = fluid.CPUPlace() - with dg.guard(place) as g: - x = dg.to_variable(data) - y = fluid.layers.erf(x) - y_np = y.numpy() - data - # array([[ 0.4643714 , -1.1509596 , 1.2538221 ], - # [ 0.34369683, 0.27478245, 1.1805398 ]], dtype=float32) - y_np - # array([[ 0.48863927, -0.8964121 , 0.9237998 ], - # [ 0.37307587, 0.30242872, 0.9049887 ]], dtype=float32) + import paddle + paddle.disable_static() + x_data = np.array([-0.4, -0.2, 0.1, 0.3]) + x = paddle.to_tensor(x_data) + out = paddle.erf(x) + print(out.numpy()) + # [-0.42839236 -0.22270259 0.11246292 0.32862676] """ diff --git a/python/paddle/fluid/tests/unittests/test_erf_op.py b/python/paddle/fluid/tests/unittests/test_erf_op.py index 93ab0212f136a..b83436fae01e6 100644 --- a/python/paddle/fluid/tests/unittests/test_erf_op.py +++ b/python/paddle/fluid/tests/unittests/test_erf_op.py @@ -19,6 +19,7 @@ from scipy.special import erf from op_test import OpTest +import paddle import paddle.fluid as fluid import paddle.fluid.dygraph as dg @@ -58,6 +59,12 @@ def test_case(self): if fluid.is_compiled_with_cuda(): self._test_case(fluid.CUDAPlace(0)) + def test_name(self): + with fluid.program_guard(fluid.Program()): + x = paddle.nn.data('x', [3, 4]) + y = paddle.erf(x, name='erf') + self.assertTrue('erf' in y.name) + if __name__ == '__main__': unittest.main() From 0cc63cc343fef246a4cc6f0afe104be29af280fd Mon Sep 17 00:00:00 2001 From: Aurelius84 Date: Thu, 20 Aug 2020 13:53:18 +0800 Subject: [PATCH 35/37] [Paddle2.0] Rename hapi.Input and move `data` api (#26396) * Rename `Input` into `InputSpec` * fix argument place of Input api --- .../fluid/tests/unittests/test_compare_op.py | 5 +- .../fluid/tests/unittests/test_cumsum_op.py | 4 +- .../test_flatten_contiguous_range_op.py | 9 +- .../fluid/tests/unittests/test_max_op.py | 10 +- .../fluid/tests/unittests/test_maximum_op.py | 8 +- .../fluid/tests/unittests/test_min_op.py | 10 +- .../fluid/tests/unittests/test_minimum_op.py | 8 +- .../fluid/tests/unittests/test_multiply.py | 14 ++- .../unittests/test_nn_margin_rank_loss.py | 16 ++- .../tests/unittests/test_nn_sigmoid_op.py | 4 +- .../fluid/tests/unittests/test_randint_op.py | 2 +- .../fluid/tests/unittests/test_randn_op.py | 2 +- 
python/paddle/incubate/hapi/callbacks.py | 8 +- python/paddle/incubate/hapi/model.py | 113 ++++++++---------- .../hapi/tests/dist_hapi_mnist_dynamic.py | 4 +- .../hapi/tests/dist_hapi_mnist_static.py | 4 +- .../incubate/hapi/tests/test_callbacks.py | 2 +- .../paddle/incubate/hapi/tests/test_model.py | 40 ++++--- .../hapi/tests/test_pretrained_model.py | 2 +- .../paddle/incubate/hapi/tests/test_text.py | 58 ++++----- .../incubate/hapi/tests/test_vision_models.py | 4 +- python/paddle/nn/__init__.py | 1 - python/paddle/nn/input.py | 21 ---- python/paddle/static/__init__.py | 5 +- python/paddle/static/input.py | 54 +++++++++ 25 files changed, 227 insertions(+), 181 deletions(-) delete mode 100644 python/paddle/nn/input.py create mode 100644 python/paddle/static/input.py diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index 99d0c77fce50f..cfad50409802d 100644 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -93,8 +93,9 @@ def test_api(self): def test_broadcast_api_1(self): with program_guard(Program(), Program()): - x = paddle.nn.data(name='x', shape=[1, 2, 1, 3], dtype='int32') - y = paddle.nn.data(name='y', shape=[1, 2, 3], dtype='int32') + x = paddle.static.data( + name='x', shape=[1, 2, 1, 3], dtype='int32') + y = paddle.static.data(name='y', shape=[1, 2, 3], dtype='int32') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) exe = paddle.static.Executor(self.place) diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index 57024e8ae5cd5..ad121fac8cc04 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -54,7 +54,7 @@ def run_cases(self): def run_static(self, use_gpu=False): with fluid.program_guard(fluid.Program()): data_np = np.random.random((100, 100)).astype(np.float32) - x = paddle.nn.data('X', [100, 100]) + x = paddle.static.data('X', [100, 100]) y = paddle.cumsum(x) y2 = paddle.cumsum(x, axis=0) y3 = paddle.cumsum(x, axis=-1) @@ -100,7 +100,7 @@ def test_gpu(self): def test_name(self): with fluid.program_guard(fluid.Program()): - x = paddle.nn.data('x', [3, 4]) + x = paddle.static.data('x', [3, 4]) y = paddle.cumsum(x, name='out') self.assertTrue('out' in y.name) diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py index 4bd56802efd46..642044bb4b115 100644 --- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py +++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py @@ -145,19 +145,22 @@ def test_errors(self): x = x.astype('float32') def test_ValueError1(): - x_var = paddle.nn.data(name="x", shape=image_shape, dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32') out = paddle.flatten(x_var, start_axis=2, stop_axis=1) self.assertRaises(ValueError, test_ValueError1) def test_ValueError2(): - x_var = paddle.nn.data(name="x", shape=image_shape, dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32') paddle.flatten(x_var, start_axis=10, stop_axis=1) self.assertRaises(ValueError, test_ValueError2) def test_ValueError3(): - x_var = paddle.nn.data(name="x", shape=image_shape, dtype='float32') + x_var = paddle.static.data( + name="x", shape=image_shape, dtype='float32') 
paddle.flatten(x_var, start_axis=2, stop_axis=10) self.assertRaises(ValueError, test_ValueError3) diff --git a/python/paddle/fluid/tests/unittests/test_max_op.py b/python/paddle/fluid/tests/unittests/test_max_op.py index e2bdaba91a68f..c9afc4bec66f2 100644 --- a/python/paddle/fluid/tests/unittests/test_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_max_op.py @@ -32,7 +32,7 @@ def test_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="float32") + data = paddle.static.data("data", shape=[10, 10], dtype="float32") result_max = paddle.max(x=data, axis=1) exe = paddle.static.Executor(self.place) input_data = np.random.rand(10, 10).astype(np.float32) @@ -41,7 +41,7 @@ def test_api(self): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="int64") + data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_max = paddle.max(x=data, axis=0) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) @@ -50,7 +50,7 @@ def test_api(self): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="int64") + data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_max = paddle.max(x=data, axis=(0, 1)) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) @@ -71,8 +71,8 @@ def test_input_type(): def test_axis_type(): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="int64") - axis = paddle.nn.data("axis", shape=[10, 10], dtype="int64") + data = paddle.static.data("data", shape=[10, 10], dtype="int64") + axis = paddle.static.data("axis", shape=[10, 10], dtype="int64") result_min = paddle.min(data, axis) self.assertRaises(TypeError, test_axis_type) diff --git a/python/paddle/fluid/tests/unittests/test_maximum_op.py b/python/paddle/fluid/tests/unittests/test_maximum_op.py index bed2b57ec5969..5645597007a00 100644 --- a/python/paddle/fluid/tests/unittests/test_maximum_op.py +++ b/python/paddle/fluid/tests/unittests/test_maximum_op.py @@ -36,8 +36,8 @@ def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32") - data_y = paddle.nn.data("y", shape=[10, 15], dtype="float32") + data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") + data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_max = paddle.maximum(data_x, data_y) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"x": self.input_x, @@ -48,8 +48,8 @@ def test_static_api(self): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32") - data_z = paddle.nn.data("z", shape=[15], dtype="float32") + data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") + data_z = paddle.static.data("z", shape=[15], dtype="float32") result_max = paddle.maximum(data_x, data_z, axis=1) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"x": self.input_x, diff --git a/python/paddle/fluid/tests/unittests/test_min_op.py b/python/paddle/fluid/tests/unittests/test_min_op.py index 
e8bfe55f32a12..b9eff05c5ea9f 100644 --- a/python/paddle/fluid/tests/unittests/test_min_op.py +++ b/python/paddle/fluid/tests/unittests/test_min_op.py @@ -32,7 +32,7 @@ def test_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="float32") + data = paddle.static.data("data", shape=[10, 10], dtype="float32") result_min = paddle.min(x=data, axis=1) exe = paddle.static.Executor(self.place) input_data = np.random.rand(10, 10).astype(np.float32) @@ -41,7 +41,7 @@ def test_api(self): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="int64") + data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_min = paddle.min(x=data, axis=0) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) @@ -50,7 +50,7 @@ def test_api(self): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="int64") + data = paddle.static.data("data", shape=[10, 10], dtype="int64") result_min = paddle.min(x=data, axis=(0, 1)) exe = paddle.static.Executor(self.place) input_data = np.random.randint(10, size=(10, 10)).astype(np.int64) @@ -71,8 +71,8 @@ def test_input_type(): def test_axis_type(): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data = paddle.nn.data("data", shape=[10, 10], dtype="int64") - axis = paddle.nn.data("axis", shape=[10, 10], dtype="int64") + data = paddle.static.data("data", shape=[10, 10], dtype="int64") + axis = paddle.static.data("axis", shape=[10, 10], dtype="int64") result_min = paddle.min(data, axis) self.assertRaises(TypeError, test_axis_type) diff --git a/python/paddle/fluid/tests/unittests/test_minimum_op.py b/python/paddle/fluid/tests/unittests/test_minimum_op.py index 550580407acf2..4c08b7386ca2c 100644 --- a/python/paddle/fluid/tests/unittests/test_minimum_op.py +++ b/python/paddle/fluid/tests/unittests/test_minimum_op.py @@ -36,8 +36,8 @@ def test_static_api(self): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32") - data_y = paddle.nn.data("y", shape=[10, 15], dtype="float32") + data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") + data_y = paddle.static.data("y", shape=[10, 15], dtype="float32") result_min = paddle.minimum(data_x, data_y) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"x": self.input_x, @@ -48,8 +48,8 @@ def test_static_api(self): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - data_x = paddle.nn.data("x", shape=[10, 15], dtype="float32") - data_z = paddle.nn.data("z", shape=[15], dtype="float32") + data_x = paddle.static.data("x", shape=[10, 15], dtype="float32") + data_z = paddle.static.data("z", shape=[15], dtype="float32") result_min = paddle.minimum(data_x, data_z, axis=1) exe = paddle.static.Executor(self.place) res, = exe.run(feed={"x": self.input_x, diff --git a/python/paddle/fluid/tests/unittests/test_multiply.py b/python/paddle/fluid/tests/unittests/test_multiply.py index f7f6e1f1aac67..32d0f7b63b8fc 100644 --- a/python/paddle/fluid/tests/unittests/test_multiply.py +++ b/python/paddle/fluid/tests/unittests/test_multiply.py @@ -26,8 +26,10 @@ class TestMultiplyAPI(unittest.TestCase): def 
__run_static_graph_case(self, x_data, y_data, axis=-1): with program_guard(Program(), Program()): - x = paddle.nn.data(name='x', shape=x_data.shape, dtype=x_data.dtype) - y = paddle.nn.data(name='y', shape=y_data.shape, dtype=y_data.dtype) + x = paddle.static.data( + name='x', shape=x_data.shape, dtype=x_data.dtype) + y = paddle.static.data( + name='y', shape=y_data.shape, dtype=y_data.dtype) res = tensor.multiply(x, y, axis=axis) place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( @@ -109,14 +111,14 @@ def test_errors(self): # test static computation graph: dtype can not be int8 paddle.enable_static() with program_guard(Program(), Program()): - x = paddle.nn.data(name='x', shape=[100], dtype=np.int8) - y = paddle.nn.data(name='y', shape=[100], dtype=np.int8) + x = paddle.static.data(name='x', shape=[100], dtype=np.int8) + y = paddle.static.data(name='y', shape=[100], dtype=np.int8) self.assertRaises(TypeError, tensor.multiply, x, y) # test static computation graph: inputs must be broadcastable with program_guard(Program(), Program()): - x = paddle.nn.data(name='x', shape=[20, 50], dtype=np.float64) - y = paddle.nn.data(name='y', shape=[20], dtype=np.float64) + x = paddle.static.data(name='x', shape=[20, 50], dtype=np.float64) + y = paddle.static.data(name='y', shape=[20], dtype=np.float64) self.assertRaises(fluid.core.EnforceNotMet, tensor.multiply, x, y) np.random.seed(7) diff --git a/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py b/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py index 4f60f3e39a573..2e74ffa88f5c5 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py +++ b/python/paddle/fluid/tests/unittests/test_nn_margin_rank_loss.py @@ -54,9 +54,11 @@ def run_static_functional_api(self, place): margin=margin, reduction=reduction) with program_guard(Program(), Program()): - x = paddle.nn.data(name="x", shape=[10, 10], dtype="float64") - y = paddle.nn.data(name="y", shape=[10, 10], dtype="float64") - label = paddle.nn.data( + x = paddle.static.data( + name="x", shape=[10, 10], dtype="float64") + y = paddle.static.data( + name="y", shape=[10, 10], dtype="float64") + label = paddle.static.data( name="label", shape=[10, 10], dtype="float64") result = paddle.nn.functional.margin_ranking_loss( x, y, label, margin, reduction) @@ -78,9 +80,11 @@ def run_static_api(self, place): margin=margin, reduction=reduction) with program_guard(Program(), Program()): - x = paddle.nn.data(name="x", shape=[10, 10], dtype="float64") - y = paddle.nn.data(name="y", shape=[10, 10], dtype="float64") - label = paddle.nn.data( + x = paddle.static.data( + name="x", shape=[10, 10], dtype="float64") + y = paddle.static.data( + name="y", shape=[10, 10], dtype="float64") + label = paddle.static.data( name="label", shape=[10, 10], dtype="float64") margin_rank_loss = paddle.nn.loss.MarginRankingLoss( margin=margin, reduction=reduction) diff --git a/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py b/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py index 451ab696fd6a2..d52a1f5d5b16c 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py +++ b/python/paddle/fluid/tests/unittests/test_nn_sigmoid_op.py @@ -45,7 +45,7 @@ def check_static_api(self, place): main_program = paddle.static.Program() mysigmoid = nn.Sigmoid(name="api_sigmoid") with paddle.static.program_guard(main_program): - x = paddle.nn.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) x.stop_gradient = False y = mysigmoid(x) 
fluid.backward.append_backward(paddle.mean(y)) @@ -86,7 +86,7 @@ def check_static_api(self, place): paddle.enable_static() main_program = paddle.static.Program() with paddle.static.program_guard(main_program): - x = paddle.nn.data(name='x', shape=self.x_shape) + x = paddle.static.data(name='x', shape=self.x_shape) y = functional.sigmoid(x, name="api_sigmoid") exe = paddle.static.Executor(fluid.CPUPlace()) out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y]) diff --git a/python/paddle/fluid/tests/unittests/test_randint_op.py b/python/paddle/fluid/tests/unittests/test_randint_op.py index 715d66aa3332c..88b07f5df83f8 100644 --- a/python/paddle/fluid/tests/unittests/test_randint_op.py +++ b/python/paddle/fluid/tests/unittests/test_randint_op.py @@ -125,7 +125,7 @@ def test_api(self): out4 = paddle.randint( low=-100, high=100, shape=[dim_1, 5, dim_2], dtype='int32') # shape is a tensor and dtype is 'float64' - var_shape = paddle.nn.data( + var_shape = paddle.static.data( name='var_shape', shape=[2], dtype="int64") out5 = paddle.randint( low=1, high=1000, shape=var_shape, dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_randn_op.py b/python/paddle/fluid/tests/unittests/test_randn_op.py index 8b560f18f9f7b..9d2c03f3bba91 100644 --- a/python/paddle/fluid/tests/unittests/test_randn_op.py +++ b/python/paddle/fluid/tests/unittests/test_randn_op.py @@ -34,7 +34,7 @@ def test_api(self): dim_2 = paddle.fill_constant([1], "int32", 50) x3 = paddle.randn([dim_1, dim_2, 784]) - var_shape = paddle.nn.data('X', [2], 'int32') + var_shape = paddle.static.data('X', [2], 'int32') x4 = paddle.randn(var_shape) place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda( diff --git a/python/paddle/incubate/hapi/callbacks.py b/python/paddle/incubate/hapi/callbacks.py index 741552511f9fd..ccf1b6389b831 100644 --- a/python/paddle/incubate/hapi/callbacks.py +++ b/python/paddle/incubate/hapi/callbacks.py @@ -295,8 +295,8 @@ class ProgBarLogger(Callback): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - inputs = [hapi.Input('image', [-1, 1, 28, 28], 'float32')] - labels = [hapi.Input('label', [None, 1], 'int64')] + inputs = [hapi.Input([-1, 1, 28, 28], 'float32', 'image')] + labels = [hapi.Input([None, 1], 'int64', 'label')] train_dataset = hapi.datasets.MNIST(mode='train') @@ -431,8 +431,8 @@ class ModelCheckpoint(Callback): import paddle.fluid as fluid import paddle.incubate.hapi as hapi - inputs = [hapi.Input('image', [-1, 1, 28, 28], 'float32')] - labels = [hapi.Input('label', [None, 1], 'int64')] + inputs = [hapi.Input([-1, 1, 28, 28], 'float32', 'image')] + labels = [hapi.Input([None, 1], 'int64', 'label')] train_dataset = hapi.datasets.MNIST(mode='train') diff --git a/python/paddle/incubate/hapi/model.py b/python/paddle/incubate/hapi/model.py index 0b12987b10a05..f9dec82bd71f0 100644 --- a/python/paddle/incubate/hapi/model.py +++ b/python/paddle/incubate/hapi/model.py @@ -25,6 +25,8 @@ from collections import Iterable from paddle import fluid +# Note: Use alias `Input` temporarily before releasing hapi feature. +from paddle.static import InputSpec as Input from paddle.fluid.framework import in_dygraph_mode, Variable from paddle.fluid.executor import global_scope from paddle.fluid.io import is_belong_to_optimizer @@ -47,40 +49,6 @@ ] -class Input(fluid.dygraph.Layer): - """ - Define inputs the model. - - Args: - name (str): The name/alias of the variable, see :ref:`api_guide_Name` - for more details. 
- shape (tuple(integers)|list[integers]): List|Tuple of integers - declaring the shape. You can set "None" or -1 at a dimension - to indicate the dimension can be of any size. For example, - it is useful to set changeable batch size as "None" or -1. - dtype (np.dtype|VarType|str, optional): The type of the data. Supported - dtype: bool, float16, float32, float64, int8, int16, int32, int64, - uint8. Default: float32. - - Examples: - .. code-block:: python - - import paddle.incubate.hapi as hapi - - input = hapi.Input('x', [None, 784], 'float32') - label = hapi.Input('label', [None, 1], 'int64') - """ - - def __init__(self, name, shape=None, dtype='float32'): - super(Input, self).__init__() - self.shape = shape - self.dtype = dtype - self.name = name - - def forward(self): - return fluid.data(self.name, shape=self.shape, dtype=self.dtype) - - class StaticGraphAdapter(object): """ Model traning/inference with a static graph. @@ -388,8 +356,8 @@ def _make_program(self, mode): with fluid.program_guard(prog, self._startup_prog): inputs = self.model._inputs labels = self.model._labels if self.model._labels else [] - inputs = [k.forward() for k in to_list(inputs)] - labels = [k.forward() for k in to_list(labels)] + inputs = [k._create_feed_layer() for k in to_list(inputs)] + labels = [k._create_feed_layer() for k in to_list(labels)] self._label_vars[mode] = labels outputs = to_list(self.model.network.forward(*inputs)) @@ -704,8 +672,8 @@ def forward(self, x): fluid.enable_dygraph(device) # inputs and labels are not required for dynamic graph. - input = hapi.Input('x', [None, 784], 'float32') - label = hapi.Input('label', [None, 1], 'int64') + input = hapi.Input([None, 784], 'float32', 'x') + label = hapi.Input([None, 1], 'int64', 'label') model = hapi.Model(MyNet(), input, label) optim = fluid.optimizer.SGD(learning_rate=1e-3, @@ -734,16 +702,8 @@ def __init__(self, network, inputs=None, labels=None): if not isinstance(inputs, (list, dict, Input)): raise TypeError( "'inputs' must be list or dict in static graph mode") - if inputs is None: - self._inputs = [Input(name=n) \ - for n in extract_args(self.network.forward) if n != 'self'] - elif isinstance(input, dict): - self._inputs = [inputs[n] \ - for n in extract_args(self.network.forward) if n != 'self'] - else: - self._inputs = to_list(inputs) - - self._labels = to_list(labels) + self._inputs = self._verify_spec(inputs, True) + self._labels = self._verify_spec(labels) # init backend if fluid.in_dygraph_mode(): @@ -787,8 +747,8 @@ def forward(self, x): device = hapi.set_device('gpu') fluid.enable_dygraph(device) - input = hapi.Input('x', [None, 784], 'float32') - label = hapi.Input('label', [None, 1], 'int64') + input = hapi.Input([None, 784], 'float32', 'x') + label = hapi.Input([None, 1], 'int64', 'label') model = hapi.Model(MyNet(), input, label) optim = fluid.optimizer.SGD(learning_rate=1e-3, parameter_list=model.parameters()) @@ -836,8 +796,8 @@ def forward(self, x): device = hapi.set_device('gpu') fluid.enable_dygraph(device) - input = hapi.Input('x', [None, 784], 'float32') - label = hapi.Input('label', [None, 1], 'int64') + input = hapi.Input([None, 784], 'float32', 'x') + label = hapi.Input([None, 1], 'int64', 'label') model = hapi.Model(MyNet(), input, label) optim = fluid.optimizer.SGD(learning_rate=1e-3, parameter_list=model.parameters()) @@ -1194,8 +1154,8 @@ def fit( train_dataset = hapi.datasets.MNIST(mode='train') val_dataset = hapi.datasets.MNIST(mode='test') - input = hapi.Input('image', [None, 1, 28, 28], 'float32') - label = 
hapi.Input('label', [None, 1], 'int64') + input = hapi.Input([None, 1, 28, 28], 'float32', 'image') + label = hapi.Input([None, 1], 'int64', 'label') model = hapi.Model(hapi.vision.LeNet(classifier_activation=None), input, label) @@ -1231,8 +1191,8 @@ def fit( val_loader = fluid.io.DataLoader(val_dataset, places=device, batch_size=64) - input = hapi.Input('image', [None, 1, 28, 28], 'float32') - label = hapi.Input('label', [None, 1], 'int64') + input = hapi.Input([None, 1, 28, 28], 'float32', 'image') + label = hapi.Input([None, 1], 'int64', 'label') model = hapi.Model(hapi.vision.LeNet(classifier_activation=None), input, label) @@ -1359,8 +1319,8 @@ def evaluate( # declarative mode val_dataset = hapi.datasets.MNIST(mode='test') - input = hapi.Input('image', [-1, 1, 28, 28], 'float32') - label = hapi.Input('label', [None, 1], 'int64') + input = hapi.Input([-1, 1, 28, 28], 'float32', 'image') + label = hapi.Input([None, 1], 'int64', 'label') model = hapi.Model(hapi.vision.LeNet(), input, label) model.prepare(metrics=hapi.metrics.Accuracy()) @@ -1433,12 +1393,13 @@ def predict(self, num_workers (int): The number of subprocess to load data, 0 for no subprocess used and loading data in main process. When train_data and eval_data are both the instance of Dataloader, this argument will be ignored. Default: 0. - stack_output (bool): Whether stack output field like a batch, as for an output + stack_outputs (bool): Whether stack output field like a batch, as for an output filed of a sample is in shape [X, Y], test_data contains N samples, predict output field will be in shape [N, X, Y] if stack_output is True, and will be a length N list in shape [[X, Y], [X, Y], ....[X, Y]] if stack_outputs is False. stack_outputs as False is used for LoDTensor output situation, it is recommended set as True if outputs contains no LoDTensor. Default: False. + callbacks(Callback): A Callback instance, default None. Returns: list: output of models. @@ -1466,7 +1427,7 @@ def __len__(self): test_dataset = MnistDataset(mode='test', return_label=False) # declarative mode - input = hapi.Input('image', [-1, 1, 28, 28], 'float32') + input = hapi.Input([-1, 1, 28, 28], 'float32', 'image') model = hapi.Model(hapi.vision.LeNet(), input) model.prepare() @@ -1548,7 +1509,7 @@ def save_inference_model(self, import paddle.fluid as fluid import paddle.incubate.hapi as hapi - input = hapi.Input('image', [-1, 1, 28, 28], 'float32') + input = hapi.Input([-1, 1, 28, 28], 'float32', 'image') model = hapi.Model(hapi.vision.LeNet(), input) model.prepare() @@ -1639,6 +1600,36 @@ def _run_one_epoch(self, data_loader, callbacks, mode, logs={}): return logs, outputs return logs + def _verify_spec(self, specs, is_input=False): + out_specs = [] + + if specs is None: + # If not specific specs of `Input`, using argument names of `forward` function + # to generate `Input`. + if is_input: + out_specs = [ + Input(name=n) for n in extract_args(self.network.forward) + if n != 'self' + ] + else: + out_specs = to_list(specs) + elif isinstance(specs, dict): + assert is_input == False + out_specs = [specs[n] \ + for n in extract_args(self.network.forward) if n != 'self'] + else: + out_specs = to_list(specs) + # Note: checks each element has specificed `name`. + if out_specs is not None: + for i, spec in enumerate(out_specs): + assert isinstance(spec, Input) + if spec.name is None: + raise ValueError( + "Requires Input[{}].name != None, but receive `None` with {}.". 
+ format(i, spec)) + + return out_specs + def _reset_metrics(self): for metric in self._metrics: metric.reset() diff --git a/python/paddle/incubate/hapi/tests/dist_hapi_mnist_dynamic.py b/python/paddle/incubate/hapi/tests/dist_hapi_mnist_dynamic.py index b338f3310b4c7..f57a039fa49e3 100644 --- a/python/paddle/incubate/hapi/tests/dist_hapi_mnist_dynamic.py +++ b/python/paddle/incubate/hapi/tests/dist_hapi_mnist_dynamic.py @@ -64,8 +64,8 @@ def test_static_multiple_gpus(self): im_shape = (-1, 1, 28, 28) batch_size = 128 - inputs = [Input('image', im_shape, 'float32')] - labels = [Input('label', [None, 1], 'int64')] + inputs = [Input(im_shape, 'float32', 'image')] + labels = [Input([None, 1], 'int64', 'label')] model = Model(LeNet(classifier_activation=None), inputs, labels) optim = fluid.optimizer.Momentum( diff --git a/python/paddle/incubate/hapi/tests/dist_hapi_mnist_static.py b/python/paddle/incubate/hapi/tests/dist_hapi_mnist_static.py index 1484620a4efdf..25e02774d11a1 100644 --- a/python/paddle/incubate/hapi/tests/dist_hapi_mnist_static.py +++ b/python/paddle/incubate/hapi/tests/dist_hapi_mnist_static.py @@ -63,8 +63,8 @@ def test_static_multiple_gpus(self): im_shape = (-1, 1, 28, 28) batch_size = 128 - inputs = [Input('image', im_shape, 'float32')] - labels = [Input('label', [None, 1], 'int64')] + inputs = [Input(im_shape, 'float32', 'image')] + labels = [Input([None, 1], 'int64', 'label')] model = Model(LeNet(classifier_activation=None), inputs, labels) optim = fluid.optimizer.Momentum( diff --git a/python/paddle/incubate/hapi/tests/test_callbacks.py b/python/paddle/incubate/hapi/tests/test_callbacks.py index 2a8a470736d92..e49bf215c276c 100644 --- a/python/paddle/incubate/hapi/tests/test_callbacks.py +++ b/python/paddle/incubate/hapi/tests/test_callbacks.py @@ -36,7 +36,7 @@ def run_callback(self): freq = 2 eval_steps = 20 - inputs = [Input('image', [None, 1, 28, 28], 'float32')] + inputs = [Input([None, 1, 28, 28], 'float32', 'image')] lenet = Model(LeNet(), inputs) lenet.prepare() diff --git a/python/paddle/incubate/hapi/tests/test_model.py b/python/paddle/incubate/hapi/tests/test_model.py index cf659514248ec..103ab30724ca8 100644 --- a/python/paddle/incubate/hapi/tests/test_model.py +++ b/python/paddle/incubate/hapi/tests/test_model.py @@ -150,8 +150,8 @@ def setUpClass(cls): cls.acc1 = dynamic_evaluate(dy_lenet, cls.val_loader) - cls.inputs = [Input('image', [-1, 1, 28, 28], 'float32')] - cls.labels = [Input('label', [None, 1], 'int64')] + cls.inputs = [Input([-1, 1, 28, 28], 'float32', 'image')] + cls.labels = [Input([None, 1], 'int64', 'label')] cls.save_dir = tempfile.mkdtemp() cls.weight_path = os.path.join(cls.save_dir, 'lenet') @@ -330,8 +330,8 @@ def get_expect(): optim2 = fluid.optimizer.SGD(learning_rate=0.001, parameter_list=net.parameters()) - inputs = [Input('x', [None, dim], 'float32')] - labels = [Input('label', [None, 1], 'int64')] + inputs = [Input([None, dim], 'float32', 'x')] + labels = [Input([None, 1], 'int64', 'label')] model = Model(net, inputs, labels) model.prepare( optim2, loss_function=CrossEntropyLoss(reduction="sum")) @@ -359,7 +359,7 @@ def get_expect(): fluid.enable_dygraph(device) if dynamic else None self.set_seed() net = MyModel() - inputs = [Input('x', [None, dim], 'float32')] + inputs = [Input([None, dim], 'float32', 'x')] model = Model(net, inputs) model.prepare() out, = model.test_batch([data]) @@ -373,8 +373,8 @@ def test_save_load(self): device = hapi.set_device('cpu') fluid.enable_dygraph(device) if dynamic else None net = 
MyModel(classifier_activation=None) - inputs = [Input('x', [None, 20], 'float32')] - labels = [Input('label', [None, 1], 'int64')] + inputs = [Input([None, 20], 'float32', 'x')] + labels = [Input([None, 1], 'int64', 'label')] optim = fluid.optimizer.SGD(learning_rate=0.001, parameter_list=net.parameters()) model = Model(net, inputs, labels) @@ -399,8 +399,8 @@ def test_dynamic_save_static_load(self): model.save(path + '/test') fluid.disable_dygraph() - inputs = [Input('x', [None, 20], 'float32')] - labels = [Input('label', [None, 1], 'int64')] + inputs = [Input([None, 20], 'float32', 'x')] + labels = [Input([None, 1], 'int64', 'label')] model = Model(MyModel(classifier_activation=None), inputs, labels) optim = fluid.optimizer.SGD(learning_rate=0.001, parameter_list=model.parameters()) @@ -413,8 +413,8 @@ def test_static_save_dynamic_load(self): path = tempfile.mkdtemp() net = MyModel(classifier_activation=None) - inputs = [Input('x', [None, 20], 'float32')] - labels = [Input('label', [None, 1], 'int64')] + inputs = [Input([None, 20], 'float32', 'x')] + labels = [Input([None, 1], 'int64', 'label')] optim = fluid.optimizer.SGD(learning_rate=0.001, parameter_list=net.parameters()) model = Model(net, inputs, labels) @@ -426,8 +426,8 @@ def test_static_save_dynamic_load(self): fluid.enable_dygraph(device) #if dynamic else None net = MyModel(classifier_activation=None) - inputs = [Input('x', [None, 20], 'float32')] - labels = [Input('label', [None, 1], 'int64')] + inputs = [Input([None, 20], 'float32', 'x')] + labels = [Input([None, 1], 'int64', 'label')] optim = fluid.optimizer.SGD(learning_rate=0.001, parameter_list=net.parameters()) model = Model(net, inputs, labels) @@ -442,7 +442,7 @@ def test_parameters(self): device = hapi.set_device('cpu') fluid.enable_dygraph(device) if dynamic else None net = MyModel() - inputs = [Input('x', [None, 20], 'float32')] + inputs = [Input([None, 20], 'float32', 'x')] model = Model(net, inputs) model.prepare() params = model.parameters() @@ -452,7 +452,7 @@ def test_parameters(self): def test_export_deploy_model(self): net = LeNet() - inputs = [Input('image', [-1, 1, 28, 28], 'float32')] + inputs = [Input([-1, 1, 28, 28], 'float32', 'image')] model = Model(net, inputs) model.prepare() save_dir = tempfile.mkdtemp() @@ -480,5 +480,15 @@ def test_export_deploy_model(self): shutil.rmtree(save_dir) +class TestRaiseError(unittest.TestCase): + def test_input_without_name(self): + net = MyModel(classifier_activation=None) + + inputs = [Input([None, 10], 'float32')] + labels = [Input([None, 1], 'int64', 'label')] + with self.assertRaises(ValueError): + model = Model(net, inputs, labels) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/incubate/hapi/tests/test_pretrained_model.py b/python/paddle/incubate/hapi/tests/test_pretrained_model.py index 588797322f4ab..334ebff449d4f 100644 --- a/python/paddle/incubate/hapi/tests/test_pretrained_model.py +++ b/python/paddle/incubate/hapi/tests/test_pretrained_model.py @@ -28,7 +28,7 @@ def infer(self, x, arch, dygraph=True): fluid.enable_dygraph() net = models.__dict__[arch](pretrained=True, classifier_activation=None) - inputs = [Input('image', [None, 3, 224, 224], 'float32')] + inputs = [Input([None, 3, 224, 224], 'float32', 'image')] model = Model(network=net, inputs=inputs) model.prepare() res = model.test_batch(x) diff --git a/python/paddle/incubate/hapi/tests/test_text.py b/python/paddle/incubate/hapi/tests/test_text.py index 78f089b06a38d..bdc637997b0cb 100644 --- 
a/python/paddle/incubate/hapi/tests/test_text.py +++ b/python/paddle/incubate/hapi/tests/test_text.py @@ -142,7 +142,7 @@ def model_forward(model, inputs): def make_inputs(self): inputs = [ - Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"), + Input([None, None, self.inputs[-1].shape[-1]], "float32", "input"), ] return inputs @@ -168,7 +168,7 @@ def model_forward(model, inputs): def make_inputs(self): inputs = [ - Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"), + Input([None, None, self.inputs[-1].shape[-1]], "float32", "input"), ] return inputs @@ -219,8 +219,8 @@ def model_forward(model, init_hidden, init_cell): def make_inputs(self): inputs = [ - Input("init_hidden", [None, self.inputs[0].shape[-1]], "float32"), - Input("init_cell", [None, self.inputs[1].shape[-1]], "float32"), + Input([None, self.inputs[0].shape[-1]], "float32", "init_hidden"), + Input([None, self.inputs[1].shape[-1]], "float32", "init_cell"), ] return inputs @@ -272,10 +272,10 @@ def model_forward(model, enc_input, attn_bias): def make_inputs(self): inputs = [ - Input("enc_input", [None, None, self.inputs[0].shape[-1]], - "float32"), - Input("attn_bias", [None, self.inputs[1].shape[1], None, None], - "float32"), + Input([None, None, self.inputs[0].shape[-1]], "float32", + "enc_input"), + Input([None, self.inputs[1].shape[1], None, None], "float32", + "attn_bias"), ] return inputs @@ -336,14 +336,14 @@ def model_forward(model, def make_inputs(self): inputs = [ - Input("dec_input", [None, None, self.inputs[0].shape[-1]], - "float32"), - Input("enc_output", [None, None, self.inputs[0].shape[-1]], - "float32"), - Input("self_attn_bias", - [None, self.inputs[-1].shape[1], None, None], "float32"), - Input("cross_attn_bias", - [None, self.inputs[-1].shape[1], None, None], "float32"), + Input([None, None, self.inputs[0].shape[-1]], "float32", + "dec_input"), + Input([None, None, self.inputs[0].shape[-1]], "float32", + "enc_output"), + Input([None, self.inputs[-1].shape[1], None, None], "float32", + "self_attn_bias"), + Input([None, self.inputs[-1].shape[1], None, None], "float32", + "cross_attn_bias"), ] return inputs @@ -431,10 +431,10 @@ def model_forward(model, enc_output, trg_src_attn_bias): def make_inputs(self): inputs = [ - Input("enc_output", [None, None, self.inputs[0].shape[-1]], - "float32"), - Input("trg_src_attn_bias", - [None, self.inputs[1].shape[1], None, None], "float32"), + Input([None, None, self.inputs[0].shape[-1]], "float32", + "enc_output"), + Input([None, self.inputs[1].shape[1], None, None], "float32", + "trg_src_attn_bias"), ] return inputs @@ -473,9 +473,9 @@ def model_forward(model, word, lengths, target=None): def make_inputs(self): inputs = [ - Input("word", [None, None], "int64"), - Input("lengths", [None], "int64"), - Input("target", [None, None], "int64"), + Input([None, None], "int64", "word"), + Input([None], "int64", "lengths"), + Input([None, None], "int64", "target"), ] return inputs @@ -517,7 +517,7 @@ def model_forward(self, inputs): def make_inputs(self): inputs = [ - Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"), + Input([None, None, self.inputs[-1].shape[-1]], "float32", "input"), ] return inputs @@ -543,7 +543,7 @@ def model_forward(model, inputs): def make_inputs(self): inputs = [ - Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"), + Input([None, None, self.inputs[-1].shape[-1]], "float32", "input"), ] return inputs @@ -579,7 +579,7 @@ def model_forward(model, inputs): def make_inputs(self): inputs = [ - 
Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"), + Input([None, None, self.inputs[-1].shape[-1]], "float32", "input"), ] return inputs @@ -609,7 +609,7 @@ def model_forward(model, inputs): def make_inputs(self): inputs = [ - Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"), + Input([None, None, self.inputs[-1].shape[-1]], "float32", "input"), ] return inputs @@ -645,7 +645,7 @@ def model_forward(model, inputs): def make_inputs(self): inputs = [ - Input("input", [None, None, self.inputs[-1].shape[-1]], "float32"), + Input([None, None, self.inputs[-1].shape[-1]], "float32", "input"), ] return inputs @@ -680,7 +680,7 @@ def model_forward(model, inputs): def make_inputs(self): inputs = [ - Input("input", [None, self.inputs[-1].shape[1], None], "float32"), + Input([None, self.inputs[-1].shape[1], None], "float32", "input"), ] return inputs diff --git a/python/paddle/incubate/hapi/tests/test_vision_models.py b/python/paddle/incubate/hapi/tests/test_vision_models.py index 16dbe431be801..2dc9355bcc300 100644 --- a/python/paddle/incubate/hapi/tests/test_vision_models.py +++ b/python/paddle/incubate/hapi/tests/test_vision_models.py @@ -28,7 +28,7 @@ def models_infer(self, arch, pretrained=False, batch_norm=False): else: net = models.__dict__[arch](pretrained=pretrained) - input = hapi.Input('image', [None, 3, 224, 224], 'float32') + input = hapi.Input([None, 3, 224, 224], 'float32', 'image') model = hapi.Model(net, input) model.prepare() @@ -71,7 +71,7 @@ def test_resnet152(self): self.models_infer('resnet152') def test_lenet(self): - input = hapi.Input('x', [None, 1, 28, 28], 'float32') + input = hapi.Input([None, 1, 28, 28], 'float32', 'x') lenet = hapi.Model(models.__dict__['LeNet'](), input) lenet.prepare() diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index cc8658f9d73a4..72a9f8d5c934b 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -49,7 +49,6 @@ # from .decode import ctc_greedy_decoder #DEFINE_ALIAS # from .decode import dynamic_decode #DEFINE_ALIAS from .decode import gather_tree #DEFINE_ALIAS -from .input import data #DEFINE_ALIAS # from .input import Input #DEFINE_ALIAS from .layer.activation import ELU from .layer.activation import GELU diff --git a/python/paddle/nn/input.py b/python/paddle/nn/input.py deleted file mode 100644 index b5f591f44a9a1..0000000000000 --- a/python/paddle/nn/input.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# TODO: define input placeholders of neural network -from ..fluid import data #DEFINE_ALIAS - -__all__ = [ - 'data', - # 'Input' -] diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py index d31e5173f8b7d..93060c7865e27 100644 --- a/python/paddle/static/__init__.py +++ b/python/paddle/static/__init__.py @@ -17,9 +17,12 @@ 'append_backward', 'gradients', 'Executor', 'global_scope', 'scope_guard', 'BuildStrategy', 'CompiledProgram', 'Print', 'py_func', 'ExecutionStrategy', 'name_scope', 'ParallelExecutor', 'program_guard', 'WeightNormParamAttr', - 'default_main_program', 'default_startup_program', 'Program', 'save', 'load' + 'default_main_program', 'default_startup_program', 'Program', 'save', + 'load', 'data', 'InputSpec' ] +from .input import data #DEFINE_ALIAS +from .input import InputSpec #DEFINE_ALIAS from ..fluid.executor import Executor #DEFINE_ALIAS from ..fluid.executor import global_scope #DEFINE_ALIAS from ..fluid.executor import scope_guard #DEFINE_ALIAS diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py new file mode 100644 index 0000000000000..0fe1d7e03f43b --- /dev/null +++ b/python/paddle/static/input.py @@ -0,0 +1,54 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ..fluid.data import data + +__all__ = ['data', 'InputSpec'] + + +class InputSpec(object): + """ + Define input specification of the model. + + Args: + name (str): The name/alias of the variable, see :ref:`api_guide_Name` + for more details. + shape (tuple(integers)|list[integers]): List|Tuple of integers + declaring the shape. You can set "None" or -1 at a dimension + to indicate the dimension can be of any size. For example, + it is useful to set changeable batch size as "None" or -1. + dtype (np.dtype|VarType|str, optional): The type of the data. Supported + dtype: bool, float16, float32, float64, int8, int16, int32, int64, + uint8. Default: float32. + + Examples: + .. 
code-block:: python + + from paddle.static import InputSpec + + input = InputSpec([None, 784], 'float32', 'x') + label = InputSpec([None, 1], 'int64', 'label') + """ + + def __init__(self, shape=None, dtype='float32', name=None): + self.shape = shape + self.dtype = dtype + self.name = name + + def _create_feed_layer(self): + return data(self.name, shape=self.shape, dtype=self.dtype) + + def __repr__(self): + return '{}(shape={}, dtype={}, name={})'.format( + type(self).__name__, self.shape, self.dtype, self.name) From 57d434df5d0306452f2d27a66d810993c4e0eea7 Mon Sep 17 00:00:00 2001 From: 123malin Date: Thu, 20 Aug 2020 14:40:52 +0800 Subject: [PATCH 36/37] add save/load for parameter server (#26235) * add save/load for parameter server --- python/paddle/distributed/fleet/__init__.py | 2 + .../distributed/fleet/base/fleet_base.py | 38 ++- .../fleet/runtime/parameter_server_runtime.py | 314 +++++++++++++++++- .../distributed/fleet/runtime/runtime_base.py | 6 + .../fluid/tests/unittests/CMakeLists.txt | 4 + .../fluid/tests/unittests/dist_fleet_ctr.py | 15 +- .../dist_fleet_sparse_embedding_ctr.py | 10 +- .../tests/unittests/test_dist_fleet_base.py | 25 +- .../tests/unittests/test_dist_fleet_ctr.py | 4 +- .../fluid/tests/unittests/test_fleet_base.py | 22 +- .../tests/unittests/test_fleet_base_2.py | 102 ++++++ .../tests/unittests/test_fleet_base_3.py | 52 +++ .../tests/unittests/test_fleet_runtime.py | 23 ++ 13 files changed, 562 insertions(+), 55 deletions(-) create mode 100644 python/paddle/fluid/tests/unittests/test_fleet_base_2.py create mode 100644 python/paddle/fluid/tests/unittests/test_fleet_base_3.py diff --git a/python/paddle/distributed/fleet/__init__.py b/python/paddle/distributed/fleet/__init__.py index e291fdf4ed975..b080fb17553d4 100644 --- a/python/paddle/distributed/fleet/__init__.py +++ b/python/paddle/distributed/fleet/__init__.py @@ -48,4 +48,6 @@ run_server = fleet.run_server stop_worker = fleet.stop_worker distributed_optimizer = fleet.distributed_optimizer +save_inference_model = fleet.save_inference_model +save_persistables = fleet.save_persistables minimize = fleet.minimize diff --git a/python/paddle/distributed/fleet/base/fleet_base.py b/python/paddle/distributed/fleet/base/fleet_base.py index 695fd01909c0e..8093ad504c1a4 100644 --- a/python/paddle/distributed/fleet/base/fleet_base.py +++ b/python/paddle/distributed/fleet/base/fleet_base.py @@ -19,10 +19,26 @@ from .meta_optimizer_factory import MetaOptimizerFactory from .runtime_factory import RuntimeFactory from .util_factory import UtilFactory +from paddle.fluid.wrapped_decorator import wrap_decorator __all__ = ['Fleet'] +def _inited_runtime_handler_(func): + def __impl__(*args, **kwargs): + cls = args[0] + + if cls._runtime_handle is None: + raise ValueError("Fleet can not find suitable runtime handler") + + return func(*args, **kwargs) + + return __impl__ + + +inited_runtime_handler = wrap_decorator(_inited_runtime_handler_) + + class Fleet(object): """ Unified API for distributed training of PaddlePaddle @@ -182,34 +198,48 @@ def barrier_worker(self): """ self._role_maker.barrier_worker() + @inited_runtime_handler def init_worker(self): """ init worker """ - assert self._runtime_handle is not None self._runtime_handle._init_worker() + @inited_runtime_handler def init_server(self, *args, **kwargs): """ init server """ - assert self._runtime_handle is not None self._runtime_handle._init_server(*args, **kwargs) + @inited_runtime_handler def run_server(self): """ run server """ - assert self._runtime_handle is not 
None self._runtime_handle._run_server() + @inited_runtime_handler def stop_worker(self): """ stop worker """ - assert self._runtime_handle is not None self._runtime_handle._stop_worker() + def save_inference_model(self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True): + self._runtime_handle._save_inference_model( + executor, dirname, feeded_var_names, target_vars, main_program, + export_for_deployment) + + def save_persistables(self, executor, dirname, main_program=None): + self._runtime_handle._save_persistables(executor, dirname, main_program) + def distributed_optimizer(self, optimizer, strategy=None): """ distirbuted_optimizer diff --git a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py index 813649edbcba7..c731ed0889334 100644 --- a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py +++ b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py @@ -13,11 +13,14 @@ # limitations under the License. import os -import logging import warnings import paddle.fluid as fluid from paddle.fluid import core +from paddle.fluid.framework import Program +from paddle.fluid.compiler import CompiledProgram +from paddle.fluid.executor import Executor +from paddle.fluid.parallel_executor import ParallelExecutor from .runtime_base import RuntimeBase @@ -241,3 +244,312 @@ def _stop_worker(self): self._communicator.stop() executor = fluid.Executor(fluid.CPUPlace()) executor.close() + + def _get_optimizer_status(self, op, param_name): + supported_opts = [ + "sgd", "adam", "adagrad", "adamax", "momentum", "lars_momentum", + "rmsprop", "decayed_adagrad", "ftrl" + ] + + reshaped_val_map = {} + reshaped_val_map["sgd"] = [] + reshaped_val_map["adam"] = ["moment1_0", "moment2_0"] + reshaped_val_map["adagrad"] = ["moment_0"] + reshaped_val_map["adamax"] = ["moment_0", "inf_norm_0"] + reshaped_val_map["momentum"] = ["velocity_0"] + reshaped_val_map["lars_momentum"] = ["velocity_0"] + reshaped_val_map[ + "rmsprop"] = ["momentum_0", "mean_square_0", "mean_grad_0"] + reshaped_val_map["decayed_adagrad"] = ["moment_0"] + reshaped_val_map["ftrl"] = ["squared_0", "linear_0"] + + orishaped_val_map = {} + orishaped_val_map["adam"] = ["beta1_pow_acc_0", "beta2_pow_acc_0"] + orishaped_val_map["adamax"] = ["beta1_pow_acc_0"] + + if op not in supported_opts: + raise ValueError( + "fleet can not support optimizer: {}, only this can be supported: {}". 
+ format(op, supported_opts)) + + reshaped_names = [ + param_name + "_" + val for val in reshaped_val_map[op] + ] + + if op not in orishaped_val_map: + origin_names = [] + else: + origin_names = [ + param_name + "_" + val for val in orishaped_val_map[op] + ] + return reshaped_names, origin_names + + def _get_optimizer_op(self, param_name): + from paddle.fluid.incubate.fleet.parameter_server.ir.public import _get_optimize_ops + + opts = _get_optimize_ops(self.origin_main_program) + for op in opts: + if "Param" in op.input_names and \ + "LearningRate" in op.input_names and op.input("Param")[0] == param_name: + return op + + def _save_dense_params(self, executor, dirname, context, main_program): + self._communicator.recv() + + prog = Program() + block = prog.global_block() + local_vars = [] + + for name, var_ctx in context.items(): + if len(var_ctx.origin_varnames()) != 1: + raise ValueError("Dense can not support split now.") + + varname = var_ctx.origin_varnames()[0] + local_vars.append(varname) + + optimizer = self._get_optimizer_op(varname) + reshaped_varnames, origin_varnames = self._get_optimizer_status( + optimizer.type, varname) + + for var_name in [varname] + reshaped_varnames + origin_varnames: + var = self.origin_main_program.global_block().vars[var_name] + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self.role_maker.worker_index(), + "shape": var.shape, + "slice_shapes": + [",".join([str(i) for i in var.shape])], + "slice_varnames": [var.name], + "remote_varnames": [var.name], + "is_sparse": False, + "endpoints": var_ctx.split_endpoints(), + "file_path": os.path.join(dirname, var.name) + }) + + executor.run(prog) + return local_vars + + def _save_sparse_params(self, executor, dirname, context, main_program): + prog = Program() + block = prog.global_block() + local_vars = [] + + for name, var_ctx in context.items(): + if len(var_ctx.origin_varnames()) != 1: + raise ValueError("Dense can not support split now.") + + varname = var_ctx.origin_varnames()[0] + local_vars.append(varname) + + optimizer = self._get_optimizer_op(varname) + reshaped_varnames, origin_varnames = self._get_optimizer_status( + optimizer.type, varname) + + var = self.origin_main_program.global_block().vars[varname] + slice_shapes = [] + dims1 = ",".join([str(i) for i in var.shape[1:]]) + + for section in var_ctx.sections(): + slice_shapes.append(str(section) + dims1) + + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self.role_maker.worker_index(), + "shape": var.shape, + "slice_shapes": slice_shapes, + "slice_varnames": var_ctx.split_varnames(), + "remote_varnames": var_ctx.split_varnames(), + "is_sparse": True, + "endpoints": var_ctx.split_endpoints(), + "pserver_num": len(self.role_maker.get_pserver_endpoints()), + "file_path": os.path.join(dirname, var.name) + }) + + for reshaped_varname in reshaped_varnames: + var = self.origin_main_program.global_block().vars[ + reshaped_varname] + + slice_varnames = [] + remote_varnames = [] + for i in range(len(var_ctx.split_varnames())): + slice_varnames.append("{}.block{}".format(reshaped_varname, + i)) + remote_varnames.append(reshaped_varname) + + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self.role_maker.worker_index(), + "shape": var.shape, + "slice_shapes": slice_shapes, + "slice_varnames": slice_varnames, + "remote_varnames": remote_varnames, + "is_sparse": True, + "endpoints": var_ctx.split_endpoints(), + "pserver_num": + len(self.role_maker.get_pserver_endpoints()), + "file_path": os.path.join(dirname, 
var.name) + }) + + for origin_varname in origin_varnames: + var = self.origin_main_program.global_block().vars[ + origin_varname] + + block.append_op( + type='recv_save', + attrs={ + "trainer_id": self.role_maker.worker_index(), + "shape": var.shape, + "slice_shapes": + [",".join([str(i) for i in var.shape])], + "slice_varnames": [origin_varname], + "remote_varnames": [origin_varname], + "is_sparse": False, + "endpoints": var_ctx.split_endpoints()[:1], + "file_path": os.path.join(dirname, var.name) + }) + executor.run(prog) + return context.keys() + + def _save_distributed_params(self, executor, dirname, context, + main_program): + prog = Program() + block = prog.global_block() + + for name, var_ctx in context.items(): + block.append_op( + type='checkpoint_notify', + attrs={ + "varname": name, + "is_slice": True, + "slice_varnames": var_ctx.split_varnames(), + "remote_varnames": var_ctx.split_varnames(), + "endpoints": var_ctx.split_endpoints(), + "dirname": dirname + }) + + executor.run(prog) + return context.keys() + + def _save_distributed_persistables(self, executor, dirname, main_program): + dense_ctx = self.compiled_strategy.get_communicator_recv_context( + recv_type=1) + + sparse_ctx = self.compiled_strategy.get_communicator_recv_context( + recv_type=2) + + distributed_ctx = self.compiled_strategy.get_communicator_recv_context( + recv_type=3) + + recv_dense_varnames = self._save_dense_params(executor, dirname, + dense_ctx, main_program) + + recv_sparse_varnames = self._save_sparse_params( + executor, dirname, sparse_ctx, main_program) + + recv_distributed_varnames = self._save_distributed_params( + executor, dirname, distributed_ctx, main_program) + + saved_varnames = recv_dense_varnames + list( + recv_sparse_varnames) + list(recv_distributed_varnames) + + remaining_vars = list( + filter( + ParameterServerRuntime.__exclude_vars(saved_varnames), + main_program.list_vars())) + + fluid.io.save_vars( + executor, + main_program=main_program, + dirname=dirname, + vars=remaining_vars) + + def _ps_inference_save_persistables(self, + executor, + dirname, + main_program=None, + **kwargs): + """ + This function filters out all variables with `persistable==True` from the + give `main_program` and then saves these variables to the folder `dirname` + or file `filename`. + + The `dirname` is used to specify the folder where persistable variables + are going to be saved. If you would like to save variables in separate + files, set `filename` None; if you would like to save all variables in a + single file, use `filename` to specify the file name. 
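+
+        Example (a minimal, hypothetical sketch; it assumes a trainer in an
+        already-initialized parameter-server fleet job and is normally reached
+        through ``fleet.save_persistables``; the directory name is illustrative):
+
+        .. code-block:: python
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            fleet.save_persistables(executor=exe, dirname="/tmp/fleet_persistables")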
+ """ + + if isinstance(executor, ParallelExecutor): + raise TypeError( + "in fleet.save_persistables() function, executor must be as Executor type, ParallelExecutor is not allowed" + ) + + if not isinstance(executor, Executor): + raise TypeError( + "in fleet.save_persistables() function, executor must be as Executor type" + ) + + if main_program is None: + main_program = fluid.default_main_program() + + if isinstance(main_program, CompiledProgram): + raise TypeError( + "in fleet.save_persistables() function, main_program must be as Program type, CompiledProgram is not allowed" + ) + + self._save_distributed_persistables(executor, dirname, main_program) + + def _ps_inference_save_inference_model(self, + executor, + dirname, + feeded_var_names, + target_vars, + main_program=None, + export_for_deployment=True): + """ + Prune the given `main_program` to build a new program especially for inference, + and then save it and all related parameters to given `dirname` by the `executor`. + """ + + if isinstance(executor, ParallelExecutor): + raise TypeError( + "in fleet.save_inference_model() function, executor must be as Executor type, ParallelExecutor is not allowed" + ) + + if not isinstance(executor, Executor): + raise TypeError( + "in fleet.save_inference_model() function, executor must be as Executor type" + ) + + if main_program is not None: + if isinstance(main_program, CompiledProgram): + raise TypeError( + "in fleet.save_inference_model() function, main_program must be as Program type, CompiledProgram is not allowed" + ) + fluid.io.save_inference_model(dirname, feeded_var_names, + target_vars, executor, main_program, + None, None, export_for_deployment) + else: + fluid.io.save_inference_model(dirname, feeded_var_names, + target_vars, executor, + self.origin_main_program, None, None, + export_for_deployment, True) + + model_basename = "__model__" + model_filename = os.path.join(dirname, model_basename) + + with open(model_filename, "rb") as f: + program_desc_str = f.read() + + program = Program.parse_from_string(program_desc_str) + program._copy_dist_param_info_from(fluid.default_main_program()) + self._ps_inference_save_persistables(executor, dirname, program) + + def _save_inference_model(self, *args, **kwargs): + self._ps_inference_save_inference_model(*args, **kwargs) + + def _save_persistables(self, *args, **kwargs): + self._ps_inference_save_persistables(*args, **kwargs) diff --git a/python/paddle/distributed/fleet/runtime/runtime_base.py b/python/paddle/distributed/fleet/runtime/runtime_base.py index 38f9f882cb487..2e8bacfbc3b1d 100644 --- a/python/paddle/distributed/fleet/runtime/runtime_base.py +++ b/python/paddle/distributed/fleet/runtime/runtime_base.py @@ -33,3 +33,9 @@ def _run_server(self): def _stop_worker(self): pass + + def _save_inference_model(self, *args, **kwargs): + pass + + def _save_persistables(self, *args, **kwargs): + pass diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index 126b4465eae48..20b49c5fa37e4 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -33,6 +33,8 @@ list(APPEND MIXED_DIST_TEST_OPS test_fleet_api_input) list(APPEND MIXED_DIST_TEST_OPS test_fleet_checkpoint) list(APPEND MIXED_DIST_TEST_OPS test_collective_optimizer) list(APPEND MIXED_DIST_TEST_OPS test_fleet_base) +list(APPEND MIXED_DIST_TEST_OPS test_fleet_base_2) +list(APPEND MIXED_DIST_TEST_OPS test_fleet_base_3) list(APPEND MIXED_DIST_TEST_OPS 
test_fleet_recompute_meta_optimizer) list(APPEND MIXED_DIST_TEST_OPS test_fleet_graph_execution_meta_optimizer) list(APPEND MIXED_DIST_TEST_OPS test_fleet_pipeline_meta_optimizer) @@ -382,6 +384,8 @@ if(WITH_DISTRIBUTE) py_test_modules(test_collective_optimizer MODULES test_collective_optimizer) if(NOT APPLE) py_test_modules(test_fleet_base MODULES test_fleet_base ENVS ${dist_ENVS}) + py_test_modules(test_fleet_base_2 MODULES test_fleet_base_2 ENVS ${dist_ENVS}) + py_test_modules(test_fleet_base_3 MODULES test_fleet_base_3 ENVS ${dist_ENVS}) py_test_modules(test_fleet_recompute_meta_optimizer MODULES test_fleet_recompute_meta_optimizer ENVS ${dist_ENVS}) py_test_modules(test_fleet_graph_execution_meta_optimizer MODULES test_fleet_graph_execution_meta_optimizer ENVS ${dist_ENVS}) py_test_modules(test_fleet_graph_executor MODULES test_fleet_graph_executor ENVS ${dist_ENVS}) diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py index 2f3d3ced6f84e..73b546b95cfeb 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py @@ -162,24 +162,17 @@ def do_pyreader_training(self, fleet): exe = fluid.Executor(fluid.CPUPlace()) fleet.init_worker() - exe.run(fleet.startup_program) - + exe.run(fluid.default_startup_program()) batch_size = 4 train_reader = paddle.batch(fake_ctr_reader(), batch_size=batch_size) self.reader.decorate_sample_list_generator(train_reader) - compiled_prog = fluid.compiler.CompiledProgram( - fleet.main_program).with_data_parallel( - loss_name=self.avg_cost.name, - build_strategy=self.strategy.get_build_strategy(), - exec_strategy=self.strategy.get_execute_strategy()) - for epoch_id in range(1): self.reader.start() try: pass_start = time.time() while True: - loss_val = exe.run(program=compiled_prog, + loss_val = exe.run(program=fluid.default_main_program(), fetch_list=[self.avg_cost.name]) loss_val = np.mean(loss_val) # TODO(randomly fail) @@ -209,7 +202,7 @@ def do_dataset_training(self, fleet): exe = fluid.Executor(fluid.CPUPlace()) fleet.init_worker() - exe.run(fleet.startup_program) + exe.run(fluid.default_startup_program()) thread_num = 2 batch_size = 128 @@ -231,7 +224,7 @@ def do_dataset_training(self, fleet): pass_start = time.time() dataset.set_filelist(filelist) exe.train_from_dataset( - program=fleet.main_program, + program=fluid.default_main_program(), dataset=dataset, fetch_list=[self.avg_cost], fetch_info=["cost"], diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py index c69e1247a9bb8..77697896b4d55 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_sparse_embedding_ctr.py @@ -152,24 +152,18 @@ def do_pyreader_training(self, fleet): exe = fluid.Executor(fluid.CPUPlace()) fleet.init_worker() - exe.run(fleet.startup_program) + exe.run(fluid.default_startup_program()) batch_size = 4 train_reader = paddle.batch(fake_ctr_reader(), batch_size=batch_size) self.reader.decorate_sample_list_generator(train_reader) - compiled_prog = fluid.compiler.CompiledProgram( - fleet.main_program).with_data_parallel( - loss_name=self.avg_cost.name, - build_strategy=self.strategy.get_build_strategy(), - exec_strategy=self.strategy.get_execute_strategy()) - for epoch_id in range(1): self.reader.start() try: while True: - loss_val = exe.run(program=compiled_prog, + 
loss_val = exe.run(program=fluid.default_main_program(), fetch_list=[self.avg_cost.name]) loss_val = np.mean(loss_val) print("TRAIN ---> pass: {} loss: {}\n".format(epoch_id, diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py index 8c700ff3217b5..620b5e16a1dd0 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py @@ -31,10 +31,11 @@ import tempfile import unittest +import paddle import paddle.fluid as fluid import paddle.distributed.fleet.base.role_maker as role_maker from paddle.distributed.fleet.base.util_factory import fleet_util -from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet +from paddle.distributed.fleet import fleet from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory __all__ = ['FleetDistRunnerBase', 'TestFleetBase', 'runtime_main'] @@ -75,21 +76,23 @@ def build_role(self, args): return role def build_strategy(self, args): - self.strategy = None + self.strategy = paddle.distributed.fleet.DistributedStrategy() + self.strategy.a_sync = False if args.mode == "async": - self.strategy = StrategyFactory.create_async_strategy() - elif args.mode == "sync": - self.strategy = StrategyFactory.create_sync_strategy() - elif args.mode == "half_async": - self.strategy = StrategyFactory.create_half_async_strategy() + self.strategy = paddle.distributed.fleet.DistributedStrategy() + self.strategy.a_sync = True elif args.mode == "geo": - self.strategy = StrategyFactory.create_geo_strategy( - args.geo_sgd_need_push_nums) + self.strategy = paddle.distributed.fleet.DistributedStrategy() + self.strategy.a_sync = True + self.strategy.a_sync_configs = { + "k_steps": args.geo_sgd_need_push_nums + } self.dump_param = os.getenv("dump_param", "").split(",") self.dump_fields = os.getenv("dump_fields", "").split(",") self.dump_fields_path = os.getenv("dump_fields_path", "") debug = int(os.getenv("Debug", "0")) - if debug: + # TODO(update strategy to support dump params) + if False: #debug: self.strategy.set_debug_opt({ "dump_param": self.dump_param, "dump_fields": self.dump_fields, @@ -122,7 +125,7 @@ def build_optimizer(self, avg_cost, strategy): staircase=True)) else: optimizer = fluid.optimizer.SGD(LEARNING_RATE) - optimizer = fleet.distributed_optimizer(optimizer, strategy) + optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) def run_pserver(self, args): diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py index e16e91192e1fe..b506f17914341 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ctr.py @@ -22,7 +22,7 @@ class TestDistMnistSync2x2(TestFleetBase): def _setup_config(self): - self._mode = "async" + self._mode = "sync" self._reader = "pyreader" def check_with_place(self, @@ -123,7 +123,7 @@ def test_dist_train(self): class TestDistCtrHalfAsync2x2(TestFleetBase): def _setup_config(self): - self._mode = "half_async" + self._mode = "async" self._reader = "pyreader" def check_with_place(self, diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base.py b/python/paddle/fluid/tests/unittests/test_fleet_base.py index ca657a5a619b6..9e651dea24ba7 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base.py +++ 
b/python/paddle/fluid/tests/unittests/test_fleet_base.py @@ -17,6 +17,7 @@ import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker import os +import paddle.fluid as fluid class TestFleetBase(unittest.TestCase): @@ -119,24 +120,9 @@ def test_distributed_optimizer(self): optimizer = paddle.optimizer.SGD(learning_rate=0.001) optimizer = fleet.distributed_optimizer(optimizer) - def test_minimize(self): - input_x = paddle.fluid.layers.data( - name="x", shape=[32], dtype='float32') - input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') - - fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') - fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') - prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y) - avg_cost = paddle.fluid.layers.mean(x=cost) - - role = role_maker.PaddleCloudRoleMaker(is_collective=True) - fleet.init(role) - strategy = fleet.DistributedStrategy() - optimizer = paddle.optimizer.SGD(learning_rate=0.001) - optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) - optimizer.minimize(avg_cost) + def test_exception(self): + import paddle.distributed.fleet as fleet + self.assertRaises(Exception, fleet.init_worker) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_2.py b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py new file mode 100644 index 0000000000000..fba7cdbc8ca0f --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py @@ -0,0 +1,102 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
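+
+# This test covers the parameter-server save paths: under a PSERVER training
+# role it builds a small fully-connected network, minimizes with
+# fleet.distributed_optimizer, and then asserts that fleet.save_inference_model
+# and fleet.save_persistables reject a ParallelExecutor, a bare string in place
+# of an Executor, and a CompiledProgram passed as main_program.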
+ +import unittest +import paddle +import os +import paddle.fluid as fluid + + +class TestFleetBase(unittest.TestCase): + def setUp(self): + os.environ["POD_IP"] = "127.0.0.1" + os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" + os.environ["PADDLE_TRAINERS_NUM"] = "2" + os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ + "127.0.0.1:36001,127.0.0.2:36001" + + def test_ps_minimize(self): + import paddle + import paddle.distributed.fleet as fleet + import paddle.fluid.incubate.fleet.base.role_maker as role_maker + + os.environ["TRAINING_ROLE"] = "PSERVER" + os.environ["POD_IP"] = "127.0.0.1" + os.environ["PADDLE_PORT"] = "36001" + + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32') + input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + + fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') + fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') + prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y) + avg_cost = paddle.fluid.layers.mean(x=cost) + + role = role_maker.PaddleCloudRoleMaker(is_collective=False) + fleet.init(role) + strategy = paddle.distributed.fleet.DistributedStrategy() + strategy.a_sync = False + optimizer = paddle.optimizer.SGD(learning_rate=0.001) + optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) + optimizer.minimize(avg_cost) + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + pe = fluid.ParallelExecutor(use_cuda=False, loss_name=avg_cost.name) + compiled_prog = fluid.compiler.CompiledProgram( + fluid.default_main_program()) + self.assertRaises( + Exception, + fleet.save_inference_model, + dirname='/tmp/', + feeded_var_names=['x', 'y'], + target_vars=[avg_cost], + executor=pe) + + self.assertRaises( + Exception, + fleet.save_inference_model, + dirname='/tmp/', + feeded_var_names=['x', 'y'], + target_vars=[avg_cost], + executor="exe") + + self.assertRaises( + Exception, + fleet.save_inference_model, + dirname='/tmp/', + feeded_var_names=['x', 'y'], + target_vars=[avg_cost], + executor=exe, + main_program=compiled_prog) + + self.assertRaises( + Exception, fleet.save_persistables, executor=pe, dirname='/tmp/') + + self.assertRaises( + Exception, fleet.save_persistables, executor="exe", dirname='/tmp/') + + self.assertRaises( + Exception, + fleet.save_persistables, + executor=exe, + dirname='/tmp/', + main_program=compiled_prog) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_3.py b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py new file mode 100644 index 0000000000000..f5e888ab0eb3c --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py @@ -0,0 +1,52 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
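+
+# This test covers collective-mode minimize: it initializes fleet with
+# PaddleCloudRoleMaker(is_collective=True) and checks that
+# fleet.distributed_optimizer(optimizer, strategy=strategy).minimize(avg_cost)
+# runs on a small fully-connected network.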
+ +import unittest +import os +import paddle +import paddle.distributed.fleet as fleet +import paddle.distributed.fleet.base.role_maker as role_maker +import paddle.fluid as fluid + + +class TestFleetBase(unittest.TestCase): + def setUp(self): + os.environ["POD_IP"] = "127.0.0.1" + os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" + os.environ["PADDLE_TRAINERS_NUM"] = "2" + os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = \ + "127.0.0.1:36001,127.0.0.2:36001" + + def test_collective_minimize(self): + input_x = paddle.fluid.layers.data( + name="x", shape=[32], dtype='float32') + input_y = paddle.fluid.layers.data(name="y", shape=[1], dtype='int64') + + fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') + fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') + prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') + cost = paddle.fluid.layers.cross_entropy( + input=prediction, label=input_y) + avg_cost = paddle.fluid.layers.mean(x=cost) + + role = role_maker.PaddleCloudRoleMaker(is_collective=True) + fleet.init(role) + strategy = fleet.DistributedStrategy() + optimizer = paddle.optimizer.SGD(learning_rate=0.001) + optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) + optimizer.minimize(avg_cost) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fleet_runtime.py b/python/paddle/fluid/tests/unittests/test_fleet_runtime.py index 3fd646f4340dc..80109716a54e5 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_runtime.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_runtime.py @@ -25,6 +25,8 @@ def test_fleet_runtime_base(self): base._init_server() base._run_server() base._stop_worker() + base._save_inference_model() + base._save_persistables() def test_fleet_collective_runtime(self): import paddle.distributed.fleet.runtime @@ -35,6 +37,27 @@ def test_fleet_collective_runtime(self): collective_runtime._init_worker() collective_runtime._run_server() collective_runtime._stop_worker() + collective_runtime._save_inference_model() + collective_runtime._save_persistables() + + def test_fleet_ps_runtime(self): + ps_runtime = paddle.distributed.fleet.runtime.ParameterServerRuntime() + self.assertRaises(Exception, ps_runtime._get_optimizer_status, + "test_op", None) + reshaped_names, origin_names = ps_runtime._get_optimizer_status("adam", + "param") + self.assertTrue( + len(reshaped_names) == 2 and + reshaped_names[0] == 'param_moment1_0' and + reshaped_names[1] == 'param_moment2_0') + self.assertTrue( + len(origin_names) == 2 and + origin_names[0] == 'param_beta1_pow_acc_0' and + origin_names[1] == 'param_beta2_pow_acc_0') + + reshaped_names, origin_names = ps_runtime._get_optimizer_status("sgd", + "param") + self.assertTrue(len(reshaped_names) == 0 and len(origin_names) == 0) if __name__ == "__main__": From f00f982a026e323eecbdbd7028907355539ae94f Mon Sep 17 00:00:00 2001 From: Zhaolong Xing Date: Thu, 20 Aug 2020 14:42:47 +0800 Subject: [PATCH 37/37] add cub impl for arg max, min (#25941) test=develop --- paddle/fluid/operators/arg_max_op.cu | 51 +++--- .../fluid/operators/arg_min_max_op_base.cu.h | 163 ++++++++++++++++++ paddle/fluid/operators/arg_min_op.cu | 50 +++--- 3 files changed, 206 insertions(+), 58 deletions(-) create mode 100644 paddle/fluid/operators/arg_min_max_op_base.cu.h diff --git a/paddle/fluid/operators/arg_max_op.cu b/paddle/fluid/operators/arg_max_op.cu index 85e4f98173511..14708c4df10f5 100644 --- a/paddle/fluid/operators/arg_max_op.cu +++ 
b/paddle/fluid/operators/arg_max_op.cu @@ -1,29 +1,22 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/arg_min_max_op_base.h" - -REGISTER_OP_CUDA_KERNEL( - arg_max, - paddle::operators::ArgMaxKernel, - paddle::operators::ArgMaxKernel, - paddle::operators::ArgMaxKernel, - paddle::operators::ArgMaxKernel, - paddle::operators::ArgMaxKernel, - paddle::operators::ArgMaxKernel); +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/arg_min_max_op_base.cu.h" + +REGISTER_OP_CUDA_KERNEL( + arg_max, paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel); diff --git a/paddle/fluid/operators/arg_min_max_op_base.cu.h b/paddle/fluid/operators/arg_min_max_op_base.cu.h new file mode 100644 index 0000000000000..e9b65946a5b1e --- /dev/null +++ b/paddle/fluid/operators/arg_min_max_op_base.cu.h @@ -0,0 +1,163 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#ifdef __NVCC__ + +#include +#include +#include +#include +#include +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/transpose_op.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace operators { + +namespace { // NOLINT +template +using KeyValuePair = cub::KeyValuePair; +using Tensor = framework::Tensor; + +} // end namespace + +#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \ + case (1 << (log2_block_dim)): { \ + constexpr auto kBlockDim = (1 << (log2_block_dim)); \ + __VA_ARGS__; \ + } break + +#define FIXED_BLOCK_DIM_CASE(...) 
\ + FIXED_BLOCK_DIM_CASE_BASE(10, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); + +template +__global__ void ArgCUDAKernel(const IndType height, // n * h + const IndType width, // c + const IndType post_size, // h + const Reducer reducer, const T init, const T* in, + IndType* out) { + typedef cub::BlockReduce, BlockDim> BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + + for (int idx = blockIdx.x; idx < height; idx += gridDim.x) { + KeyValuePair kv_pair = {-1, init}; + int h = idx / post_size; + int w = idx % post_size; + for (int k = threadIdx.x; k < width; k += blockDim.x) { + kv_pair = + reducer({k, in[h * width * post_size + k * post_size + w]}, kv_pair); + } + kv_pair = BlockReduce(temp_storage).Reduce(kv_pair, reducer); + if (threadIdx.x == 0) { + out[idx] = static_cast(kv_pair.key); + } + __syncthreads(); + } +} + +template +void ComputeFullArg(const platform::CUDADeviceContext& ctx, const Tensor& input, + Tensor* indices, const IndType pre, const IndType post, + const IndType n) { + auto cu_stream = ctx.stream(); + auto ComputeBlockSize = [](IndType col) { + if (col > 512) + return 1024; + else if (col > 256) + return 512; + else if (col > 128) + return 256; + else if (col > 64) + return 128; + else if (col > 32) + return 64; + else if (col > 16) + return 32; + else if (col > 8) + return 16; + else + return 8; + }; + + int max_grid_dimx = ctx.GetCUDAMaxGridDimSize().x; + int height = pre * post; + int width = n; + int grid_size = height < max_grid_dimx ? height : max_grid_dimx; + + const T* in_data = input.data(); + IndType* out_data = indices->mutable_data(ctx.GetPlace()); + + if (typeid(Reducer) == typeid(cub::ArgMax)) { + switch (ComputeBlockSize(width)) { + FIXED_BLOCK_DIM_CASE( + ArgCUDAKernel<<>>( + height, width, post, Reducer(), std::numeric_limits::lowest(), + in_data, out_data)); + } + } else { + switch (ComputeBlockSize(width)) { + FIXED_BLOCK_DIM_CASE( + ArgCUDAKernel<<>>( + height, width, post, Reducer(), std::numeric_limits::max(), + in_data, out_data)); + } + } +} + +template +class ArgMinMaxOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("X"); + auto* output = ctx.Output("Out"); + int axis = ctx.Attr("axis"); + auto in_dims = input->dims(); + axis = (axis < 0) ? (in_dims.size() + axis) : axis; + + int64_t numel = input->numel(); + int64_t groups = numel / in_dims[axis]; + int64_t pre = 1; + int64_t post = 1; + int64_t n = in_dims[axis]; + + for (int i = 0; i < axis; i++) { + pre *= in_dims[i]; + } + + for (int i = axis + 1; i < in_dims.size(); i++) { + post *= in_dims[i]; + } + + const auto& dev_ctx = ctx.cuda_device_context(); + ComputeFullArg(dev_ctx, *input, output, pre, post, n); + } +}; + +#endif + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/arg_min_op.cu b/paddle/fluid/operators/arg_min_op.cu index 47d7c8b12243c..23170bf008790 100644 --- a/paddle/fluid/operators/arg_min_op.cu +++ b/paddle/fluid/operators/arg_min_op.cu @@ -1,29 +1,21 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/arg_min_max_op_base.h" - -REGISTER_OP_CUDA_KERNEL( - arg_min, - paddle::operators::ArgMinKernel, - paddle::operators::ArgMinKernel, - paddle::operators::ArgMinKernel, - paddle::operators::ArgMinKernel, - paddle::operators::ArgMinKernel, - paddle::operators::ArgMinKernel); +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/arg_min_max_op_base.cu.h" +REGISTER_OP_CUDA_KERNEL( + arg_min, paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel, + paddle::operators::ArgMinMaxOpCUDAKernel);
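
As a quick end-to-end check of the cub-based kernels added in this last patch, a small Python program that feeds the arg_max op is enough. The snippet below is an illustrative sketch rather than part of the patch; the tensor shape, variable name, and the CPUPlace fallback for non-CUDA builds are assumptions.

    import numpy as np
    import paddle.fluid as fluid

    # fluid.layers.argmax lowers to the arg_max operator, whose CUDA kernel is
    # now the cub-based ArgMinMaxOpCUDAKernel introduced above.
    x = fluid.data(name="x", shape=[3, 4], dtype="float32")
    idx = fluid.layers.argmax(x=x, axis=1)

    place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    out, = exe.run(feed={"x": np.random.rand(3, 4).astype("float32")},
                   fetch_list=[idx])
    print(out)  # one argmax index per row of x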