Skip to content

Commit

Permalink
polish some details
Browse files Browse the repository at this point in the history
  • Loading branch information
chenwhql committed Oct 27, 2021
1 parent be9df70 commit a83e9c7
Show file tree
Hide file tree
Showing 12 changed files with 58 additions and 69 deletions.
12 changes: 6 additions & 6 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@ limitations under the License. */
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/unused_var_check.h"
Expand Down Expand Up @@ -1278,7 +1277,7 @@ void OperatorWithKernel::ChoosePtenKernel(const ExecutionContext& ctx) const {
kernel_type_.reset(
new OpKernelType(std::move(InnerGetExpectedKernelType(ctx))));

auto pt_kernel_name = pten::KernelName(pt_kernel_signature_->first);
auto pt_kernel_name = pten::KernelName(pt_kernel_signature_->name);
auto pt_kernel_key = TransOpKernelTypeToPtenKernelKey(*kernel_type_.get());
pt_kernel_.reset(
new pten::Kernel(pten::KernelFactory::Instance().SelectKernel(
Expand Down Expand Up @@ -1764,6 +1763,7 @@ OpKernelType OperatorWithKernel::GetKernelTypeForVar(
KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs(
const ExecutionContext& ctx) const {
if (!KernelSignatureMap::Instance().Has(Type())) {
// TODO(chenweihang): we can generate this map by proto info in compile time
KernelArgsNameMakerByOpProto maker(Info().proto_);
KernelSignatureMap::Instance().Emplace(
Type(), std::move(maker.GetKernelSignature()));
Expand All @@ -1782,9 +1782,9 @@ pten::KernelContext OperatorWithKernel::BuildPtenKernelContext(
// 5. kernel input is not DenseTensor
pten::KernelContext op_kernel_ctx(dev_ctx);

auto& input_names = std::get<0>(pt_kernel_signature_->second);
auto& attr_names = std::get<1>(pt_kernel_signature_->second);
auto& output_names = std::get<2>(pt_kernel_signature_->second);
auto& input_names = std::get<0>(pt_kernel_signature_->args);
auto& attr_names = std::get<1>(pt_kernel_signature_->args);
auto& output_names = std::get<2>(pt_kernel_signature_->args);

auto input_defs = pt_kernel_->args_def().input_defs();
auto attr_defs = pt_kernel_->args_def().attribute_defs();
Expand Down Expand Up @@ -1843,7 +1843,7 @@ pten::KernelContext OperatorWithKernel::BuildPtenKernelContext(
// attribute type by attr_defs
if (std::type_index(attr.type()) == std::type_index(typeid(float))) {
op_kernel_ctx.EmplaceBackAttr(
pten::Scalar(BOOST_GET_CONST(float, attr)));
std::move(pten::Scalar(BOOST_GET_CONST(float, attr))));
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"unsupported cast op attribute `%s` to Scalar when construct "
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/framework/operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
Expand Down
15 changes: 6 additions & 9 deletions paddle/fluid/framework/pten_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -119,20 +119,17 @@ KernelArgsNameMakerByOpProto::GetAttrsArgsNames() {
}

KernelSignature KernelArgsNameMakerByOpProto::GetKernelSignature() {
return std::make_pair(
op_proto_->type(),
std::make_tuple(GetInputArgsNames(), GetAttrsArgsNames(),
GetOutputArgsNames()));
return KernelSignature(op_proto_->type(), GetInputArgsNames(),
GetAttrsArgsNames(), GetOutputArgsNames());
}

std::string KernelSignatureToString(const KernelSignature& signature) {
std::stringstream os;
os << "Kernel Signature - name: " << signature.first << "; inputs: "
<< string::join_strings(std::get<0>(signature.second), ", ")
os << "Kernel Signature - name: " << signature.name
<< "; inputs: " << string::join_strings(std::get<0>(signature.args), ", ")
<< "; attributes: "
<< string::join_strings(std::get<1>(signature.second), ", ")
<< "; outputs: "
<< string::join_strings(std::get<2>(signature.second), ", ");
<< string::join_strings(std::get<1>(signature.args), ", ") << "; outputs: "
<< string::join_strings(std::get<2>(signature.args), ", ");
return os.str();
}

Expand Down
33 changes: 18 additions & 15 deletions paddle/fluid/framework/pten_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,24 @@ pten::KernelKey TransOpKernelTypeToPtenKernelKey(

/* Kernel Args parse */

// The signature of a pten kernel: the kernel name plus the ordered
// input / attribute / output argument-name lists (stored as a tuple;
// see KernelArgsTuple in type_defs.h).
struct KernelSignature {
  std::string name;
  KernelArgsTuple args;

  KernelSignature() = default;
  // Rvalue overload: takes ownership of the name and the argument vectors.
  KernelSignature(std::string&& kernel_name,
                  paddle::SmallVector<std::string>&& inputs,
                  paddle::SmallVector<std::string>&& attrs,
                  paddle::SmallVector<std::string>&& outputs)
      : name(std::move(kernel_name)),
        // A named rvalue reference is an lvalue, so each vector must be
        // std::move'd explicitly; otherwise make_tuple copies all three
        // and this overload is no cheaper than the const& one.
        args(std::make_tuple(std::move(inputs), std::move(attrs),
                             std::move(outputs))) {}
  // Copying overload for callers that keep their own argument lists.
  KernelSignature(const std::string& kernel_name,
                  const paddle::SmallVector<std::string>& inputs,
                  const paddle::SmallVector<std::string>& attrs,
                  const paddle::SmallVector<std::string>& outputs)
      : name(kernel_name), args(std::make_tuple(inputs, attrs, outputs)) {}
};

// TODO(chenweihang): we can generate this map by proto info in compile time
class KernelSignatureMap {
public:
Expand All @@ -53,27 +71,12 @@ class KernelSignatureMap {
return map_.find(op_type) != map_.end();
}

void Insert(const std::string& op_type, const KernelSignature& signature) {
if (!Has(op_type)) {
map_.insert({op_type, signature});
}
}

void Emplace(const std::string& op_type, KernelSignature&& signature) {
if (!Has(op_type)) {
map_.emplace(op_type, signature);
}
}

// Looks up the registered KernelSignature for `op_type`.
// Returns a pointer to the stored signature, or nullptr when the op type
// has no signature recorded — callers must handle the null case (the
// throwing variant is Get()). The pointer stays valid while the entry
// remains in map_.
const KernelSignature* GetNullable(const std::string& op_type) const {
auto it = map_.find(op_type);
if (it == map_.end()) {
return nullptr;
} else {
return &it->second;
}
}

const KernelSignature& Get(const std::string& op_type) const {
auto it = map_.find(op_type);
PADDLE_ENFORCE_NE(
Expand Down
3 changes: 0 additions & 3 deletions paddle/fluid/framework/type_defs.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,9 +88,6 @@ using InferInplaceOpFN = std::function<InplacePair(bool /*use_cuda*/)>;
using KernelArgsTuple = std::tuple<paddle::SmallVector<std::string>,
paddle::SmallVector<std::string>,
paddle::SmallVector<std::string>>;
// TODO(yuanrisheng): impl implicit overload signature, use KernelArgsTuple
// directly
using KernelSignature = std::pair<std::string, KernelArgsTuple>;

} // namespace framework
} // namespace paddle
11 changes: 5 additions & 6 deletions paddle/fluid/imperative/prepared_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@

#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/utils/small_vector.h"
Expand Down Expand Up @@ -160,7 +159,7 @@ PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,

VLOG(1) << framework::KernelSignatureToString(pt_kernel_signature);

auto pt_kernel_name = pten::KernelName(pt_kernel_signature.first);
auto pt_kernel_name = pten::KernelName(pt_kernel_signature.name);
auto pt_kernel_key = TransOpKernelTypeToPtenKernelKey(expected_kernel_key);
auto pt_kernel = pten::KernelFactory::Instance().SelectKernel(
pt_kernel_name, pt_kernel_key);
Expand Down Expand Up @@ -261,9 +260,9 @@ static pten::KernelContext BuildDygraphPtenKernelContext(
// 5. kernel input is not DenseTensor
pten::KernelContext op_kernel_ctx(dev_ctx);

auto& input_names = std::get<0>(pt_kernel_signature.second);
auto& attr_names = std::get<1>(pt_kernel_signature.second);
auto& output_names = std::get<2>(pt_kernel_signature.second);
auto& input_names = std::get<0>(pt_kernel_signature.args);
auto& attr_names = std::get<1>(pt_kernel_signature.args);
auto& output_names = std::get<2>(pt_kernel_signature.args);

auto& input_defs = pt_kernel.args_def().input_defs();
auto& output_defs = pt_kernel.args_def().output_defs();
Expand Down Expand Up @@ -321,7 +320,7 @@ static pten::KernelContext BuildDygraphPtenKernelContext(
// attribute type by attr_defs
if (std::type_index(attr.type()) == std::type_index(typeid(float))) {
op_kernel_ctx.EmplaceBackAttr(
pten::Scalar(BOOST_GET_CONST(float, attr)));
std::move(pten::Scalar(BOOST_GET_CONST(float, attr))));
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"unsupported cast op attribute `%s` to Scalar when construct "
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/imperative/prepared_operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/imperative/execution_context.h"
#include "paddle/fluid/imperative/layer.h"
Expand Down
7 changes: 2 additions & 5 deletions paddle/fluid/operators/fill_any_like_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -50,11 +50,8 @@ class FillAnyLikeOp : public framework::OperatorWithKernel {

framework::KernelSignature GetExpectedPtenKernelArgs(
const framework::ExecutionContext &ctx) const override {
return std::make_pair(
"fill_any_like",
std::make_tuple(paddle::SmallVector<std::string>({"X"}),
paddle::SmallVector<std::string>({"value"}),
paddle::SmallVector<std::string>({"Out"})));
return framework::KernelSignature("fill_any_like", {"X"}, {"value"},
{"Out"});
}
};

Expand Down
1 change: 0 additions & 1 deletion paddle/fluid/operators/mean_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,6 @@ class MeanKernel : public framework::OpKernel<T> {
auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);

// call new kernel
VLOG(1) << "chenweihang: call original mean kernel compute.";
pten::Mean<T>(dev_ctx, *pt_x.get(), pt_out.get());
}
};
Expand Down
15 changes: 4 additions & 11 deletions paddle/fluid/operators/scale_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -74,18 +74,11 @@ class ScaleOp : public framework::OperatorWithKernel {
framework::KernelSignature GetExpectedPtenKernelArgs(
const framework::ExecutionContext &ctx) const override {
if (ctx.HasInput("ScaleTensor")) {
return std::make_pair(
"scale.host",
std::make_tuple(
paddle::SmallVector<std::string>({"X", "ScaleTensor"}),
paddle::SmallVector<std::string>({"bias", "bias_after_scale"}),
paddle::SmallVector<std::string>({"Out"})));
return framework::KernelSignature("scale.host", {"X", "ScaleTensor"},
{"bias", "bias_after_scale"}, {"Out"});
} else {
return std::make_pair(
"scale", std::make_tuple(paddle::SmallVector<std::string>({"X"}),
paddle::SmallVector<std::string>(
{"scale", "bias", "bias_after_scale"}),
paddle::SmallVector<std::string>({"Out"})));
return framework::KernelSignature(
"scale", {"X"}, {"scale", "bias", "bias_after_scale"}, {"Out"});
}
}
};
Expand Down
26 changes: 14 additions & 12 deletions paddle/pten/core/kernel_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,14 +52,14 @@ class KernelContext {
}

void EmplaceBackInput(std::shared_ptr<TensorBase> input) {
inputs_.emplace_back(input);
inputs_.emplace_back(std::move(input));
// Record the start and end index of the input
int index = inputs_.size();
input_range_.emplace_back(std::pair<int, int>(index, index + 1));
}

void EmplaceBackInputs(
const paddle::SmallVector<std::shared_ptr<TensorBase>>& inputs) {
paddle::SmallVector<std::shared_ptr<TensorBase>> inputs) {
for (auto in : inputs) {
inputs_.emplace_back(in);
}
Expand All @@ -70,14 +70,14 @@ class KernelContext {
}

void EmplaceBackOutput(std::shared_ptr<TensorBase> output) {
outputs_.emplace_back(output);
outputs_.emplace_back(std::move(output));
// Record the start and end index of the output
int index = outputs_.size();
output_range_.emplace_back(std::pair<int, int>(index, index + 1));
}

void EmplaceBackOutputs(
const paddle::SmallVector<std::shared_ptr<TensorBase>>& outputs) {
paddle::SmallVector<std::shared_ptr<TensorBase>> outputs) {
for (auto out : outputs) {
outputs_.emplace_back(out);
}
Expand All @@ -87,7 +87,9 @@ class KernelContext {
std::pair<int, int>(index, index + outputs.size()));
}

void EmplaceBackAttr(paddle::any attr) { attrs_.emplace_back(attr); }
void EmplaceBackAttr(paddle::any attr) {
attrs_.emplace_back(std::move(attr));
}

template <typename TensorType>
const TensorType& InputAt(size_t idx) const {
Expand Down Expand Up @@ -118,18 +120,18 @@ class KernelContext {

// TODO(chenweihang): Tensor -> Tensor*, Tensor should be managed by `scope`
// Note: can't use API Tensor here, the inference don't use this API Tensor
paddle::SmallVector<std::shared_ptr<TensorBase>> inputs_{};
paddle::SmallVector<std::shared_ptr<TensorBase>> outputs_{};
paddle::SmallVector<paddle::any> attrs_{};
paddle::SmallVector<std::shared_ptr<TensorBase>> inputs_;
paddle::SmallVector<std::shared_ptr<TensorBase>> outputs_;
paddle::SmallVector<paddle::any> attrs_;

// Only contains input like list[Tensor] need `range`
paddle::SmallVector<std::pair<int, int>> input_range_{{}};
paddle::SmallVector<std::pair<int, int>> output_range_{{}};
paddle::SmallVector<std::pair<int, int>> input_range_;
paddle::SmallVector<std::pair<int, int>> output_range_;

// Only static graph need `name`
// TODO(chenweihang): replaced by paddle::string_view
paddle::SmallVector<std::string> input_names_{{}};
paddle::SmallVector<std::string> output_names_{{}};
paddle::SmallVector<std::string> input_names_;
paddle::SmallVector<std::string> output_names_;
};

} // namespace pten
2 changes: 1 addition & 1 deletion paddle/pten/kernels/functions/eigen/common.h
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Expand Down

1 comment on commit a83e9c7

@paddle-bot-old
Copy link

@paddle-bot-old paddle-bot-old bot commented on a83e9c7 Oct 27, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🕵️ CI failures summary

🔍 PR: #34425 Commit ID: a83e9c7 contains failed CI.

🔹 Failed: PR-CI-APPROVAL

approve_failed
2021-10-27 18:03:27 正在保存至: “bk.txt”
2021-10-27 18:03:27 0K 100% 2.55M=0s
2021-10-27 18:03:27 2021-10-27 18:03:26 (2.55 MB/s) - 已保存 “bk.txt” [5/5])
2021-10-27 18:03:34 ****************
2021-10-27 18:03:34 0. You must have one RD (lanxianghit (Recommend), phlrain or luotao1) approval for changing the FLAGS, which manages the environment variables.
2021-10-27 18:03:34 1. You must have Dianhai approval for change 20+ files or add than 1000+ lines of content.
2021-10-27 18:03:34 2. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for paddle/fluid/framework/operator.h, which manages the underlying code for fluid.
2021-10-27 18:03:34 3. You must have one RD (zhiqiu (Recommend) , phlrain) approval for the changes of paddle/fluid/pybind/op_function_generator.cc, which manages the logic of automatic generating op functions for dygraph.
2021-10-27 18:03:34 4. You must have one RD (XiaoguangHu01,chenwhql,zhiqiu,Xreki,luotao1) approval for the usage of const_cast.
2021-10-27 18:03:34 5. You must have one RD (Avin0323(Recommend) or zhouwei25 or wanghuancoder or luotao1) approval for modifying unity_build_rule.cmake which the rules of Unity Build.
2021-10-27 18:03:34 There are 6 approved errors.
2021-10-27 18:03:34 ****************
2021-10-27 18:03:34 + EXCODE=6
2021-10-27 18:03:34 + echo 'EXCODE: 6'
2021-10-27 18:03:34 EXCODE: 6
2021-10-27 18:03:34 + echo 'ipipe_log_param_EXCODE: 6'
2021-10-27 18:03:34 ipipe_log_param_EXCODE: 6
2021-10-27 18:03:34 + exit 6

Please sign in to comment.