optimize op convert support (PaddlePaddle#31)
* add ConvertDataType for popart_canonicalization

* update ConstantOfShape

* add guard for RegisterHandler

* fix conv2d pads attr

* fix ReduceMean when reduce_all=True
gglin001 authored Aug 6, 2021
1 parent 056be56 commit 3af0a36
Showing 6 changed files with 102 additions and 44 deletions.
6 changes: 5 additions & 1 deletion .gitignore
@@ -33,10 +33,14 @@ cmake-build-*
paddle/fluid/operators/distributed/send_recv.proto
model_test

# clangd background index
.clangd/
.cache/

python/paddle/distributed/fleet/proto/
python/paddle/fluid/core*.so

scripts
sdk/
demos/
*.onnx
*.onnx
1 change: 0 additions & 1 deletion paddle/fluid/framework/ipu/CMakeLists.txt
@@ -1,7 +1,6 @@
set(POPART_CANONICALIZATION_HANDLERS_SRC
"popart_canonicalization/other_ops.cpp"
)
# TODO(alleng) build static library
cc_library(popart_canonicalization_utils SRCS popart_canonicalization_utils.cc
${POPART_CANONICALIZATION_HANDLERS_SRC} DEPS framework_proto enforce)

13 changes: 7 additions & 6 deletions paddle/fluid/framework/ipu/ipu_backend.cc
@@ -14,9 +14,6 @@ limitations under the License. */

#include "paddle/fluid/framework/ipu/ipu_backend.h"

#include <algorithm>
#include <vector>

#include <popart/builder.hpp>
#include <popart/dataflow.hpp>
#include <popart/devicemanager.hpp>
@@ -269,8 +266,9 @@ void IpuBackend::LowerBody(const ir::Graph* graph) {
auto outputs = op->Output("__outputs__");
auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
auto high = 1.0f;
auto low = 0.0f;
auto value = BOOST_GET_CONST(float, op->GetAttr("value"));
auto high = value;
auto low = value;
popart::TensorId result =
builder_->aiOnnxOpset11().randomuniform(shape, dtype, high, low);
tensors_.emplace(outputs[0], result);
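
Per the commit message, this hunk updates the ConstantOfShape lowering: collapsing RandomUniform's range to a single point (high == low == value) makes every sample equal to value, turning the op into a constant fill. A minimal sketch, reusing the builder and attribute names from the code above:

// Sketch: a constant fill via a degenerate RandomUniform. With
// high == low, the sampled interval is a single point, so the
// result is a tensor of `value` with the requested shape/dtype.
auto value = 0.5f;  // illustrative fill value
popart::TensorId filled =
    builder_->aiOnnxOpset11().randomuniform(shape, dtype,
                                            /*high=*/value, /*low=*/value);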
@@ -294,7 +292,10 @@ void IpuBackend::LowerBody(const ir::Graph* graph) {
} else if (op_type == "ReduceMean") {
auto inputs = GetOpInputs(op);
auto outputs = op->Output("__outputs__");
auto axes = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("axes"));
auto axes = nonstd::optional<std::vector<int64_t>>();
if (op->HasAttr("axes")) {
axes = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("axes"));
}
auto keepdims = BOOST_GET_CONST(int64_t, op->GetAttr("keepdims"));
popart::TensorId result =
builder_->aiOnnxOpset11().reducemean(inputs, axes, keepdims);
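
Leaving the nonstd::optional empty keeps the ONNX axes attribute unset, and ReduceMean then reduces over every dimension; this is what lets the reduce_all=True fix below work without knowing the tensor's rank. A minimal sketch of both call shapes, with inputs and builder_ as in the code above:

// Sketch: empty optional => ReduceMean over all dimensions;
// a populated optional reduces only the listed dimensions.
auto all_dims = nonstd::optional<std::vector<int64_t>>();
auto two_dims = nonstd::optional<std::vector<int64_t>>();
two_dims = std::vector<int64_t>{0, 2};  // illustrative axes
auto mean_all = builder_->aiOnnxOpset11().reducemean(inputs, all_dims, 1);
auto mean_02 = builder_->aiOnnxOpset11().reducemean(inputs, two_dims, 1);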
52 changes: 16 additions & 36 deletions paddle/fluid/framework/ipu/popart_canonicalization/other_ops.cpp
@@ -41,10 +41,12 @@ ir::Node *conv2d_handler(ir::Graph *graph, ir::Node *node) {
auto dilations = std::vector<int64_t>{dilations_.begin(), dilations_.end()};
auto group_ = BOOST_GET_CONST(int, op->GetAttr("groups"));
auto group = int64_t{group_};
// auto paddings_ = BOOST_GET_CONST(std::vector<int>,
// op->GetAttr("paddings")); auto pads =
// std::vector<int64_t>{paddings_.begin(), paddings_.end()};
auto pads = std::vector<int64_t>{1, 1, 1, 1};
auto pads_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("paddings"));
if (pads_.size() == 2) {
pads_.push_back(pads_[0]);
pads_.push_back(pads_[1]);
}
auto pads = std::vector<int64_t>{pads_.begin(), pads_.end()};
auto stride_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("strides"));
auto stride = std::vector<int64_t>{stride_.begin(), stride_.end()};
op_desc->SetAttr("dilations", dilations);
@@ -85,11 +87,7 @@ ir::Node *reduce_mean_handler(ir::Graph *graph, ir::Node *node) {
outputs.push_back(op->Output("Out").front());
op_desc->SetOutput("__outputs__", outputs);
auto reduce_all = BOOST_GET_CONST(bool, op->GetAttr("reduce_all"));
if (reduce_all) {
// TODO(alleng) get axes from input tensor shape/dim
auto axes = std::vector<int64_t>{0, 1, 2, 3};
op_desc->SetAttr("axes", axes);
} else {
if (!reduce_all) {
auto axes_ = BOOST_GET_CONST(std::vector<int>, op->GetAttr("dim"));
auto axes = std::vector<int64_t>{axes_.begin(), axes_.end()};
op_desc->SetAttr("axes", axes);
@@ -113,28 +111,9 @@ ir::Node *uniform_random_handler(ir::Graph *graph, ir::Node *node) {

auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
op_desc->SetAttr("shape", shape);
// auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
op_desc->SetAttr("dtype", 1);
// cvt dtype
/*
enum Type {
// Pod Types
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
// Tensor<size_t> is used in C++.
SIZE_T = 19;
UINT8 = 20;
INT8 = 21;
BF16 = 22;
COMPLEX64 = 23;
COMPLEX128 = 24;
...
*/
auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype"));
auto dtype = ConvertDataType(dtype_);
op_desc->SetAttr("dtype", dtype);
auto max = BOOST_GET_CONST(float, op->GetAttr("max"));
op_desc->SetAttr("high", max);
auto min = BOOST_GET_CONST(float, op->GetAttr("min"));
@@ -155,8 +134,9 @@ ir::Node *gaussian_random_handler(ir::Graph *graph, ir::Node *node) {

auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
op_desc->SetAttr("shape", shape);
// auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
op_desc->SetAttr("dtype", 1);
auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype"));
auto dtype = ConvertDataType(dtype_);
op_desc->SetAttr("dtype", dtype);

auto mean = BOOST_GET_CONST(float, op->GetAttr("mean"));
op_desc->SetAttr("mean", mean);
@@ -179,9 +159,9 @@ ir::Node *fill_constant_handler(ir::Graph *graph, ir::Node *node) {

auto shape = BOOST_GET_CONST(std::vector<int64_t>, op->GetAttr("shape"));
op_desc->SetAttr("shape", shape);
// auto dtype = BOOST_GET_CONST(int, op->GetAttr("dtype"));
op_desc->SetAttr("dtype", 1);

auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype"));
auto dtype = ConvertDataType(dtype_);
op_desc->SetAttr("dtype", dtype);
auto value = BOOST_GET_CONST(float, op->GetAttr("value"));
op_desc->SetAttr("value", value);

71 changes: 71 additions & 0 deletions paddle/fluid/framework/ipu/popart_canonicalization_utils.cc
@@ -17,13 +17,40 @@
namespace paddle {
namespace framework {

// onnx dtype
// https://github.com/onnx/onnx/blob/master/onnx/onnx-ml.proto3
enum ONNXDataType : int {
UNDEFINED = 0,
FLOAT = 1,
UINT8 = 2,
INT8 = 3,
UINT16 = 4,
INT16 = 5,
INT32 = 6,
INT64 = 7,
STRING = 8,
BOOL = 9,
FLOAT16 = 10,
DOUBLE = 11,
UINT32 = 12,
UINT64 = 13,
COMPLEX64 = 14,
COMPLEX128 = 15,
BFLOAT16 = 16
};

// This avoids the static initialisation order fiasco.
std::unordered_map<std::string, SymbolHandler> &SymbolHandlers() {
static std::unordered_map<std::string, SymbolHandler> symbol_handlers;
return symbol_handlers;
}

bool RegisterHandler(const std::string &symbol, const SymbolHandler &handler) {
if (SymbolHandlers().count(symbol) != 0) {
LOG(WARNING) << "Trying to register popart handler twice for operator: "
<< symbol;
return false;
}
bool new_handler = SymbolHandlers().emplace(symbol, handler).second;
return new_handler;
}
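
The new guard makes duplicate registration a soft failure: the second attempt logs a warning and returns false instead of silently replacing the first handler. A hedged usage sketch, using conv2d_handler from other_ops.cpp:

// Sketch: the first registration wins; the duplicate is rejected.
bool first = RegisterHandler("conv2d", conv2d_handler);  // true
bool again = RegisterHandler("conv2d", conv2d_handler);  // false, plus a
                                                         // WARNING log naming
                                                         // the operator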
@@ -39,6 +66,9 @@ SymbolHandler GetHandler(const std::string &kind) {
}

void MoveNodeInputs(ir::Node *node, ir::Node *new_node) {
if (node->inputs.empty()) {
return;
}
new_node->inputs = node->inputs;
for (auto *node_in : node->inputs) {
for (size_t i = 0; i < node_in->outputs.size(); ++i) {
@@ -51,6 +81,9 @@ void MoveNodeOutputs(ir::Node *node, ir::Node *new_node) {
}

void MoveNodeOutputs(ir::Node *node, ir::Node *new_node) {
if (node->outputs.empty()) {
return;
}
new_node->outputs = node->outputs;
for (auto *node_out : node->outputs) {
for (size_t i = 0; i < node_out->inputs.size(); ++i) {
@@ -62,5 +95,43 @@ void MoveNodeOutputs(ir::Node *node, ir::Node *new_node) {
}
}

void ConnectNodes(ir::Node *first_node, ir::Node *next_node) {
first_node->outputs.push_back(next_node);
next_node->inputs.push_back(first_node);
}

int ConvertDataType(int type) {
auto dtype = static_cast<proto::VarType::Type>(type);
switch (dtype) {
case proto::VarType::BOOL:
return static_cast<int>(ONNXDataType::BOOL);
case proto::VarType::INT16:
return static_cast<int>(ONNXDataType::INT16);
case proto::VarType::INT32:
return static_cast<int>(ONNXDataType::INT32);
case proto::VarType::INT64:
return static_cast<int>(ONNXDataType::INT64);
case proto::VarType::FP16:
return static_cast<int>(ONNXDataType::FLOAT16);
case proto::VarType::FP32:
return static_cast<int>(ONNXDataType::FLOAT);
case proto::VarType::FP64:
return static_cast<int>(ONNXDataType::DOUBLE);
case proto::VarType::UINT8:
return static_cast<int>(ONNXDataType::UINT8);
case proto::VarType::INT8:
return static_cast<int>(ONNXDataType::INT8);
case proto::VarType::BF16:
return static_cast<int>(ONNXDataType::BFLOAT16);
case proto::VarType::COMPLEX64:
return static_cast<int>(ONNXDataType::COMPLEX64);
case proto::VarType::COMPLEX128:
return static_cast<int>(ONNXDataType::COMPLEX128);
default:
PADDLE_THROW(
platform::errors::Unimplemented("Unsupported data type: %d.", dtype));
}
}

} // namespace framework
} // namespace paddle
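
As a quick sanity check of the new mapping, the Paddle codes from the comment removed in other_ops.cpp (e.g. FP32 = 5, INT64 = 3) should land on the ONNX codes in the enum above, while unhandled codes such as SIZE_T hit the default branch and throw. A hedged sketch:

// Sketch: a few representative conversions through ConvertDataType.
int f = ConvertDataType(static_cast<int>(proto::VarType::FP32));   // 1 (FLOAT)
int i = ConvertDataType(static_cast<int>(proto::VarType::INT64));  // 7 (INT64)
// ConvertDataType(static_cast<int>(proto::VarType::SIZE_T)) would
// reach the default case and PADDLE_THROW an Unimplemented error.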
3 changes: 3 additions & 0 deletions paddle/fluid/framework/ipu/popart_canonicalization_utils.h
@@ -38,6 +38,9 @@ SymbolHandler GetHandler(const std::string &);

void MoveNodeInputs(ir::Node *node, ir::Node *new_node);
void MoveNodeOutputs(ir::Node *node, ir::Node *new_node);
void ConnectNodes(ir::Node *first_node, ir::Node *next_node);

int ConvertDataType(int);

} // namespace framework
} // namespace paddle
