Fix issue of output tensor's dim = 0 (PaddlePaddle#7)
* Add IpuBackend::GetTensorShape

* Remove a blank line

* Use const &
gglin001 authored Jul 23, 2021
1 parent 5d09b5a commit 3965526
Showing 7 changed files with 138 additions and 46 deletions.
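In short: the Paddle-to-popart datatype mapping moves out of ipu_backend.cc into a new ipu_utils library (VarType2PopartType, plus Tensor2IArray/LoDTensor2IArray helpers), IpuBackend gains a GetTensorShape accessor, and ipu_runtime_op.h uses that accessor to resize each fetched tensor to its compiled shape instead of the hard-coded placeholder {1}, which appears to be what produced the zero-dim outputs the title refers to.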
8 changes: 7 additions & 1 deletion .gitignore
@@ -33,4 +33,10 @@ cmake-build-*
 paddle/fluid/operators/distributed/send_recv.proto
 model_test
 
-scripts
+python/paddle/distributed/fleet/proto/
+python/paddle/fluid/core*.so
+
+scripts
+sdk/
+demos/
+*.onnx
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ipu/CMakeLists.txt
@@ -1,2 +1,2 @@
-cc_library(ipu_backend SRCS ipu_backend.cc DEPS popart graph framework_proto enforce )
-# cc_test(ipu_backend_test SRCS ipu_backend_test.cc DEPS ipu_backend)
+cc_library(ipu_utils SRCS ipu_utils.cc DEPS memory framework_proto popart)
+cc_library(ipu_backend SRCS ipu_backend.cc DEPS popart graph framework_proto enforce ipu_utils)
52 changes: 12 additions & 40 deletions paddle/fluid/framework/ipu/ipu_backend.cc
@@ -29,6 +29,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/feed_fetch_type.h"
 #include "paddle/fluid/framework/framework.pb.h"
+#include "paddle/fluid/framework/ipu/ipu_utils.h"
 #include "paddle/fluid/framework/ir/graph.h"
 #include "paddle/fluid/framework/ir/node.h"
 #include "paddle/fluid/framework/lod_tensor.h"
@@ -39,37 +40,6 @@ namespace framework {
 
 std::shared_ptr<IpuBackend> IpuBackend::instance_ = nullptr;
 
-popart::DataType toPopartType(proto::VarType::Type type) {
-  switch (type) {
-    case proto::VarType::UINT8:
-      return popart::DataType::UINT8;
-    case proto::VarType::INT8:
-      return popart::DataType::INT8;
-    case proto::VarType::INT16:
-      return popart::DataType::INT16;
-    case proto::VarType::INT32:
-      return popart::DataType::INT32;
-    case proto::VarType::INT64:
-      return popart::DataType::INT64;
-    case proto::VarType::BOOL:
-      return popart::DataType::BOOL;
-    case proto::VarType::FP32:
-      return popart::DataType::FLOAT;
-    case proto::VarType::FP16:
-      return popart::DataType::FLOAT16;
-    case proto::VarType::BF16:
-      return popart::DataType::BFLOAT16;
-    case proto::VarType::COMPLEX64:
-      return popart::DataType::COMPLEX64;
-    case proto::VarType::COMPLEX128:
-      return popart::DataType::COMPLEX128;
-
-    default:
-      PADDLE_THROW(
-          platform::errors::Unavailable("Unsupported Paddle var type."));
-  }
-}
-
 IpuBackend::IpuBackend() { builder_ = popart::Builder::create(); }
 
 void IpuBackend::Compile(ir::Graph* graph,
@@ -85,7 +55,7 @@ void IpuBackend::Compile(ir::Graph* graph,
     if (feed_name == var_desc->Name()) {
       // Get tensor_info from var_desc
       VLOG(1) << "feed_name= " << var_desc->Name();
-      popart::DataType data_type = toPopartType(var_desc->GetDataType());
+      auto data_type = VarType2PopartType(var_desc->GetDataType());
       popart::TensorInfo input_info{data_type, var_desc->GetShape()};
       // Create popart tensor
       VLOG(1) << "popart input_info = " << input_info;
@@ -126,9 +96,9 @@ void IpuBackend::Compile(ir::Graph* graph,
 
   for (const auto& fetch_name : fetch_list) {
     auto tensor = tensors_.find(fetch_name);
-    PADDLE_ENFORCE_NE(
-        tensor, tensors_.end(),
-        platform::errors::NotFound("output tensor %s does not exist.", fetch_name));
+    PADDLE_ENFORCE_NE(tensor, tensors_.end(),
+                      platform::errors::NotFound(
+                          "output tensor %s does not exist.", fetch_name));
 
     VLOG(1) << "fetch_name= " << fetch_name;
     VLOG(1) << "popart output tensor id = " << tensor->second;
@@ -165,8 +135,8 @@ void IpuBackend::Compile(ir::Graph* graph,
   VLOG(1) << "Preparing session device...done";
 }
 
-void IpuBackend::Run(const std::vector<const Tensor *> &inputs,
-                     std::vector<Tensor *> &outputs) {
+void IpuBackend::Run(const std::vector<const Tensor*>& inputs,
+                     std::vector<Tensor*>& outputs) {
   // Prepare input tensor
   std::map<popart::TensorId, popart::IArray&> popart_inputs;
   std::map<popart::TensorId, popart::NDArrayWrapper<float>> input_wrappers;
@@ -175,7 +145,8 @@ void IpuBackend::Run(const std::vector<const Tensor *> &inputs,
     auto tensor_id = inputs_[i];
     const Tensor* tensor = inputs[i];
     std::vector<int64_t> tensor_shape = builder_->getTensorShape(tensor_id);
-    popart::NDArrayWrapper<float> data(const_cast<float*>(tensor->data<float>()), tensor_shape);
+    popart::NDArrayWrapper<float> data(
+        const_cast<float*>(tensor->data<float>()), tensor_shape);
     VLOG(1) << "Preparing Input data for tensor " << tensor_id;
     input_wrappers.emplace(tensor_id, std::move(data));
     popart_inputs.emplace(tensor_id, input_wrappers.at(tensor_id));
@@ -184,11 +155,12 @@ void IpuBackend::Run(const std::vector<const Tensor *> &inputs,
   // Prepare output tensor
   std::map<popart::TensorId, popart::IArray&> popart_anchors;
   std::map<popart::TensorId, popart::NDArrayWrapper<float>> anchor_wrappers;
-  for(size_t i = 0; i < outputs.size(); i++) {
+  for (size_t i = 0; i < outputs.size(); i++) {
     auto tensor_id = outputs_[i];
     Tensor* tensor = outputs[i];
     std::vector<int64_t> tensor_shape = builder_->getTensorShape(tensor_id);
-    popart::NDArrayWrapper<float> data(const_cast<float*>(tensor->data<float>()), tensor_shape);
+    popart::NDArrayWrapper<float> data(
+        const_cast<float*>(tensor->data<float>()), tensor_shape);
     VLOG(1) << "Preparing Output data for tensor " << tensor_id;
     anchor_wrappers.emplace(tensor_id, std::move(data));
     popart_anchors.emplace(tensor_id, anchor_wrappers.at(tensor_id));
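Note that both loops in Run() still hard-code float: buffers are wrapped as popart::NDArrayWrapper<float> via tensor->data<float>(). The Tensor2IArray<T> helper added in ipu_utils.h below generalizes this over dtypes, though this commit does not yet use it here. A hypothetical sketch of that substitution (not part of the commit; Tensor2IArray takes a non-const Tensor&, hence the const_cast):

    // Hypothetical only: wrap an input through the new helper instead of
    // hand-building the float NDArrayWrapper. The unique_ptr must outlive
    // the session run, since popart_inputs keeps an IArray reference.
    auto wrapper = Tensor2IArray<float>(*const_cast<Tensor*>(tensor));
    popart_inputs.emplace(tensor_id, *wrapper);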
4 changes: 4 additions & 0 deletions paddle/fluid/framework/ipu/ipu_backend.h
@@ -66,6 +66,10 @@ class IpuBackend {
     optimizer_.attrs[attr] = value;
   }
 
+  std::vector<int64_t> GetTensorShape(const std::string& var_name) {
+    return builder_->getTensorShape(tensors_[var_name]);
+  }
+
   static std::shared_ptr<IpuBackend> GetInstance() {
     if (NULL == instance_) {
       instance_.reset(new IpuBackend());
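A minimal usage sketch for the new accessor (the variable name "fc_0.tmp_1" and the tensor `out` are hypothetical; assumes Compile() has already registered the variable in tensors_):

    // Look up the compiled shape of a fetched variable by its Paddle name,
    // then size the output tensor to match before running the graph.
    auto ipu_backend = paddle::framework::IpuBackend::GetInstance();
    std::vector<int64_t> shape = ipu_backend->GetTensorShape("fc_0.tmp_1");
    out->Resize(paddle::framework::make_ddim(shape));  // out: framework::Tensor*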
51 changes: 51 additions & 0 deletions paddle/fluid/framework/ipu/ipu_utils.cc
@@ -0,0 +1,51 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/ipu/ipu_utils.h"

namespace paddle {
namespace framework {

popart::DataType VarType2PopartType(proto::VarType::Type type) {
  switch (type) {
    case proto::VarType::UINT8:
      return popart::DataType::UINT8;
    case proto::VarType::INT8:
      return popart::DataType::INT8;
    case proto::VarType::INT16:
      return popart::DataType::INT16;
    case proto::VarType::INT32:
      return popart::DataType::INT32;
    case proto::VarType::INT64:
      return popart::DataType::INT64;
    case proto::VarType::BOOL:
      return popart::DataType::BOOL;
    case proto::VarType::FP32:
      return popart::DataType::FLOAT;
    case proto::VarType::FP16:
      return popart::DataType::FLOAT16;
    case proto::VarType::BF16:
      return popart::DataType::BFLOAT16;
    case proto::VarType::COMPLEX64:
      return popart::DataType::COMPLEX64;
    case proto::VarType::COMPLEX128:
      return popart::DataType::COMPLEX128;
    default:
      PADDLE_THROW(paddle::platform::errors::Unavailable(
          "Unsupported Paddle var type."));
  }
}

} // namespace framework
} // namespace paddle
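A quick sanity check of the mapping (a sketch, assuming the headers above):

    // FP32 maps to popart's FLOAT; any type missing from the switch throws
    // errors::Unavailable rather than silently miscompiling.
    auto dtype = paddle::framework::VarType2PopartType(
        paddle::framework::proto::VarType::FP32);
    // dtype == popart::DataType::FLOAT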
55 changes: 55 additions & 0 deletions paddle/fluid/framework/ipu/ipu_utils.h
@@ -0,0 +1,55 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#include <popart/ndarraywrapper.hpp>
#include <popart/tensordata.hpp>
#include <popart/tensorinfo.hpp>

#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"

namespace paddle {
namespace framework {

popart::DataType VarType2PopartType(proto::VarType::Type type);

template <typename T>
std::unique_ptr<popart::NDArrayWrapper<T>> Tensor2IArray(Tensor &tensor) {
  auto dtype = VarType2PopartType(tensor.type());
  auto shape = std::vector<int64_t>();
  for (size_t i = 0; i < tensor.dims().size(); ++i) {
    shape.push_back(tensor.dims().at(i));
  }
  popart::TensorInfo tensor_info(dtype, shape);

  return std::make_unique<popart::NDArrayWrapper<T>>(
      reinterpret_cast<T *>(tensor.data<void>()), tensor_info);
}

template <typename T>
std::unique_ptr<popart::NDArrayWrapper<T>> LoDTensor2IArray(
    LoDTensor &lod_tensor) {
  if (lod_tensor.lod().size() == 0) {
    return Tensor2IArray<T>(lod_tensor);
  } else {
    PADDLE_THROW(
        platform::errors::Unimplemented("LoDTensor2IArray is Unimplemented"));
  }
}

} // namespace framework
} // namespace paddle
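A usage sketch for the two helpers (hypothetical setup, not part of the commit):

    // Build a small float tensor, then wrap it as a popart IArray. The helper
    // derives dtype and shape from the tensor itself; LoDTensor2IArray
    // forwards to Tensor2IArray when the LoD is empty and throws otherwise.
    paddle::framework::LoDTensor t;
    t.Resize(paddle::framework::make_ddim({2, 3}));
    t.mutable_data<float>(paddle::platform::CPUPlace());
    auto iarray = paddle::framework::LoDTensor2IArray<float>(t);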
10 changes: 7 additions & 3 deletions paddle/fluid/operators/ipu_runtime_op.h
@@ -15,6 +15,7 @@
 #pragma once
 #include <memory>
 #include <vector>
+
 #include "paddle/fluid/framework/op_registry.h"
 #ifdef PADDLE_WITH_IPU
 #include "paddle/fluid/framework/ipu/ipu_backend.h"
@@ -27,14 +28,17 @@ namespace operators {
 template <typename T>
 class IpuRuntimeKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext &ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx) const override {
 #ifdef PADDLE_WITH_IPU
     auto ipu_backend = paddle::framework::IpuBackend::GetInstance();
     VLOG(4) << "IpuRuntime Kernel, begin to run graph";
     auto inputs = ctx.MultiInput<framework::Tensor>("FeedList");
     auto outputs = ctx.MultiOutput<framework::Tensor>("FetchList");
-    for (auto* out : outputs){
-      out->Resize(framework::make_ddim({1}));
+    auto output_names = ctx.OutputNames("FetchList");
+    for (size_t i = 0; i < outputs.size(); ++i) {
+      auto* out = outputs[i];
+      auto oshape = ipu_backend->GetTensorShape(output_names[i]);
+      out->Resize(framework::make_ddim(oshape));
       out->mutable_data<float>(ctx.GetPlace());
     }
     ipu_backend->Run(inputs, outputs);
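This hunk is the fix the commit title describes. Previously every fetched tensor was resized to the placeholder {1} before Run(), so mutable_data<float>() allocated a single element regardless of the real output size and the fetched result came back with a degenerate shape. Now the kernel asks the backend for each output's compiled shape, keyed by the op's output names. Condensed, with identifiers as in the diff:

    // Paddle var name -> popart TensorId -> shape reported by the builder.
    auto output_names = ctx.OutputNames("FetchList");
    auto oshape = ipu_backend->GetTensorShape(output_names[i]);
    out->Resize(framework::make_ddim(oshape));  // real shape, not {1}
    out->mutable_data<float>(ctx.GetPlace());   // correctly sized buffer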
