Skip to content

Commit

Permalink
add pten utils tests
Browse files — browse the repository at this point in the history
  • Loading branch information
chenwhql committed Oct 23, 2021
1 parent 52fead0 commit 2ff2721
Show file tree
Hide file tree
Showing 2 changed files with 65 additions and 21 deletions.
4 changes: 0 additions & 4 deletions paddle/fluid/framework/pten_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,6 @@ std::shared_ptr<pten::DenseTensor> MakeTensorImpl<pten::DenseTensor, LoDTensor>(

if (holder != nullptr) {
tensor_impl->ShareAllocation(tensor.Holder());
} else {
VLOG(1) << "Old LoDTensor holder is nullptr.";
}
return tensor_impl;
}
Expand All @@ -55,8 +53,6 @@ std::shared_ptr<pten::DenseTensor> MakeTensorImpl<pten::DenseTensor, Tensor>(

if (holder != nullptr) {
tensor_impl->ShareAllocation(tensor.Holder());
} else {
VLOG(1) << "Old Tensor holder is nullptr.";
}
return tensor_impl;
}
Expand Down
82 changes: 65 additions & 17 deletions paddle/fluid/framework/pten_utils_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,20 +18,18 @@ limitations under the License. */
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/variable.h"

namespace paddle {
namespace framework {

TEST(TcmptUtils, MakeTensor) {
TEST(PtenUtils, FluidTensorToPtenTensor) {
// 1. create tensor
LoDTensor x;
Tensor x2;
paddle::framework::LoDTensor x;
paddle::framework::Tensor x2;
x.Resize({2});
x.mutable_data<float>(platform::CPUPlace());
x.mutable_data<float>(paddle::platform::CPUPlace());
x.data<float>()[0] = 0.2;
x.data<float>()[1] = 0.5;

// 2. test API
auto dense_x = MakeTensorImpl<pten::DenseTensor>(x, x.place(), x.type());
auto dense_x = paddle::framework::MakeTensorImpl<pten::DenseTensor>(
x, x.place(), x.type());

// 3. check result
std::vector<float> expect_value = {0.2, 0.5};
Expand All @@ -41,13 +39,13 @@ TEST(TcmptUtils, MakeTensor) {
ASSERT_EQ(dense_x->data_type(), pten::DataType::FLOAT32);
}

TEST(TcmptUtils, VarToPtenTensor) {
TEST(PtenUtils, VarToPtenTensor) {
// 1. create Variable
Variable v;
auto selected_rows = v.GetMutable<SelectedRows>();
Tensor* value = selected_rows->mutable_value();
auto* data =
value->mutable_data<int>(make_ddim({1, 1}), paddle::platform::CPUPlace());
paddle::framework::Variable v;
auto selected_rows = v.GetMutable<paddle::framework::SelectedRows>();
paddle::framework::Tensor* value = selected_rows->mutable_value();
auto* data = value->mutable_data<int>(paddle::framework::make_ddim({1, 1}),
paddle::platform::CPUPlace());
data[0] = 123;
pten::Backend expect_backend = pten::Backend::CPU;

Expand All @@ -57,11 +55,61 @@ TEST(TcmptUtils, VarToPtenTensor) {
auto tensor_def = pten::TensorArgDef(expect_backend, pten::DataLayout::NCHW,
pten::DataType::INT32);
// 2. test API
auto tensor_x = InputVariableToPtenTensor(v, tensor_def);
auto tensor_x = paddle::framework::InputVariableToPtenTensor(v, tensor_def);
// 3. check result
ASSERT_EQ(tensor_x->backend(), expect_backend);
ASSERT_EQ(tensor_x->data_type(), pten::DataType::INT32);
}

} // namespace framework
} // namespace paddle
// Verifies that a pten DenseTensor's allocation can be shared into both a
// fluid Tensor and a fluid LoDTensor, i.e. writes made through the pten
// tensor are visible through the fluid views without a copy.
TEST(PtenUtils, PtenTensorToFluidTensor) {
  // Build a 1x1 FP32 pten tensor on CPU and store a known value in it.
  pten::DenseTensor pten_tensor(
      pten::TensorMeta(paddle::framework::make_ddim({1, 1}), pten::Backend::CPU,
                       pten::DataType::FLOAT32, pten::DataLayout::ANY),
      pten::TensorStatus());
  float* buf = pten_tensor.mutable_data<float>();
  buf[0] = 0.5;

  // Share the same underlying allocation into the two fluid tensor types.
  paddle::framework::LoDTensor fluid_lod_tensor;
  paddle::framework::Tensor fluid_tensor;
  paddle::framework::ShareTensorImpl(&pten_tensor, &fluid_tensor);
  paddle::framework::ShareTensorImpl(&pten_tensor, &fluid_lod_tensor);

  // Both fluid views must observe the value written via the pten tensor.
  ASSERT_EQ(fluid_tensor.data<float>()[0], 0.5);
  ASSERT_EQ(fluid_lod_tensor.data<float>()[0], 0.5);
}

// Verifies TransPtenKernelKeyToOpKernelType maps a pten KernelKey onto the
// equivalent fluid OpKernelType: data type, layout, place, and library type.
// MKLDNN/CUDNN branches are exercised only when built with those backends.
TEST(PtenUtils, TransPtenKernelKeyToOpKernelType) {
  // Plain CPU kernel key -> CPU place, plain library.
  pten::KernelKey cpu_key(pten::Backend::CPU, pten::DataLayout::NCHW,
                          pten::DataType::FLOAT32);
  auto cpu_kernel_type =
      paddle::framework::TransPtenKernelKeyToOpKernelType(cpu_key);
  ASSERT_EQ(cpu_kernel_type.data_type_,
            paddle::framework::proto::VarType::FP32);
  ASSERT_EQ(cpu_kernel_type.data_layout_,
            paddle::framework::DataLayout::kNCHW);
  ASSERT_TRUE(paddle::platform::is_cpu_place(cpu_kernel_type.place_));
  ASSERT_EQ(cpu_kernel_type.library_type_,
            paddle::framework::LibraryType::kPlain);

#ifdef PADDLE_WITH_MKLDNN
  // MKLDNN backend still lands on a CPU place but with the MKLDNN library.
  pten::KernelKey mkldnn_key(pten::Backend::MKLDNN, pten::DataLayout::NCHW,
                             pten::DataType::FLOAT32);
  auto mkldnn_kernel_type =
      paddle::framework::TransPtenKernelKeyToOpKernelType(mkldnn_key);
  ASSERT_EQ(mkldnn_kernel_type.data_type_,
            paddle::framework::proto::VarType::FP32);
  ASSERT_EQ(mkldnn_kernel_type.data_layout_,
            paddle::framework::DataLayout::kNCHW);
  ASSERT_TRUE(paddle::platform::is_cpu_place(mkldnn_kernel_type.place_));
  ASSERT_EQ(mkldnn_kernel_type.library_type_,
            paddle::framework::LibraryType::kMKLDNN);
#endif

#ifdef PADDLE_WITH_CUDA
  // CUDNN backend maps to a GPU place with the CUDNN library.
  pten::KernelKey cudnn_key(pten::Backend::CUDNN, pten::DataLayout::NCHW,
                            pten::DataType::FLOAT32);
  auto cudnn_kernel_type =
      paddle::framework::TransPtenKernelKeyToOpKernelType(cudnn_key);
  ASSERT_EQ(cudnn_kernel_type.data_type_,
            paddle::framework::proto::VarType::FP32);
  ASSERT_EQ(cudnn_kernel_type.data_layout_,
            paddle::framework::DataLayout::kNCHW);
  ASSERT_TRUE(paddle::platform::is_gpu_place(cudnn_kernel_type.place_));
  ASSERT_EQ(cudnn_kernel_type.library_type_,
            paddle::framework::LibraryType::kCUDNN);
#endif
}

0 comments on commit 2ff2721

Please sign in to comment.