Add Intermediate API layer
YuanRisheng committed Oct 22, 2021
1 parent 76a588e commit 7c41b15
Showing 40 changed files with 526 additions and 203 deletions.
1 change: 0 additions & 1 deletion .gitignore
@@ -4,7 +4,6 @@ paddle/fluid/API_DEV.spec
paddle/fluid/API_PR.spec
paddle/fluid/op_use_default_grad_maker_DEV.spec
paddle/fluid/op_use_default_grad_maker_PR.spec
tools/__pycache__/static_mode_white_list.cpython-37.pyc

*.DS_Store
*.vs
26 changes: 5 additions & 21 deletions paddle/fluid/framework/operator.cc
@@ -30,6 +30,7 @@ limitations under the License. */
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/pten/common/scalar.h"

namespace paddle {
namespace framework {
@@ -1080,20 +1081,6 @@ void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
this->InferShape(&infer_shape_ctx);
}

static std::string RuntimeContextDebugString(const RuntimeContext& ctx) {
std::stringstream ss;
ss << "RuntimeContext(Inputs: ";
for (auto& var_pair : ctx.inputs) {
ss << var_pair.first << ", ";
}
ss << "Outputs: ";
for (auto& var_pair : ctx.outputs) {
ss << var_pair.first << ", ";
}
ss << ")";
return ss.str();
}

void OperatorWithKernel::RunImpl(const Scope& scope,
const platform::Place& place) const {
// To reduce the elapsed time of HasAttr, we use bool variable to record the
@@ -1144,7 +1131,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
// and RCOM backend, the XPU, NPU and MKLDNN will be supported in the second
// phase
if (FLAGS_run_pt_kernel &&
pten::KernelFactory::Instance().ContainsKernel(type_.c_str())) {
pten::KernelFactory::Instance().HasCompatiblePtenKernel(type_)) {
if (pt_kernel_signature_.get() == nullptr || pt_kernel_.get() == nullptr) {
ChoosePtenKernel(exe_ctx);
}
@@ -1651,10 +1638,9 @@ void OperatorWithKernel::ParseInputDataType(
if (t != nullptr) {
PADDLE_ENFORCE_EQ(
t->IsInitialized(), true,
platform::errors::InvalidArgument(
"The Tensor in the %s Op's Input Variable %s(%s) is "
"not initialized.",
Type(), name, Inputs().at(name).at(i)));
platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
"contains uninitialized Tensor.",
Type(), name));
proto::VarType::Type tmp = t->type();
PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
platform::errors::InvalidArgument(
@@ -1789,8 +1775,6 @@ KernelSignature OperatorWithKernel::GetExpectedPtenKernelArgs(

pten::KernelContext OperatorWithKernel::BuildPtenKernelContext(
const RuntimeContext& ctx, const platform::DeviceContext& dev_ctx) const {
VLOG(1) << RuntimeContextDebugString(ctx);

// TODO(chenweihang): now only work for very simple case,
// many cases need to be deal with later:
// 1. the input and output are not tensor
11 changes: 5 additions & 6 deletions paddle/fluid/framework/operator_test.cc
@@ -439,9 +439,8 @@ TEST(IndicateVarDataTypeTest, lodtensor) {
std::string ex_msg = err.what();
EXPECT_TRUE(
ex_msg.find(
"The Tensor in the indicate_lod_tensor_data_type_test Op's "
"Input Variable LoDTensor(lodtensor_1) is not initialized") !=
std::string::npos);
"The indicate_lod_tensor_data_type_test Op's Input Variable "
"`LoDTensor` contains uninitialized Tensor.") != std::string::npos);
}
ASSERT_TRUE(caught);
}
@@ -466,9 +465,9 @@ TEST(IndicateVarDataTypeTest, selectedrows) {
caught = true;
std::string ex_msg = err.what();
EXPECT_TRUE(
ex_msg.find("The Tensor in the indicate_selected_rows_data_type_test "
"Op's Input Variable SelectedRows(selected_rows_1) is not "
"initialized") != std::string::npos);
ex_msg.find("The indicate_selected_rows_data_type_test Op's "
"Input Variable `SelectedRows` contains uninitialized "
"Tensor.") != std::string::npos);
}
ASSERT_TRUE(caught);
}
3 changes: 2 additions & 1 deletion paddle/fluid/imperative/prepared_operator.cc
@@ -18,6 +18,7 @@
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/pten/common/scalar.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/xpu/xpu_op_list.h"
@@ -153,7 +154,7 @@ PreparedOp PrepareImpl(const NameVarMap<VarType>& ins,
VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

if (FLAGS_run_pt_kernel &&
pten::KernelFactory::Instance().ContainsKernel(op.Type().c_str())) {
pten::KernelFactory::Instance().HasCompatiblePtenKernel(op.Type())) {
auto pt_kernel_signature = op.GetExpectedPtenKernelArgs(dygraph_exe_ctx);

VLOG(1) << framework::KernelSignatureToString(pt_kernel_signature);
2 changes: 1 addition & 1 deletion paddle/fluid/operators/mean_op.h
@@ -49,7 +49,7 @@ using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
* Currently, only the first two cases are adapted.
*
* The principle here is that the implementation in the kernel must reuse the
* corresponding functions in the Tensor compute library and cannot maintain
* corresponding functions in the Tensor Operation library and cannot maintain
* two copies of the code.
*/
template <typename DeviceContext, typename T>
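For context on the reuse principle described in the comment above, here is a minimal sketch of a fluid-side kernel that forwards to the shared pten implementation. It is an illustration only, not the literal contents of this file: the wrapper helper MakePtenDenseTensor and the exact tensor plumbing are assumptions.

template <typename DeviceContext, typename T>
class MeanKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::Tensor>("X");
    auto* out = context.Output<framework::Tensor>("Out");
    out->mutable_data<T>(context.GetPlace());
    auto& dev_ctx = context.device_context<DeviceContext>();
    // Wrap the fluid tensors as pten::DenseTensor (helper name assumed) and
    // call the single shared kernel in the Tensor Operation library, so no
    // second copy of the compute code lives in the operator.
    auto pt_x = paddle::experimental::MakePtenDenseTensor(*x);
    auto pt_out = paddle::experimental::MakePtenDenseTensor(*out);
    pten::Mean<T>(dev_ctx, *pt_x, pt_out.get());
  }
};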
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/op_function_generator.cc
@@ -557,7 +557,7 @@ GenerateOpFunctions() {
// since only OperatorWithKernel can run in dygraph mode.
// if the pten lib contains op kernel, we still generate ops method
if (!all_kernels.count(op_type) &&
!pten::KernelFactory::Instance().ContainsKernel(op_type.c_str())) {
!pten::KernelFactory::Instance().HasCompatiblePtenKernel(op_type)) {
continue;
}

1 change: 0 additions & 1 deletion paddle/pten/api/include/core.h
@@ -19,5 +19,4 @@ limitations under the License. */
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/kernel_context.h"
#include "paddle/pten/core/kernel_factory.h"
#include "paddle/pten/core/scalar.h"
#include "paddle/pten/core/tensor_meta.h"
15 changes: 15 additions & 0 deletions paddle/pten/api/include/creation.h
@@ -14,5 +14,20 @@

#pragma once

#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/creation.h"
#include "paddle/pten/kernels/cuda/creation.h"

namespace pten {

template <typename T, typename ContextT>
DenseTensor FillAnyLike(const ContextT& dev_ctx,
const DenseTensor& x,
const Scalar& val) {
auto out_meta = UnchangedInferShape(x.meta());
pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
FillAnyLike<T>(dev_ctx, x, val, &dense_out);
return dense_out;
}

} // namespace pten
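A short usage sketch of the intermediate API added above: the call infers the output meta, constructs the output DenseTensor, and dispatches to the device kernel in a single step. The CPU context type and the Scalar constructor taking a float are assumptions here (the Scalar constructors are not shown in this commit's hunks).

#include "paddle/pten/api/include/creation.h"

// Sketch only: produce a tensor of ones with the same meta as `x`.
pten::DenseTensor OnesLike(const paddle::platform::CPUDeviceContext& dev_ctx,
                           const pten::DenseTensor& x) {
  return pten::FillAnyLike<float>(dev_ctx, x, pten::Scalar(1.0f));
}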
15 changes: 15 additions & 0 deletions paddle/pten/api/include/linalg.h
@@ -15,5 +15,20 @@
#pragma once

// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/linalg.h"
#include "paddle/pten/kernels/cuda/linalg.h"

namespace pten {

template <typename T, typename ContextT>
DenseTensor Dot(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& y) {
auto out_meta = DotInferShape(x.meta(), y.meta());
pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
Dot<T>(dev_ctx, x, y, &dense_out);
return dense_out;
}

} // namespace pten
16 changes: 16 additions & 0 deletions paddle/pten/api/include/manipulation.h
@@ -15,5 +15,21 @@
#pragma once

// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/manipulation.h"
#include "paddle/pten/kernels/cuda/manipulation.h"

namespace pten {

template <typename T, typename ContextT>
DenseTensor Flatten(const ContextT& dev_ctx,
const DenseTensor& x,
int start_axis,
int stop_axis) {
auto out_meta = FlattenInferShape(x.meta(), start_axis, stop_axis);
pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
Flatten<T>(dev_ctx, x, start_axis, stop_axis, &dense_out);
return dense_out;
}

} // namespace pten
44 changes: 44 additions & 0 deletions paddle/pten/api/include/math.h
@@ -15,5 +15,49 @@ limitations under the License. */
#pragma once

// See Note: [ How do we organize the kernel directory ]
#include "paddle/pten/api/include/infershape.h"
#include "paddle/pten/kernels/cpu/math.h"
#include "paddle/pten/kernels/cuda/math.h"

namespace pten {

template <typename T, typename ContextT>
DenseTensor Sign(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = UnchangedInferShape(x.meta());
pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
Sign<T>(dev_ctx, x, &dense_out);
return dense_out;
}

template <typename T, typename ContextT>
DenseTensor Mean(const ContextT& dev_ctx, const DenseTensor& x) {
auto out_meta = ReductionInferShape(x.meta());
pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
Mean<T>(dev_ctx, x, &dense_out);
return dense_out;
}

template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
const DenseTensor& x,
float scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
Scale<T>(dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
return dense_out;
}

template <typename T, typename ContextT>
DenseTensor Scale(const ContextT& dev_ctx,
const DenseTensor& x,
const DenseTensor& scale,
float bias,
bool bias_after_scale) {
auto out_meta = UnchangedInferShape(x.meta());
pten::DenseTensor dense_out(out_meta, pten::TensorStatus());
ScaleHost<T>(dev_ctx, x, scale, bias, bias_after_scale, &dense_out);
return dense_out;
}
} // namespace pten
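Because every function in these headers returns its result by value, the intermediate APIs from creation.h, linalg.h, manipulation.h, and math.h can be chained directly. A hedged sketch of such a composition (the float instantiation, the axis values, and the context type are illustrative assumptions):

#include "paddle/pten/api/include/linalg.h"
#include "paddle/pten/api/include/manipulation.h"
#include "paddle/pten/api/include/math.h"

// Sketch: each call runs InferShape, allocates the output DenseTensor,
// and launches the corresponding device kernel.
template <typename ContextT>
pten::DenseTensor ScaledDotMean(const ContextT& dev_ctx,
                                const pten::DenseTensor& x,
                                const pten::DenseTensor& y) {
  auto x1d = pten::Flatten<float>(dev_ctx, x, /*start_axis=*/0, /*stop_axis=*/-1);
  auto y1d = pten::Flatten<float>(dev_ctx, y, 0, -1);
  auto dot = pten::Dot<float>(dev_ctx, x1d, y1d);
  auto scaled = pten::Scale<float>(dev_ctx, dot, /*scale=*/2.0f, /*bias=*/0.0f,
                                   /*bias_after_scale=*/true);
  return pten::Mean<float>(dev_ctx, scaled);
}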
9 changes: 6 additions & 3 deletions paddle/pten/common/backend.h
@@ -16,6 +16,8 @@ limitations under the License. */

#include <ostream>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace experimental {

@@ -28,8 +30,8 @@ namespace experimental {
* but in order to make the boundary of the kernel clearer and the function
* more specific, we need to distinguish the calculation method.
*
* Such as the kernel for CUDA device, it can be a native CUDA kernel,
* or a kernel implemented by CUDNN library.
* Such as the kernel for CPU device, it can be a native CPU kernel,
* or a kernel implemented by MKLDNN library.
*
* Note(chenweihang): HIP is not needed now, we can added it if needed
* in the future
@@ -78,7 +80,8 @@ inline std::ostream& operator<<(std::ostream& os, Backend backend) {
os << "CUDNN";
break;
default:
throw std::runtime_error("Invalid Backend type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum backend type `%d`.", static_cast<int>(backend)));
}
return os;
}
5 changes: 2 additions & 3 deletions paddle/pten/common/data_type.h
@@ -18,7 +18,6 @@ limitations under the License. */
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
@@ -164,8 +163,8 @@ inline std::ostream& operator<<(std::ostream& os, DataType dtype) {
os << "complex128";
break;
default:
// TODO(chenweihang): change to enforce later
throw std::runtime_error("Invalid DataType type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum data type `%d`.", static_cast<int>(dtype)));
}
return os;
}
10 changes: 6 additions & 4 deletions paddle/pten/common/layout.h
@@ -14,6 +14,8 @@ limitations under the License. */

#pragma once

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace experimental {

@@ -26,8 +28,8 @@ enum class DataLayout {
NUM_DATA_LAYOUTS,
};

inline std::ostream& operator<<(std::ostream& os, DataLayout dtype) {
switch (dtype) {
inline std::ostream& operator<<(std::ostream& os, DataLayout layout) {
switch (layout) {
case DataLayout::UNDEFINED:
os << "Undefined";
break;
@@ -44,8 +46,8 @@ inline std::ostream& operator<<(std::ostream& os, DataLayout dtype) {
os << "MKLDNN";
break;
default:
// TODO(chenweihang): change to enforce later
throw std::runtime_error("Invalid DataLayout type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum data layout type `%d`.", static_cast<int>(layout)));
}
return os;
}
17 changes: 14 additions & 3 deletions paddle/pten/core/scalar.h → paddle/pten/common/scalar.h
@@ -14,7 +14,12 @@ limitations under the License. */

#pragma once

namespace pten {
#include <cstdint>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace experimental {

class Scalar {
public:
@@ -43,7 +48,8 @@ class Scalar {
case Tag::HAS_B:
return static_cast<T>(data_.b);
default:
throw std::runtime_error("Invalid Scalar type.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid enum scalar type tag `%d`.", static_cast<int>(tag)));
}
}

@@ -60,4 +66,9 @@ class Scalar {
} data_;
};

} // namespace pten
} // namespace experimental
} // namespace paddle

namespace pten {
using Scalar = paddle::experimental::Scalar;
}
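A small sketch of how the relocated Scalar can be used. The constructors are elided from this hunk, so the bool constructor below is an assumption; to<T>() and the tag/union layout are visible above.

void ScalarExample() {
  paddle::experimental::Scalar flag(true);  // assumed bool constructor (Tag::HAS_B)
  bool b = flag.to<bool>();                 // reads data_.b via static_cast
  float f = flag.to<float>();               // same stored value, converted to float
  pten::Scalar copy = flag;                 // the pten alias above names the same type
  (void)b; (void)f; (void)copy;
}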
4 changes: 2 additions & 2 deletions paddle/pten/core/dense_tensor.h
@@ -54,8 +54,8 @@ class DenseTensor : public TensorBase {

// DenseTensor(const DenseTensor&) = delete;
// DenseTensor& operator=(const DenseTensor&) = delete;
DenseTensor(DenseTensor&&) = delete;
DenseTensor& operator=(DenseTensor&&) = delete;
DenseTensor(DenseTensor&&) = default;
DenseTensor& operator=(DenseTensor&&) = default;

/**
* If we still malloc memory by mutable_data,
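Defaulting the move operations matters for the intermediate layer added in this commit: each API function builds a local DenseTensor and ends with `return dense_out;`, which requires a usable (non-deleted) move constructor even when the move itself is elided. A minimal sketch of what now compiles (the context type and float instantiation are assumptions):

#include "paddle/pten/api/include/math.h"

void MoveExample(const paddle::platform::CPUDeviceContext& dev_ctx,
                 const pten::DenseTensor& x) {
  pten::DenseTensor out = pten::Sign<float>(dev_ctx, x);  // constructed from a returned value
  out = pten::Mean<float>(dev_ctx, x);                    // uses the defaulted move assignment
}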
(Diffs for the remaining changed files are not shown here.)
