Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove the declaration of using Tensor in framework/tensor.h #46432

Merged
merged 20 commits into from
Sep 28, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/service/brpc_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ void DeserializeSelectedRows(
const platform::DeviceContext& ctx) {
const auto place = ctx.GetPlace();
auto* slr = var->GetMutable<phi::SelectedRows>();
framework::Tensor* tensor = slr->mutable_value();
phi::DenseTensor* tensor = slr->mutable_value();
slr->set_height(msg.slr_height());
std::vector<int64_t> tmp_rows(msg.dims()[0]);
memcpy(tmp_rows.data(), msg.data().data(), msg.dims()[0] * sizeof(int64_t));
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/eager/eager_tensor.h
Original file line number Diff line number Diff line change
Expand Up @@ -248,7 +248,7 @@ class EagerVariable final {
// Construct allocation only once.
if (var_.IsInitialized()) {
if (var_.IsType<paddle::framework::LoDTensor>() ||
var_.IsType<paddle::framework::Tensor>()) {
var_.IsType<phi::DenseTensor>()) {
return SetImplWithLegacyTensor<phi::DenseTensor>();
} else if (var_.IsType<phi::SelectedRows>()) {
return SetImplWithLegacyTensor<phi::SelectedRows>();
Expand Down Expand Up @@ -286,7 +286,7 @@ class EagerVariable final {
template <typename VarType>
void ConstructVariableFromTensor(const paddle::experimental::Tensor& tensor) {
auto* framework_tensor = var_.GetMutable<VarType>();
// Contruct framework::Tensor from egr::EagerVariable
// Construct phi::DenseTensor from egr::EagerVariable
auto tensor_dense = std::dynamic_pointer_cast<VarType>(tensor.impl());
PADDLE_ENFORCE_EQ(
(tensor_dense.get() && tensor_dense),
Expand All @@ -303,7 +303,7 @@ class EagerVariable final {
void ConstructVariableFromCompatTensor(
const paddle::experimental::Tensor& tensor) {
auto* framework_holder = var_.GetMutable<VarType>();
// Contruct framework::Tensor from egr::EagerVariable
// Construct phi::DenseTensor from egr::EagerVariable
auto* compat_tensor =
static_cast<VariableCompatTensor*>(tensor.impl().get());
PADDLE_ENFORCE_NOT_NULL(compat_tensor,
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/attribute_checker.h
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ class TypedAttrChecker {
true,
platform::errors::InvalidArgument(
"Found Attribute('%s') with type(Variable), but it "
"doesn't support Tensor type.",
"doesn't support phi::DenseTensor type.",
attr_name_));

VLOG(1) << "Found Attribute " << attr_name_ << " with type(Variable).";
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/framework/copy_same_tensor_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ static bool CopySameTensorTestMain(const DDim &dims,
FLAGS_use_system_allocator = true; // force to use system allocator

// Step 1: create a cpu tensor and initialize it with random value;
Tensor src_cpu_tensor;
phi::DenseTensor src_cpu_tensor;
{
src_cpu_tensor.Resize(dims);
auto *src_ptr_cpu = src_cpu_tensor.mutable_data<T>(platform::CPUPlace());
Expand All @@ -60,9 +60,9 @@ static bool CopySameTensorTestMain(const DDim &dims,
}

// Step 2: copy the source tensor to dst place
Tensor dst_cpu_tensor;
phi::DenseTensor dst_cpu_tensor;
{
Tensor src_tensor;
phi::DenseTensor src_tensor;
TensorCopySync(src_cpu_tensor, src_place, &src_tensor);

// The source tensor and dst_tensor is the same
Expand Down
16 changes: 8 additions & 8 deletions paddle/fluid/framework/custom_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -133,8 +133,8 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
for (auto& in_name : inputs) {
VLOG(3) << "Custom Operator: input name - " << in_name;
if (detail::IsDuplicableVar(in_name)) {
// return const std::vector<const Tensor*>
auto vec_x = ctx.MultiInput<Tensor>(in_name);
// return const std::vector<const phi::DenseTensor*>
auto vec_x = ctx.MultiInput<phi::DenseTensor>(in_name);
PADDLE_ENFORCE_NE(vec_x.empty(),
true,
platform::errors::NotFound(
Expand All @@ -161,7 +161,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
}
kernel_ctx.EmplaceBackInputs(std::move(custom_vec_in));
} else {
auto* x = ctx.Input<Tensor>(in_name);
auto* x = ctx.Input<phi::DenseTensor>(in_name);
PADDLE_ENFORCE_NOT_NULL(
x,
platform::errors::NotFound("Input tensor (%s) is nullptr.", in_name));
Expand Down Expand Up @@ -222,7 +222,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,

VLOG(3) << "Custom Operator: push outputs into CustomOpKernelContext.";
// cache the target tensor pointers
std::vector<Tensor*> true_out_ptrs;
std::vector<phi::DenseTensor*> true_out_ptrs;
for (size_t i = 0; i < outputs.size(); ++i) {
auto out_name = outputs[i];
if (detail::IsDuplicableVar(out_name)) {
Expand All @@ -231,7 +231,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
"If custom operator's outputs contains `paddle::Vec("
")` type, "
"it only can hold one output."));
auto vec_out = ctx.MultiOutput<Tensor>(out_name);
auto vec_out = ctx.MultiOutput<phi::DenseTensor>(out_name);
PADDLE_ENFORCE_NE(vec_out.empty(),
true,
platform::errors::NotFound(
Expand All @@ -253,7 +253,7 @@ static void RunKernelFunc(const framework::ExecutionContext& ctx,
}
kernel_ctx.EmplaceBackOutputs(std::move(custom_vec_out));
} else {
auto* out = ctx.Output<Tensor>(out_name);
auto* out = ctx.Output<phi::DenseTensor>(out_name);
PADDLE_ENFORCE_NOT_NULL(out,
platform::errors::NotFound(
"Output tensor (%s) is nullptr.", out_name));
Expand Down Expand Up @@ -431,7 +431,7 @@ class CustomOperator : public OperatorWithKernel {
*/
framework::OpKernelType GetKernelTypeForVar(
const std::string& var_name,
const Tensor& tensor,
const phi::DenseTensor& tensor,
const OpKernelType& expected_kernel_type) const override {
return OpKernelType(expected_kernel_type.data_type_,
expected_kernel_type.place_,
Expand Down Expand Up @@ -511,7 +511,7 @@ class CustomOpMaker : public OpProtoAndCheckerMaker {
AddComment(R"DOC(
Custom Operator.

According to the Tensor operation function implemented by the user
According to the phi::DenseTensor operation function implemented by the user
independently of the framework, it is encapsulated into a framework
operator to adapt to various execution scenarios such as dynamic graph,
mode static graph mode, and inference mode.
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/data_device_transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@ limitations under the License. */
namespace paddle {
namespace framework {

void TransDataDevice(const Tensor &in,
void TransDataDevice(const phi::DenseTensor &in,
const platform::Place &dst_place,
Tensor *out) {
phi::DenseTensor *out) {
VLOG(3) << "DeviceTransform in, src_place " << in.place()
<< " dst_place: " << dst_place;

Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/data_device_transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,9 @@ limitations under the License. */
namespace paddle {
namespace framework {

void TransDataDevice(const Tensor& in,
void TransDataDevice(const phi::DenseTensor& in,
const platform::Place& dst_place,
Tensor* out);
phi::DenseTensor* out);

} // namespace framework
} // namespace paddle
6 changes: 3 additions & 3 deletions paddle/fluid/framework/data_device_transform_test.cu
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ class TestOpWithKernel : public OperatorWithKernel {
} else {
VLOG(3) << "use default kernel";
return OpKernelType(proto::VarType::FP32,
ctx.Input<Tensor>("input")->place());
ctx.Input<phi::DenseTensor>("input")->place());
}
}
};
Expand All @@ -66,7 +66,7 @@ class TestKernel : public OpKernel<float> {
void Compute(const ExecutionContext& ctx) const {
std::cout << ctx.DebugString() << std::endl;

const Tensor* input = ctx.Input<Tensor>("input");
const phi::DenseTensor* input = ctx.Input<phi::DenseTensor>("input");

std::cout << "input place:" << input->place() << std::endl;
auto* output = ctx.Output<framework::LoDTensor>("output");
Expand Down Expand Up @@ -158,7 +158,7 @@ TEST(Operator, CPUtoGPU) {
paddle::platform::DeviceContextPool::Instance();
auto dev_ctx = pool.Get(cuda_place);

paddle::framework::Tensor output_tensor;
phi::DenseTensor output_tensor;
paddle::framework::TensorCopy(output2->Get<paddle::framework::LoDTensor>(),
paddle::platform::CPUPlace(),
*dev_ctx,
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_feed.proto
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ message Slot {
required string type = 2;
optional bool is_dense = 3 [ default = false ];
optional bool is_used = 4 [ default = false ];
repeated int32 shape = 5; // we can define N-D Tensor
repeated int32 shape = 5; // we can define N-D phi::DenseTensor
}

message MultiSlotDesc {
Expand Down
15 changes: 8 additions & 7 deletions paddle/fluid/framework/data_layout_transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,8 @@ void CastDataLayout::apply() {

void TransDataLayout(const OpKernelType& kernel_type_for_var,
const OpKernelType& expected_kernel_type,
const Tensor& in,
Tensor* out) {
const phi::DenseTensor& in,
phi::DenseTensor* out) {
PADDLE_ENFORCE(
platform::places_are_same_class(kernel_type_for_var.place_,
expected_kernel_type.place_),
Expand Down Expand Up @@ -97,7 +97,8 @@ using dnnl::memory;
using dnnl::primitive;
using dnnl::reorder;

void* GetDataFromTensor(const Tensor& tensor, dnnl::memory::data_type type) {
void* GetDataFromTensor(const phi::DenseTensor& tensor,
dnnl::memory::data_type type) {
switch (type) {
case dnnl::memory::data_type::f32:
return platform::to_void_cast(tensor.data<float>());
Expand All @@ -117,8 +118,8 @@ void* GetDataFromTensor(const Tensor& tensor, dnnl::memory::data_type type) {

void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
const OpKernelType& expected_kernel_type,
const Tensor& in,
Tensor* out) {
const phi::DenseTensor& in,
phi::DenseTensor* out) {
auto in_layout = kernel_type_for_var.data_layout_;
auto out_layout = expected_kernel_type.data_layout_;
auto place = expected_kernel_type.place_;
Expand All @@ -139,8 +140,8 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,

void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
DataLayout out_layout,
const Tensor& in,
Tensor* out,
const phi::DenseTensor& in,
phi::DenseTensor* out,
platform::Place place,
bool always_copy) {
// Set default as NCHW in case not specified
Expand Down
22 changes: 11 additions & 11 deletions paddle/fluid/framework/data_layout_transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,12 +38,12 @@ namespace framework {
struct CastDataLayout {
CastDataLayout(const platform::DeviceContext* ctx,
const std::vector<int>& axis,
const framework::Tensor& in,
framework::Tensor* out)
const phi::DenseTensor& in,
phi::DenseTensor* out)
: in_(in), out_(out), ctx_(ctx), axis_(axis) {}

const framework::Tensor in_;
framework::Tensor* out_;
const phi::DenseTensor in_;
phi::DenseTensor* out_;
const platform::DeviceContext* ctx_;
const std::vector<int> axis_;

Expand Down Expand Up @@ -101,26 +101,26 @@ inline MKLDNNDataType ToMKLDNNDataType(proto::VarType::Type type) {

void innerTransDataLayoutFromMKLDNN(DataLayout in_layout,
DataLayout out_layout,
const Tensor& in,
Tensor* out,
const phi::DenseTensor& in,
phi::DenseTensor* out,
platform::Place place,
bool always_copy = false);

void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
const OpKernelType& expected_kernel_type,
const Tensor& in,
Tensor* out);
const phi::DenseTensor& in,
phi::DenseTensor* out);

void* GetDataFromTensor(const Tensor& tensor, MKLDNNDataType type);
void* GetDataFromTensor(const phi::DenseTensor& tensor, MKLDNNDataType type);

#endif

std::vector<int> GetAxis(const DataLayout& from, const DataLayout& to);

void TransDataLayout(const OpKernelType& kernel_type_for_var,
const OpKernelType& expected_kernel_type,
const Tensor& in,
Tensor* out);
const phi::DenseTensor& in,
phi::DenseTensor* out);

} // namespace framework
} // namespace paddle
8 changes: 4 additions & 4 deletions paddle/fluid/framework/data_layout_transform_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@

TEST(DataTransform, DataLayoutFunction) {
auto place = paddle::platform::CPUPlace();
paddle::framework::Tensor in = paddle::framework::Tensor();
paddle::framework::Tensor out = paddle::framework::Tensor();
phi::DenseTensor in = phi::DenseTensor();
phi::DenseTensor out = phi::DenseTensor();
in.mutable_data<double>(phi::make_ddim({2, 3, 1, 2}), place);
in.set_layout(paddle::framework::DataLayout::kNHWC);

Expand Down Expand Up @@ -48,7 +48,7 @@ TEST(DataTransform, DataLayoutFunction) {
#ifdef PADDLE_WITH_MKLDNN
TEST(DataTransformBf16, GetDataFromTensorDNNL) {
auto place = paddle::platform::CPUPlace();
paddle::framework::Tensor in = paddle::framework::Tensor();
phi::DenseTensor in = phi::DenseTensor();
in.mutable_data<paddle::platform::bfloat16>(phi::make_ddim({2, 3, 1, 2}),
place);

Expand All @@ -61,7 +61,7 @@ TEST(DataTransformBf16, GetDataFromTensorDNNL) {

TEST(DataTransformInt32, GetDataFromTensorDNNL) {
auto place = paddle::platform::CPUPlace();
paddle::framework::Tensor in = paddle::framework::Tensor();
phi::DenseTensor in = phi::DenseTensor();
in.mutable_data<int32_t>(phi::make_ddim({2, 3, 1, 2}), place);

void* in_data =
Expand Down
14 changes: 7 additions & 7 deletions paddle/fluid/framework/data_transform.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,19 +31,19 @@ class Variable;
namespace paddle {
namespace framework {

static void PassTensorData(Tensor *from, Tensor *to) {
static void PassTensorData(phi::DenseTensor *from, phi::DenseTensor *to) {
to->ShareDataWith(*from);
*from = Tensor();
*from = phi::DenseTensor();
}

void TransformData(const OpKernelType &expected_kernel_type,
const OpKernelType &kernel_type_for_var,
const Tensor &input_tensor,
Tensor *output_tensor) {
const phi::DenseTensor &input_tensor,
phi::DenseTensor *output_tensor) {
bool transformed = false;
Tensor in;
phi::DenseTensor in;
in.ShareDataWith(input_tensor);
Tensor out;
phi::DenseTensor out;
const DataLayout lin = kernel_type_for_var.data_layout_;
const DataLayout lout = expected_kernel_type.data_layout_;
// do layout transform
Expand Down Expand Up @@ -120,7 +120,7 @@ void TransformData(const OpKernelType &expected_kernel_type,
}

void SetTensorToVariable(const Variable &in_var,
const Tensor &tensor,
const phi::DenseTensor &tensor,
Variable *out_var) {
if (in_var.IsType<LoDTensor>()) {
auto &in_lod_tensor = in_var.Get<LoDTensor>();
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/framework/data_transform.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,14 +35,14 @@ class Variable;

void TransformData(const OpKernelType &expected_kernel_type,
const OpKernelType &kernel_type_for_var,
const Tensor &input_tensor,
Tensor *out);
const phi::DenseTensor &input_tensor,
phi::DenseTensor *out);

/**
* Set OutVar from InVar, except the tensor is shared with `tensor`
*/
void SetTensorToVariable(const Variable &in_var,
const Tensor &tensor,
const phi::DenseTensor &tensor,
Variable *out_var);
} // namespace framework
} // namespace paddle
6 changes: 2 additions & 4 deletions paddle/fluid/framework/data_type_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,13 +20,12 @@
#include "paddle/fluid/framework/tensor.h"

TEST(DataType, float16) {
using paddle::framework::Tensor;
using paddle::platform::CPUPlace;
using paddle::platform::float16;
namespace f = paddle::framework;
f::proto::VarType::Type dtype = f::proto::VarType::FP16;

Tensor tensor;
phi::DenseTensor tensor;
CPUPlace cpu;
tensor.mutable_data(cpu, f::TransToPhiDataType(dtype));

Expand All @@ -43,13 +42,12 @@ TEST(DataType, float16) {
}

TEST(DataType, bfloat16) {
using paddle::framework::Tensor;
using paddle::platform::bfloat16;
using paddle::platform::CPUPlace;
namespace f = paddle::framework;
f::proto::VarType::Type dtype = f::proto::VarType::BF16;

Tensor tensor;
phi::DenseTensor tensor;
CPUPlace cpu;
tensor.mutable_data(cpu, f::TransToPhiDataType(dtype));

Expand Down
Loading