Replace LoDTensor with phi::DenseTensor in fluid\operators #48417

Merged 16 commits on Nov 29, 2022
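Every file in this diff applies the same mechanical change: drop the file-local alias `using LoDTensor = phi::DenseTensor;` and spell out `phi::DenseTensor` at each declaration and template use site. A minimal standalone sketch of the before/after pattern (the `DenseTensor` stub below is a stand-in, not Paddle's actual class):

```cpp
#include <cstdint>
#include <iostream>

// Stand-in for the real phi::DenseTensor, which lives in paddle/phi.
namespace phi {
struct DenseTensor {
  int64_t numel = 0;
};
}  // namespace phi

// Before this PR, operator files hid the phi type behind a legacy name:
//   using LoDTensor = phi::DenseTensor;
//   auto* x = ctx.Input<LoDTensor>("X");
// After this PR, the phi type is named directly:
//   auto* x = ctx.Input<phi::DenseTensor>("X");

int main() {
  phi::DenseTensor t;  // the only tensor type; LoD metadata lives inside it
  t.numel = 8;
  std::cout << t.numel << "\n";
  return 0;
}
```

Once the alias is gone, each operator states its real dependency on the phi tensor library instead of the legacy fluid name.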
20 changes: 11 additions & 9 deletions paddle/fluid/operators/array_to_lod_tensor_op.cc
@@ -157,7 +157,7 @@ class ArrayToLoDTensorOp : public framework::OperatorBase {
                 return table_items[a].index < table_items[b].index;
               });
 
-    // Build LoDTensor `out`
+    // Build phi::DenseTensor `out`
     framework::LoD *out_lod = out->mutable_lod();
     out_lod->clear();
     auto prefix_lod = rank_table.coarse_lod();
@@ -215,16 +215,18 @@ class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
   void Make() override {
     AddInput("X",
              "(std::vector<LodTensor>) A vector of tensors that is going to "
-             "be casted to a big LoDTensor.");
+             "be casted to a big phi::DenseTensor.");
     AddInput("RankTable",
              "(LoDRankTable) RankTable provides the coarse lod information to "
-             "build the output LoDTensor. See "
+             "build the output phi::DenseTensor. See "
              "'paddle/framework/lod_rank_table.h' for more details.");
-    AddOutput("Out", "(LoDTensor) The LoDTensor formed by input tensor array.");
+    AddOutput("Out",
+              "(phi::DenseTensor) The phi::DenseTensor formed by input tensor "
+              "array.");
     AddComment(
-        R"DOC(This Op build a big LoDTensor from a std::vector<LoDTensor>
+        R"DOC(This Op build a big phi::DenseTensor from a std::vector<phi::DenseTensor>
 and a LoDRankTable. It is supposed to be used in getting dynamic RNN's
-outputs back to a normal LoDTensor. The std::vector<LoDTensor>
+outputs back to a normal phi::DenseTensor. The std::vector<phi::DenseTensor>
 would be the output of RNN Op and the LoDRankTable would be build
 with RNN's input.)DOC");
   }
@@ -247,9 +249,9 @@ class ArrayToLoDTensorInferShape : public framework::InferShapeBase {
     // detail kernel implementation.
     context->SetOutputDim("Out", context->GetInputDim("X"));
 
-    // The output LoDTensor's lod_level should be input X's lod_level + 1.
-    // For compile-time, we call SetLoDLevel to set output's lod_level.
-    // For runtime, output LoDTensor's lod is determined by input X's lod and
+    // The output phi::DenseTensor's lod_level should be input X's lod_level
+    // + 1. For compile-time, we call SetLoDLevel to set output's lod_level. For
+    // runtime, output phi::DenseTensor's lod is determined by input X's lod and
     // the level specified by input RandTable.
    // We cannot get X's detail lod and RankTable's level in this function, so
     // leave this work to the detail kernel implementation.
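The DOC string above is the clearest statement of what this op does: splice a `std::vector` of tensors into one big tensor and rebuild the output LoD. A rough standalone sketch of that splicing idea (a flat `std::vector<int64_t>` stands in for each tensor, and the rank-table reordering is elided):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in "tensor": a flat vector of ids. The output LoD is a single level
// of offsets, one entry marking where each input tensor ends.
using Tensor = std::vector<int64_t>;

Tensor ConcatWithLoD(const std::vector<Tensor>& array,
                     std::vector<size_t>* out_lod) {
  out_lod->assign(1, 0);  // offsets start at 0
  Tensor out;
  for (const Tensor& t : array) {
    out.insert(out.end(), t.begin(), t.end());
    out_lod->push_back(out.size());  // record where each piece ends
  }
  return out;
}

int main() {
  std::vector<size_t> lod;
  Tensor out = ConcatWithLoD({{1, 2, 3}, {4, 5}}, &lod);
  std::cout << out.size() << " elements, " << lod.size() - 1 << " pieces\n";
  return 0;
}
```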
6 changes: 2 additions & 4 deletions paddle/fluid/operators/assert_op.cc
@@ -41,8 +41,6 @@ const char kSummarize[] = "summarize";
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
-
 class AssertOp : public framework::OperatorBase {
  public:
   AssertOp(const std::string &type,
@@ -58,7 +56,7 @@ class AssertOp : public framework::OperatorBase {
     PADDLE_ENFORCE_NOT_NULL(cond_var_ptr,
                             platform::errors::NotFound(
                                 "Input(Condition) of AssertOp is not found."));
-    const LoDTensor &cond = cond_var_ptr->Get<LoDTensor>();
+    const phi::DenseTensor &cond = cond_var_ptr->Get<phi::DenseTensor>();
     PADDLE_ENFORCE_EQ(
         cond.dims(),
         phi::make_ddim({1}),
@@ -78,7 +76,7 @@ class AssertOp : public framework::OperatorBase {
     const std::vector<std::string> &x_names = Inputs(kData);
     for (const std::string &name : x_names) {
       const framework::Variable *x_var_ptr = scope.FindVar(name);
-      const phi::DenseTensor &x_tensor = x_var_ptr->Get<LoDTensor>();
+      const phi::DenseTensor &x_tensor = x_var_ptr->Get<phi::DenseTensor>();
       formatter.Print(x_tensor, name);
     }
 
13 changes: 8 additions & 5 deletions paddle/fluid/operators/assign_op.cc
@@ -79,16 +79,19 @@ class AssignInferVarType : public framework::VarTypeInference {
 class AssignOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
-    AddInput("X",
-             "(LoDTensor, SelectedRows or LoDTensorArray) The input variable "
-             "could be LoDTensor, SelectedRows or LoDTensorArray.")
+    AddInput(
+        "X",
+        "(phi::DenseTensor, SelectedRows or phi::DenseTensorArray) The input "
+        "variable "
+        "could be phi::DenseTensor, SelectedRows or phi::DenseTensorArray.")
         .AsDispensable();
     AddOutput("Out",
-              "(LoDTensor, SelectedRows or LoDTensorArray) The type of output "
+              "(phi::DenseTensor, SelectedRows or phi::DenseTensorArray) The "
+              "type of output "
               "is the same as input X.");
     AddComment(R"DOC(Assign Operator
 
-Out = X, when type in [LoDTensor/SelectedRows/LoDTensorArray]
+Out = X, when type in [phi::DenseTensor/SelectedRows/phi::DenseTensorArray]
 raise error if the type is not listed above.
 )DOC");
   }
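The Assign doc above promises `Out = X` for any of the three supported container types. A standalone sketch of type-preserving assignment (using `std::variant` as a stand-in for Paddle's `framework::Variable` dispatch, with toy types throughout):

```cpp
#include <cstdint>
#include <iostream>
#include <variant>
#include <vector>

namespace phi {
struct DenseTensor {
  std::vector<float> data;
};
}  // namespace phi
struct SelectedRows {
  std::vector<int64_t> rows;
};
using DenseTensorArray = std::vector<phi::DenseTensor>;

// Out = X for whichever alternative X currently holds.
using Input = std::variant<phi::DenseTensor, SelectedRows, DenseTensorArray>;

Input Assign(const Input& x) { return x; }  // copying preserves the held type

int main() {
  Input x = phi::DenseTensor{{1.f, 2.f}};
  Input out = Assign(x);
  std::cout << std::boolalpha << "holds DenseTensor: "
            << std::holds_alternative<phi::DenseTensor>(out) << "\n";
  return 0;
}
```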
11 changes: 6 additions & 5 deletions paddle/fluid/operators/assign_pos_op.cu
@@ -59,13 +59,14 @@ class AssignPosCUDAKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& context) const override {
     // assign pos decides which tokens should be fetched belong to specially
     // counter orderingly.
-    auto cum_count = context.Input<LoDTensor>(
+    auto cum_count = context.Input<phi::DenseTensor>(
         "cum_count");  // (counter number) int32 | int64
-    auto numbers =
-        context.Input<LoDTensor>("X");  // (batch_size * seq_len, topk) int32
+    auto numbers = context.Input<phi::DenseTensor>(
+        "X");  // (batch_size * seq_len, topk) int32
     auto eff_num_len =
-        context.Input<LoDTensor>("eff_num_len");  // (sum(cum_count))
-    auto out = context.Output<LoDTensor>("Out");  // (cum_count) value ranges
+        context.Input<phi::DenseTensor>("eff_num_len");  // (sum(cum_count))
+    auto out =
+        context.Output<phi::DenseTensor>("Out");  // (cum_count) value ranges
                                                   // from 0 to batch_size *
                                                   // seq_len * topk
     auto place = context.GetPlace();
2 changes: 0 additions & 2 deletions paddle/fluid/operators/assign_pos_op.h
@@ -20,8 +20,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
-
 template <typename T>
 class AssignPosOpCPUKernel : public framework::OpKernel<T> {
  public:
31 changes: 17 additions & 14 deletions paddle/fluid/operators/attention_lstm_op.cc
@@ -205,11 +205,12 @@ framework::OpKernelType AttentionLSTMOp::GetExpectedKernelType(
 }
 
 void AttentionLSTMOpMaker::Make() {
-  AddInput("X",
-           "(LoDTensor) the input is a LodTensor, which support "
-           "variable-time length input sequence. The underlying tensor in "
-           "this LoDTensor is a matrix with shape (T X M), where T is the "
-           "total time steps in this mini-batch, M is the dim size of x.");
+  AddInput(
+      "X",
+      "(phi::DenseTensor) the input is a LodTensor, which support "
+      "variable-time length input sequence. The underlying tensor in "
+      "this phi::DenseTensor is a matrix with shape (T X M), where T is the "
+      "total time steps in this mini-batch, M is the dim size of x.");
   AddInput("C0",
            "(Tensor) LSTM C0"
            "This is a tensor with shape (N x D), where N is the batch size, D "
@@ -247,12 +248,14 @@ void AttentionLSTMOpMaker::Make() {
            "Note: we should add the bias of hidden and context accorindg to "
            "the same gate: "
            "{B_forget, B_input, B_output, B_cell}");
-  AddOutput("Hidden",
-            "(LoDTensor) (same as LSTMOp) the hidden state of LSTM operator. "
-            "The shape is (T x D), and lod is the same with the `Input`.");
-  AddOutput("Cell",
-            "(LoDTensor) (same as LSTMOp) the cell state of LSTM operator. "
-            "The shape is (T x D), and lod is the same with the `Input`.");
+  AddOutput(
+      "Hidden",
+      "(phi::DenseTensor) (same as LSTMOp) the hidden state of LSTM operator. "
+      "The shape is (T x D), and lod is the same with the `Input`.");
+  AddOutput(
+      "Cell",
+      "(phi::DenseTensor) (same as LSTMOp) the cell state of LSTM operator. "
+      "The shape is (T x D), and lod is the same with the `Input`.");
   AddOutput("AttentionedX",
             "(Tensor) shape is (T x 1), the result after X * AttentionWeight,"
             " where T is the total time steps in this mini-batch,"
@@ -339,7 +342,7 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext& ctx) const override {
     using DeviceContext = phi::CPUContext;
 
-    auto* x = ctx.Input<LoDTensor>("X");
+    auto* x = ctx.Input<phi::DenseTensor>("X");
     auto* h0 = ctx.Input<phi::DenseTensor>("H0");
     auto* c0 = ctx.Input<phi::DenseTensor>("C0");
     auto* atten_w = ctx.Input<phi::DenseTensor>("AttentionWeight");
@@ -350,8 +353,8 @@ class AttentionLSTMKernel : public framework::OpKernel<T> {
     auto* lstm_w = ctx.Input<phi::DenseTensor>("LSTMWeight");
     auto* lstm_b = ctx.Input<phi::DenseTensor>("LSTMBias");
 
-    auto* hidden_out = ctx.Output<LoDTensor>("Hidden");
-    auto* cell_out = ctx.Output<LoDTensor>("Cell");
+    auto* hidden_out = ctx.Output<phi::DenseTensor>("Hidden");
+    auto* cell_out = ctx.Output<phi::DenseTensor>("Cell");
     auto* atted_x = ctx.Output<phi::DenseTensor>("AttentionedX");
     auto* fc_out = ctx.Output<phi::DenseTensor>("AttentionFCOut");
     auto* lstm_x = ctx.Output<phi::DenseTensor>("LSTMX");
1 change: 0 additions & 1 deletion paddle/fluid/operators/attention_lstm_op.h
@@ -18,7 +18,6 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-using LoDTensor = phi::DenseTensor;
 using Tensor = phi::DenseTensor;
 
 class AttentionLSTMOp : public framework::OperatorWithKernel {
8 changes: 4 additions & 4 deletions paddle/fluid/operators/batch_norm_op.cc
@@ -383,8 +383,8 @@ framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
   const Tensor *t = nullptr;
   if (var->IsType<Tensor>()) {
     t = &var->Get<Tensor>();
-  } else if (var->IsType<LoDTensor>()) {
-    t = &var->Get<LoDTensor>();
+  } else if (var->IsType<phi::DenseTensor>()) {
+    t = &var->Get<phi::DenseTensor>();
   }
   if (t == nullptr) {
     PADDLE_THROW(
@@ -525,8 +525,8 @@ framework::OpKernelType BatchNormDoubleGradOp::GetExpectedKernelType(
   const Tensor *t = nullptr;
   if (var->IsType<Tensor>()) {
     t = &var->Get<Tensor>();
-  } else if (var->IsType<LoDTensor>()) {
-    t = &var->Get<LoDTensor>();
+  } else if (var->IsType<phi::DenseTensor>()) {
+    t = &var->Get<phi::DenseTensor>();
   }
   if (t == nullptr) {
     PADDLE_THROW(
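A detail worth noting in these two hunks: `Tensor` and `LoDTensor` were both aliases of `phi::DenseTensor` by this point (see the `using` declarations in batch_norm_op.h below), so the `else if` branch was already testing the same type as the `if` above it; the PR only makes that explicit. A standalone sketch of the lookup pattern (this `Variable` is a toy built on `std::any`, not Paddle's `framework::Variable`):

```cpp
#include <any>
#include <iostream>
#include <typeinfo>
#include <utility>

namespace phi {
struct DenseTensor {
  int dims = 0;
};
}  // namespace phi

// Toy variable holder; the real framework::Variable works differently.
class Variable {
 public:
  template <typename T>
  bool IsType() const {
    return value_.type() == typeid(T);
  }
  template <typename T>
  const T& Get() const {
    return *std::any_cast<T>(&value_);
  }
  template <typename T>
  void Set(T v) {
    value_ = std::move(v);
  }

 private:
  std::any value_;
};

int main() {
  Variable var;
  var.Set(phi::DenseTensor{3});

  // A single branch suffices: Tensor and LoDTensor named the same
  // phi::DenseTensor type, so one IsType check covers both.
  const phi::DenseTensor* t = nullptr;
  if (var.IsType<phi::DenseTensor>()) {
    t = &var.Get<phi::DenseTensor>();
  }
  std::cout << (t ? t->dims : -1) << "\n";  // prints 3
  return 0;
}
```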
1 change: 0 additions & 1 deletion paddle/fluid/operators/batch_norm_op.h
@@ -28,7 +28,6 @@ namespace paddle {
 namespace operators {
 
 using Tensor = phi::DenseTensor;
-using LoDTensor = phi::DenseTensor;
 using DataLayout = phi::DataLayout;
 
 template <typename T>
14 changes: 8 additions & 6 deletions paddle/fluid/operators/beam_search_decode_op.h
@@ -23,8 +23,8 @@ namespace operators {
 struct BeamSearchDecodeFunctor {
   BeamSearchDecodeFunctor(const LoDTensorArray& step_ids,
                           const LoDTensorArray& step_scores,
-                          LoDTensor* id_tensor,
-                          LoDTensor* score_tensor,
+                          phi::DenseTensor* id_tensor,
+                          phi::DenseTensor* score_tensor,
                           size_t beam_size,
                           int end_id)
       : beam_size_(beam_size),
@@ -119,8 +119,8 @@ struct BeamSearchDecodeFunctor {
   const LoDTensorArray& step_scores_origin_;
   LoDTensorArray step_ids_ = LoDTensorArray();
   LoDTensorArray step_scores_ = LoDTensorArray();
-  LoDTensor* id_tensor_;
-  LoDTensor* score_tensor_;
+  phi::DenseTensor* id_tensor_;
+  phi::DenseTensor* score_tensor_;
 };
 
 template <typename DeviceContext, typename T>
@@ -164,8 +164,10 @@ class BeamSearchDecodeOpKernel : public framework::OpKernel<T> {
     int end_id = context.Attr<int>("end_id");
 
     // prepare output
-    LoDTensor* sentenceIds = context.Output<LoDTensor>("SentenceIds");
-    LoDTensor* sentenceScores = context.Output<LoDTensor>("SentenceScores");
+    phi::DenseTensor* sentenceIds =
+        context.Output<phi::DenseTensor>("SentenceIds");
+    phi::DenseTensor* sentenceScores =
+        context.Output<phi::DenseTensor>("SentenceScores");
 
     BeamSearchDecodeFunctor bs(
         *ids, *scores, sentenceIds, sentenceScores, beam_size, end_id);
21 changes: 10 additions & 11 deletions paddle/fluid/operators/beam_search_decode_op_def.h
@@ -23,7 +23,6 @@ limitations under the License. */
 
 namespace paddle {
 namespace operators {
-using LoDTensor = phi::DenseTensor;
 using LoDTensorArray = framework::LoDTensorArray;
 
 // all the lod have 2 levels.
@@ -54,15 +53,15 @@ struct BeamSearchDecoder {
    * with word score.
    * Param:
    *  sentence_vector_list: sentence_vector for each source sentence.
-   *  id_tensor: result LoDTensor for sentences of id.
-   *  score_tensor: result LoDTensor for sentences of score.
+   *  id_tensor: result phi::DenseTensor for sentences of id.
+   *  score_tensor: result phi::DenseTensor for sentences of score.
    *  reverse: whether ids of sentence in sentence_vector_list is reversed
    *  sort_by_score: whether to sort hypotheses of each sentence by scores.
    */
   void ConvertSentenceVectorToLodTensor(
       std::vector<SentenceVector<T>> sentence_vector_list,
-      LoDTensor* id_tensor,
-      LoDTensor* score_tensor,
+      phi::DenseTensor* id_tensor,
+      phi::DenseTensor* score_tensor,
       bool reverse = true,
      bool sort_by_score = true) const;
 
@@ -72,8 +71,8 @@ struct BeamSearchDecoder {
    */
   void Backtrace(const LoDTensorArray& step_ids,
                  const LoDTensorArray& step_scores,
-                 LoDTensor* id_tensor,
-                 LoDTensor* score_tensor) const;
+                 phi::DenseTensor* id_tensor,
+                 phi::DenseTensor* score_tensor) const;
 
   size_t beam_size_;
   int end_id_;
@@ -82,8 +81,8 @@ template <typename T>
 void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor(
     std::vector<SentenceVector<T>> sentence_vector_list,
-    LoDTensor* id_tensor,
-    LoDTensor* score_tensor,
+    phi::DenseTensor* id_tensor,
+    phi::DenseTensor* score_tensor,
     bool reverse,
     bool sort_by_score) const {
   size_t src_num = sentence_vector_list.size();
@@ -158,8 +157,8 @@ void BeamSearchDecoder<T>::ConvertSentenceVectorToLodTensor(
 template <typename T>
 void BeamSearchDecoder<T>::Backtrace(const LoDTensorArray& step_ids,
                                      const LoDTensorArray& step_scores,
-                                     LoDTensor* id_tensor,
-                                     LoDTensor* score_tensor) const {
+                                     phi::DenseTensor* id_tensor,
+                                     phi::DenseTensor* score_tensor) const {
   PADDLE_ENFORCE_NE(
       step_ids.empty(),
       true,
Expand Down
9 changes: 4 additions & 5 deletions paddle/fluid/operators/beam_search_decode_op_test.cc
@@ -18,7 +18,6 @@ limitations under the License. */
 
 using CPUPlace = paddle::platform::CPUPlace;
 using LoD = paddle::framework::LoD;
-using LoDTensor = phi::DenseTensor;
 using LoDTensorArray = paddle::framework::LoDTensorArray;
 
 template <typename T>
@@ -59,7 +58,7 @@ void GenerateExample(const std::vector<size_t>& level_0,
   lod.push_back(level_1);
 
   // Ids
-  LoDTensor tensor_id;
+  phi::DenseTensor tensor_id;
   tensor_id.set_lod(lod);
   tensor_id.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
@@ -69,7 +68,7 @@
   }
 
   // Scores
-  LoDTensor tensor_score;
+  phi::DenseTensor tensor_score;
   tensor_score.set_lod(lod);
   tensor_score.Resize({static_cast<int64_t>(data.size())});
   // malloc memory
@@ -124,8 +123,8 @@ void BeamSearchDecodeTestFrame() {
 
   BeamSearchDecoder<T> helper(2, 1);  // beam_size = 2, end_id = 1
 
-  LoDTensor id_tensor;
-  LoDTensor score_tensor;
+  phi::DenseTensor id_tensor;
+  phi::DenseTensor score_tensor;
   helper.Backtrace(ids, scores, &id_tensor, &score_tensor);
 
   LoD lod = id_tensor.lod();
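`GenerateExample` above builds test tensors whose two-level LoD encodes source sentences at level 0 and candidate sentences at level 1. A standalone sketch of that setup (stand-in `LoD` and `DenseTensor` types; the real tensor allocates typed memory via `mutable_data` rather than owning a `std::vector`):

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-ins for framework::LoD and phi::DenseTensor.
using LoD = std::vector<std::vector<size_t>>;

struct DenseTensor {
  LoD lod;
  std::vector<int64_t> data;
  void set_lod(const LoD& l) { lod = l; }
  void Resize(int64_t n) { data.resize(static_cast<size_t>(n)); }
};

int main() {
  // Level 0: 2 sources; level 1: 4 candidate sentences covering 10 tokens.
  LoD lod = {{0, 2, 4}, {0, 3, 5, 8, 10}};

  DenseTensor ids;
  ids.set_lod(lod);
  ids.Resize(static_cast<int64_t>(lod[1].back()));  // one slot per token
  for (size_t i = 0; i < ids.data.size(); ++i) {
    ids.data[i] = static_cast<int64_t>(i);
  }

  std::cout << "sources: " << ids.lod[0].size() - 1
            << ", sentences: " << ids.lod[1].size() - 1
            << ", tokens: " << ids.data.size() << "\n";
  return 0;
}
```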
15 changes: 8 additions & 7 deletions paddle/fluid/operators/beam_search_decode_op_xpu.cc
@@ -62,20 +62,21 @@ class BeamSearchDecodeXPUKernel : public framework::OpKernel<T> {
     int end_id = context.Attr<int>("end_id");
 
     // prepare output
-    LoDTensor* sentenceIds = nullptr;
-    LoDTensor* sentenceScores = nullptr;
+    phi::DenseTensor* sentenceIds = nullptr;
+    phi::DenseTensor* sentenceScores = nullptr;
 
-    LoDTensor* sentenceIds_temp = context.Output<LoDTensor>("SentenceIds");
-    LoDTensor* sentenceScores_temp =
-        context.Output<LoDTensor>("SentenceScores");
+    phi::DenseTensor* sentenceIds_temp =
+        context.Output<phi::DenseTensor>("SentenceIds");
+    phi::DenseTensor* sentenceScores_temp =
+        context.Output<phi::DenseTensor>("SentenceScores");
 
     if (platform::is_xpu_place(ids->at(0).place())) {
-      sentenceIds = new LoDTensor();
+      sentenceIds = new phi::DenseTensor();
       sentenceIds->set_lod(sentenceIds_temp->lod());
     }
 
     if (platform::is_xpu_place(ids->at(0).place())) {
-      sentenceScores = new LoDTensor();
+      sentenceScores = new phi::DenseTensor();
       sentenceScores->set_lod(sentenceScores_temp->lod());
     }
 