
Commit

delete useless annotation
YuanRisheng committed Nov 3, 2021
1 parent bfb723c commit 890dae6
Showing 3 changed files with 5 additions and 37 deletions.
32 changes: 0 additions & 32 deletions paddle/fluid/operators/flatten_op.cc
@@ -79,14 +79,6 @@ class FlattenOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     auto input_data_type =
         framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
-
-    // #ifdef PADDLE_WITH_MKLDNN
-    //     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
-    //       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
-    //                                      framework::DataLayout::kMKLDNN,
-    //                                      framework::LibraryType::kMKLDNN);
-    //     }
-    // #endif
     return framework::OpKernelType(input_data_type, ctx.GetPlace());
   }
 };
@@ -157,14 +149,6 @@ class FlattenGradOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     auto input_data_type = framework::OperatorWithKernel::IndicateVarDataType(
         ctx, framework::GradVarName("Out"));
-
-    // #ifdef PADDLE_WITH_MKLDNN
-    //     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
-    //       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
-    //                                      framework::DataLayout::kMKLDNN,
-    //                                      framework::LibraryType::kMKLDNN);
-    //     }
-    // #endif
     return framework::OpKernelType(input_data_type, ctx.GetPlace());
   }
 };
@@ -227,14 +211,6 @@ class Flatten2Op : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     auto input_data_type =
         framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
-
-    // #ifdef PADDLE_WITH_MKLDNN
-    //     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
-    //       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
-    //                                      framework::DataLayout::kMKLDNN,
-    //                                      framework::LibraryType::kMKLDNN);
-    //     }
-    // #endif
     return framework::OpKernelType(input_data_type, ctx.GetPlace());
   }
 };
@@ -285,14 +261,6 @@ class Flatten2GradOp : public framework::OperatorWithKernel {
       const framework::ExecutionContext &ctx) const override {
     auto input_data_type = framework::OperatorWithKernel::IndicateVarDataType(
         ctx, framework::GradVarName("Out"));
-
-    // #ifdef PADDLE_WITH_MKLDNN
-    //     if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
-    //       return framework::OpKernelType(input_data_type, ctx.GetPlace(),
-    //                                      framework::DataLayout::kMKLDNN,
-    //                                      framework::LibraryType::kMKLDNN);
-    //     }
-    // #endif
     return framework::OpKernelType(input_data_type, ctx.GetPlace());
   }
 };
6 changes: 3 additions & 3 deletions paddle/pten/kernels/xpu/manipulation.cc
@@ -19,7 +19,7 @@
 namespace pten {
 
 template <typename T>
-void Flatten(const XPUDeviceContext& dev_ctx,
+void Flatten(const XPUContext& dev_ctx,
              const DenseTensor& x,
              int start_axis,
              int stop_axis,
@@ -33,14 +33,14 @@ void Flatten(const XPUDeviceContext& dev_ctx,
 // Output Tensor,
 // is there a more flexible way to deal with this case?
 template <typename T>
-void FlattenWithXShape(const XPUDeviceContext& dev_ctx,
+void FlattenWithXShape(const XPUContext& dev_ctx,
                        const DenseTensor& x,
                        int start_axis,
                        int stop_axis,
                        DenseTensor* out,
                        DenseTensor* xshape) {
   Flatten<T>(dev_ctx, x, start_axis, stop_axis, out);
-  const auto& in_dims = x.meta().dims;
+  const auto& in_dims = x.dims();
   std::vector<int64_t> xshape_dims(in_dims.size() + 1);
   xshape_dims[0] = 0;
   for (int i = 0; i < in_dims.size(); ++i) {
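Note on the XShape hunk above: the diff is cut off inside the for loop, so the rest of the construction is not visible here. As a rough, illustrative sketch only (the helper name MakeXShapeDims and the use of std::vector in place of Paddle's DDim are assumptions, not part of this commit), the loop is expected to prepend a 0 and copy each input dim one slot to the right:

#include <cstdint>
#include <vector>

// Standalone sketch, not the Paddle code: given input dims [d0, d1, ..., dn-1],
// the XShape tensor records [0, d0, d1, ..., dn-1] so the grad op can later
// recover the original shape of x.
std::vector<int64_t> MakeXShapeDims(const std::vector<int64_t>& in_dims) {
  std::vector<int64_t> xshape_dims(in_dims.size() + 1);
  xshape_dims[0] = 0;  // leading 0 is a placeholder, not a real dimension
  for (size_t i = 0; i < in_dims.size(); ++i) {
    xshape_dims[i + 1] = in_dims[i];  // assumed loop body: shift each dim right by one
  }
  return xshape_dims;
}

// Example: MakeXShapeDims({2, 3, 4}) returns {0, 2, 3, 4}.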
4 changes: 2 additions & 2 deletions paddle/pten/kernels/xpu/manipulation.h
@@ -24,10 +24,10 @@ limitations under the License. */
 
 namespace pten {
 
-using XPUDeviceContext = paddle::platform::XPUDeviceContext;
+using XPUContext = paddle::platform::XPUDeviceContext;
 
 template <typename T>
-void Flatten(const XPUDeviceContext& dev_ctx,
+void Flatten(const XPUContext& dev_ctx,
              const DenseTensor& x,
              int start_axis,
              int stop_axis,
