[MLU] add fluid MLUOps prior_box (#46585)
cifar10 authored Oct 8, 2022
1 parent 146d70c commit ff37e48
Showing 5 changed files with 386 additions and 1 deletion.
2 changes: 1 addition & 1 deletion paddle/fluid/operators/detection/CMakeLists.txt
@@ -46,7 +46,7 @@ if(WITH_XPU)
 elseif(WITH_MLU)
   detection_library(iou_similarity_op SRCS iou_similarity_op.cc
                     iou_similarity_op_mlu.cc)
-  detection_library(prior_box_op SRCS prior_box_op.cc)
+  detection_library(prior_box_op SRCS prior_box_op.cc prior_box_op_mlu.cc)
   detection_library(yolo_box_op SRCS yolo_box_op.cc yolo_box_op_mlu.cc)
 elseif(WITH_ASCEND_CL)
   detection_library(iou_similarity_op SRCS iou_similarity_op.cc
104 changes: 104 additions & 0 deletions paddle/fluid/operators/detection/prior_box_op_mlu.cc
@@ -0,0 +1,104 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/detection/prior_box_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"

namespace paddle {
namespace operators {

template <typename T>
class PriorBoxMLUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<phi::DenseTensor>("Input");
auto* image = ctx.Input<phi::DenseTensor>("Image");
auto* boxes = ctx.Output<phi::DenseTensor>("Boxes");
auto* variances = ctx.Output<phi::DenseTensor>("Variances");
float step_w = ctx.Attr<float>("step_w");
float step_h = ctx.Attr<float>("step_h");
float offset = ctx.Attr<float>("offset");
bool clip = ctx.Attr<bool>("clip");
bool min_max_aspect_ratios_order =
ctx.Attr<bool>("min_max_aspect_ratios_order");

int im_width = image->dims()[3];
int im_height = image->dims()[2];
int width = input->dims()[3];
int height = input->dims()[2];

auto aspect_ratios = ctx.Attr<std::vector<float>>("aspect_ratios");
bool flip = ctx.Attr<bool>("flip");
std::vector<float> new_aspect_ratios;
ExpandAspectRatios(aspect_ratios, flip, &new_aspect_ratios);
auto& dev_ctx = ctx.template device_context<platform::MLUDeviceContext>();
phi::DenseTensor ratios;
paddle::framework::TensorFromVector(new_aspect_ratios, dev_ctx, &ratios);
MLUOpTensorDesc new_aspect_ratios_desc(ratios);

auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes");
phi::DenseTensor min;
paddle::framework::TensorFromVector(min_sizes, dev_ctx, &min);
MLUOpTensorDesc min_sizes_desc(min);

auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes");
phi::DenseTensor max;
paddle::framework::TensorFromVector(max_sizes, dev_ctx, &max);
MLUOpTensorDesc max_sizes_desc(max);

auto variances_attr = ctx.Attr<std::vector<float>>("variances");
phi::DenseTensor var_tensor;
paddle::framework::TensorFromVector(variances_attr, dev_ctx, &var_tensor);
MLUOpTensorDesc variances_attr_desc(var_tensor);

auto place = ctx.GetPlace();

boxes->mutable_data<T>(place);
variances->mutable_data<T>(place);

MLUOpTensorDesc var_desc(*variances);
MLUOpTensorDesc output_desc(*boxes);
MLUOP::OpPriorBox(ctx,
min_sizes_desc.get(),
GetBasePtr(&min),
new_aspect_ratios_desc.get(),
GetBasePtr(&ratios),
variances_attr_desc.get(),
GetBasePtr(&var_tensor),
max_sizes_desc.get(),
GetBasePtr(&max),
height,
width,
im_height,
im_width,
step_h,
step_w,
offset,
clip,
min_max_aspect_ratios_order,
output_desc.get(),
GetBasePtr(boxes),
var_desc.get(),
GetBasePtr(variances));
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_MLU_KERNEL(prior_box, ops::PriorBoxMLUKernel<float>);
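
For reference, prior_box follows the SSD convention: the aspect-ratio list is expanded (1.0 first, then each distinct ratio and, when flip is set, its reciprocal), every expanded ratio is paired with every min_size, and each max_size contributes one extra box per cell. Below is a minimal standalone sketch of that bookkeeping, assuming the standard convention; it is an illustration, not Paddle's ExpandAspectRatios helper, and every name in it is made up for the example.

// Hypothetical illustration of the prior-count bookkeeping behind the kernel
// above; assumes the usual SSD prior_box convention, not Paddle internals.
#include <cmath>
#include <cstdio>
#include <vector>

// Expand aspect ratios the way SSD-style prior_box ops do: start from 1.0,
// keep each distinct ratio, and add its reciprocal when flip is true.
std::vector<float> ExpandRatiosSketch(const std::vector<float>& ratios,
                                      bool flip) {
  std::vector<float> out{1.0f};
  for (float ar : ratios) {
    bool seen = false;
    for (float existing : out) {
      if (std::fabs(ar - existing) < 1e-6f) {
        seen = true;
        break;
      }
    }
    if (!seen) {
      out.push_back(ar);
      if (flip) out.push_back(1.0f / ar);
    }
  }
  return out;
}

int main() {
  const std::vector<float> aspect_ratios{2.0f, 3.0f};
  const std::vector<float> min_sizes{30.0f, 60.0f};
  const std::vector<float> max_sizes{60.0f, 90.0f};
  const int height = 19, width = 19;  // feature-map dims, input->dims()[2]/[3]

  const auto expanded = ExpandRatiosSketch(aspect_ratios, /*flip=*/true);
  // One box per (min_size, expanded ratio) pair, plus one per max_size.
  const size_t num_priors =
      min_sizes.size() * expanded.size() + max_sizes.size();

  // Boxes and Variances are laid out per feature-map cell: (H, W, num_priors, 4).
  std::printf("num_priors per cell = %zu\n", num_priors);
  std::printf("Boxes/Variances shape = (%d, %d, %zu, 4)\n", height, width,
              num_priors);
  return 0;
}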
49 changes: 49 additions & 0 deletions paddle/fluid/operators/mlu/mlu_baseop.cc
@@ -5458,5 +5458,54 @@ MLURNNDesc::~MLURNNDesc() {
scores));
}

/* static */ void MLUOP::OpPriorBox(
const ExecutionContext& ctx,
const mluOpTensorDescriptor_t min_sizes_desc,
const void* min_sizes,
const mluOpTensorDescriptor_t aspect_ratios_desc,
const void* aspect_ratios,
const mluOpTensorDescriptor_t variances_desc,
const void* variances,
const mluOpTensorDescriptor_t max_sizes_desc,
const void* max_sizes,
const int height,
const int width,
const int im_height,
const int im_width,
const float step_h,
const float step_w,
const float offset,
const bool clip,
const bool min_max_aspect_ratios_order,
const mluOpTensorDescriptor_t output_desc,
void* output,
const mluOpTensorDescriptor_t var_desc,
void* var) {
mluOpHandle_t handle = GetMLUOpHandleFromCTX(ctx);

PADDLE_ENFORCE_MLU_SUCCESS(mluOpPriorBox(handle,
min_sizes_desc,
min_sizes,
aspect_ratios_desc,
aspect_ratios,
variances_desc,
variances,
max_sizes_desc,
max_sizes,
height,
width,
im_height,
im_width,
step_h,
step_w,
offset,
clip,
min_max_aspect_ratios_order,
output_desc,
output,
var_desc,
var));
}

} // namespace operators
} // namespace paddle
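
The wrapper above forwards its arguments to mluOpPriorBox under PADDLE_ENFORCE_MLU_SUCCESS, so the intended semantics are those of the standard SSD prior-box computation: the cell at (h, w) is anchored at ((w + offset) * step_w, (h + offset) * step_h) in input-image pixels, each min_size/aspect-ratio pair yields a box with sides min_size * sqrt(ar) and min_size / sqrt(ar), each max_size adds a square box of side sqrt(min_size * max_size), coordinates are normalized by the image dims, and clip restricts them to [0, 1]. The following is a hedged host-side sketch of that geometry under the usual convention, not a reimplementation of the mluOp kernel; the emission order, which depends on min_max_aspect_ratios_order, is ignored here.

// Illustrative per-cell reference, assuming the common SSD prior_box
// convention; not derived from the mluOp kernel source. Requires C++17.
#include <algorithm>
#include <cmath>
#include <vector>

struct BoxSketch {
  float xmin, ymin, xmax, ymax;
};

std::vector<BoxSketch> PriorsForCellSketch(
    int h, int w, float step_h, float step_w, float offset, float im_h,
    float im_w, const std::vector<float>& min_sizes,
    const std::vector<float>& max_sizes,
    const std::vector<float>& expanded_ratios, bool clip) {
  std::vector<BoxSketch> priors;
  const float cx = (w + offset) * step_w;  // box center in image pixels
  const float cy = (h + offset) * step_h;
  auto emit = [&](float bw, float bh) {
    BoxSketch b{(cx - bw / 2.f) / im_w, (cy - bh / 2.f) / im_h,
                (cx + bw / 2.f) / im_w, (cy + bh / 2.f) / im_h};
    if (clip) {  // restrict normalized coordinates to [0, 1]
      b.xmin = std::clamp(b.xmin, 0.f, 1.f);
      b.ymin = std::clamp(b.ymin, 0.f, 1.f);
      b.xmax = std::clamp(b.xmax, 0.f, 1.f);
      b.ymax = std::clamp(b.ymax, 0.f, 1.f);
    }
    priors.push_back(b);
  };
  for (size_t i = 0; i < min_sizes.size(); ++i) {
    // One box per expanded aspect ratio at this min_size.
    for (float ar : expanded_ratios) {
      emit(min_sizes[i] * std::sqrt(ar), min_sizes[i] / std::sqrt(ar));
    }
    // One extra square box whose side is the geometric mean of min and max.
    if (i < max_sizes.size()) {
      const float s = std::sqrt(min_sizes[i] * max_sizes[i]);
      emit(s, s);
    }
  }
  return priors;
}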
23 changes: 23 additions & 0 deletions paddle/fluid/operators/mlu/mlu_baseop.h
@@ -2312,6 +2312,29 @@ class MLUOP {
void* boxes,
const mluOpTensorDescriptor_t scores_desc,
void* scores);

static void OpPriorBox(const ExecutionContext& ctx,
const mluOpTensorDescriptor_t min_sizes_desc,
const void* min_sizes,
const mluOpTensorDescriptor_t aspect_ratios_desc,
const void* aspect_ratios,
const mluOpTensorDescriptor_t variances_desc,
const void* variances,
const mluOpTensorDescriptor_t max_sizes_desc,
const void* max_sizes,
const int height,
const int width,
const int im_height,
const int im_width,
const float step_h,
const float step_w,
const float offset,
const bool clip,
const bool min_max_aspect_ratios_order,
const mluOpTensorDescriptor_t output_desc,
void* output,
const mluOpTensorDescriptor_t var_desc,
void* var);
};
const std::map<const std::string, std::pair<std::vector<int>, std::vector<int>>>
TransPermMap = {
