diff --git a/paddle/fluid/operators/detection/CMakeLists.txt b/paddle/fluid/operators/detection/CMakeLists.txt
index c9bee1eb607059..d58c5c6e4b0b61 100644
--- a/paddle/fluid/operators/detection/CMakeLists.txt
+++ b/paddle/fluid/operators/detection/CMakeLists.txt
@@ -40,7 +40,6 @@ else()
 endif()
 
 detection_library(bipartite_match_op SRCS bipartite_match_op.cc)
-detection_library(mine_hard_examples_op SRCS mine_hard_examples_op.cc)
 detection_library(anchor_generator_op SRCS anchor_generator_op.cc
                   anchor_generator_op.cu)
 detection_library(polygon_box_transform_op SRCS polygon_box_transform_op.cc
diff --git a/paddle/fluid/operators/detection/mine_hard_examples_op.cc b/paddle/fluid/operators/detection/mine_hard_examples_op.cc
deleted file mode 100644
index 382705d2879e13..00000000000000
--- a/paddle/fluid/operators/detection/mine_hard_examples_op.cc
+++ /dev/null
@@ -1,412 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/framework/eigen.h"
-#include "paddle/fluid/framework/op_registry.h"
-
-namespace paddle {
-namespace operators {
-
-enum MiningType { kNone = 0, kMaxNegative, kHardExample };
-
-template <typename T>
-bool SortScoreDescend(const std::pair<float, T>& pair1,
-                      const std::pair<float, T>& pair2) {
-  return pair1.first > pair2.first;
-}
-
-inline bool IsEligibleMining(const MiningType mining_type,
-                             const int match_idx,
-                             const float match_dist,
-                             const float neg_dist_threshold) {
-  if (mining_type == MiningType::kMaxNegative) {
-    return match_idx == -1 && match_dist < neg_dist_threshold;
-  } else if (mining_type == MiningType::kHardExample) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-inline MiningType GetMiningType(std::string str) {
-  if (str == "max_negative") {
-    return MiningType::kMaxNegative;
-  } else if (str == "hard_example") {
-    return MiningType::kHardExample;
-  } else {
-    return MiningType::kNone;
-  }
-}
-
-template <typename T, typename DeviceContext>
-class MineHardExamplesKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    auto* in_cls_loss = ctx.Input<phi::DenseTensor>("ClsLoss");
-    auto* in_loc_loss = ctx.Input<phi::DenseTensor>("LocLoss");
-    auto* in_matched_indices = ctx.Input<phi::DenseTensor>("MatchIndices");
-    auto* in_match_dist = ctx.Input<phi::DenseTensor>("MatchDist");
-    float neg_pos_ratio = ctx.Attr<float>("neg_pos_ratio");
-    T neg_dist_threshold =
-        static_cast<T>(ctx.Attr<float>("neg_dist_threshold"));
-    int sample_size = ctx.Attr<int>("sample_size");
-    MiningType mining_type =
-        GetMiningType(ctx.Attr<std::string>("mining_type"));
-
-    auto out_neg_indices = ctx.Output<phi::DenseTensor>("NegIndices");
-    auto out_match_indices =
-        ctx.Output<phi::DenseTensor>("UpdatedMatchIndices");
-
-    framework::TensorCopy(
-        *in_matched_indices, ctx.GetPlace(), out_match_indices);
-
-    int batch_size = static_cast<int>(in_matched_indices->dims()[0]);
-    int prior_num = static_cast<int>(in_matched_indices->dims()[1]);
-
-    auto match_indices = framework::EigenMatrix<int>::From(*in_matched_indices);
-
-    auto match_indices_et =
-        framework::EigenMatrix<int>::From(*out_match_indices);
-
-    auto match_dist = framework::EigenMatrix<T>::From(*in_match_dist);
-
-    const T* cls_loss = in_cls_loss->data<T>();
-    const T* loc_loss = nullptr;
-    if (in_loc_loss) {
-      loc_loss = in_loc_loss->data<T>();
-    }
-
-    std::vector<std::vector<int>> all_neg_indices;
-    std::vector<size_t> batch_starts = {0};
-    for (int n = 0; n < batch_size; ++n) {
-      std::vector<std::pair<float, size_t>> loss_idx;
-      int neg_sel = 0;
-      for (int m = 0; m < prior_num; ++m) {
-        if (IsEligibleMining(mining_type,
-                             match_indices(n, m),
-                             match_dist(n, m),
-                             neg_dist_threshold)) {
-          T loss = cls_loss[n * prior_num + m];
-          if (mining_type == MiningType::kHardExample && loc_loss != nullptr) {
-            loss = cls_loss[n * prior_num + m] + loc_loss[n * prior_num + m];
-          }
-          loss_idx.push_back(std::make_pair(loss, m));
-          ++neg_sel;
-        }
-      }
-
-      if (mining_type == MiningType::kMaxNegative) {
-        int num_pos = 0;
-        for (int m = 0; m < prior_num; ++m) {
-          if (match_indices(n, m) != -1) ++num_pos;
-        }
-        neg_sel = std::min(static_cast<int>(num_pos * neg_pos_ratio),  // NOLINT
-                           neg_sel);
-      } else if (mining_type == MiningType::kHardExample) {
-        neg_sel = std::min(sample_size, neg_sel);
-      }
-
-      std::sort(loss_idx.begin(), loss_idx.end(), SortScoreDescend<size_t>);
-      std::set<int> sel_indices;
-      std::vector<int> neg_indices;
-      std::transform(loss_idx.begin(),
-                     loss_idx.begin() + neg_sel,
-                     std::inserter(sel_indices, sel_indices.begin()),
-                     [](std::pair<float, size_t>& l) -> int {
-                       return static_cast<int>(l.second);
-                     });
-
-      if (mining_type == MiningType::kHardExample) {
-        for (int m = 0; m < prior_num; ++m) {
-          if (match_indices(n, m) > -1) {
-            if (sel_indices.find(m) == sel_indices.end()) {
-              match_indices_et(n, m) = -1;
-            }
-          } else {
-            if (sel_indices.find(m) != sel_indices.end()) {
-              neg_indices.push_back(m);
-            }
-          }
-        }
-      } else {
-        neg_indices.resize(sel_indices.size());
-        std::copy(sel_indices.begin(), sel_indices.end(), neg_indices.begin());
-      }
-
-      all_neg_indices.push_back(neg_indices);
-      batch_starts.push_back(batch_starts.back() + neg_indices.size());
-    }
-
-    framework::LoD out_neg_indices_lod;
-    out_neg_indices_lod.emplace_back(batch_starts);
-    int neg_offset = 0;
-    auto neg_data = out_neg_indices->mutable_data<int>(
-        common::make_ddim({static_cast<int>(batch_starts.back()), 1}),
-        ctx.GetPlace());
-
-    for (auto neg_indices : all_neg_indices) {
-      std::copy(neg_indices.begin(), neg_indices.end(), neg_data + neg_offset);
-      neg_offset += static_cast<int>(neg_indices.size());
-    }
-    out_neg_indices->set_lod(out_neg_indices_lod);
-    return;
-  }
-};
-
-class MineHardExamplesOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(
-        ctx->HasInput("ClsLoss"), "Input", "ClsLoss", "mine_hard_examples");
-    OP_INOUT_CHECK(ctx->HasInput("MatchIndices"),
-                   "Input",
-                   "MatchIndices",
-                   "mine_hard_examples");
-    OP_INOUT_CHECK(
-        ctx->HasInput("MatchDist"), "Input", "MatchDist", "mine_hard_examples");
-    OP_INOUT_CHECK(ctx->HasOutput("NegIndices"),
-                   "Output",
-                   "NegIndices",
-                   "mine_hard_examples");
-    OP_INOUT_CHECK(ctx->HasOutput("UpdatedMatchIndices"),
-                   "Output",
-                   "UpdatedMatchIndices",
-                   "mine_hard_examples");
-
-    auto cls_loss_dims = ctx->GetInputDim("ClsLoss");
-    auto idx_dims = ctx->GetInputDim("MatchIndices");
-    auto dis_dims = ctx->GetInputDim("MatchDist");
-
-    PADDLE_ENFORCE_EQ(cls_loss_dims.size(),
-                      2UL,
-                      phi::errors::InvalidArgument(
-                          "The shape of ClsLoss is [N, Np]. But received %d.",
-                          cls_loss_dims.size()));
-    PADDLE_ENFORCE_EQ(
-        idx_dims.size(),
-        2UL,
-        phi::errors::InvalidArgument(
-            "The shape of MatchIndices is [N, Np]. But received %d.",
-            idx_dims.size()));
-    PADDLE_ENFORCE_EQ(dis_dims.size(),
-                      2UL,
-                      phi::errors::InvalidArgument(
-                          "The shape of MatchDist is [N, Np]. But received %d.",
-                          dis_dims.size()));
-
-    if (ctx->HasInput("LocLoss")) {
-      auto loc_loss_dims = ctx->GetInputDim("LocLoss");
-      PADDLE_ENFORCE_EQ(loc_loss_dims.size(),
-                        2UL,
-                        phi::errors::InvalidArgument(
-                            "The shape of LocLoss is [N, Np]. But received %d.",
-                            loc_loss_dims.size()));
-      if (ctx->IsRuntime()) {
-        PADDLE_ENFORCE_EQ(cls_loss_dims[0],
-                          loc_loss_dims[0],
-                          phi::errors::InvalidArgument(
-                              "Batch size of ClsLoss and LocLoss must be the "
-                              "same. But received batch size of ClsLoss was "
-                              "%d, batch size of LocLoss was %d.",
-                              cls_loss_dims[0],
-                              loc_loss_dims[0]));
-        PADDLE_ENFORCE_EQ(cls_loss_dims[1],
-                          loc_loss_dims[1],
-                          phi::errors::InvalidArgument(
-                              "Prior box number of ClsLoss and LocLoss must be "
-                              "the same. But received box number of ClsLoss "
-                              "was %d, box number of LocLoss was %d.",
-                              cls_loss_dims[1],
-                              loc_loss_dims[1]));
-      }
-    }
-
-    if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(cls_loss_dims[0],
-                        idx_dims[0],
-                        phi::errors::InvalidArgument(
-                            "Batch size of ClsLoss and MatchIndices must be "
-                            "the same. But received batch size of ClsLoss was "
-                            "%d, batch size of MatchIndices was %d.",
-                            cls_loss_dims[0],
-                            idx_dims[0]));
-      PADDLE_ENFORCE_EQ(
-          cls_loss_dims[1],
-          idx_dims[1],
-          phi::errors::InvalidArgument(
-              "Prior box number of ClsLoss and "
-              "MatchIndices must be the same. But received box number of "
-              "ClsLoss was %d, box number of MatchIndices was %d.",
-              cls_loss_dims[1],
-              idx_dims[1]));
-
-      PADDLE_ENFORCE_EQ(cls_loss_dims[0],
-                        dis_dims[0],
-                        phi::errors::InvalidArgument(
-                            "Batch size of ClsLoss and MatchDist must be the "
-                            "same. But received batch size of ClsLoss was %d, "
-                            "batch size of MatchDist was %d.",
-                            cls_loss_dims[0],
-                            dis_dims[0]));
-      PADDLE_ENFORCE_EQ(cls_loss_dims[1],
-                        idx_dims[1],
-                        phi::errors::InvalidArgument(
-                            "Prior box number of ClsLoss and MatchDist must be "
-                            "the same. But received box number of ClsLoss was "
-                            "%d, box number of MatchDist was %d.",
-                            cls_loss_dims[1],
-                            idx_dims[1]));
-    }
-
-    auto mining_type =
-        GetMiningType(ctx->Attrs().Get<std::string>("mining_type"));
-
-    PADDLE_ENFORCE_NE(mining_type,
-                      MiningType::kNone,
-                      phi::errors::InvalidArgument(
-                          "mining_type must be hard_example or max_negative"));
-
-    if (mining_type == MiningType::kMaxNegative) {
-      auto neg_pos_ratio = ctx->Attrs().Get<float>("neg_pos_ratio");
-      auto neg_dist_threshold = ctx->Attrs().Get<float>("neg_dist_threshold");
-      PADDLE_ENFORCE_GT(neg_pos_ratio,
-                        0.0f,
-                        phi::errors::InvalidArgument(
-                            "neg_pos_ratio must greater than zero in "
-                            "max_negative mode. But received %f.",
-                            neg_pos_ratio));
-      PADDLE_ENFORCE_LT(neg_dist_threshold,
-                        1.0f,
-                        phi::errors::InvalidArgument(
-                            "neg_dist_threshold must less than one in "
-                            "max_negative mode. But received %f.",
-                            neg_dist_threshold));
-      PADDLE_ENFORCE_GT(neg_dist_threshold,
-                        0.0f,
-                        phi::errors::InvalidArgument(
-                            "neg_dist_threshold must greater "
-                            "than zero in max_negative mode. But received %f.",
-                            neg_dist_threshold));
-    } else if (mining_type == MiningType::kHardExample) {
-      auto sample_size = ctx->Attrs().Get<int>("sample_size");
-      PADDLE_ENFORCE_GT(
-          sample_size,
-          0,
-          phi::errors::InvalidArgument("sample_size must greater than zero in "
-                                       "hard_example mode. But received %d.",
-                                       sample_size));
-    }
-
-    ctx->SetOutputDim("UpdatedMatchIndices", idx_dims);
-    // The first dimension of NegIndices will be set correcttly in Compute.
-    ctx->SetOutputDim("NegIndices", {-1, 1});
-  }
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return phi::KernelKey(
-        OperatorWithKernel::IndicateVarDataType(ctx, "ClsLoss"),
-        platform::CPUPlace());
-  }
-};
-
-class MineHardExamplesOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput(
-        "ClsLoss",
-        "(Tensor, default Tensor<float>), The classification loss with shape "
-        "[N, Np], N is the batch size and Np is the number of prior box.");
-    AddInput("LocLoss",
-             "(Tensor, optional, default Tensor<float>), The localization loss "
-             "with shape [N, Np], N is the batch size and Np is the number of "
-             "prior box.")
-        .AsDispensable();
-    AddInput("MatchIndices",
-             "(Tensor, Tensor<int>), Matched indices with shape [N, Np], N is "
-             "the batch size and Np is the number of prior box. "
-             "MatchIndices[i][j] equal -1 means the j-th prior box in i-th "
-             "instance does not match any entity, otherwise means it is "
-             "matched to row.");
-    AddInput("MatchDist",
-             "(Tensor, default Tensor<float>) Matched indices with shape [N, "
-             "Np], N is the batch size and Np is the number of prior box.");
-    AddAttr<float>("neg_pos_ratio",
-                   "(float) The ratio of the negative box to the positive "
-                   "box. Use only when mining_type is max_negative.")
-        .SetDefault(1.0);
-    AddAttr<float>("neg_dist_threshold",
-                   "(float) The negative overlap upper bound for the unmatched "
-                   "predictions. Use only when mining_type is max_negative.")
-        .SetDefault(0.5);
-    AddAttr<int>("sample_size",
-                 "(float) The max sample size of negative box. Use only when "
-                 "mining_type is hard_example.")
-        .SetDefault(0);
-    AddAttr<std::string>("mining_type",
-                         "(float) The mining algorithm name, the value is "
-                         "hard_example or max_negative.")
-        .SetDefault("max_negative")
-        .InEnum({"hard_example", "max_negative"});
-
-    AddOutput("NegIndices",
-              "(phi::DenseTensor) The output of negative example indices. "
-              "a phi::DenseTensor "
-              "with shape [Neg, 1]. The size of lod[0] minus 1 is batch size, "
-              "and each element is the prior box index. "
-              "For example, the batch size is 2, the lod is [[0, 1, 2]], "
-              "the sample 0's box 1(MatchIndices[0][1]) is selected, "
-              "and sample 1's box 0 is selected. The output NegIndices is "
-              "[[1], [0]].");
-
-    AddOutput("UpdatedMatchIndices",
-              "(Tensor) The output of updated MatchIndices, a tensor with "
-              "shape [N, Np]. Only update when mining_type is "
-              "hard_example. The input MatchIndices elements will be update to "
-              "-1 when it is not in the candidate high loss list of negative "
-              "examples.");
-
-    AddComment(R"DOC(
-Mine hard examples Operator.
-This operator implements hard example mining to select a subset of negative box indices.
-For each image, selects the box with highest losses. subject to the condition that the
-box cannot have an Matcht > neg_dist_threshold when mining_type is max_negative.
-The selected number is min(sample_size, max_negative_box_number) when mining_type is
-hard_example, or min(neg_pos_ratio * positive_box_number, max_negative_box_number)
-when mining_type is max_negative, where the max_negative_box_number is the count of
-MatchIndices elements with value -1.
-
-)DOC");
-  }
-};
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-REGISTER_OPERATOR(
-    mine_hard_examples,
-    ops::MineHardExamplesOp,
-    ops::MineHardExamplesOpMaker,
-    paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
-    paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
-
-PD_REGISTER_STRUCT_KERNEL(mine_hard_examples,
-                          CPU,
-                          ALL_LAYOUT,
-                          ops::MineHardExamplesKernel,
-                          float,
-                          double) {}
diff --git a/test/legacy_test/test_mine_hard_examples_op.py b/test/legacy_test/test_mine_hard_examples_op.py
deleted file mode 100644
index f3f1ec4d76ad7c..00000000000000
--- a/test/legacy_test/test_mine_hard_examples_op.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-from op_test import OpTest
-
-
-class TestMineHardExamplesOp(OpTest):
-    def set_data(self):
-        self.init_test_data()
-        self.inputs = {
-            'ClsLoss': self.cls_loss,
-            'LocLoss': self.loc_loss,
-            'MatchIndices': self.match_indices,
-            'MatchDist': self.match_dis,
-        }
-
-        self.attrs = {
-            'neg_pos_ratio': self.neg_pos_ratio,
-            'neg_overlap': self.neg_overlap,
-            'sample_size': self.sample_size,
-            'mining_type': self.mining_type,
-        }
-
-        self.outputs = {
-            'NegIndices': (self.neg_indices, self.neg_indices_lod),
-            'UpdatedMatchIndices': self.updated_match_indices,
-        }
-
-    def test_check_output(self):
-        # NODE(yjjiang11): This op will be deprecated.
-        self.check_output(check_dygraph=False)
-
-    def test_check_grad(self):
-        return
-
-    def setUp(self):
-        self.op_type = "mine_hard_examples"
-        self.set_data()
-
-    def init_test_data(self):
-        self.neg_pos_ratio = 1.0
-        self.neg_overlap = 0.5
-        self.sample_size = 0
-        self.mining_type = "max_negative"
-        self.cls_loss = np.array([[0.1, 0.1, 0.3], [0.3, 0.1, 0.1]]).astype(
-            'float64'
-        )
-
-        self.loc_loss = np.array([[0.1, 0.2, 0.3], [0.3, 0.4, 0.1]]).astype(
-            'float64'
-        )
-
-        self.match_dis = np.array([[0.2, 0.4, 0.8], [0.1, 0.9, 0.3]]).astype(
-            'float64'
-        )
-
-        self.match_indices = np.array([[0, -1, -1], [-1, 0, -1]]).astype(
-            'int32'
-        )
-
-        self.updated_match_indices = self.match_indices
-
-        self.neg_indices_lod = [[1, 1]]
-        self.neg_indices = np.array([[1], [0]]).astype('int32')
-
-
-class TestMineHardExamplesOpHardExample(TestMineHardExamplesOp):
-    def init_test_data(self):
-        super().init_test_data()
-        self.mining_type = "hard_example"
-        self.sample_size = 2
-
-        self.cls_loss = np.array([[0.5, 0.1, 0.3], [0.3, 0.1, 0.1]]).astype(
-            'float64'
-        )
-
-        self.loc_loss = np.array([[0.2, 0.2, 0.3], [0.3, 0.1, 0.2]]).astype(
-            'float64'
-        )
-
-        self.match_indices = np.array([[0, -1, -1], [-1, 0, -1]]).astype(
-            'int32'
-        )
-
-        self.updated_match_indices = np.array(
-            [[0, -1, -1], [-1, -1, -1]]
-        ).astype('int32')
-
-        self.neg_indices_lod = [[1, 2]]
-        self.neg_indices = np.array([[2], [0], [2]]).astype('int32')
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/test/white_list/compile_vs_runtime_white_list.py b/test/white_list/compile_vs_runtime_white_list.py
index 1c3959cdae11f3..83767fae1484f4 100644
--- a/test/white_list/compile_vs_runtime_white_list.py
+++ b/test/white_list/compile_vs_runtime_white_list.py
@@ -22,7 +22,6 @@
     'sequence_pool',
     'sequence_slice',
     'generate_proposals',
-    'mine_hard_examples',
     'retinanet_detection_output',
     'ctc_align',
     'fusion_seqpool_cvm_concat',
diff --git a/tools/parallel_UT_rule.py b/tools/parallel_UT_rule.py
index db192979cee608..f5904c563ffa33 100755
--- a/tools/parallel_UT_rule.py
+++ b/tools/parallel_UT_rule.py
@@ -343,7 +343,6 @@
     'test_fleet_rolemaker_new',
     'test_imperative_base',
    'dist_multi_trainer_test',
-    'test_mine_hard_examples_op',
     'test_post_training_quantization_lstm_model',
     'aes_cipher_test',
     'test_analyzer_zerocopytensor_tensor',
@@ -1752,7 +1751,6 @@
     'test_mkldnn_conv_concat_relu_mkldnn_fuse_pass',
     'test_mkldnn_conv_bias_fuse_pass',
     'test_mkldnn_conv_activation_fuse_pass',
-    'test_mine_hard_examples_op',
     'test_memory_usage',
     'test_matrix_nms_op',
     'test_matmul_transpose_reshape_fuse_pass',
diff --git a/tools/static_mode_white_list.py b/tools/static_mode_white_list.py
index 48f7178fa23dca..374737e21536d6 100755
--- a/tools/static_mode_white_list.py
+++ b/tools/static_mode_white_list.py
@@ -316,7 +316,6 @@
     'test_memory_usage',
     'test_merge_ids_op',
     'test_meshgrid_op',
-    'test_mine_hard_examples_op',
     'test_minus_op',
     'test_mish_op',
     'test_modified_huber_loss_op',
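
Reviewer note (not part of the patch): for anyone auditing this removal, the selection rule that the deleted kernel applied in its max_negative mode can be sketched in a few lines of NumPy. This is an illustrative re-implementation only, not the operator itself; the helper name mine_max_negative is invented for the example, the hard_example mode is not covered, and the sample data simply mirrors the deleted unit test, whose expected NegIndices is [[1], [0]].

import numpy as np

def mine_max_negative(cls_loss, match_indices, match_dist,
                      neg_pos_ratio=1.0, neg_dist_threshold=0.5):
    # Per image: collect unmatched priors (match_indices == -1) whose match
    # distance is below the threshold, rank them by classification loss, and
    # keep at most neg_pos_ratio * (number of matched priors) of them.
    neg_indices = []
    for n in range(cls_loss.shape[0]):
        eligible = [(cls_loss[n, m], m)
                    for m in range(cls_loss.shape[1])
                    if match_indices[n, m] == -1
                    and match_dist[n, m] < neg_dist_threshold]
        num_pos = int((match_indices[n] != -1).sum())
        num_sel = min(int(num_pos * neg_pos_ratio), len(eligible))
        eligible.sort(key=lambda loss_and_idx: loss_and_idx[0], reverse=True)
        neg_indices.append(sorted(m for _, m in eligible[:num_sel]))
    return neg_indices

# Data taken from the deleted TestMineHardExamplesOp; prints [[1], [0]].
cls_loss = np.array([[0.1, 0.1, 0.3], [0.3, 0.1, 0.1]])
match_indices = np.array([[0, -1, -1], [-1, 0, -1]])
match_dist = np.array([[0.2, 0.4, 0.8], [0.1, 0.9, 0.3]])
print(mine_max_negative(cls_loss, match_indices, match_dist))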