From 6e18b3708cd6552c2adf3c80e20dafa90559085d Mon Sep 17 00:00:00 2001 From: wanghuancoder Date: Tue, 10 Oct 2023 16:58:01 +0800 Subject: [PATCH] [CleanOps]del smooth_l1_loss_op op (#57893) * del smooth_l1_loss_op op --- paddle/fluid/operators/smooth_l1_loss_op.cc | 231 ------------------ paddle/fluid/operators/smooth_l1_loss_op.cu | 20 -- paddle/fluid/operators/smooth_l1_loss_op.h | 185 -------------- paddle/fluid/operators/unity_build_rule.cmake | 20 +- paddle/fluid/pybind/eager_generator.h | 1 - test/legacy_test/test_smooth_l1_loss_op.py | 118 --------- tools/parallel_UT_rule.py | 2 - tools/static_mode_white_list.py | 1 - 8 files changed, 5 insertions(+), 573 deletions(-) delete mode 100644 paddle/fluid/operators/smooth_l1_loss_op.cc delete mode 100644 paddle/fluid/operators/smooth_l1_loss_op.cu delete mode 100644 paddle/fluid/operators/smooth_l1_loss_op.h delete mode 100644 test/legacy_test/test_smooth_l1_loss_op.py diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc deleted file mode 100644 index c1abfcb3e436f1..00000000000000 --- a/paddle/fluid/operators/smooth_l1_loss_op.cc +++ /dev/null @@ -1,231 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/operators/smooth_l1_loss_op.h" - -#include - -namespace paddle { -namespace operators { - -class SmoothL1LossOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SmoothL1Loss"); - OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "SmoothL1Loss"); - - auto x_dims = ctx->GetInputDim("X"); - auto y_dims = ctx->GetInputDim("Y"); - bool check = true; - if ((!ctx->IsRuntime()) && - (phi::product(x_dims) <= 0 || phi::product(y_dims) <= 0)) { - check = false; - } - if (check) { - PADDLE_ENFORCE_EQ( - x_dims, - y_dims, - platform::errors::InvalidArgument( - "Input(X) ans Input(Y) of SmoothL1LossOp should " - "have the same size, but received X dim is %s, Y dim is %s", - x_dims.to_str(), - y_dims.to_str())); - } - PADDLE_ENFORCE_GE(x_dims.size(), - 2, - platform::errors::InvalidArgument( - "The tensor rank of Input(X) of SmoothL1LossOp " - "should not be less than 2, but received %d.", - x_dims.size())); - if (ctx->HasInput("InsideWeight")) { - PADDLE_ENFORCE_EQ(ctx->HasInput("OutsideWeight"), - true, - platform::errors::InvalidArgument( - "If weights are provided, must specify both " - "inside and outside weights.")); - auto dims = ctx->GetInputDim("InsideWeight"); - bool check = true; - if ((!ctx->IsRuntime()) && - (phi::product(dims) <= 0 || phi::product(x_dims) <= 0)) { - check = false; - } - if (check) { - PADDLE_ENFORCE_EQ(x_dims, - dims, - platform::errors::InvalidArgument( - "Input(X) ans Input(InsideWeight) of " - "SmoothL1LossOp should have the same size, but " - "received X dim is %s, InsideWeight dim is %s", - x_dims.to_str(), - dims.to_str())); - } - - dims = ctx->GetInputDim("OutsideWeight"); - check = true; - if ((!ctx->IsRuntime()) && - (phi::product(dims) <= 0 || phi::product(x_dims) <= 0)) { - check = false; - } - if (check) { - PADDLE_ENFORCE_EQ(x_dims, - dims, - platform::errors::InvalidArgument( - "Input(X) ans Input(OutsideWeight) of " - "SmoothL1LossOp should have the same size, but " - "received X dim is %s, OutsideWeight dim is %s", - x_dims.to_str(), - dims.to_str())); - } - } - - ctx->SetOutputDim("Diff", x_dims); - // loss is a two-rank tensor - ctx->SetOutputDim("Out", {x_dims[0], 1}); - } -}; - -class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker { - public: - void Make() override { - AddInput("X", - "(Tensor, default Tensor) A tensor with rank at least 2. " - "The input value of smooth l1 loss op with shape " - "[batch_size, dim1, ..., dimN]."); - AddInput("Y", - "(Tensor, default Tensor) A tensor with rank at least 2. " - "The target value of smooth l1 loss op with same shape as X."); - AddInput("InsideWeight", - "(Tensor, default Tensor) A tensor with rank at least 2. " - "This input is optional and should have same shape with X. " - "If provided, the result of (X - Y) will be multiplied " - "by this tensor element by element.") - .AsDispensable(); - AddInput("OutsideWeight", - "(Tensor, default Tensor) A tensor with rank at least 2. " - "This input is optional and should have same shape with X. " - "If provided, the out smooth l1 loss will be multiplied by this " - "tensor element by element.") - .AsDispensable(); - AddOutput("Diff", "Intermediate variable to cache InsideWeight * (X - Y).") - .AsIntermediate(); - AddOutput("Out", - "(Tensor, default Tensor) A tensor with rank be 2. 
" - "The output smooth l1 loss with shape [batch_size, 1]."); - AddAttr("sigma", - "Hyper parameter of smooth l1 loss op." - "A float scalar with default value 3.0.") - .SetDefault(1.0); - AddComment(R"DOC( -Smooth L1 Loss Operator. - -This operator computes the smooth l1 loss for X and Y. -The operator takes the first dimension of X and Y as batch size. -For each instance, it computes the smooth l1 loss element by element first -and then sums all the losses. So the shape of Out is [batch_size, 1]. - -The equation is: -$$ -Out_{\sigma}(X, Y)_i = \begin{cases} -0.5 * (\sigma * (X_i - Y_i)) ^ 2 -\quad |X_i - Y_i| \lt \frac{1} {{\sigma} ^ 2} \\ -\frac{|X_i - Y_i| - 0.5}{{\sigma}^2}, -\quad otherwise -\end{cases} -$$ - -In the above equation, $Out_{\sigma}(X, Y)_i$, $X_i$ and $Y_i$ represent the ith -element of Out, X and Y. - -)DOC"); - } -}; - -class SmoothL1LossGradOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - auto in_dims = ctx->GetInputDim("Diff"); - auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); - - PADDLE_ENFORCE_GE( - out_dims.size(), - 2, - platform::errors::InvalidArgument( - "The tensor rank of Input(Out@Grad) should be 2, but received %d.", - out_dims.size())); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ( - out_dims[0], - in_dims[0], - platform::errors::InvalidArgument( - "The 1st dimension of Input(Out@Grad) must be " - "same as input in SmoothL1LossGradOp, but received %d and %d.", - out_dims[0], - in_dims[0])); - PADDLE_ENFORCE_EQ(out_dims[1], - 1, - platform::errors::InvalidArgument( - "The 2nd dimension of Input(Out@Grad) must be 1 in " - "SmoothL1LossGradOp, but received %d.", - out_dims[1])); - } - - auto x_grad_name = framework::GradVarName("X"); - auto y_grad_name = framework::GradVarName("Y"); - if (ctx->HasOutput(x_grad_name)) { - ctx->SetOutputDim(x_grad_name, in_dims); - } - if (ctx->HasOutput(y_grad_name)) { - ctx->SetOutputDim(y_grad_name, in_dims); - } - } -}; - -template -class SmoothL1LossGradMaker : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr op) const override { - op->SetType("smooth_l1_loss_grad"); - op->SetInput("InsideWeight", this->Input("InsideWeight")); - op->SetInput("OutsideWeight", this->Input("OutsideWeight")); - op->SetInput("Diff", this->Output("Diff")); - op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - - op->SetAttrMap(this->Attrs()); - - op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y")); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -REGISTER_OPERATOR(smooth_l1_loss, - ops::SmoothL1LossOp, - ops::SmoothL1LossOpMaker, - ops::SmoothL1LossGradMaker, - ops::SmoothL1LossGradMaker); -REGISTER_OPERATOR(smooth_l1_loss_grad, ops::SmoothL1LossGradOp); -PD_REGISTER_STRUCT_KERNEL( - smooth_l1_loss, CPU, ALL_LAYOUT, ops::SmoothL1LossKernel, float) {} -PD_REGISTER_STRUCT_KERNEL( - smooth_l1_loss_grad, CPU, ALL_LAYOUT, ops::SmoothL1LossGradKernel, float) {} diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cu b/paddle/fluid/operators/smooth_l1_loss_op.cu deleted file mode 100644 index 31d528855ccbef..00000000000000 --- a/paddle/fluid/operators/smooth_l1_loss_op.cu +++ /dev/null @@ -1,20 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ -#include "paddle/fluid/operators/smooth_l1_loss_op.h" - -namespace ops = paddle::operators; -PD_REGISTER_STRUCT_KERNEL( - smooth_l1_loss, GPU, ALL_LAYOUT, ops::SmoothL1LossKernel, float) {} -PD_REGISTER_STRUCT_KERNEL( - smooth_l1_loss_grad, GPU, ALL_LAYOUT, ops::SmoothL1LossGradKernel, float) {} diff --git a/paddle/fluid/operators/smooth_l1_loss_op.h b/paddle/fluid/operators/smooth_l1_loss_op.h deleted file mode 100644 index bc57087d931ae7..00000000000000 --- a/paddle/fluid/operators/smooth_l1_loss_op.h +++ /dev/null @@ -1,185 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/phi/core/hostdevice.h" - -namespace paddle { -namespace operators { - -template -using EigenVector = framework::EigenVector; -template -using EigenMatrix = framework::EigenMatrix; - -template -struct SmoothL1LossForward { - HOSTDEVICE SmoothL1LossForward(const T& sigma2) : sigma2(sigma2) {} - - HOSTDEVICE T operator()(const T& val) const { - T abs_val = std::abs(val); - if (abs_val < 1.0 / sigma2) { - return 0.5 * val * val * sigma2; - } else { - return abs_val - 0.5 / sigma2; - } - } - - T sigma2; -}; - -template -class SmoothL1LossKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* in0 = context.Input("X"); - auto* in1 = context.Input("Y"); - auto* in2 = context.Input("InsideWeight"); - auto* in3 = context.Input("OutsideWeight"); - auto* out0 = context.Output("Diff"); - auto* out1 = context.Output("Out"); - - out0->mutable_data(context.GetPlace()); - out1->mutable_data(context.GetPlace()); - auto* place = - context.template device_context().eigen_device(); - - auto sigma = static_cast(context.Attr("sigma")); - T sigma2 = sigma * sigma; - bool has_weight = (in2 != nullptr) && (in3 != nullptr); - - auto x = EigenVector::Flatten(*in0); - auto y = EigenVector::Flatten(*in1); - auto diff = EigenVector::Flatten(*out0); - - diff.device(*place) = x - y; - // multiply inside weight - if (has_weight) { - auto inside_weight = EigenVector::Flatten(*in2); - // cache diff, reused in bp - diff.device(*place) = diff * inside_weight; - } - - auto in_counts = in0->numel(); - phi::DenseTensor ptensor_errors; - ptensor_errors.mutable_data({static_cast(in_counts)}, - context.GetPlace()); - auto errors = 
EigenVector::Flatten(ptensor_errors); - // apply smooth l1 forward - errors.device(*place) = diff.unaryExpr(SmoothL1LossForward(sigma2)); - - // multiply outside weight - if (has_weight) { - auto outside_weight = EigenVector::Flatten(*in3); - errors.device(*place) = errors * outside_weight; - } - auto loss = EigenVector::Flatten(*out1); - // first dimension of 'X' is the number of samples - auto mat_dims = - phi::make_ddim({static_cast(in0->dims()[0]), - static_cast(in_counts / in0->dims()[0])}); - auto errors_mat_view = EigenMatrix::From(ptensor_errors, mat_dims); - loss.device(*place) = errors_mat_view.sum(Eigen::array({{1}})); - } -}; - -template -struct SmoothL1LossBackward { - HOSTDEVICE SmoothL1LossBackward(const T& sigma2) : sigma2(sigma2) {} - - HOSTDEVICE T operator()(const T& val) const { - T abs_val = std::abs(val); - if (abs_val < 1.0 / sigma2) { - return sigma2 * val; - } else { - return (0 < val) - (val < 0); - } - } - - T sigma2; -}; - -template -class SmoothL1LossGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* in0 = context.Input("InsideWeight"); - auto* in1 = context.Input("OutsideWeight"); - auto* in2 = context.Input("Diff"); - auto* og = context.Input(framework::GradVarName("Out")); - auto sigma = static_cast(context.Attr("sigma")); - T sigma2 = sigma * sigma; - bool has_weight = (in0 != nullptr) && (in1 != nullptr); - - auto* place = - context.template device_context().eigen_device(); - - auto in_dims = in2->dims(); - auto counts = in2->numel(); - auto cols = counts / in_dims[0]; - auto mat_dims = - phi::make_ddim({static_cast(in_dims[0]), static_cast(cols)}); - - phi::DenseTensor ptensor_diff; - ptensor_diff.mutable_data({static_cast(counts)}, - context.GetPlace()); - auto diff = EigenVector::Flatten(ptensor_diff); - // apply smooth l1 backwoard - diff.device(*place) = EigenVector::Flatten(*in2).unaryExpr( - SmoothL1LossBackward(sigma2)); - - // compute weights - phi::DenseTensor ptensor_weights; - ptensor_weights.mutable_data(mat_dims, context.GetPlace()); - auto weights = EigenMatrix::From(ptensor_weights); - // initialize to 1.0 - weights.device(*place) = weights.constant(static_cast(1.0)); - if (has_weight) { - auto inside_weight = EigenMatrix::From(*in0, mat_dims); - auto outside_weight = EigenMatrix::From(*in1, mat_dims); - weights.device(*place) = inside_weight * outside_weight; - } - - // compute gradients - auto out_grad = EigenMatrix::From(*og); - auto diff_mat_view = EigenMatrix::From(ptensor_diff, mat_dims); - auto gradients = out_grad.broadcast( - Eigen::array({{1, static_cast(cols)}})) * - weights * diff_mat_view; - - auto* out0 = context.Output(framework::GradVarName("X")); - auto* out1 = context.Output(framework::GradVarName("Y")); - - if (out0) { - out0->mutable_data(context.GetPlace()); - auto x_grad = EigenMatrix::From(*out0, mat_dims); - x_grad.device(*place) = gradients; - } - - if (out1) { - out1->mutable_data(context.GetPlace()); - auto y_grad = EigenMatrix::From(*out1, mat_dims); - y_grad.device(*place) = -1 * gradients; - } - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/unity_build_rule.cmake b/paddle/fluid/operators/unity_build_rule.cmake index 8f4b64125479a2..2e1b6f86d6370c 100644 --- a/paddle/fluid/operators/unity_build_rule.cmake +++ b/paddle/fluid/operators/unity_build_rule.cmake @@ -276,7 +276,6 @@ register_unity_group( unfold_op.cc) register_unity_group( cc - smooth_l1_loss_op.cc 
uniform_random_batch_size_like_op.cc unique_op.cc unique_with_counts_op.cc @@ -421,12 +420,8 @@ register_unity_group( rank_loss_op.cu real_op.cu) register_unity_group( - cu - log_loss_op.cu - lookup_table_v2_op.cu - masked_select_op.cu - shuffle_channel_op.cu - softmax_cudnn_op.cu) + cu log_loss_op.cu lookup_table_v2_op.cu masked_select_op.cu + shuffle_channel_op.cu softmax_cudnn_op.cu) register_unity_group( cu dequantize_log_op.cu @@ -492,15 +487,10 @@ register_unity_group( arg_max_op.cu) register_unity_group(cu row_conv_op.cu tril_triu_op.cu unfold_op.cu arg_min_op.cu crop_tensor_op.cu) -register_unity_group( - cu - smooth_l1_loss_op.cu - uniform_random_batch_size_like_op.cu - unstack_op.cu - where_index_op.cu - where_op.cu - layer_norm_op.cu) +register_unity_group(cu uniform_random_batch_size_like_op.cu unstack_op.cu + where_index_op.cu where_op.cu layer_norm_op.cu) register_unity_group(cu expand_as_op.cu stack_op.cu) + # The following groups are to make better use of `/MP` which MSVC's parallel # compilation instruction when compiling in Unity Build. register_unity_group(cu activation_op.cu) diff --git a/paddle/fluid/pybind/eager_generator.h b/paddle/fluid/pybind/eager_generator.h index 9e7a2ec10063b4..0d495966793836 100644 --- a/paddle/fluid/pybind/eager_generator.h +++ b/paddle/fluid/pybind/eager_generator.h @@ -118,7 +118,6 @@ std::map> op_ins_map = { {"fake_quantize_dequantize_moving_average_abs_max", {"X", "InScale", "InAccum", "InState"}}, {"nll_loss", {"X", "Label", "Weight"}}, - {"smooth_l1_loss", {"X", "Y", "InsideWeight", "OutsideWeight"}}, {"bilinear_tensor_product", {"X", "Y", "Weight", "Bias"}}, {"gather", {"X", "Index", "Axis"}}, {"repeat_interleave", {"X", "RepeatsTensor"}}, diff --git a/test/legacy_test/test_smooth_l1_loss_op.py b/test/legacy_test/test_smooth_l1_loss_op.py deleted file mode 100644 index fb3fd40c0a8237..00000000000000 --- a/test/legacy_test/test_smooth_l1_loss_op.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from op_test import OpTest - - -def smooth_l1_loss_forward(val, sigma2): - abs_val = abs(val) - if abs_val < 1.0 / sigma2: - return 0.5 * val * val * sigma2 - else: - return abs_val - 0.5 / sigma2 - - -class TestSmoothL1LossOp1(OpTest): - def setUp(self): - self.op_type = "smooth_l1_loss" - dims = (5, 20) - self.inputs = { - 'X': np.random.random(dims).astype("float32"), - 'Y': np.random.random(dims).astype("float32"), - } - sigma = 3.0 - self.attrs = {'sigma': sigma} - sigma2 = sigma * sigma - diff = self.inputs['X'] - self.inputs['Y'] - loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2).sum(1) - loss = loss.reshape((dims[0], 1)) - self.outputs = { - 'Diff': diff.astype('float32'), - 'Out': loss.astype('float32'), - } - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02) - - def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], - 'Out', - max_relative_error=0.03, - no_grad_set=set("X"), - ) - - def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], - 'Out', - max_relative_error=0.03, - no_grad_set=set('Y'), - ) - - -class TestSmoothL1LossOp2(OpTest): - def setUp(self): - self.op_type = "smooth_l1_loss" - dims = (5, 20) - self.inputs = { - 'X': np.random.random(dims).astype("float32"), - 'Y': np.random.random(dims).astype("float32"), - 'InsideWeight': np.random.random(dims).astype("float32"), - 'OutsideWeight': np.random.random(dims).astype("float32"), - } - sigma = 3.0 - self.attrs = {'sigma': sigma} - sigma2 = sigma * sigma - diff = self.inputs['X'] - self.inputs['Y'] - diff = diff * self.inputs['InsideWeight'] - loss = np.vectorize(smooth_l1_loss_forward)(diff, sigma2) - loss = loss * self.inputs['OutsideWeight'] - loss = loss.sum(1).reshape((dims[0], 1)) - self.outputs = { - 'Diff': diff.astype('float32'), - 'Out': loss.astype('float32'), - } - - def test_check_output(self): - self.check_output() - - def test_check_grad_normal(self): - self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03) - - def test_check_grad_ingore_x(self): - self.check_grad( - ['Y'], - 'Out', - max_relative_error=0.03, - no_grad_set={'X', 'InsideWeight', 'OutsideWeight'}, - ) - - def test_check_grad_ingore_y(self): - self.check_grad( - ['X'], - 'Out', - max_relative_error=0.03, - no_grad_set={'Y', 'InsideWeight', 'OutsideWeight'}, - ) - - -if __name__ == '__main__': - unittest.main() diff --git a/tools/parallel_UT_rule.py b/tools/parallel_UT_rule.py index aa06b939934e7e..a89dafff96ab6b 100755 --- a/tools/parallel_UT_rule.py +++ b/tools/parallel_UT_rule.py @@ -1467,7 +1467,6 @@ 'test_dict', 'test_bilinear_tensor_product_op', 'test_assert', - 'test_smooth_l1_loss_op', 'sequence_padding_test', 'test_analyzer_ernie', 'test_minimum_op', @@ -2780,7 +2779,6 @@ 'test_compare_reduce_op', 'test_clip_by_norm_op', 'test_box_coder_op', - 'test_smooth_l1_loss_op', 'test_bilinear_interp_op', 'test_spectral_norm_op', 'test_sum_mkldnn_op', diff --git a/tools/static_mode_white_list.py b/tools/static_mode_white_list.py index 60b800da70460c..a8ceadfe90a1ed 100755 --- a/tools/static_mode_white_list.py +++ b/tools/static_mode_white_list.py @@ -448,7 +448,6 @@ 'test_size_op', 'test_share_data_op', 'test_smooth_l1_loss', - 'test_smooth_l1_loss_op', 'test_softmax_with_cross_entropy_op', 'test_spectral_norm_op', 'test_split_ids_op',
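
For reference, the removed operator computed the sigma-parameterized smooth L1 loss described in the deleted OpMaker docstring. Below is a minimal NumPy sketch of that forward pass, written from the deleted CPU kernel and the `smooth_l1_loss_forward` helper in the deleted test; the function and variable names are illustrative only and are not part of any Paddle API.

    import numpy as np

    def smooth_l1_loss_reference(x, y, sigma=1.0,
                                 inside_weight=None, outside_weight=None):
        """Reference forward pass of the removed fluid smooth_l1_loss op."""
        sigma2 = sigma * sigma
        # "Diff" output of the removed op: InsideWeight * (X - Y)
        diff = x - y
        if inside_weight is not None:
            diff = diff * inside_weight
        abs_diff = np.abs(diff)
        # Piecewise form from the deleted kernel:
        #   0.5 * sigma2 * d^2     if |d| < 1 / sigma2
        #   |d| - 0.5 / sigma2     otherwise
        errors = np.where(abs_diff < 1.0 / sigma2,
                          0.5 * sigma2 * diff * diff,
                          abs_diff - 0.5 / sigma2)
        # OutsideWeight scales the per-element loss before reduction
        if outside_weight is not None:
            errors = errors * outside_weight
        # Sum over all non-batch dims -> "Out" with shape [batch_size, 1]
        return errors.reshape(x.shape[0], -1).sum(axis=1, keepdims=True)

Two details worth noting for anyone porting code off this op: the deleted docstring divides the entire second branch by sigma^2, while the kernel actually computes |d| - 0.5 / sigma2 (the sketch above follows the kernel), and the attribute comment advertises a default sigma of 3.0 while the code calls SetDefault(1.0). The `test_smooth_l1_loss` entry kept in static_mode_white_list.py suggests the Python-level smooth L1 loss API remains the supported path; that API appears to be parameterized by `delta` rather than `sigma`.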