diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index e8888940a99ac..fb18b3101cf14 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -2406,6 +2406,7 @@ USE_TRT_CONVERTER(elementwise_div_weight);
 USE_TRT_CONVERTER(elementwise_min_weight);
 USE_TRT_CONVERTER(elementwise_max_weight);
 USE_TRT_CONVERTER(elementwise_pow_weight);
+USE_TRT_CONVERTER(elementwise_mod_weight);
 USE_TRT_CONVERTER(elementwise_floordiv_weight);
 USE_TRT_CONVERTER(elementwise_add_tensor);
 USE_TRT_CONVERTER(elementwise_sub_tensor);
@@ -2415,6 +2416,7 @@ USE_TRT_CONVERTER(elementwise_max_tensor);
 USE_TRT_CONVERTER(elementwise_min_tensor);
 USE_TRT_CONVERTER(elementwise_pow_tensor);
 USE_TRT_CONVERTER(elementwise_floordiv_tensor);
+USE_TRT_CONVERTER(elementwise_mod_tensor);
 USE_TRT_CONVERTER(less_than);
 USE_TRT_CONVERTER(greater_than);
 USE_TRT_CONVERTER(logical_or);
diff --git a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
index 0280e418e804b..e3df8337cb8cd 100755
--- a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc
@@ -163,6 +163,25 @@ class ElementwiseTensorOpConverter : public OpConverter {
                              nvinfer1::ElementWiseOperation::kOR);
       RreplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
+    } else if (op_type_ == "mod") {
+      auto* div_layer =
+          TRT_ENGINE_ADD_LAYER(engine_,
+                               ElementWise,
+                               *X,
+                               *reshape_y_tensor,
+                               nvinfer1::ElementWiseOperation::kFLOOR_DIV);
+      auto* mul_layer =
+          TRT_ENGINE_ADD_LAYER(engine_,
+                               ElementWise,
+                               *(div_layer->getOutput(0)),
+                               *reshape_y_tensor,
+                               nvinfer1::ElementWiseOperation::kPROD);
+      auto* layer = TRT_ENGINE_ADD_LAYER(engine_,
+                                         ElementWise,
+                                         *X,
+                                         *(mul_layer->getOutput(0)),
+                                         nvinfer1::ElementWiseOperation::kSUB);
+      RreplenishLayerAndOutput(layer, "elementwise", {output_name}, test_mode);
     } else {
       auto op_pair = ops.find(op_type_);
       PADDLE_ENFORCE_NE(
@@ -271,6 +290,10 @@ class ElementwiseTensorLessEqualOpConverter
  public:
   ElementwiseTensorLessEqualOpConverter() { op_type_ = "less_equal"; }
 };
+class ElementwiseTensorModOpConverter : public ElementwiseTensorOpConverter {
+ public:
+  ElementwiseTensorModOpConverter() { op_type_ = "mod"; }
+};
 }  // namespace tensorrt
 }  // namespace inference
 }  // namespace paddle
@@ -291,6 +314,8 @@ REGISTER_TRT_OP_CONVERTER(elementwise_pow_weight,
                           ElementwiseTensorPowOpConverter);
 REGISTER_TRT_OP_CONVERTER(elementwise_floordiv_weight,
                           ElementwiseTensorFloorDivOpConverter);
+REGISTER_TRT_OP_CONVERTER(elementwise_mod_weight,
+                          ElementwiseTensorModOpConverter);
 
 REGISTER_TRT_OP_CONVERTER(elementwise_add_tensor,
                           ElementwiseTensorAddOpConverter);
@@ -308,6 +333,8 @@ REGISTER_TRT_OP_CONVERTER(elementwise_pow_tensor,
                           ElementwiseTensorPowOpConverter);
 REGISTER_TRT_OP_CONVERTER(elementwise_floordiv_tensor,
                           ElementwiseTensorFloorDivOpConverter);
+REGISTER_TRT_OP_CONVERTER(elementwise_mod_tensor,
+                          ElementwiseTensorModOpConverter);
 REGISTER_TRT_OP_CONVERTER(less_than, ElementwiseTensorLessThanOpConverter);
 REGISTER_TRT_OP_CONVERTER(greater_than,
                           ElementwiseTensorGreaterThanOpConverter);
diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h
index 2e2a76745ca52..7d6a2772a5679 100644
--- a/paddle/fluid/inference/tensorrt/convert/op_converter.h
+++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h
@@ -74,9 +74,9 @@ class OpConverter {
     }
     if (op_desc.Type().find("elementwise") != std::string::npos) {
       static std::unordered_set<std::string> add_tensor_op_set{
-          "add", "mul", "sub", "div", "max", "min", "pow"};
+          "add", "mul", "sub", "div", "max", "min", "pow", "mod"};
       static std::unordered_set<std::string> add_weight_op_set{
-          "add", "mul", "sub", "div", "max", "min", "pow"};
+          "add", "mul", "sub", "div", "max", "min", "pow", "mod"};
       PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(),
                         1UL,
                         platform::errors::InvalidArgument(
diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc
index 029665bd11131..e79d2f78aad3d 100644
--- a/paddle/fluid/inference/tensorrt/op_teller.cc
+++ b/paddle/fluid/inference/tensorrt/op_teller.cc
@@ -1418,7 +1418,8 @@ struct SimpleOpTypeSetTeller : public Teller {
     if (op_type == "elementwise_add" || op_type == "elementwise_mul" ||
         op_type == "elementwise_sub" || op_type == "elementwise_div" ||
         op_type == "elementwise_pow" || op_type == "elementwise_min" ||
-        op_type == "elementwise_max" || op_type == "elementwise_floordiv") {
+        op_type == "elementwise_max" || op_type == "elementwise_floordiv" ||
+        op_type == "elementwise_mod") {
       if (desc.Input("X").size() != 1) {
         VLOG(3) << "The input op's Input(\"X\").size() "
                    "should equal to 1, but received Input(\"X\").size() = "
@@ -1453,12 +1454,14 @@ struct SimpleOpTypeSetTeller : public Teller {
     if (op_type == "elementwise_add" || op_type == "elementwise_mul" ||
         op_type == "elementwise_sub" || op_type == "elementwise_div" ||
         op_type == "elementwise_pow" || op_type == "elementwise_min" ||
-        op_type == "elementwise_max" || op_type == "elementwise_floordiv") {
+        op_type == "elementwise_max" || op_type == "elementwise_floordiv" ||
+        op_type == "elementwise_mod") {
       if (x_var_desc->GetDataType() ==
           paddle::framework::proto::VarType_Type::VarType_Type_BOOL) {
-        VLOG(3) << "These operations "
-                   "(elementwise_add/mul/sub/div/pow/min/max/floordiv) do "
-                   "not support boolean datatype.";
+        VLOG(3)
+            << "These operations "
+               "(elementwise_add/mul/sub/div/pow/min/max/floordiv/mod) do "
+               "not support boolean datatype.";
         return false;
       }
     }
@@ -2606,6 +2609,7 @@ struct SimpleOpTypeSetTeller : public Teller {
       "elementwise_min",
      "elementwise_max",
       "elementwise_floordiv",
+      "elementwise_mod",
       "equal",
       "not_equal",
       "less_than",
@@ -2758,6 +2762,7 @@ struct SimpleOpTypeSetTeller : public Teller {
       "elementwise_min",
       "elementwise_max",
       "elementwise_floordiv",
+      "elementwise_mod",
       "equal",
       "not_equal",
       "less_than",
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py
index 4d4df30acb031..ff7cc6e2dd1a2 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_elementwise.py
@@ -36,6 +36,10 @@ def generate_input(shape, op_type):
                 return np.random.randint(
                     low=1, high=10000, size=shape, dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=shape).astype(
+                    np.float32
+                )
             else:
                 return np.random.random(shape).astype(np.float32)
 
@@ -44,6 +48,10 @@ def generate_weight(op_type):
                 return np.random.randint(
                     low=1, high=10000, size=[1, 32, 1, 1], dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(
+                    low=0.1, high=1.0, size=[1, 32, 1, 1]
+                ).astype(np.float32)
             else:
                 return np.random.randn(1, 32, 1, 1).astype(np.float32)
 
@@ -58,6 +66,7 @@ def generate_weight(op_type):
                 "elementwise_min",
                 "elementwise_max",
                 "elementwise_floordiv",
+                "elementwise_mod",
             ]:
                 for axis in [-1]:
                     self.dims = len(shape)
@@ -169,6 +178,10 @@ def generate_input(shape, op_type):
                 return np.random.randint(
                     low=1, high=10000, size=shape, dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=shape).astype(
+                    np.float32
+                )
             else:
                 return np.random.random(shape).astype(np.float32)
 
@@ -178,6 +191,10 @@ def generate_weight(op_type):
                 return np.random.randint(
                     low=1, high=10000, size=[1], dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=[1]).astype(
+                    np.float32
+                )
             else:
                 return np.random.randn(1).astype(np.float32)
 
@@ -191,6 +208,7 @@ def generate_weight(op_type):
                 "elementwise_min",
                 "elementwise_max",
                 "elementwise_floordiv",
+                "elementwise_mod",
             ]:
                 for axis in [-1]:
                     self.dims = len(shape)
@@ -290,6 +308,10 @@ def generate_input(shape, op_type):
                 return np.random.randint(
                     low=1, high=10000, size=shape, dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=shape).astype(
+                    np.float32
+                )
             else:
                 return np.random.random(shape).astype(np.float32)
 
@@ -299,6 +321,10 @@ def generate_weight(op_type):
                 return np.random.randint(
                     low=1, high=10000, size=[32], dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=[32]).astype(
+                    np.float32
+                )
             else:
                 return np.random.randn(32).astype(np.float32)
 
@@ -318,6 +344,7 @@ def generate_weight(op_type):
                 "elementwise_min",
                 "elementwise_max",
                 "elementwise_floordiv",
+                "elementwise_mod",
             ]:
                 for axis in [-1 if len(shape) == 1 else 1]:
                     self.dims = len(shape)
@@ -442,6 +469,10 @@ def generate_input(shape, op_type):
                 return np.random.randint(
                     low=1, high=10000, size=shape, dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=shape).astype(
+                    np.float32
+                )
             else:
                 return np.random.random(shape).astype(np.float32)
 
@@ -455,6 +486,7 @@ def generate_input(shape, op_type):
                 "elementwise_min",
                 "elementwise_max",
                 "elementwise_floordiv",
+                "elementwise_mod",
             ]:
                 for axis in [0, -1]:
                     self.dims = len(shape)
@@ -605,6 +637,10 @@ def generate_input(shape, op_type):
                 return np.random.randint(
                     low=1, high=10000, size=shape, dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=shape).astype(
+                    np.float32
+                )
             else:
                 return np.random.random(shape).astype(np.float32)
 
@@ -651,6 +687,7 @@ def generate_input(shape, op_type):
                 "elementwise_min",
                 "elementwise_max",
                 "elementwise_floordiv",
+                "elementwise_mod",
             ]:
                 for axis in axis_list[j][i]:
                     self.shape1 = input1_shape
@@ -763,6 +800,10 @@ def generate_input(shape, op_type):
                 return np.random.randint(
                     low=1, high=10000, size=shape, dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=shape).astype(
+                    np.float32
+                )
             else:
                 return np.random.random(shape).astype(np.float32)
 
@@ -772,6 +813,10 @@ def generate_weight(op_type):
                 return np.random.randint(
                     low=1, high=10000, size=[32], dtype=np.int32
                 )
+            elif op_type == "elementwise_mod":
+                return np.random.uniform(low=0.1, high=1.0, size=[32]).astype(
+                    np.float32
+                )
             else:
                 return np.random.rand(32).astype(np.float32)
 
@@ -791,6 +836,7 @@ def generate_weight(op_type):
                 "elementwise_min",
                 "elementwise_max",
                 "elementwise_floordiv",
+                "elementwise_mod",
             ]:
                 self.op_type = op_type
                 for axis in [-1 if len(shape) == 1 else 1]:
@@ -840,8 +886,8 @@ def generate_dynamic_shape(attrs):
             # The input.dims[1] must be equal to the weight's length.
             if self.dims == 1:
                 self.dynamic_shape.min_input_shape = {"input_data": [4]}
-                self.dynamic_shape.max_input_shape = {"input_data": [256]}
-                self.dynamic_shape.opt_input_shape = {"input_data": [16]}
+                self.dynamic_shape.max_input_shape = {"input_data": [64]}
+                self.dynamic_shape.opt_input_shape = {"input_data": [32]}
             elif self.dims == 2:
                 self.dynamic_shape.min_input_shape = {"input_data": [1, 32]}
                 self.dynamic_shape.max_input_shape = {"input_data": [4, 32]}