From f630beef2605a7f6b803d9f96bb4c74d46346b2d Mon Sep 17 00:00:00 2001 From: qunyang Date: Wed, 15 Mar 2023 13:32:01 +0800 Subject: [PATCH 1/6] support 0-d tensor for element wise unary ops --- paddle/fluid/framework/data_transform.cc | 8 +- paddle/phi/backends/onednn/onednn_helper.h | 4 +- paddle/phi/backends/onednn/onednn_reuse.h | 8 +- .../kernels/funcs/data_layout_transform.cc | 2 +- paddle/phi/kernels/transfer_layout_kernel.cc | 7 +- .../mkldnn/test_activation_mkldnn_op.py | 158 ++++++++++++++++++ .../unittests/mkldnn/test_cast_mkldnn_op.py | 10 +- .../unittests/mkldnn/test_clip_mkldnn_op.py | 10 +- .../unittests/mkldnn/test_scale_mkldnn_op.py | 11 +- 9 files changed, 204 insertions(+), 14 deletions(-) diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index 2e7a692145cb74..7505039d85a279 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -72,10 +72,12 @@ void TransformData(const phi::KernelKey &expected_kernel_type, // NHWC or NCHW phi::OneDNNContext::tls().set_cur_paddle_data_layout(lin); } + + auto out_dims = out.dims().size() != 0 ? vectorize(out.dims()) + : std::vector{1}; + dnnl::memory::desc out_mem_desc( - vectorize(out.dims()), - phi::funcs::ToOneDNNDataType(in.dtype()), - out_format); + out_dims, phi::funcs::ToOneDNNDataType(in.dtype()), out_format); out.set_mem_desc(out_mem_desc); } else { // Case2 - transfrom from ONEDNN OPKernel to Non-ONEDNN OPKernel diff --git a/paddle/phi/backends/onednn/onednn_helper.h b/paddle/phi/backends/onednn/onednn_helper.h index c9511e89a8d543..2f9557c86a699b 100644 --- a/paddle/phi/backends/onednn/onednn_helper.h +++ b/paddle/phi/backends/onednn/onednn_helper.h @@ -36,7 +36,9 @@ void* to_void_cast(const Type* t) { inline OneDNNMemoryFormat OneDNNFormatForSize(size_t dims_size, OneDNNMemoryFormat data_format) { - if (dims_size == 1) { + if (dims_size == 0) { + return OneDNNMemoryFormat::x; + } else if (dims_size == 1) { return OneDNNMemoryFormat::x; } else if (dims_size == 2) { return OneDNNMemoryFormat::nc; diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h index cbd503e249872f..688cdbd82e3829 100644 --- a/paddle/phi/backends/onednn/onednn_reuse.h +++ b/paddle/phi/backends/onednn/onednn_reuse.h @@ -1646,7 +1646,13 @@ class SoftplusOneDNNHandler : public OneDNNHandlerNoCachingT { dnnl::primitive_attr attrs; attrs.set_post_ops(post_ops); - auto x_tz = phi::vectorize(x->dims()); + // if x is a 0-D tensor, then: + // x->dims() is [] and x->mem_desc().dims() is [1], we should use + // the later shape since oneDNN doesn't support 0-D shape. + // else, then: + // x->dims() == x->mem_desc().dims() + // so, we can directly use x->mem_desc().dims() here + auto x_tz = x->mem_desc().dims(); auto beta_tz = std::vector(x_tz.size(), 1); auto beta_md = dnnl::memory::desc( beta_tz, OneDNNGetDataType(), GetPlainOneDNNFormat(x_tz.size())); diff --git a/paddle/phi/kernels/funcs/data_layout_transform.cc b/paddle/phi/kernels/funcs/data_layout_transform.cc index a57a1a0ed57014..5b5ec6eba1be0f 100644 --- a/paddle/phi/kernels/funcs/data_layout_transform.cc +++ b/paddle/phi/kernels/funcs/data_layout_transform.cc @@ -65,7 +65,7 @@ void TransDataLayoutFromOneDNN(DataLayout in_layout, auto& cpu_engine = dev_ctx->GetEngine(); auto in_tz = vectorize(in.dims()); - auto out_tz = in_tz; + auto out_tz = in_tz.size() != 0 ? 
in_tz : std::vector{1}; auto in_type = ToOneDNNDataType(in.dtype()); PADDLE_ENFORCE_NE( diff --git a/paddle/phi/kernels/transfer_layout_kernel.cc b/paddle/phi/kernels/transfer_layout_kernel.cc index f2c57150c62461..cbd29df954d4a4 100644 --- a/paddle/phi/kernels/transfer_layout_kernel.cc +++ b/paddle/phi/kernels/transfer_layout_kernel.cc @@ -148,9 +148,10 @@ void TransferLayoutMKLDNN(const Context& dev_ctx, OneDNNContext::tls().set_cur_paddle_data_layout(src_layout); } - dnnl::memory::desc out_mem_desc(vectorize(out->dims()), - funcs::ToOneDNNDataType(x.dtype()), - out_format); + auto out_dims = out->dims().size() != 0 ? vectorize(out->dims()) + : std::vector{1}; + dnnl::memory::desc out_mem_desc( + out_dims, funcs::ToOneDNNDataType(x.dtype()), out_format); out->set_mem_desc(out_mem_desc); } else if (src_layout == DataLayout::ONEDNN && dst_layout != DataLayout::ONEDNN) { diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index 25643dc8ab1c50..5cb57703af5a6f 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -33,6 +33,18 @@ TestSqrt, TestSwish, TestTanh, + TestSoftplus, + TestAbs_ZeroDim, + TestActivation_ZeroDim, + TestHardSwish_ZeroDim, + TestLeakyRelu_ZeroDim, + TestRelu_ZeroDim, + TestRelu6_ZeroDim, + TestSigmoid_ZeroDim, + TestSqrt_ZeroDim, + TestSwish_ZeroDim, + TestTanh_ZeroDim, + TestSoftplus_ZeroDim, ) from paddle.fluid.tests.unittests.test_gelu_op import gelu @@ -46,6 +58,14 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 +class TestMKLDNNRelu_ZeroDim(TestRelu_ZeroDim): + def setUp(self): + super().setUp() + + self.attrs = {"use_mkldnn": True} + + def init_dtype(self): + self.dtype = np.float32 class TestMKLDNNRelu6Dim2(TestRelu6): def setUp(self): @@ -55,6 +75,13 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 +class TestMKLDNNRelu6_ZeroDim(TestRelu6_ZeroDim): + def setUp(self): + super().setUp() + self.attrs.update({"use_mkldnn": True}) + + def init_dtype(self): + self.dtype = np.float32 class TestMKLDNNLeakyReluDim2(TestLeakyRelu): def setUp(self): @@ -73,6 +100,22 @@ def test_check_grad(self): return self.check_grad(['X'], 'Out', check_dygraph=False) +class TestMKLDNNLeakyRelu_ZeroDim(TestLeakyRelu_ZeroDim): + def setUp(self): + super().setUp() + + self.attrs = {"use_mkldnn": True} + + def init_dtype(self): + self.dtype = np.float32 + + def test_check_output(self): + self.check_output(check_dygraph=False) + + def test_check_grad(self): + if self.dtype == np.float16: + return + self.check_grad(['X'], 'Out', check_dygraph=False) class TestMKLDNNGeluDim2(TestActivation): def setUp(self): @@ -87,6 +130,18 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} +class TestMKLDNNGelu_ZeroDim(TestActivation_ZeroDim): + def setUp(self): + self.op_type = "gelu" + self.python_api = F.gelu + self.dtype = np.float32 + + x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + out = gelu(x, False) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} + self.attrs = {"use_mkldnn": True} class TestMKLDNNGeluDim2Approx(TestActivation): def setUp(self): @@ -111,6 +166,14 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 +class TestMKLDNNTanh_ZeroDim(TestTanh_ZeroDim): + def setUp(self): + super().setUp() + + self.attrs = {"use_mkldnn": True} + + def 
init_dtype(self): + self.dtype = np.float32 class TestMKLDNNSqrtDim2(TestSqrt): def setUp(self): @@ -121,6 +184,14 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 +class TestMKLDNNSqrt_ZeroDim(TestSqrt_ZeroDim): + def setUp(self): + super().setUp() + + self.attrs = {"use_mkldnn": True} + + def init_dtype(self): + self.dtype = np.float32 class TestMKLDNNAbsDim2(TestAbs): def setUp(self): @@ -130,6 +201,13 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 +class TestMKLDNNAbs_ZeroDim(TestAbs_ZeroDim): + def setUp(self): + super().setUp() + self.attrs = {"use_mkldnn": True} + + def init_dtype(self): + self.dtype = np.float32 class TestMKLDNNSwishDim2(TestSwish): def setUp(self): @@ -141,18 +219,35 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 +class TestMKLDNNSwish_ZeroDim(TestSwish_ZeroDim): + def setUp(self): + super().setUp() + + self.attrs["use_mkldnn"] = True + self.check_eager = False + + def init_dtype(self): + self.dtype = np.float32 class TestMKLDNNHardSwishDim2(TestHardSwish): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} +class TestMKLDNNHardSwish_ZeroDim(TestHardSwish_ZeroDim): + def setUp(self): + super().setUp() + self.attrs = {"use_mkldnn": True} class TestMKLDNNSigmoidDim2(TestSigmoid): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} +class TestMKLDNNSigmoid_ZeroDim(TestSigmoid_ZeroDim): + def setUp(self): + super().setUp() + self.attrs = {"use_mkldnn": True} class TestMKLDNNReluDim4(TestRelu): def setUp(self): @@ -375,6 +470,18 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} +class TestMKLDNNMish_ZeroDim(TestActivation_ZeroDim): + def setUp(self): + self.op_type = "mish" + self.python_api = F.mish + self.dtype = np.float32 + + x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype) + out = x * np.tanh(np.log(1 + np.exp(x))) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} + self.attrs = {"use_mkldnn": True} class TestMKLDNNRound(TestActivation): def setUp(self): @@ -387,6 +494,16 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} +class TestMKLDNNRound_ZeroDim(TestActivation_ZeroDim): + def setUp(self): + self.op_type = "round" + self.python_api = paddle.round + x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(np.float32) + out = np.round(x) + + self.inputs = {'X': x} + self.outputs = {'Out': out} + self.attrs = {"use_mkldnn": True} class TestMKLDNNSigmoidDim4(TestSigmoid): def setUp(self): @@ -417,6 +534,23 @@ def setUp(self): def set_alpha(self): self.alpha = 1.0 +class TestMKLDNNEluDefaultAlpha_ZeroDim(TestActivation_ZeroDim): + def setUp(self): + self.op_type = "elu" + self.python_api = F.elu + self.set_alpha() + + x = np.random.random((5, 5, 4)).astype("float32") + + self.inputs = {'X': x} + self.attrs = {'use_mkldnn': True, 'alpha': self.alpha} + self.outputs = { + 'Out': np.maximum(0, x) + + np.minimum(0, self.alpha * (np.exp(x) - 1)) + } + + def set_alpha(self): + self.alpha = 1.0 class TestMKLDNNEluCustomAlpha(TestMKLDNNEluDefaultAlpha): def set_alpha(self): @@ -433,6 +567,15 @@ def setUp(self): self.attrs = {'use_mkldnn': True} self.outputs = {'Out': np.exp(x)} +class TestMKLDNNExpOp_ZeroDim(TestActivation_ZeroDim): + def setUp(self): + self.op_type = "exp" + self.python_api = paddle.exp + x = np.random.random((5, 5, 4)).astype("float32") + + self.inputs = {'X': x} + self.attrs = {'use_mkldnn': True} + self.outputs = {'Out': np.exp(x)} # Check if primitives 
already exist in backward class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase): @@ -457,6 +600,21 @@ def test_check(self): self, self.op_type, self.x, self.out, self.out_grad, self.x_grad ) +class TestMKLDNNSoftplusDim2(TestSoftplus): + def setUp(self): + super().setUp() + self.attrs.update({"use_mkldnn": True}) + + def init_dtype(self): + self.dtype = np.float32 + +class TestMKLDNNSoftplus_ZeroDim(TestSoftplus_ZeroDim): + def setUp(self): + super().setUp() + self.attrs.update({"use_mkldnn": True}) + + def init_dtype(self): + self.dtype = np.float32 if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py index 7702d979ec8acf..3e4c4cbe7db676 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py @@ -26,10 +26,11 @@ ) class TestCastBF16ToFP32MKLDNNOp(OpTest): def init_data(self): - self.out = np.random.random(size=[10, 10]).astype("float32") + self.out = np.random.random(size=self.shape).astype("float32") self.x = convert_float_to_uint16(self.out) def setUp(self): + self.init_shape() self.init_data() self.inputs = {'X': self.x} self.outputs = {'Out': self.out} @@ -57,7 +58,9 @@ def test_check_grad(self): user_defined_grads=[self.inputs['X']], user_defined_grad_outputs=[self.outputs['Out']], ) - + + def init_shape(self): + self.shape = [10, 10] class TestCastFP32ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp): def init_data(self): @@ -76,6 +79,9 @@ def init_data(self): self.x = np.random.random(size=[7, 15]).astype("float32") self.out = self.x +class TestCastBF16ToFP32MKLDNNOp_ZeroDim(TestCastBF16ToFP32MKLDNNOp): + def init_shape(self): + self.shape = [] if __name__ == '__main__': paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py index 3943af40dcc87f..436c4709bff669 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py @@ -25,10 +25,10 @@ ) -@OpTestTool.skip_if_not_cpu_bf16() class TestClipOneDNNOp(OpTest): def setUp(self): self.op_type = "clip" + self.init_shape() self.set_inputs() self.set_attrs() self.set_additional_inputs() @@ -47,8 +47,11 @@ def setUp(self): self.outputs = {'Out': np.clip(self.x_fp32, self.min, self.max)} + def init_shape(self): + self.shape = [10, 10] + def set_inputs(self): - self.inputs = {'X': np.random.random((10, 10)).astype(np.float32) * 25} + self.inputs = {'X': np.array(np.random.random(self.shape).astype(np.float32) * 25)} self.x_fp32 = self.inputs['X'] def set_additional_inputs(self): @@ -66,6 +69,9 @@ def test_check_output(self): def test_check_grad(self): self.check_grad(['X'], 'Out') +class TestClipOneDNNOp_ZeroDim(TestClipOneDNNOp): + def init_shape(self): + self.shape = [] class TestClipMinAsInputOneDNNOp(TestClipOneDNNOp): def set_additional_inputs(self): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py index 39594833fa2e56..5d22151d3e37f6 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py @@ -22,20 +22,27 @@ class TestScaleOp(OpTest): def setUp(self): + self.init_shape() self.op_type = "scale" - self.inputs = 
{'X': np.random.random((10, 10)).astype(np.float32)} + self.inputs = {'X': np.random.random(self.shape).astype(np.float32)} self.attrs = {'scale': -2.3, 'use_mkldnn': True, 'bias': 0.2} self.use_mkldnn = True self.outputs = { 'Out': (self.inputs['X'] * self.attrs['scale']) + self.attrs['bias'] } + def init_shape(self): + self.shape = [10, 10] + def test_check_output(self): self.check_output(check_dygraph=False) def test_check_grad(self): self.check_grad(['X'], 'Out') +class TestScaleOp_ZeroDim(TestScaleOp): + def init_shape(self): + self.shape = [] class TestScaleOpBiasNotAfterScale(OpTest): def setUp(self): @@ -59,6 +66,7 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') +# FIXME(xx) no use_mkldnn attr, does this case run into oneDNN? class TestScaleOpScaleTensor(OpTest): def setUp(self): self.op_type = "scale" @@ -77,6 +85,7 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') +# FIXME(xx) no use_mkldnn attr, does this case run into oneDNN? class TestScaleOpScaleTensorNotBiasAfterScale(OpTest): def setUp(self): self.op_type = "scale" From 7274eda66d5199f144c15abfb70ee656c55c2561 Mon Sep 17 00:00:00 2001 From: qunyang Date: Wed, 15 Mar 2023 14:16:11 +0800 Subject: [PATCH 2/6] fix python code style check --- .../mkldnn/test_activation_mkldnn_op.py | 53 +++++++++++++++---- .../unittests/mkldnn/test_cast_mkldnn_op.py | 5 +- .../unittests/mkldnn/test_clip_mkldnn_op.py | 6 ++- .../unittests/mkldnn/test_scale_mkldnn_op.py | 6 ++- 4 files changed, 55 insertions(+), 15 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index 5cb57703af5a6f..16c055b1d94c96 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -24,27 +24,27 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16 from paddle.fluid.tests.unittests.test_activation_op import ( TestAbs, + TestAbs_ZeroDim, TestActivation, + TestActivation_ZeroDim, TestHardSwish, + TestHardSwish_ZeroDim, TestLeakyRelu, + TestLeakyRelu_ZeroDim, TestRelu, TestRelu6, - TestSigmoid, - TestSqrt, - TestSwish, - TestTanh, - TestSoftplus, - TestAbs_ZeroDim, - TestActivation_ZeroDim, - TestHardSwish_ZeroDim, - TestLeakyRelu_ZeroDim, - TestRelu_ZeroDim, TestRelu6_ZeroDim, + TestRelu_ZeroDim, + TestSigmoid, TestSigmoid_ZeroDim, + TestSoftplus, + TestSoftplus_ZeroDim, + TestSqrt, TestSqrt_ZeroDim, + TestSwish, TestSwish_ZeroDim, + TestTanh, TestTanh_ZeroDim, - TestSoftplus_ZeroDim, ) from paddle.fluid.tests.unittests.test_gelu_op import gelu @@ -58,6 +58,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNRelu_ZeroDim(TestRelu_ZeroDim): def setUp(self): super().setUp() @@ -67,6 +68,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNRelu6Dim2(TestRelu6): def setUp(self): super().setUp() @@ -75,6 +77,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNRelu6_ZeroDim(TestRelu6_ZeroDim): def setUp(self): super().setUp() @@ -83,6 +86,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNLeakyReluDim2(TestLeakyRelu): def setUp(self): super().setUp() @@ -100,6 +104,7 @@ def test_check_grad(self): return self.check_grad(['X'], 'Out', check_dygraph=False) + class TestMKLDNNLeakyRelu_ZeroDim(TestLeakyRelu_ZeroDim): def setUp(self): super().setUp() @@ -117,6 +122,7 @@ def 
test_check_grad(self): return self.check_grad(['X'], 'Out', check_dygraph=False) + class TestMKLDNNGeluDim2(TestActivation): def setUp(self): self.op_type = "gelu" @@ -130,6 +136,7 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + class TestMKLDNNGelu_ZeroDim(TestActivation_ZeroDim): def setUp(self): self.op_type = "gelu" @@ -143,6 +150,7 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + class TestMKLDNNGeluDim2Approx(TestActivation): def setUp(self): self.op_type = "gelu" @@ -166,6 +174,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNTanh_ZeroDim(TestTanh_ZeroDim): def setUp(self): super().setUp() @@ -175,6 +184,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNSqrtDim2(TestSqrt): def setUp(self): super().setUp() @@ -184,6 +194,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNSqrt_ZeroDim(TestSqrt_ZeroDim): def setUp(self): super().setUp() @@ -193,6 +204,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNAbsDim2(TestAbs): def setUp(self): super().setUp() @@ -201,6 +213,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNAbs_ZeroDim(TestAbs_ZeroDim): def setUp(self): super().setUp() @@ -209,6 +222,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNSwishDim2(TestSwish): def setUp(self): super().setUp() @@ -219,6 +233,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNSwish_ZeroDim(TestSwish_ZeroDim): def setUp(self): super().setUp() @@ -229,26 +244,31 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNHardSwishDim2(TestHardSwish): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + class TestMKLDNNHardSwish_ZeroDim(TestHardSwish_ZeroDim): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + class TestMKLDNNSigmoidDim2(TestSigmoid): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + class TestMKLDNNSigmoid_ZeroDim(TestSigmoid_ZeroDim): def setUp(self): super().setUp() self.attrs = {"use_mkldnn": True} + class TestMKLDNNReluDim4(TestRelu): def setUp(self): super().setUp() @@ -470,6 +490,7 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + class TestMKLDNNMish_ZeroDim(TestActivation_ZeroDim): def setUp(self): self.op_type = "mish" @@ -483,6 +504,7 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + class TestMKLDNNRound(TestActivation): def setUp(self): self.op_type = "round" @@ -494,6 +516,7 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + class TestMKLDNNRound_ZeroDim(TestActivation_ZeroDim): def setUp(self): self.op_type = "round" @@ -505,6 +528,7 @@ def setUp(self): self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} + class TestMKLDNNSigmoidDim4(TestSigmoid): def setUp(self): super().setUp() @@ -534,6 +558,7 @@ def setUp(self): def set_alpha(self): self.alpha = 1.0 + class TestMKLDNNEluDefaultAlpha_ZeroDim(TestActivation_ZeroDim): def setUp(self): self.op_type = "elu" @@ -552,6 +577,7 @@ def setUp(self): def set_alpha(self): self.alpha = 1.0 + class TestMKLDNNEluCustomAlpha(TestMKLDNNEluDefaultAlpha): def set_alpha(self): self.alpha = 2.5 @@ -567,6 +593,7 @@ def setUp(self): self.attrs = {'use_mkldnn': True} self.outputs = {'Out': np.exp(x)} + class 
TestMKLDNNExpOp_ZeroDim(TestActivation_ZeroDim): def setUp(self): self.op_type = "exp" @@ -577,6 +604,7 @@ def setUp(self): self.attrs = {'use_mkldnn': True} self.outputs = {'Out': np.exp(x)} + # Check if primitives already exist in backward class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase): def setUp(self): @@ -600,6 +628,7 @@ def test_check(self): self, self.op_type, self.x, self.out, self.out_grad, self.x_grad ) + class TestMKLDNNSoftplusDim2(TestSoftplus): def setUp(self): super().setUp() @@ -608,6 +637,7 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + class TestMKLDNNSoftplus_ZeroDim(TestSoftplus_ZeroDim): def setUp(self): super().setUp() @@ -616,5 +646,6 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py index 3e4c4cbe7db676..b1447ee2f16cee 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_cast_mkldnn_op.py @@ -58,10 +58,11 @@ def test_check_grad(self): user_defined_grads=[self.inputs['X']], user_defined_grad_outputs=[self.outputs['Out']], ) - + def init_shape(self): self.shape = [10, 10] + class TestCastFP32ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp): def init_data(self): self.x = np.random.random(size=[2, 6]).astype("float32") @@ -79,10 +80,12 @@ def init_data(self): self.x = np.random.random(size=[7, 15]).astype("float32") self.out = self.x + class TestCastBF16ToFP32MKLDNNOp_ZeroDim(TestCastBF16ToFP32MKLDNNOp): def init_shape(self): self.shape = [] + if __name__ == '__main__': paddle.enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py index 436c4709bff669..b9947210d75669 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_clip_mkldnn_op.py @@ -51,7 +51,9 @@ def init_shape(self): self.shape = [10, 10] def set_inputs(self): - self.inputs = {'X': np.array(np.random.random(self.shape).astype(np.float32) * 25)} + self.inputs = { + 'X': np.array(np.random.random(self.shape).astype(np.float32) * 25) + } self.x_fp32 = self.inputs['X'] def set_additional_inputs(self): @@ -69,10 +71,12 @@ def test_check_output(self): def test_check_grad(self): self.check_grad(['X'], 'Out') + class TestClipOneDNNOp_ZeroDim(TestClipOneDNNOp): def init_shape(self): self.shape = [] + class TestClipMinAsInputOneDNNOp(TestClipOneDNNOp): def set_additional_inputs(self): self.inputs['Min'] = np.array([6.8]).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py index 5d22151d3e37f6..af5caca3ffe60b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py @@ -31,7 +31,7 @@ def setUp(self): 'Out': (self.inputs['X'] * self.attrs['scale']) + self.attrs['bias'] } - def init_shape(self): + def init_shape(self): self.shape = [10, 10] def test_check_output(self): @@ -40,10 +40,12 @@ def test_check_output(self): def test_check_grad(self): self.check_grad(['X'], 'Out') + class TestScaleOp_ZeroDim(TestScaleOp): - def init_shape(self): + def init_shape(self): self.shape = [] + class 
TestScaleOpBiasNotAfterScale(OpTest): def setUp(self): self.op_type = "scale" From 13d6ee38bd1f78011b436d92c0231cfc32c8483e Mon Sep 17 00:00:00 2001 From: qunyang Date: Wed, 15 Mar 2023 15:50:57 +0800 Subject: [PATCH 3/6] fix approval check --- .../tests/unittests/mkldnn/test_activation_mkldnn_op.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index 16c055b1d94c96..d1023f781f47b8 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -114,14 +114,6 @@ def setUp(self): def init_dtype(self): self.dtype = np.float32 - def test_check_output(self): - self.check_output(check_dygraph=False) - - def test_check_grad(self): - if self.dtype == np.float16: - return - self.check_grad(['X'], 'Out', check_dygraph=False) - class TestMKLDNNGeluDim2(TestActivation): def setUp(self): From 1870101dcd9d61dead302a84d1d1c3b5a2d6e4a7 Mon Sep 17 00:00:00 2001 From: qunyang Date: Wed, 15 Mar 2023 20:20:38 +0800 Subject: [PATCH 4/6] support 0-d tensor for onednn softmax and logsoftmax kernels --- paddle/phi/backends/onednn/onednn_reuse.h | 7 ++++--- .../phi/kernels/onednn/log_softmax_kernel.cc | 5 +++-- .../mkldnn/test_log_softmax_mkldnn_op.py | 19 +++++++++++++++---- .../mkldnn/test_softmax_mkldnn_op.py | 13 +++++++++++++ .../fluid/tests/unittests/test_softmax_op.py | 1 + 5 files changed, 36 insertions(+), 9 deletions(-) diff --git a/paddle/phi/backends/onednn/onednn_reuse.h b/paddle/phi/backends/onednn/onednn_reuse.h index 688cdbd82e3829..de3536a8b5050c 100644 --- a/paddle/phi/backends/onednn/onednn_reuse.h +++ b/paddle/phi/backends/onednn/onednn_reuse.h @@ -776,7 +776,8 @@ class SoftmaxOneDNNHandler errors::InvalidArgument( "The shape of input and output tensor must be identical.")); - const int canonical_axis = funcs::CanonicalAxis(axis, x->dims().size()); + int rank = x->dims().size() != 0 ? x->dims().size() : 1; + const int canonical_axis = funcs::CanonicalAxis(axis, rank); this->AcquireForwardPrimitiveDescriptor( dnnl::prop_kind::forward_scoring, x->mem_desc(), canonical_axis); } @@ -790,8 +791,8 @@ class SoftmaxOneDNNHandler dnnl::softmax_forward, dnnl::softmax_backward>(onednn_engine, cpu_place) { - const int canonical_axis = - funcs::CanonicalAxis(axis, out_grad->dims().size()); + int rank = out_grad->dims().size() != 0 ? out_grad->dims().size() : 1; + const int canonical_axis = funcs::CanonicalAxis(axis, rank); this->AcquireForwardPrimitiveDescriptor( dnnl::prop_kind::forward_scoring, out->mem_desc(), canonical_axis); this->AcquireBackwardPrimitiveDescriptor( diff --git a/paddle/phi/kernels/onednn/log_softmax_kernel.cc b/paddle/phi/kernels/onednn/log_softmax_kernel.cc index 800d67a31cb5d7..857acaef7bf559 100644 --- a/paddle/phi/kernels/onednn/log_softmax_kernel.cc +++ b/paddle/phi/kernels/onednn/log_softmax_kernel.cc @@ -32,8 +32,10 @@ class LogSoftmaxOneDNNHandler const int axis) : funcs::OneDNNHandlerNoCachingT( onednn_engine, cpu_place) { + int rank = x.dims().size() != 0 ? 
x.dims().size() : 1; + const int canonical_axis = funcs::CanonicalAxis(axis, rank); this->AcquireForwardPrimitiveDescriptor( - dnnl::prop_kind::forward_inference, x.mem_desc(), axis); + dnnl::prop_kind::forward_inference, x.mem_desc(), canonical_axis); } }; @@ -43,7 +45,6 @@ void LogSoftmaxKernel(const Context& dev_ctx, int axis, DenseTensor* out) { const auto& onednn_engine = dev_ctx.GetEngine(); - axis = axis >= 0 ? axis : x.dims().size() + axis; LogSoftmaxOneDNNHandler handler( onednn_engine, dev_ctx.GetPlace(), x, axis); diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py index 00a6138ae83552..97139ebe353b3b 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_log_softmax_mkldnn_op.py @@ -26,7 +26,6 @@ from paddle.fluid.tests.unittests.test_log_softmax import ref_log_softmax -@OpTestTool.skip_if_not_cpu_bf16() class TestLogSoftmaxOneDNNOp(OpTest): def setUp(self): self.op_type = 'log_softmax' @@ -35,7 +34,11 @@ def setUp(self): self.set_axis() x = np.random.uniform(0.1, 1.0, self.shape).astype(np.float32) - out = np.apply_along_axis(ref_log_softmax, self.axis, x) + out = ( + np.apply_along_axis(ref_log_softmax, self.axis, x) + if len(self.shape) > 0 + else np.array(0.0).astype(self.dtype) + ) if self.dtype == np.uint16: x = convert_float_to_uint16(x) @@ -57,6 +60,11 @@ def test_check_output(self): self.check_output_with_place(core.CPUPlace()) +class TestLogSoftmax0DOneDNNOp(TestLogSoftmaxOneDNNOp): + def set_shape(self): + self.shape = [] + + class TestLogSoftmax1DOneDNNOp(TestLogSoftmaxOneDNNOp): def set_shape(self): self.shape = [100] @@ -78,11 +86,13 @@ def set_axis(self): # BF16 TESTS +@OpTestTool.skip_if_not_cpu_bf16() class TestLogSoftmax1DBF16OneDNNOp(TestLogSoftmax1DOneDNNOp): def set_dtype(self): self.dtype = np.uint16 +@OpTestTool.skip_if_not_cpu_bf16() class TestLogSoftmaxPositiveAxisBF16OneDNNOp( TestLogSoftmaxPositiveAxisOneDNNOp ): @@ -90,9 +100,10 @@ def set_dtype(self): self.dtype = np.uint16 +@OpTestTool.skip_if_not_cpu_bf16() class TestLogSoftmax5DBF16OneDNNOp(TestLogSoftmax5DOneDNNOp): - def set_shape(self): - self.shape = [2, 3, 4, 5, 6] + def set_dtype(self): + self.dtype = np.uint16 if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py index 5d77c20361f0af..7be0238acf759a 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_softmax_mkldnn_op.py @@ -26,6 +26,7 @@ TestSoftmaxOp4, TestSoftmaxOp5, TestSoftmaxOp6, + TestSoftmaxOp_ZeroDim1, ) @@ -95,26 +96,38 @@ def init_kernel_type(self): class TestSoftmaxMKLDNNOp2(TestSoftmaxOp2): def init_kernel_type(self): self.use_mkldnn = True + # oneDNN doesn't support float64 dtype + self.dtype = np.float32 class TestSoftmaxMKLDNNOp3(TestSoftmaxOp3): def init_kernel_type(self): self.use_mkldnn = True + self.dtype = np.float32 class TestSoftmaxMKLDNNOp4(TestSoftmaxOp4): def init_kernel_type(self): self.use_mkldnn = True + self.dtype = np.float32 class TestSoftmaxMKLDNNOp5(TestSoftmaxOp5): def init_kernel_type(self): self.use_mkldnn = True + self.dtype = np.float32 class TestSoftmaxMKLDNNOp6(TestSoftmaxOp6): def init_kernel_type(self): self.use_mkldnn = True + self.dtype = np.float32 + + +class 
TestSoftmaxMKLDNNOp_ZeroDim(TestSoftmaxOp_ZeroDim1): + def init_kernel_type(self): + self.use_mkldnn = True + self.dtype = np.float32 # Check if primitives already exist in backward diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 44f60ce045885c..cec117fff4b3f0 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -122,6 +122,7 @@ def setUp(self): self.use_mkldnn = False # explicilty use float32 for ROCm, as MIOpen does not yet support float64 self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64 + self.init_kernel_type() np.random.seed(0) x = np.random.uniform(0.1, 1, []).astype(self.dtype) From 8c54371e5cc5c09b10f9de721c66187c981cecf1 Mon Sep 17 00:00:00 2001 From: yangqun Date: Sun, 19 Mar 2023 06:04:43 +0000 Subject: [PATCH 5/6] fix commnets --- paddle/fluid/framework/data_transform.cc | 10 +---- .../kernels/funcs/data_layout_transform.cc | 37 ++++++++++++------- .../phi/kernels/funcs/data_layout_transform.h | 3 ++ .../phi/kernels/onednn/log_softmax_kernel.cc | 2 +- paddle/phi/kernels/transfer_layout_kernel.cc | 8 +--- .../unittests/mkldnn/test_scale_mkldnn_op.py | 2 - 6 files changed, 31 insertions(+), 31 deletions(-) diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index 7505039d85a279..2b1525787c25b7 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -60,9 +60,6 @@ void TransformData(const phi::KernelKey &expected_kernel_type, if (lin != DataLayout::ONEDNN && lout == DataLayout::ONEDNN) { // Case1 - transform from Non-ONEDNN OPKernel to ONEDNN OPKernel // Just set layout/format. No real transform occur - - auto out_format = phi::funcs::OneDNNFormatForSize( - in.dims().size(), phi::funcs::ToOneDNNFormat(lin)); out.ShareDataWith(input_tensor); // For NHWC data we need reshape of tensors as MKL-DNN // is expecting NHWC dims description order @@ -73,11 +70,8 @@ void TransformData(const phi::KernelKey &expected_kernel_type, phi::OneDNNContext::tls().set_cur_paddle_data_layout(lin); } - auto out_dims = out.dims().size() != 0 ? vectorize(out.dims()) - : std::vector{1}; - - dnnl::memory::desc out_mem_desc( - out_dims, phi::funcs::ToOneDNNDataType(in.dtype()), out_format); + dnnl::memory::desc out_mem_desc = + phi::funcs::make_memory_desc(out, lin); out.set_mem_desc(out_mem_desc); } else { // Case2 - transfrom from ONEDNN OPKernel to Non-ONEDNN OPKernel diff --git a/paddle/phi/kernels/funcs/data_layout_transform.cc b/paddle/phi/kernels/funcs/data_layout_transform.cc index 5b5ec6eba1be0f..eddb137733937a 100644 --- a/paddle/phi/kernels/funcs/data_layout_transform.cc +++ b/paddle/phi/kernels/funcs/data_layout_transform.cc @@ -51,6 +51,27 @@ void* GetDataFromTensor(const DenseTensor& tensor, } } +// This helper function is used to construct a dnnl memory descriptor from a +// reference dense tensor and a target layout. For 0-D tensor case, we will +// construct a 1-D memory descriptor with shape [1], since oneDNN didn't support +// 0-D now. 
+dnnl::memory::desc make_memory_desc(const phi::DenseTensor& ref_tensor, + phi::DataLayout target_layout) { + auto ref_dims = vectorize(ref_tensor.dims()); + auto ref_type = ToOneDNNDataType(ref_tensor.dtype()); + PADDLE_ENFORCE_NE(ref_type, + OneDNNDataType::undef, + errors::InvalidArgument( + "Ref tensor type (%s) is not supported by oneDNN.", + ref_tensor.dtype())); + + auto md_dims = ref_dims.size() != 0 ? ref_dims : std::vector{1}; + auto md_format = + OneDNNFormatForSize(md_dims.size(), ToOneDNNFormat(target_layout)); + dnnl::memory::desc md(md_dims, ref_type, md_format); + return md; +} + void TransDataLayoutFromOneDNN(DataLayout in_layout, DataLayout out_layout, const DenseTensor& in, @@ -64,19 +85,7 @@ void TransDataLayoutFromOneDNN(DataLayout in_layout, auto* dev_ctx = dynamic_cast(pool.Get(place)); auto& cpu_engine = dev_ctx->GetEngine(); - auto in_tz = vectorize(in.dims()); - auto out_tz = in_tz.size() != 0 ? in_tz : std::vector{1}; - - auto in_type = ToOneDNNDataType(in.dtype()); - PADDLE_ENFORCE_NE( - in_type, - OneDNNDataType::undef, - errors::InvalidArgument("Input tensor type (%s) is not supported.", - in.dtype())); - - auto out_format = - OneDNNFormatForSize(in_tz.size(), ToOneDNNFormat(out_layout)); - dnnl::memory::desc out_mem_desc(out_tz, in_type, out_format); + dnnl::memory::desc out_mem_desc = make_memory_desc(in, out_layout); // output tensor has the same dims as input. Reorder don't change dims out->set_mem_desc(out_mem_desc); @@ -85,6 +94,8 @@ void TransDataLayoutFromOneDNN(DataLayout in_layout, // Note(0x45f): Using initialized() to support slice Tensors // with shapes like [0, 0, 0]. if (in.initialized() && ((in.mem_desc() != out->mem_desc()) || always_copy)) { + auto in_tz = vectorize(in.dims()); + auto in_type = ToOneDNNDataType(in.dtype()); void* in_data = GetDataFromTensor(in, in_type); ReorderOneDNNHandler handler(in_tz, in.dtype(), in_type, cpu_engine); diff --git a/paddle/phi/kernels/funcs/data_layout_transform.h b/paddle/phi/kernels/funcs/data_layout_transform.h index 54e7d9b729f011..d85c9a68626b64 100644 --- a/paddle/phi/kernels/funcs/data_layout_transform.h +++ b/paddle/phi/kernels/funcs/data_layout_transform.h @@ -85,6 +85,9 @@ void TransDataLayoutFromOneDNN(DataLayout in_layout, bool always_copy = false); void* GetDataFromTensor(const DenseTensor& tensor, OneDNNDataType type); +dnnl::memory::desc make_memory_desc(const phi::DenseTensor& ref_tensor, + phi::DataLayout target_layout); + #endif } // namespace funcs diff --git a/paddle/phi/kernels/onednn/log_softmax_kernel.cc b/paddle/phi/kernels/onednn/log_softmax_kernel.cc index 857acaef7bf559..e7c73c524cba7b 100644 --- a/paddle/phi/kernels/onednn/log_softmax_kernel.cc +++ b/paddle/phi/kernels/onednn/log_softmax_kernel.cc @@ -32,7 +32,7 @@ class LogSoftmaxOneDNNHandler const int axis) : funcs::OneDNNHandlerNoCachingT( onednn_engine, cpu_place) { - int rank = x.dims().size() != 0 ? x.dims().size() : 1; + const int rank = x.dims().size() != 0 ? 
x.dims().size() : 1; const int canonical_axis = funcs::CanonicalAxis(axis, rank); this->AcquireForwardPrimitiveDescriptor( dnnl::prop_kind::forward_inference, x.mem_desc(), canonical_axis); diff --git a/paddle/phi/kernels/transfer_layout_kernel.cc b/paddle/phi/kernels/transfer_layout_kernel.cc index cbd29df954d4a4..ae9c5d3092ec60 100644 --- a/paddle/phi/kernels/transfer_layout_kernel.cc +++ b/paddle/phi/kernels/transfer_layout_kernel.cc @@ -136,9 +136,6 @@ void TransferLayoutMKLDNN(const Context& dev_ctx, if (src_layout != DataLayout::ONEDNN && dst_layout == DataLayout::ONEDNN) { // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel // Just set layout/format. No real transform occur - auto out_format = funcs::OneDNNFormatForSize( - x.dims().size(), funcs::ToOneDNNFormat(src_layout)); - out->ShareDataWith(x); // For NHWC data we need reshape of tensors as MKL-DNN // is expecting NHWC dims description order @@ -148,10 +145,7 @@ void TransferLayoutMKLDNN(const Context& dev_ctx, OneDNNContext::tls().set_cur_paddle_data_layout(src_layout); } - auto out_dims = out->dims().size() != 0 ? vectorize(out->dims()) - : std::vector{1}; - dnnl::memory::desc out_mem_desc( - out_dims, funcs::ToOneDNNDataType(x.dtype()), out_format); + dnnl::memory::desc out_mem_desc = funcs::make_memory_desc(*out, src_layout); out->set_mem_desc(out_mem_desc); } else if (src_layout == DataLayout::ONEDNN && dst_layout != DataLayout::ONEDNN) { diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py index af5caca3ffe60b..de28d91a1fcdf5 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_scale_mkldnn_op.py @@ -68,7 +68,6 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') -# FIXME(xx) no use_mkldnn attr, does this case run into oneDNN? class TestScaleOpScaleTensor(OpTest): def setUp(self): self.op_type = "scale" @@ -87,7 +86,6 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') -# FIXME(xx) no use_mkldnn attr, does this case run into oneDNN? 
class TestScaleOpScaleTensorNotBiasAfterScale(OpTest): def setUp(self): self.op_type = "scale" From edbd918fec5be45769e055299ed2d2a92893726e Mon Sep 17 00:00:00 2001 From: qunyang Date: Mon, 20 Mar 2023 16:38:56 +0800 Subject: [PATCH 6/6] fix some unittests --- .../unittests/mkldnn/test_activation_mkldnn_op.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py index d1023f781f47b8..bab93f40dc73b2 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py @@ -135,7 +135,7 @@ def setUp(self): self.python_api = F.gelu self.dtype = np.float32 - x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) + x = np.random.uniform(-1, 1, []).astype(self.dtype) out = gelu(x, False) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} @@ -489,7 +489,7 @@ def setUp(self): self.python_api = F.mish self.dtype = np.float32 - x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype) + x = np.random.uniform(0.1, 1, []).astype(self.dtype) out = x * np.tanh(np.log(1 + np.exp(x))) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} @@ -513,7 +513,7 @@ class TestMKLDNNRound_ZeroDim(TestActivation_ZeroDim): def setUp(self): self.op_type = "round" self.python_api = paddle.round - x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(np.float32) + x = np.random.uniform(0.1, 1, []).astype(np.float32) out = np.round(x) self.inputs = {'X': x} @@ -557,7 +557,7 @@ def setUp(self): self.python_api = F.elu self.set_alpha() - x = np.random.random((5, 5, 4)).astype("float32") + x = np.random.random(()).astype("float32") self.inputs = {'X': x} self.attrs = {'use_mkldnn': True, 'alpha': self.alpha} @@ -590,7 +590,7 @@ class TestMKLDNNExpOp_ZeroDim(TestActivation_ZeroDim): def setUp(self): self.op_type = "exp" self.python_api = paddle.exp - x = np.random.random((5, 5, 4)).astype("float32") + x = np.random.random(()).astype("float32") self.inputs = {'X': x} self.attrs = {'use_mkldnn': True}
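Note: the following is an illustrative sketch only, not part of the patch series. It shows the behaviour this series targets, a 0-D (scalar) tensor flowing through an element-wise unary op on the oneDNN CPU path, in a self-contained form. It assumes a CPU build of Paddle with oneDNN available; the FLAGS_use_mkldnn switch and the shape=[] static input are assumptions about the surrounding Paddle API, not something taken from these patches.

# Illustrative sketch (assumption-based, see note above): exercise a 0-D tensor
# through relu with oneDNN kernels enabled globally via FLAGS_use_mkldnn.
import numpy as np
import paddle

paddle.enable_static()
paddle.set_flags({'FLAGS_use_mkldnn': True})  # assumed global oneDNN switch

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[], dtype='float32')  # 0-D input
    y = paddle.nn.functional.relu(x)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
(out,) = exe.run(
    main_prog,
    feed={'x': np.array(2.5, dtype='float32')},
    fetch_list=[y],
)
print(out.shape)  # expected to stay 0-D, i.e. ()

With the changes above, the oneDNN handlers describe the 0-D input internally as a 1-D memory descriptor of shape [1] (oneDNN has no 0-D shape), while the Paddle-visible output keeps its 0-D dims.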