From 3af29b4ea47ace91853fc839ba3c8cef0c816fce Mon Sep 17 00:00:00 2001
From: Trevor Morris
Date: Mon, 19 Apr 2021 21:29:54 +0000
Subject: [PATCH] Support SAME padding for dynamic workloads when stride == 1

---
 python/tvm/relay/frontend/tensorflow.py     |  6 ++++-
 .../frontend/tensorflow/test_forward.py     | 26 +++++++++++++++++++
 2 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 6dd164c6e35e..b0fa10982d0c 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -56,7 +56,11 @@ def list_shape_of(tensor, ndim):
 
 
 def _get_pad_pair(input1d, kernel1d, stride1d):
-    if input1d % stride1d == 0:
+    if isinstance(input1d, tvm.tir.Any) and stride1d != 1:
+        raise tvm.error.OpAttributeUnImplemented(
+            "SAME padding is not supported in combination with dynamic height or width when stride is not 1."
+        )
+    if stride1d == 1 or input1d % stride1d == 0:
         pad = max(kernel1d - stride1d, 0)
     else:
         pad = max(kernel1d - (input1d % stride1d), 0)
diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index 8446ef3d590b..bc8d81e267bb 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -213,6 +213,7 @@ def compare_tf_with_tvm(
     cuda_layout="NCHW",
     add_shapes_to_graph_def=True,
     targets=None,
+    ignore_in_shape=False,
 ):
     """Generic function to generate and compare tensorflow and TVM output"""
 
@@ -259,6 +260,7 @@ def name_without_num(name):
             opt_level=opt_level,
             mode=mode,
             cuda_layout=cuda_layout,
+            ignore_in_shape=ignore_in_shape,
         )
     # since the names from tensorflow and relay runs are not exactly same,
     # first len(tf_output) will be compared
@@ -313,6 +315,20 @@ def _test_pooling(input_shape, **kwargs):
             kwargs["data_format"] = "NCHW"
             _test_pooling_iteration(input_shape, **kwargs)
 
+def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
+    """ Pooling with dynamic height and width dimensions. """
+    x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
+
+    with tf.Graph().as_default():
+        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
+        nn_ops.pool(in_data, **kwargs)
+
+        if kwargs["pooling_type"] == "MAX":
+            out_name = "max_pool:0"
+        else:
+            out_name = "avg_pool:0"
+
+        compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
 
 @tvm.testing.uses_gpu
 def test_forward_pooling():
@@ -347,6 +363,16 @@ def test_forward_pooling():
             strides=[2, 2, 2],
         )
 
+        _test_pooling_dynamic(
+            input_shape=[1, None, None, 3],
+            np_shape=[1, 32, 32, 3],
+            window_shape=[2, 2],
+            padding="SAME",
+            pooling_type=pool_type,
+            dilation_rate=[1, 1],
+            strides=[1, 1],
+        )
+
     # test cases for max_pool3d & avg_pool3d with layout NCDHW
     # TensorFlow pool3d doesn't support NCDHW on cpu
     if is_gpu_available():
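
Why stride == 1 is the special case: with stride 1, TensorFlow's SAME padding no
longer depends on the input extent, so it can be computed at compile time even
when the height or width is a dynamic tvm.tir.Any. A minimal standalone sketch
of that arithmetic (not part of the patch; it mirrors the condition in
_get_pad_pair above and assumes the usual pad // 2 / pad - pad // 2 before/after
split, which is not shown in the hunk):

    # Sketch of TensorFlow-style SAME padding along one dimension.
    def get_pad_pair(input1d, kernel1d, stride1d):
        # Total padding depends on input1d % stride1d, except when the input
        # divides evenly -- or the stride is 1, where input1d % stride1d is
        # always 0 and the expression collapses to max(kernel1d - 1, 0).
        if stride1d == 1 or input1d % stride1d == 0:
            pad = max(kernel1d - stride1d, 0)
        else:
            pad = max(kernel1d - (input1d % stride1d), 0)
        # Assumed before/after split: smaller half goes before.
        pad_before = pad // 2
        return [pad_before, pad - pad_before]

    # With stride 1 the result is identical for every input extent, so a
    # dynamic (unknown) height or width is safe:
    assert get_pad_pair(32, 2, 1) == get_pad_pair(33, 2, 1) == [0, 1]
    # With stride 2 the padding depends on the concrete extent, which is why
    # the patch rejects dynamic dimensions when stride != 1:
    assert get_pad_pair(32, 2, 2) == [0, 0]
    assert get_pad_pair(33, 2, 2) == [0, 1]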