From 5436e16209b3150c7cab7249b2bbeb708614366d Mon Sep 17 00:00:00 2001
From: Trevor Morris
Date: Wed, 21 Apr 2021 11:30:59 -0700
Subject: [PATCH] [Frontend][Tensorflow] Support SAME padding for dynamic h, w when stride == 1 (#7885)

* Support SAME padding for dynamic workloads when stride == 1

* Fix lint

* Fix lint
---
 python/tvm/relay/frontend/tensorflow.py      |  7 ++++-
 .../frontend/tensorflow/test_forward.py      | 28 +++++++++++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py
index 6dd164c6e35ea..997f68b02b4dc 100644
--- a/python/tvm/relay/frontend/tensorflow.py
+++ b/python/tvm/relay/frontend/tensorflow.py
@@ -56,7 +56,12 @@ def list_shape_of(tensor, ndim):
 
 
 def _get_pad_pair(input1d, kernel1d, stride1d):
-    if input1d % stride1d == 0:
+    if isinstance(input1d, tvm.tir.Any) and stride1d != 1:
+        raise tvm.error.OpAttributeUnImplemented(
+            "SAME padding is not supported in combination with dynamic height or width when stride"
+            " is not 1."
+        )
+    if stride1d == 1 or input1d % stride1d == 0:
         pad = max(kernel1d - stride1d, 0)
     else:
         pad = max(kernel1d - (input1d % stride1d), 0)
diff --git a/tests/python/frontend/tensorflow/test_forward.py b/tests/python/frontend/tensorflow/test_forward.py
index 8446ef3d590bd..80a70e486754b 100644
--- a/tests/python/frontend/tensorflow/test_forward.py
+++ b/tests/python/frontend/tensorflow/test_forward.py
@@ -213,6 +213,7 @@ def compare_tf_with_tvm(
     cuda_layout="NCHW",
     add_shapes_to_graph_def=True,
     targets=None,
+    ignore_in_shape=False,
 ):
     """Generic function to generate and compare tensorflow and TVM output"""
 
@@ -259,6 +260,7 @@ def name_without_num(name):
             opt_level=opt_level,
             mode=mode,
             cuda_layout=cuda_layout,
+            ignore_in_shape=ignore_in_shape,
         )
         # since the names from tensorflow and relay runs are not exactly same,
         # first len(tf_output) will be compared
@@ -314,6 +316,22 @@ def _test_pooling(input_shape, **kwargs):
             _test_pooling_iteration(input_shape, **kwargs)
 
 
+def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
+    """ Pooling with dynamic height and width dimensions. """
+    x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
+
+    with tf.Graph().as_default():
+        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
+        nn_ops.pool(in_data, **kwargs)
+
+        if kwargs["pooling_type"] == "MAX":
+            out_name = "max_pool:0"
+        else:
+            out_name = "avg_pool:0"
+
+        compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
+
+
 @tvm.testing.uses_gpu
 def test_forward_pooling():
     """ Pooling """
@@ -347,6 +365,16 @@ def test_forward_pooling():
             strides=[2, 2, 2],
         )
 
+        _test_pooling_dynamic(
+            input_shape=[1, None, None, 3],
+            np_shape=[1, 32, 32, 3],
+            window_shape=[2, 2],
+            padding="SAME",
+            pooling_type=pool_type,
+            dilation_rate=[1, 1],
+            strides=[1, 1],
+        )
+
         # test cases for max_pool3d & avg_pool3d with layout NCDHW
         # TensorFlow pool3d doesn't support NCDHW on cpu
         if is_gpu_available():
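
A minimal standalone sketch of the SAME-padding computation this patch changes,
restated outside the frontend for reference. The key point is that when
stride == 1 the total pad is kernel - 1 regardless of the input size, so the
pad pair can be computed even when the height or width is dynamic
(tvm.tir.Any); the helper name below is hypothetical, not part of the patch.

    # Per-dimension SAME padding, mirroring _get_pad_pair after this patch
    # for the static and stride-1 cases.
    def same_pad_pair(input1d, kernel1d, stride1d):
        # With stride 1 the pad total is kernel1d - 1 and does not depend
        # on input1d, so a dynamic input dimension is fine here.
        if stride1d == 1 or input1d % stride1d == 0:
            pad = max(kernel1d - stride1d, 0)
        else:
            pad = max(kernel1d - (input1d % stride1d), 0)
        pad_before = pad // 2
        pad_after = pad - pad_before
        return [pad_before, pad_after]

    # e.g. a 2x2 window with stride 1 always needs (0, 1) padding,
    # whatever the input extent is:
    assert same_pad_pair(input1d=32, kernel1d=2, stride1d=1) == [0, 1]
    assert same_pad_pair(input1d=33, kernel1d=2, stride1d=1) == [0, 1]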