
[Frontend][Tensorflow] Support SAME padding for dynamic h, w when stride == 1 (apache#7885)

* Support SAME padding for dynamic workloads when stride == 1

* Fix lint

* Fix lint
Trevor Morris authored and trevor-m committed May 11, 2021
1 parent 549d0fb commit a13d53c
Showing 2 changed files with 34 additions and 1 deletion.
7 changes: 6 additions & 1 deletion python/tvm/relay/frontend/tensorflow.py
@@ -56,7 +56,12 @@ def list_shape_of(tensor, ndim):


 def _get_pad_pair(input1d, kernel1d, stride1d):
-    if input1d % stride1d == 0:
+    if isinstance(input1d, tvm.tir.Any) and stride1d != 1:
+        raise tvm.error.OpAttributeUnImplemented(
+            "SAME padding is not supported in combination with dynamic height or width when stride"
+            " is not 1."
+        )
+    if stride1d == 1 or input1d % stride1d == 0:
         pad = max(kernel1d - stride1d, 0)
     else:
         pad = max(kernel1d - (input1d % stride1d), 0)
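The guard added above works because, with stride 1, the amount of SAME padding no longer depends on the input extent, so it can be computed even when the height or width is a dynamic tvm.tir.Any. Below is a minimal standalone sketch of the one-dimensional SAME-padding rule; only the total-pad formula mirrors the diff, while the pad_before/pad_after split follows the usual convention and is an assumption, not part of this change.

# Illustrative sketch of TensorFlow's SAME-padding rule for one dimension.
def same_pad_1d(input1d, kernel1d, stride1d):
    if stride1d == 1 or input1d % stride1d == 0:
        # With stride 1 this is max(kernel1d - 1, 0): independent of input1d,
        # so a dynamic height or width is fine.
        pad = max(kernel1d - stride1d, 0)
    else:
        # Depends on input1d % stride1d, which cannot be evaluated at compile
        # time for a dynamic dimension -- hence the new error above.
        pad = max(kernel1d - (input1d % stride1d), 0)
    pad_before = pad // 2  # assumed split, shown for completeness
    return [pad_before, pad - pad_before]

print(same_pad_1d(32, 2, 1))  # [0, 1]
print(same_pad_1d(33, 2, 1))  # [0, 1] -- same padding regardless of input size
print(same_pad_1d(33, 2, 2))  # [0, 1], but only because 33 is known statically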
28 changes: 28 additions & 0 deletions tests/python/frontend/tensorflow/test_forward.py
@@ -213,6 +213,7 @@ def compare_tf_with_tvm(
cuda_layout="NCHW",
add_shapes_to_graph_def=True,
targets=None,
ignore_in_shape=False,
):
"""Generic function to generate and compare tensorflow and TVM output"""

@@ -259,6 +260,7 @@ def name_without_num(name):
                 opt_level=opt_level,
                 mode=mode,
                 cuda_layout=cuda_layout,
+                ignore_in_shape=ignore_in_shape,
             )
             # since the names from tensorflow and relay runs are not exactly same,
             # first len(tf_output) will be compared
@@ -314,6 +316,22 @@ def _test_pooling(input_shape, **kwargs):
             _test_pooling_iteration(input_shape, **kwargs)


+def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
+    """ Pooling with dynamic height and width dimensions. """
+    x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
+
+    with tf.Graph().as_default():
+        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
+        nn_ops.pool(in_data, **kwargs)
+
+        if kwargs["pooling_type"] == "MAX":
+            out_name = "max_pool:0"
+        else:
+            out_name = "avg_pool:0"
+
+        compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
+
+
 @tvm.testing.uses_gpu
 def test_forward_pooling():
     """ Pooling """
@@ -347,6 +365,16 @@ def test_forward_pooling():
             strides=[2, 2, 2],
         )

+        _test_pooling_dynamic(
+            input_shape=[1, None, None, 3],
+            np_shape=[1, 32, 32, 3],
+            window_shape=[2, 2],
+            padding="SAME",
+            pooling_type=pool_type,
+            dilation_rate=[1, 1],
+            strides=[1, 1],
+        )
+
         # test cases for max_pool3d & avg_pool3d with layout NCDHW
         # TensorFlow pool3d doesn't support NCDHW on cpu
         if is_gpu_available():
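Note that the new test runs compare_tf_with_tvm with mode="vm": graphs with dynamic height or width go through the Relay VM rather than the static graph executor. Outside the test harness, the same path would look roughly like the sketch below; `graph_def`, the "llvm" target, and the variable names are assumptions, while from_tensorflow and relay.vm.compile are the standard TVM entry points.

# Rough sketch, assuming `graph_def` is a frozen TensorFlow GraphDef whose
# input placeholder has shape [1, None, None, 3]; names are illustrative.
import numpy as np
import tvm
from tvm import relay

mod, params = relay.frontend.from_tensorflow(graph_def, layout="NHWC")

# Dynamic dimensions require the Relay VM instead of the graph executor.
with tvm.transform.PassContext(opt_level=3):
    vm_exec = relay.vm.compile(mod, target="llvm", params=params)

dev = tvm.cpu()
vm = tvm.runtime.vm.VirtualMachine(vm_exec, dev)
# Any concrete height/width works at run time, e.g. 32x32 as in the new test.
input_np = np.random.uniform(size=(1, 32, 32, 3)).astype("float32")
result = vm.invoke("main", tvm.nd.array(input_np, dev))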
