From bea76404233cdec70895daa1905c6b5f594230aa Mon Sep 17 00:00:00 2001
From: maheshambule
Date: Mon, 20 Apr 2020 19:54:11 +0530
Subject: [PATCH 1/4] TFLITE fill and splitv ops

---
 python/tvm/relay/frontend/tflite.py          | 11 +++++++++
 tests/python/frontend/tflite/test_forward.py | 25 ++++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index d489bd34f7ac..3e4446aac948 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -86,6 +86,7 @@ def __init__(self, model, subgraph, exp_tab):
             'GREATER': self.convert_greater,
             'HARD_SWISH': self.convert_hard_swish,
             'L2_NORMALIZATION': self.convert_l2_normalization,
+            'L2_POOL_2D': self.convert_l2_pool2d,
             'LESS_EQUAL': self.convert_less_equal,
             'LESS': self.convert_less,
             'LOCAL_RESPONSE_NORMALIZATION': self.convert_lrn,
@@ -331,6 +332,10 @@ def convert_max_pool2d(self, op):
         """Convert TFLite max pool2d"""
         return self.convert_pool2d(op, "max")
 
+    def convert_l2_pool2d(self, op):
+        """Convert TFLite l2 pool2d"""
+        return self.convert_pool2d(op, "l2")
+
     def convert_reshape(self, op):
         """Convert TFLite reshape"""
         try:
@@ -1674,6 +1679,12 @@ def convert_pool2d(self, op, pool_type):
             assert self.has_same_qnn_params(input_tensor, output_tensor), \
                 "qnn.op.max_pool2d requires input and output qnn params to be same"
             out = _op.nn.max_pool2d(in_expr, **params)
+        elif pool_type == "l2":
+            # l2_pool_2d is equivalent to sqrt(avg_pool(sqr(in_data)))
+            exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
+            square_exp = _op.power(in_expr, relay.const(2, exp_type))
+            avg_pool_exp = _op.nn.avg_pool2d(square_exp, **params)
+            out = _op.sqrt(avg_pool_exp)
         else:
             raise tvm.error.OpNotImplemented(
                 'Operator {} is not supported for frontend TFLite.'.format(pool_type + ' pool'))
diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py
index db4deb111850..99037a1c3751 100644
--- a/tests/python/frontend/tflite/test_forward.py
+++ b/tests/python/frontend/tflite/test_forward.py
@@ -487,6 +487,31 @@ def test_forward_pooling():
                   strides=[2, 1])
 
 
+def _test_l2_pool2d(input_shape, ksize, strides, padding, data_format, fused_func_name=None):
+    x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
+
+    with tf.Graph().as_default():
+        in_data = tf.placeholder(
+            dtype=tf.float32, name="input", shape=input_shape)
+        out = tf.sqrt(tf.nn.avg_pool(
+            tf.square(in_data), ksize=ksize, strides=strides,
+            padding=padding, data_format=data_format))
+        out = with_fused_activation_function(out, fused_func_name)
+
+        compare_tflite_with_tvm(x, 'input', [in_data], [out])
+
+
+def test_l2_pool2d():
+    _test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], 'SAME', "NHWC", "RELU6")
+    _test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], 'SAME', "NHWC", "RELU6")
+    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], 'SAME', "NHWC")
+    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], 'SAME', "NHWC")
+    _test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], 'VALID', "NHWC", "RELU")
+    _test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], 'VALID', "NHWC")
+    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], 'VALID', "NHWC")
+    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], 'VALID', "NHWC", "RELU6")
+
+
 #######################################################################
 # Convolution
 # -----------
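For reference, the Relay expression that convert_pool2d builds for pool_type == "l2" can be reproduced standalone. This is a minimal sketch rather than part of the patch: the input shape, pool size, strides, padding and layout below are placeholder values, whereas the converter derives them from the operator's TFLite pooling options.

import tvm
from tvm import relay

# Placeholder input and pool parameters (the converter reads these from the
# TFLite operator instead).
data = relay.var("data", shape=(1, 9, 10, 2), dtype="float32")
square = relay.power(data, relay.const(2.0, "float32"))     # square(in_data)
avg = relay.nn.avg_pool2d(square, pool_size=(2, 2), strides=(1, 1),
                          padding=(0, 0), layout="NHWC")    # avg_pool(...)
out = relay.sqrt(avg)                                       # sqrt(...)
print(tvm.IRModule.from_expr(relay.Function([data], out)))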
From 9bac0b9b7959d0927ac8d35e4936d0b77762f878 Mon Sep 17 00:00:00 2001
From: maheshambule
Date: Mon, 27 Apr 2020 15:45:53 +0530
Subject: [PATCH 2/4] l2_pool_2d op changes in comment

---
 python/tvm/relay/frontend/tflite.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index 7ca073f71d60..a2d11ed8d31d 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -1776,7 +1776,7 @@ def convert_pool2d(self, op, pool_type):
                 "qnn.op.max_pool2d requires input and output qnn params to be same"
             out = _op.nn.max_pool2d(in_expr, **params)
         elif pool_type == "l2":
-            # l2_pool_2d is equivalent to sqrt(avg_pool(sqr(in_data)))
+            # l2_pool_2d is equivalent to square_root(avg_pool(square(in_data)))
             exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
             square_exp = _op.power(in_expr, relay.const(2, exp_type))
             avg_pool_exp = _op.nn.avg_pool2d(square_exp, **params)

From aa55c74648f9b7eb0ebd11211bea0e2e0932cd01 Mon Sep 17 00:00:00 2001
From: maheshambule
Date: Mon, 27 Apr 2020 23:41:16 +0530
Subject: [PATCH 3/4] TFLite l2_pool_2d op added test case in main

---
 python/tvm/relay/frontend/tflite.py          | 1 +
 tests/python/frontend/tflite/test_forward.py | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index a2d11ed8d31d..494b2d8116b4 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -1777,6 +1777,7 @@ def convert_pool2d(self, op, pool_type):
             out = _op.nn.max_pool2d(in_expr, **params)
         elif pool_type == "l2":
             # l2_pool_2d is equivalent to square_root(avg_pool(square(in_data)))
+            # TFLite does not have support for quantised l2_pool_2d op.
             exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
             square_exp = _op.power(in_expr, relay.const(2, exp_type))
             avg_pool_exp = _op.nn.avg_pool2d(square_exp, **params)
diff --git a/tests/python/frontend/tflite/test_forward.py b/tests/python/frontend/tflite/test_forward.py
index c78c56a6e863..eb65d82a6546 100644
--- a/tests/python/frontend/tflite/test_forward.py
+++ b/tests/python/frontend/tflite/test_forward.py
@@ -575,7 +575,7 @@ def _test_l2_pool2d(input_shape, ksize, strides, padding, data_format, fused_fun
         compare_tflite_with_tvm(x, 'input', [in_data], [out])
 
 
-def test_l2_pool2d():
+def test_forward_l2_pool2d():
     _test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], 'SAME', "NHWC", "RELU6")
     _test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], 'SAME', "NHWC", "RELU6")
     _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], 'SAME', "NHWC")
@@ -1963,6 +1963,7 @@ def test_forward_mediapipe_hand_landmark():
     test_forward_transpose_conv()
     test_forward_logistic()
     test_forward_pooling()
+    test_forward_l2_pool2d()
     test_forward_softmax()
     test_forward_tanh()
     test_forward_relu()

From 75caa3cbefdaa8d316dafc7b7ea7553526787a68 Mon Sep 17 00:00:00 2001
From: maheshambule
Date: Tue, 28 Apr 2020 12:14:52 +0530
Subject: [PATCH 4/4] TFLite L2_POOL_2D added check for quantized input

---
 python/tvm/relay/frontend/tflite.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/python/tvm/relay/frontend/tflite.py b/python/tvm/relay/frontend/tflite.py
index 494b2d8116b4..2065d60a299e 100644
--- a/python/tvm/relay/frontend/tflite.py
+++ b/python/tvm/relay/frontend/tflite.py
@@ -1776,8 +1776,11 @@ def convert_pool2d(self, op, pool_type):
                 "qnn.op.max_pool2d requires input and output qnn params to be same"
             out = _op.nn.max_pool2d(in_expr, **params)
         elif pool_type == "l2":
-            # l2_pool_2d is equivalent to square_root(avg_pool(square(in_data)))
-            # TFLite does not have support for quantised l2_pool_2d op.
+            # L2_POOL_2D is equivalent to square_root(avg_pool(square(in_data)))
+            # TFLite does not have support for quantised L2_POOL_2D op.
+            assert not input_tensor.qnn_params, \
+                "As TFLite does not have support for quantized L2_POOL_2D, \
+                Quantized input is not expected."
             exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
             square_exp = _op.power(in_expr, relay.const(2, exp_type))
             avg_pool_exp = _op.nn.avg_pool2d(square_exp, **params)