[TFLITE] Select op support for tflite frontend #5486

Merged: 3 commits, May 7, 2020
Changes from 1 commit
54 changes: 41 additions & 13 deletions python/tvm/relay/frontend/tflite.py
@@ -99,7 +99,7 @@ def __init__(self, model, subgraph, exp_tab):
'LOGISTIC': self.convert_logistic,
'MAX_POOL_2D': self.convert_max_pool2d,
'MAXIMUM': self.convert_maximum,
- 'MEAN': self._convert_reduce_mean,
+ 'MEAN': self.convert_reduce_mean,
'MINIMUM': self.convert_minimum,
'MIRROR_PAD': self.convert_mirror_pad,
'MUL': self.convert_mul,
@@ -109,16 +109,17 @@ def __init__(self, model, subgraph, exp_tab):
'PAD': self.convert_pad,
'POW': self.convert_pow,
'PRELU': self.convert_prelu,
- 'REDUCE_ANY': self._convert_reduce_any,
- 'REDUCE_MAX': self._convert_reduce_max,
- 'REDUCE_MIN': self._convert_reduce_min,
- 'REDUCE_PROD': self._convert_reduce_prod,
+ 'REDUCE_ANY': self.convert_reduce_any,
+ 'REDUCE_MAX': self.convert_reduce_max,
+ 'REDUCE_MIN': self.convert_reduce_min,
+ 'REDUCE_PROD': self.convert_reduce_prod,
'RELU':self.convert_relu,
Contributor:
Why are these changes relevant to this pull request?

Member Author:
@u99127 I changed these for code consistency; it's not related to this PR.
These 5 methods were starting with _ and I removed that prefix.

Contributor:
In an ideal world that would be a separate PR rather than being merged in here, since it could go in separately and fairly mechanically.

However, that's really not our development practice yet.

Ramana

Member Author:
@u99127 Thanks for the review. I totally agree with you. I have removed those changes from this PR and will raise another PR (#5515) for them. Could you please check again? TIA.

'RESHAPE': self.convert_reshape,
'RESIZE_BILINEAR': self.convert_resize_bilinear,
'RESIZE_NEAREST_NEIGHBOR': self.convert_resize_nearest_neighbor,
'ROUND': self.convert_round,
'RSQRT': self.convert_rsqrt,
+ 'SELECT': self.convert_select,
'SIN': self.convert_sin,
'SLICE': self.convert_slice,
'SOFTMAX': self.convert_softmax,
@@ -132,14 +133,15 @@ def __init__(self, model, subgraph, exp_tab):
'SQUEEZE': self.convert_squeeze,
'STRIDED_SLICE': self.convert_strided_slice,
'SUB': self.convert_sub,
- 'SUM': self._convert_reduce_sum,
+ 'SUM': self.convert_reduce_sum,
'TAN': self.convert_tan,
'TANH':self.convert_tanh,
'TILE': self.convert_tile,
'TOPK_V2': self.convert_topk_v2,
'TRANSPOSE_CONV': self.convert_transpose_conv,
'TRANSPOSE': self.convert_transpose,
'UNPACK': self.convert_unpack,
+ 'WHERE': self.convert_select,
'ZEROS_LIKE': self.convert_zeros_like,
}

@@ -1241,7 +1243,7 @@ def convert_fill(self, op):
return out

def _convert_reduce(self, relay_op, op):
"""Generic method to Convert TFLite MEAN operators"""
"""Generic method to Convert TFLite REDUCE operators"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ReducerOptions import ReducerOptions
@@ -1285,22 +1287,22 @@ def _convert_reduce(self, relay_op, op):

return out

- def _convert_reduce_min(self, op):
+ def convert_reduce_min(self, op):
return self._convert_reduce(_op.reduce.min, op)

- def _convert_reduce_max(self, op):
+ def convert_reduce_max(self, op):
return self._convert_reduce(_op.reduce.max, op)

- def _convert_reduce_mean(self, op):
+ def convert_reduce_mean(self, op):
return self._convert_reduce(_op.reduce.mean, op)

- def _convert_reduce_prod(self, op):
+ def convert_reduce_prod(self, op):
return self._convert_reduce(_op.reduce.prod, op)

- def _convert_reduce_sum(self, op):
+ def convert_reduce_sum(self, op):
return self._convert_reduce(_op.reduce.sum, op)

- def _convert_reduce_any(self, op):
+ def convert_reduce_any(self, op):
return self._convert_reduce(_op.reduce.any, op)

def convert_fully_connected(self, op):
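Note: all of the thin wrappers above delegate to the shared `_convert_reduce`, which differs only in the Relay reduce op it is handed. A minimal, hedged sketch of that dispatch idea (the `apply_reduce` helper is illustrative and not the frontend's code; the axis/keepdims handling is simplified, assuming a local TVM build with `tvm.relay` importable):

```python
from tvm import relay

def apply_reduce(relay_op, data, axis, keep_dims):
    """Forward TFLite-style reduce attributes to any Relay reduce op."""
    return relay_op(data, axis=tuple(axis), keepdims=keep_dims)

x = relay.var("x", shape=(1, 4, 4, 3), dtype="float32")
# One helper serves MEAN, SUM, REDUCE_MAX, ...; only the Relay op changes.
print(apply_reduce(relay.mean, x, axis=[1, 2], keep_dims=False))
print(apply_reduce(relay.max, x, axis=[1, 2], keep_dims=True))
```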
@@ -1697,6 +1699,18 @@ def convert_slice(self, op):

return out

+    def convert_select(self, op):
+        """Convert TFLite SELECT"""
+        input_tensors = self.get_input_tensors(op)
+        assert len(input_tensors) == 3, "input tensors length should be == 3"
+        cond = self.get_tensor_or_const_expr(input_tensors[0])
+        x = self.get_tensor_or_const_expr(input_tensors[1])
+        y = self.get_tensor_or_const_expr(input_tensors[2])
+
+        out = _op.where(cond, x, y)
+
+        return out

def convert_transpose(self, op):
"""transpose implementation."""
input_tensors = self.get_input_tensors(op)
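Note on the op being added: both SELECT and WHERE are mapped to Relay's `where`, which picks elements from the second operand where the condition is true and from the third otherwise. A minimal, hedged sketch of that semantics built directly in Relay (standalone illustration, not the frontend code; assumes a local TVM build):

```python
import tvm
from tvm import relay

# Element-wise select: where(cond, x, y) takes x where cond is True, else y.
cond = relay.var("cond", shape=(1, 4), dtype="bool")
x = relay.var("x", shape=(1, 4), dtype="int32")
y = relay.var("y", shape=(1, 4), dtype="int32")
func = relay.Function([cond, x, y], relay.where(cond, x, y))

# Printing the module shows the single `where` call the converter emits.
print(tvm.IRModule.from_expr(func))
```

The converter itself only needs to fetch the three input tensors (the condition and the two value tensors) and forward them, which is why the `get_tensor_or_const_expr` helper below handles the constant-versus-tensor distinction.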
@@ -2357,6 +2371,20 @@ def get_expr(self, input_tensor_idx):
def has_expr(self, input_tensor_idx):
return self.exp_tab.has_expr(get_tensor_name(self.subgraph, input_tensor_idx))

+    def get_tensor_or_const_expr(self, tensor):
+        """ Returns constant expr for constant else a tensor expr"""
+        if self.has_expr(tensor.tensor_idx):
+            # In most cases, we can assume that TOCO fuses elemwise operators
+            # with constants - it means both will be tensors.
+            expr = self.get_expr(tensor.tensor_idx)
+        else:
+            # However, in some corner cases, the elemwise operator is not fused,
+            # we can receive as constant.
+            type_str = self.get_tensor_type_str(tensor.tensor.Type())
+            expr = self.exp_tab.new_const(self.get_tensor_value(tensor), dtype=type_str)
+
+        return expr


def get_scalar_from_constant(expr):
""" Returns scalar value from Relay constant scalar. """
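The helper above is the usual lookup-or-constant pattern: return the expression already registered for the tensor, otherwise wrap the tensor's raw data in a Relay constant. A hedged, standalone sketch of the same idea (the `lookup_or_const` function and the plain-dict table are illustrative only; the real code goes through `exp_tab`, `get_tensor_value`, and `get_tensor_type_str`):

```python
import numpy as np
from tvm import relay

def lookup_or_const(expr_table, name, value, dtype):
    """Return the expression registered under `name`, or build a Relay
    constant from `value` when the tensor is not an input or op output."""
    if name in expr_table:
        # Usual case: the tensor is a graph input or another op's result.
        return expr_table[name]
    # Corner case: the tensor holds constant data baked into the model.
    return relay.const(np.asarray(value, dtype=dtype), dtype=dtype)

# Usage: one tensor is already in the table, the other becomes a constant.
table = {"input1": relay.var("input1", shape=(1, 4), dtype="int32")}
print(lookup_or_const(table, "input1", None, "int32"))
print(lookup_or_const(table, "threshold", [1, 2, 3, 4], "int32"))
```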
22 changes: 22 additions & 0 deletions tests/python/frontend/tflite/test_forward.py
@@ -1376,6 +1376,27 @@ def test_all_reduce():


#######################################################################
+# Select, Where
+# -------------
+
+def test_forward_select():
+    with tf.Graph().as_default():
+        with tf.Session() as sess:
+            input1 = tf.placeholder(
+                tf.int32, shape=[1, 4, 4, 3], name='input1')
+            input2 = tf.placeholder(
+                tf.int32, shape=[1, 4, 4, 3], name='input2')
+            mask = input1 > input2
+            out = tf.where(mask, input1 + 1, input2 * 2)
+            in_data1 = np.random.uniform(
+                0, 10, size=(1, 4, 4, 3)).astype("int32")
+            in_data2 = np.random.uniform(
+                0, 10, size=(1, 4, 4, 3)).astype("int32")
+
+            compare_tflite_with_tvm([in_data1, in_data2], [
+                'input1:0', 'input2:0'], [input1, input2], [out])


# Squeeze
# -------

@@ -2014,6 +2035,7 @@ def test_forward_mediapipe_hand_landmark():
test_forward_stridedslice()
test_forward_depthtospace()
test_forward_spacetodepth()
+ test_forward_select()

# NN
test_forward_convolution()