Skip to content

Commit

Permalink
Make minor codestyle changes
Browse files Browse the repository at this point in the history
* change operators' names to uppercase in the error messages
* remove a redundant line
* reorder functions in parser and tests for squared_difference op
  • Loading branch information
inadob committed Jan 20, 2020
1 parent cd42c14 commit 12efe01
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 27 deletions.
34 changes: 17 additions & 17 deletions python/tvm/relay/frontend/tflite.py
Original file line number Diff line number Diff line change
Expand Up @@ -695,7 +695,7 @@ def convert_sub(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized sub operator is not supported yet.')
'TFlite quantized SUB operator is not supported yet.')
return self._convert_elemwise(_op.subtract, op)

def convert_mul(self, op):
Expand All @@ -710,48 +710,48 @@ def convert_div(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized div operator is not supported yet.')
'TFlite quantized DIV operator is not supported yet.')
return self._convert_elemwise(_op.divide, op)

def convert_pow(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized pow operator is not supported yet.')
'TFlite quantized POW operator is not supported yet.')
return self._convert_elemwise(_op.power, op)

def convert_squared_difference(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized SQUARED_DIFFERENCE operator is not supported yet.')
difference = self._convert_elemwise(_op.subtract, op)
# _convert_elemwise has guaranteed that there is only one output tensor
exp_type = self.get_tensor_type_str(self.get_output_tensors(op)[0].tensor.Type())
out = _op.power(difference, relay.const(2, exp_type))
return out

def convert_maximum(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized maximum operator is not supported yet.')
'TFlite quantized MAXIMUM operator is not supported yet.')
return self._convert_elemwise(_op.maximum, op)

def convert_minimum(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized minimum operator is not supported yet.')
'TFlite quantized MINIMUM operator is not supported yet.')
return self._convert_elemwise(_op.minimum, op)

def convert_greater(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized greater operator is not supported yet.')
'TFlite quantized GREATER operator is not supported yet.')
return self._convert_elemwise(_op.greater, op)

def convert_squared_difference(self, op):
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
'TFlite quantized squared difference operator is not supported yet.')
difference = self._convert_elemwise(_op.subtract, op)
# _convert_elemwise has guaranteed that there is only one output tensor
exp_type = self.get_tensor_type_str(self.get_output_tensors(op)[0].tensor.Type())
out = _op.power(difference, relay.const(2, exp_type))
return out

def convert_greater_equal(self, op):
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
Expand Down
18 changes: 8 additions & 10 deletions tests/python/frontend/tflite/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -843,6 +843,13 @@ def _test_pow(data):
""" One iteration of power """
return _test_elemwise(math_ops.pow, data)
#######################################################################
# Squared_difference
# ------------------

def _test_squared_difference(data):
""" One iteration of squared difference """
return _test_elemwise(math_ops.squared_difference, data)
#######################################################################
# Maximum
# -------

Expand Down Expand Up @@ -898,15 +905,6 @@ def _test_equal(data):
def _test_not_equal(data):
""" One iteration of not_equal """
return _test_elemwise(math_ops.not_equal, data)
#######################################################################

#######################################################################
# Squared_difference
# ------------------

def _test_squared_difference(data):
""" One iteration of squared difference """
return _test_elemwise(math_ops.squared_difference, data)

def _test_forward_elemwise(testop):
""" Elemwise """
Expand Down Expand Up @@ -947,10 +945,10 @@ def test_all_elemwise():
_test_forward_elemwise(partial(_test_div, fused_activation_function="RELU"))
_test_forward_elemwise(partial(_test_div, fused_activation_function="RELU6"))
_test_forward_elemwise(_test_pow)
_test_forward_elemwise(_test_squared_difference)
_test_forward_elemwise(_test_maximum)
_test_forward_elemwise(_test_minimum)
_test_forward_elemwise(_test_greater)
_test_forward_elemwise(_test_squared_difference)
_test_forward_elemwise(_test_greater_equal)
_test_forward_elemwise(_test_less)
_test_forward_elemwise(_test_less_equal)
Expand Down

0 comments on commit 12efe01

Please sign in to comment.