use latest error handling conventions
markrogersjr committed Mar 20, 2019
1 parent e2ecae4 commit 869c894
Showing 18 changed files with 314 additions and 227 deletions.
7 changes: 2 additions & 5 deletions nnvm/python/nnvm/frontend/__init__.py
@@ -9,8 +9,5 @@
from .caffe2 import from_caffe2
from .common import raise_not_supported, get_nnvm_op, required_attr, \
warn_not_used, parse_tshape, parse_bool_str
from tvm.error_handling import raise_attribute_required, \
raise_attribute_invalid, \
raise_operator_unimplemented, \
raise_attribute_unimplemented, \
warn_not_used
from tvm.error import OpError, OpNotImplemented, OpAttributeRequired, \
OpAttributeInvalid, OpAttributeUnimplemented
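
The only change in this file is the import: the raise_* helpers from tvm.error_handling give way to exception classes from tvm.error. A minimal illustrative sketch of the convention the rest of this diff follows; _check_layout is a hypothetical helper, and the message format mirrors the raises below:

from tvm.error import OpAttributeInvalid

def _check_layout(layout, op_name):
    # Each exception now takes a fully formatted message, rather than the
    # positional pieces (value, attribute, operator) the old helpers assembled.
    if layout not in ('NCHW', 'NHWC'):
        raise OpAttributeInvalid(
            'Value {} in attribute "layout" of operator {} is not valid.'.format(
                layout, op_name))

The class name now encodes the failure mode, so callers can tell a missing attribute from an unsupported operator without parsing message strings.
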
8 changes: 5 additions & 3 deletions nnvm/python/nnvm/frontend/caffe2.py
@@ -73,7 +73,8 @@ def get_converter(cls):

if hasattr(cls, '_impl'):
return getattr(cls, '_impl')
raise_operator_unimplemented(cls.__name__)
raise OpNotImplemented(
'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))


_caffe2_internal_args = {
@@ -175,7 +176,7 @@ def _get_axis_from_order_str(order):
return 1
if order == 'NHWC':
return 3
raise_attribute_invalid(order, 'storage order', 'concat')
raise OpAttributeInvalid('Value {} in attribute {} of operator {} is not valid.'.format(order, 'order', 'Concat'))

return AttrCvt(
op_name='concatenate',
@@ -425,7 +426,8 @@ def _convert_operator(self,
# Add a sanitizing step to convert all byte strings in args to strings
sym = convert_map[op_type](inputs, args, self._params)
else:
raise_operator_unimplemented(op_type)
raise OpNotImplemented(
'Operator {} is not supported in frontend Caffe2.'.format(op_type))
return sym


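
The two hunks above are the Caffe2 frontend's dispatch path: each operator has a converter class whose get_converter returns its _impl, and _convert_operator looks the operator up in a convert map. A compressed, illustrative sketch with simplified names (Cast and its body are made up for the example):

from tvm.error import OpNotImplemented

class Caffe2OpConverter(object):
    @classmethod
    def get_converter(cls):
        if hasattr(cls, '_impl'):
            return getattr(cls, '_impl')
        raise OpNotImplemented(
            'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))

class Cast(Caffe2OpConverter):
    @classmethod
    def _impl(cls, inputs, args, params):
        return inputs[0]  # placeholder body for the sketch

_convert_map = {'Cast': Cast.get_converter()}

def _convert(op_type, inputs, args, params):
    if op_type not in _convert_map:
        raise OpNotImplemented(
            'Operator {} is not supported in frontend Caffe2.'.format(op_type))
    return _convert_map[op_type](inputs, args, params)
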
6 changes: 4 additions & 2 deletions nnvm/python/nnvm/frontend/common.py
@@ -7,13 +7,15 @@
def get_nnvm_op(op_name):
op = getattr(_sym, op_name)
if not op:
raise_operator_unimplemented(op_name)
raise OpNotImplemented(
'Operator {} is not supported.'.format(op_name))
return op

def required_attr(attr, key, op_name):
assert isinstance(attr, dict)
if key not in attr:
raise_attribute_required(key, op_name)
raise OpAttributeRequired(
'Required attribute {} not found in operator {}.'.format(key, op_name))
return attr[key]

def parse_tshape(tshape):
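
These helpers sit under most of the converters touched below: required_attr raises OpAttributeRequired when a mandatory attribute is missing, and get_nnvm_op raises OpNotImplemented when NNVM exposes no symbol of that name. A small usage sketch modeled on the Darknet converters later in this diff; the attribute handling is illustrative, not the frontend's actual converter:

from nnvm.frontend.common import get_nnvm_op, required_attr, parse_tshape

def _convert_max_pool2d(inputs, attrs):
    # Raises OpAttributeRequired if 'kernel' is absent from attrs.
    kernel = parse_tshape(required_attr(attrs, 'kernel', 'max_pool2d'))
    new_attrs = {'pool_size': [kernel[0], kernel[0]],
                 'strides': (int(attrs.get('stride', 1)),) * 2}
    # Raises OpNotImplemented if NNVM provides no 'max_pool2d' symbol.
    return get_nnvm_op('max_pool2d')(*inputs, **new_attrs)
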
29 changes: 17 additions & 12 deletions nnvm/python/nnvm/frontend/coreml.py
@@ -83,7 +83,7 @@ def BatchnormLayerParams(op, insym, symtab):
"""Get layer of batchnorm parameter"""
# this changes the symbol
if op.instanceNormalization:
raise_operator_unimplemented('instance normalization')
raise OpNotImplemented('Operator "instance normalization" is not supported in frontend CoreML.')
else:
params = {'gamma':symtab.new_const(list(op.gamma.floatValue)),
'beta':symtab.new_const(list(op.beta.floatValue)),
@@ -136,7 +136,8 @@ def ActivationParams(op, insym, symtab):
betasym = symtab.new_const(beta)
return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
_sym.exp(insym), betasym)), alphasym)
raise_operator_unimplemented(whichActivation)
raise OpNotImplemented(
'Operator {} is not supported in frontend CoreML.'.format(whichActivation))

def ScaleLayerParams(op, insym, symtab):
"""Scale layer params."""
@@ -158,7 +159,8 @@ def PoolingLayerParams(op, insym, symtab):
return _sym.global_max_pool2d(insym)
if op.type == 1:
return _sym.global_avg_pool2d(insym)
raise_operator_unimplemented('pooling (not max or average)')
raise OpNotImplemented(
'Operator pooling (not max or average) is not supported in frontend CoreML.')

else:
params = {'pool_size':list(op.kernelSize),
@@ -178,8 +180,7 @@ def PoolingLayerParams(op, insym, symtab):
params['padding'] = padding
params['ceil_mode'] = True
else:
raise_attribute_invalid(op.WhichOneof('PoolingPaddingType'),
'PoolingPaddingType', 'pooling')
raise OpAttributeInvalid('Value {} in attribute PoolingPaddingType of operator {} is not valid.'.format(op.WhichOneof('PoolingPaddingType'), 'pooling'))

# consume padding layer
if symtab.in_padding:
@@ -191,7 +192,7 @@ def PoolingLayerParams(op, insym, symtab):
return _sym.max_pool2d(insym, **params)
if op.type == 1:
return _sym.avg_pool2d(insym, **params)
raise_operator_unimplemented('pooling (not max or average)')
raise OpNotImplemented('Operator pooling (not max or average) is not supported in frontend CoreML.')

def SoftmaxLayerParams(op, insym, symtab):
return _sym.softmax(_sym.flatten(insym))
@@ -230,7 +231,8 @@ def ConcatLayerParams(op, insyms, symtab):
if not isinstance(insyms, list):
insyms = [insyms]
if op.sequenceConcat:
raise_operator_unimplemented('sequence concat')
raise OpNotImplemented(
'Operator Sequence Concat is not supported in frontend CoreML.')
ret = _sym.concatenate(*insyms, axis=1)
return ret

@@ -244,14 +246,16 @@ def PaddingLayerParams(op, insym, symtab):
if op.WhichOneof('PaddingType') == 'constant':
constant = op.constant
if constant.value != 0:
raise_attribute_invalid(constant.value, 'padding value', 'padding')
raise OpAttributeInvalid(
'Value {} in attribute "padding value" of operator Padding is not valid.'.format(constant.value))
padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j
symtab.set_padding(padding)
else:
raise_operator_unimplemented('non-constant padding')
raise OpNotImplemented(
'Operator "non-constant padding" is not supported in frontend CoreML.')
return insym

def PermuteLayerParams(op, insym, symtab):
@@ -260,8 +264,8 @@ def PermuteLayerParams(op, insym, symtab):

def UpsampleLayerParams(op, insym, symtab):
if op.scalingFactor[0] != op.scalingFactor[1]:
raise_attribute_invalid(op.scalingFactor, 'scaling factors',
'upsample')
raise OpAttributeInvalid(
'Height and width scaling factors of Upsample operator must be equal.')
interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
return _sym.upsampling(insym, scale=op.scalingFactor[0], method=interpolationMode)

@@ -342,7 +346,8 @@ def coreml_op_to_nnvm(op, inname, outname, symtab):
"""
classname = type(op).__name__
if classname not in _convert_map:
raise_operator_unimplemented(classname)
raise OpNotImplemented(
'Operator {} is not supported in frontend CoreML.'.format(classname))
if isinstance(inname, string_types):
insym = symtab.get_var(inname)
else:
45 changes: 29 additions & 16 deletions nnvm/python/nnvm/frontend/darknet.py
@@ -61,7 +61,8 @@ def _darknet_maxpooling(inputs, attrs):
"""Process the max pool 2d operation."""
kernel = parse_tshape(required_attr(attrs, 'kernel', 'maxpool'))
if len(kernel) != 1:
raise_attribute_unimplemented('non-2d kernel', 'pool_2d')
raise OpAttributeUnimplemented(
'Non-2D kernels for Max Pooling are not supported in frontend Darknet.')

op_name, new_attrs = 'max_pool2d', {}
strides = int(attrs.get('stride', (1, 1)))
@@ -79,7 +80,8 @@ def _darknet_avgpooling(inputs, attrs):
"""Process the average pool 2d operation."""
kernel = parse_tshape(required_attr(attrs, 'kernel', 'avgpool'))
if len(kernel) != 1:
raise_attribute_unimplemented('non-2d kernel', 'pool_2d')
raise OpAttributeUnimplemented(
'Non-2D kernels for Average Pooling are not supported in frontend Darknet.')

op_name, new_attrs = 'avg_pool2d', {}
strides = int(attrs.get('stride', (1, 1)))
@@ -103,10 +105,11 @@ def _darknet_conv2d(inputs, attrs):
"""Process the convolution 2d operation."""
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d'))
if len(kernel) != 1:
raise_attribute_unimplemented('non 2d kernel', 'conv2d')
raise OpAttributeUnimplemented('Non-2D kernels for Conv2D are unsupported in frontend Darknet.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
raise_attribute_invalid(layout, 'layout', 'conv2d')
raise OpAttributeInvalid(
'Value {} in attribute "layout" of operator Conv2D is not valid.'.format(layout))
strides = int(attrs.get('stride', (1, 1)))
pads = int(attrs.get('pad', (0, 0)))

@@ -142,13 +145,16 @@ def _darknet_conv2d(inputs, attrs):
def _darknet_conv2d_transpose(inputs, attrs):
"""Process the convolution 2d transpose operation."""
if 'target_shape' in attrs:
raise_attribute_unimplemented('target_shape', 'conv2d_transpose')
raise OpAttributeUnimplemented(
'Attribute "target_shape" is not supported in operator Conv2D-transpose.')
kernel = parse_tshape(required_attr(attrs, 'kernel', 'conv2d_transpose'))
if len(kernel) != 2:
raise_attribute_unimplemented('non-2d kernel', 'conv2d_transpose')
raise OpAttributeUnimplemented(
'Non-2D kernels are not supported in operator Conv2D-transpose.')
layout = attrs.get('layout', 'NCHW')
if layout not in ['NCHW', 'NHWC']:
raise_attribute_invalid(layout, 'layout', 'conv2d_transpose')
raise OpAttributeInvalid(
'Value {} in attribute "layout" of operator Conv2D-transpose is not valid.'.format(layout))
op_name, new_attrs = 'conv2d_transpose', {}
new_attrs['channels'] = required_attr(attrs, 'num_filter', 'conv2d_transpose')
new_attrs['kernel_size'] = kernel
@@ -222,7 +228,8 @@ def _darknet_dropout(inputs, attrs):
def _darknet_reshape(inputs, attrs):
"""Process the reshape operation."""
if parse_bool_str(attrs, 'reverse'):
raise_attribute_unimplemented('reverse', 'reshape')
raise OpAttributeUnimplemented(
'Attribute "reverse" is not supported in operator Reshape.')
op_name, new_attrs = 'reshape', {}
new_attrs['shape'] = required_attr(attrs, 'shape', 'reshape')
return get_nnvm_op(op_name)(*inputs, **new_attrs), None
@@ -324,7 +331,8 @@ def _darknet_activations(inputs, attrs):
elif ACTIVATION.ELU == act:
act_type = 'elu'
else:
raise_operator_unimplemented('act: ' + act)
raise OpNotImplemented(
'Operator act: {} is not supported in frontend Darknet.'.format(act))

if act_type in ['relu', 'tanh']:
op_name, new_attrs = act_type, {}
@@ -339,7 +347,8 @@ def _darknet_activations(inputs, attrs):
op_name, new_attrs = act_type, {}
sym = get_nnvm_op(op_name)(*inputs, **new_attrs)
else:
raise_operator_unimplemented('act_type: ' + act_type)
raise OpNotImplemented(
'Operator act_type: {} is not supported in frontend Darknet.'.format(act_type))
return sym, None

def _darknet_op_not_support(inputs, attrs):
@@ -402,7 +411,8 @@ def _darknet_convert_symbol(op_name, inputs, attrs):
if op_name in _DARKNET_CONVERT_MAP:
sym, out_name = _DARKNET_CONVERT_MAP[op_name](inputs, attrs)
else:
raise_operator_unimplemented(op_name)
raise OpNotImplemented(
'Operator {} is not supported in frontend Darknet.'.format(op_name))
if out_name is None:
out_name = sym.list_output_names()[0].replace('_output', '')
return out_name, sym
@@ -449,8 +459,8 @@ def _get_convolution_weights(self, layer, opname):
return

if (layer.n * layer.c * layer.size * layer.size) != layer.nweights:
raise_attribute_invalid(layer.n * layer.c * layer.size * layer.size,
'layer weights size', 'conv2d')
raise OpAttributeInvalid(
'nweights ({}) != n * c * h * w ({}) in operator {}'.format(layer.nweights, layer.n * layer.c * layer.size ** 2, opname))

shape = (layer.n, layer.c, layer.size, layer.size)
weights = self._read_memory_buffer(shape, layer.weights)
@@ -630,7 +640,8 @@ def _get_darknet_attrs(self, layer, layer_num):
pass

else:
raise_operator_unimplemented(layer.type)
raise OpNotImplemented(
'Operator {} is not supported in frontend Darknet.'.format(layer.type))

return attr

@@ -763,7 +774,8 @@ def _handle_darknet_rnn_layers(self, layer_num, sym):

elif LAYERTYPE.LSTM == layer.type:
if layer.steps > 1:
raise_attribute_invalid(layer.steps, 'number of steps', 'RNN')
raise OpAttributeInvalid(
'Number of steps {} of RNN is not valid.'.format(layer.steps))

op_name_add = 'elemwise_add'
op_name_mul = 'elemwise_mul'
@@ -829,7 +841,8 @@ def _handle_darknet_rnn_layers(self, layer_num, sym):

elif LAYERTYPE.GRU == layer.type:
if layer.steps > 1:
raise_attribute_invalid(layer.steps, 'number of steps', 'RNN')
raise OpAttributeInvalid(
'Number of steps {} is not valid in RNN.'.format(layer.steps))

op_name_add = 'elemwise_add'
op_name_mul = 'elemwise_mul'
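
The payoff of the new convention shows up at the call site: with OpError presumably serving as the shared base class (per the __init__.py import above), a model importer can separate "operator not supported" from other conversion failures without string matching. A hedged usage sketch; from_caffe2 follows the entry point imported at the top of this diff, and init_net / predict_net stand in for loaded Caffe2 protobufs:

from tvm.error import OpError, OpNotImplemented
from nnvm.frontend import from_caffe2

def import_model(init_net, predict_net):
    try:
        return from_caffe2(init_net, predict_net)
    except OpNotImplemented as err:
        # The model uses an operator this frontend cannot convert yet.
        print('unsupported operator:', err)
    except OpError as err:
        # Any other operator-level failure, e.g. a missing or invalid attribute.
        print('conversion failed:', err)
    return None, None
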