sync dmlc/tvm 20190328 #21

Merged · 18 commits · Mar 29, 2019
13 changes: 12 additions & 1 deletion CONTRIBUTORS.md
@@ -1,10 +1,21 @@
TVM Contributors
================
TVM adopts the Apache style model and governs by merit. We believe that it is important to create an inclusive community where everyone can use,
TVM adopts the Apache way and governs by merit. We believe that it is important to create an inclusive community where everyone can use,
contribute to, and influence the direction of the project. We actively invite contributors who have earned the merit to be part of the development community.

See the [community structure document](http://docs.tvm.ai/contribute/community.html) for the explanation of community structure and contribution guidelines.

## Mentors

TVM is now part of the Apache Incubator.
We are fortunate to have the following mentors.

- Markus Weimer @markusweimer
- Sebastian Schelter @sscdotopen
- Byung-Gon Chun @bgchun
- Henry Saputra @hsaputra
- Timothy Chen @tnachen
- Furkan KAMACI @kamaci

## Committers

2 changes: 1 addition & 1 deletion Jenkinsfile
@@ -22,7 +22,7 @@
//
ci_lint = "tvmai/ci-lint:v0.50"
ci_gpu = "tvmai/ci-gpu:v0.51"
ci_cpu = "tvmai/ci-cpu:v0.41"
ci_cpu = "tvmai/ci-cpu:v0.50"
ci_i386 = "tvmai/ci-i386:v0.50"

// tvm libraries
2 changes: 1 addition & 1 deletion apps/howto_deploy/Makefile
@@ -31,4 +31,4 @@ lib/cpp_deploy_pack: cpp_deploy.cc lib/test_addone_sys.o lib/libtvm_runtime_pack
# Deploy using pre-built libtvm_runtime.so
lib/cpp_deploy_normal: cpp_deploy.cc lib/test_addone_sys.o
@mkdir -p $(@D)
$(CXX) $(PKG_CFLAGS) -o $@ $^ $(PKG_LDFLAGS) -ltvm_runtime
$(CXX) $(PKG_CFLAGS) -o $@ $^ -ltvm_runtime $(PKG_LDFLAGS)
4 changes: 2 additions & 2 deletions apps/howto_deploy/run_example.sh
@@ -3,8 +3,8 @@ echo "Build the libraries.."
mkdir -p lib
make
echo "Run the example"
export LD_LIBRARY_PATH=../../lib:${LD_LIBRARY_PATH}
export DYLD_LIBRARY_PATH=../../lib:${DYLD_LIBRARY_PATH}
export LD_LIBRARY_PATH=../../build:${LD_LIBRARY_PATH}
export DYLD_LIBRARY_PATH=../../build:${DYLD_LIBRARY_PATH}

echo "Run the deployment with all in one packed library..."
lib/cpp_deploy_pack
2 changes: 1 addition & 1 deletion cmake/config.cmake
@@ -127,7 +127,7 @@ set(USE_MPS OFF)
set(USE_ROCBLAS OFF)

# Whether use contrib sort
set(USE_SORT OFF)
set(USE_SORT ON)

# Whether use TensorRT
# /path/to/tensorrt that contains include and lib dirs
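Note: with this change config.cmake defaults to USE_SORT ON, so the contrib sort kernels are compiled into libtvm by default. A minimal smoke test for a resulting build, assuming the kernel is registered under the global name "tvm.contrib.sort.argsort" (that registry name is an assumption, not shown in this diff):

import tvm

# Returns a PackedFunc if the contrib sort kernel was compiled in, else None.
argsort = tvm.get_global_func("tvm.contrib.sort.argsort", allow_missing=True)
print("contrib sort available:", argsort is not None)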
2 changes: 1 addition & 1 deletion cmake/modules/contrib/BLAS.cmake
@@ -10,7 +10,7 @@ elseif(USE_BLAS STREQUAL "mkl")
if(NOT IS_DIRECTORY ${USE_MKL_PATH})
set(USE_MKL_PATH /opt/intel/mkl)
endif()
find_library(BLAS_LIBRARY mkl_rt ${USE_MKL_PATH}/lib/ ${USE_MKL_PATH}/lib/intel64)
find_library(BLAS_LIBRARY NAMES mkl_rt mklml_gnu HINTS ${USE_MKL_PATH}/lib/ ${USE_MKL_PATH}/lib/intel64)
include_directories(${USE_MKL_PATH}/include)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${BLAS_LIBRARY})
list(APPEND RUNTIME_SRCS ${CBLAS_CONTRIB_SRC})
2 changes: 1 addition & 1 deletion dmlc_tvm_commit_id
@@ -1 +1 @@
5f37cc1290355c840e36ba3a4a7ff158a97896c5
a0537ecbf80fafee22ea64bb2994332d4488b81f
18 changes: 18 additions & 0 deletions include/tvm/relay/attrs/nn.h
@@ -155,6 +155,24 @@ struct Conv2DWinogradAttrs : public tvm::AttrsNode<Conv2DWinogradAttrs> {
}
};

/*! \brief Attributes used in winograd weight transformation operators */
struct Conv2DWinogradNNPACKWeightTransformAttrs
: public tvm::AttrsNode<Conv2DWinogradNNPACKWeightTransformAttrs> {
int convolution_algorithm;
DataType out_dtype;

TVM_DECLARE_ATTRS(Conv2DWinogradNNPACKWeightTransformAttrs,
"relay.attrs.Conv2DWinogradNNPACKWeightTransformAttrs") {
TVM_ATTR_FIELD(convolution_algorithm)
.describe(
"The convolution algorithm for Winograd NNPACK. "
"E.g. tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8 for WT_8x8, "
"tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16 for WT_8x8_FP16");
TVM_ATTR_FIELD(out_dtype)
.set_default(NullValue<DataType>())
.describe("Output data type, set to explicit type under mixed precision setting");
}
};

/*! \brief Attributes used in softmax operators */
struct SoftmaxAttrs : public tvm::AttrsNode<SoftmaxAttrs> {
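Note: the new Conv2DWinogradNNPACKWeightTransformAttrs node carries the two knobs (convolution_algorithm, out_dtype) down to the C++ side. A sketch of how it would typically be driven from Python, assuming the matching Relay wrapper is exposed as relay.nn.contrib_conv2d_winograd_nnpack_weight_transform; the wrapper name and signature are assumptions, not part of this diff:

import tvm
from tvm import relay
from tvm.contrib import nnpack

# Pre-transform a 3x3 convolution weight for the NNPACK Winograd kernel.
weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="float32")
transformed = relay.nn.contrib_conv2d_winograd_nnpack_weight_transform(
    weight,
    convolution_algorithm=nnpack.ConvolutionAlgorithm.WT_8x8,  # maps to the int field above
    out_dtype="float32")                                       # maps to the DataType field above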
20 changes: 20 additions & 0 deletions nnvm/include/nnvm/top/nn.h
@@ -183,6 +183,26 @@ struct WinogradWeightTransformParam : public dmlc::Parameter<WinogradWeightTrans
static const constexpr int kWeight = 0;
};

struct WinogradNNPACKWeightTransformParam
: public dmlc::Parameter<WinogradNNPACKWeightTransformParam> {
int convolution_algorithm;
int out_dtype;

DMLC_DECLARE_PARAMETER(WinogradNNPACKWeightTransformParam) {
DMLC_DECLARE_FIELD(convolution_algorithm)
.describe(
"The convolution algorithm for Winograd NNPACK. "
"E.g. tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8 for WT_8x8, "
"tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16 for WT_8x8_FP16");
DMLC_DECLARE_DTYPE_FIELD(out_dtype)
.add_enum("same", -1)
.set_default(-1)
.describe("Output data type, set to explicit type under mixed precision setting");
}

static const constexpr int kWeight = 0;
};

struct WinogradConv2DParam : public dmlc::Parameter<WinogradConv2DParam> {
int channels;
TShape kernel_size;
13 changes: 6 additions & 7 deletions nnvm/python/nnvm/frontend/caffe2.py
@@ -3,7 +3,7 @@
from __future__ import absolute_import as _abs
import tvm
from nnvm import symbol as _sym
from nnvm.frontend.common import get_nnvm_op, Renamer, AttrConverter as AttrCvt
from .common import get_nnvm_op
from .onnx_caffe2_utils import dimension_picker, dimension_constraint, infer_channels, revert_caffe2_pad
from . import onnx

@@ -73,8 +73,8 @@ def get_converter(cls):

if hasattr(cls, '_impl'):
return getattr(cls, '_impl')
raise NotImplementedError('{} not implemented'.format(
cls.__name__))
raise tvm.error.OpNotImplemented(
'Operator {} is not implemented in frontend Caffe2.'.format(cls.__name__))


_caffe2_internal_args = {
@@ -176,8 +176,7 @@ def _get_axis_from_order_str(order):
return 1
if order == 'NHWC':
return 3
raise RuntimeError(
"Unsupported storage order: {} in caffe2".format(order))
raise tvm.error.OpAttributeInvalid('Value {} in attribute {} of operator {} is not valid.'.format(order, 'order', 'Concat'))

return AttrCvt(
op_name='concatenate',
@@ -427,8 +426,8 @@ def _convert_operator(self,
# Add a sanitizing step to convert all byte strings in args to strings
sym = convert_map[op_type](inputs, args, self._params)
else:
raise NotImplementedError(
"Operator {} not implemented.".format(op_type))
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend Caffe2.'.format(op_type))
return sym


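Note: the pattern above (tvm.error.OpNotImplemented / tvm.error.OpAttributeInvalid instead of bare NotImplementedError / RuntimeError) is what lets callers tell an unsupported operator apart from a genuine bug. A minimal sketch of the convention, using only the error classes that appear in this diff; convert_operator is a toy stand-in for the frontend's _convert_operator dispatch:

import tvm

def convert_operator(op_type, convert_map, inputs, args):
    # Unknown ops raise a typed frontend error rather than a generic exception.
    if op_type not in convert_map:
        raise tvm.error.OpNotImplemented(
            'Operator {} is not supported in frontend Caffe2.'.format(op_type))
    return convert_map[op_type](inputs, args)

try:
    convert_operator('FancyCustomOp', convert_map={}, inputs=[], args={})
except tvm.error.OpNotImplemented as err:
    print('skipped:', err)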
18 changes: 17 additions & 1 deletion nnvm/python/nnvm/frontend/common.py
@@ -7,9 +7,25 @@
def get_nnvm_op(op_name):
op = getattr(_sym, op_name)
if not op:
raise RuntimeError("Unable to map op_name {} to nnvm.sym".format(op_name))
raise OpNotImplemented(
'Operator {} is not supported.'.format(op))
return op

def required_attr(attr, key, op_name):
assert isinstance(attr, dict)
if key not in attr:
raise OpAttributeRequired(
'Required attribute {} not found in operator {}'.format(key, op_name))
return attr[key]

def parse_tshape(tshape):
"""Parse tshape in string."""
return [int(x.strip()) for x in tshape.strip('()').split(',')]

def parse_bool_str(attr, key, default='False'):
"""Parse bool string to boolean."""
return attr.get(key, default).strip().lower() in ['true', '1', 't', 'y', 'yes']

class Renamer(object):
"""A simply renamer for operators.

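Note: a quick usage check for the helpers added above, assuming they are importable from nnvm.frontend.common; the results follow directly from the definitions in this hunk:

from nnvm.frontend.common import required_attr, parse_tshape, parse_bool_str

attrs = {'kernel': '(3, 3)', 'no_bias': 'True'}

print(parse_tshape(attrs['kernel']))           # [3, 3]
print(parse_bool_str(attrs, 'no_bias'))        # True
print(parse_bool_str(attrs, 'use_global'))     # False (missing key falls back to default='False')
print(required_attr(attrs, 'kernel', 'conv2d'))  # '(3, 3)'
# required_attr(attrs, 'stride', 'conv2d') would raise OpAttributeRequired.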
34 changes: 21 additions & 13 deletions nnvm/python/nnvm/frontend/coreml.py
@@ -2,11 +2,10 @@
"""CoreML frontend."""
from __future__ import absolute_import as _abs
import numpy as np

import tvm
from .common import SymbolTable
from .. import symbol as _sym
from .._base import string_types
from .common import SymbolTable

__all__ = ['from_coreml']

@@ -83,7 +82,8 @@ def BatchnormLayerParams(op, insym, symtab):
"""Get layer of batchnorm parameter"""
# this changes the symbol
if op.instanceNormalization:
raise NotImplementedError("instance normalization not implemented")
msg = 'Operator "instance normalization" is not supported in frontend CoreML.'
raise tvm.error.OpNotImplemented(msg)
else:
params = {'gamma':symtab.new_const(list(op.gamma.floatValue)),
'beta':symtab.new_const(list(op.beta.floatValue)),
@@ -136,7 +136,8 @@ def ActivationParams(op, insym, symtab):
betasym = symtab.new_const(beta)
return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
_sym.exp(insym), betasym)), alphasym)
raise NotImplementedError('%s not implemented' % whichActivation)
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend CoreML.'.format(whichActivation))

def ScaleLayerParams(op, insym, symtab):
"""Scale layer params."""
@@ -158,7 +159,8 @@ def PoolingLayerParams(op, insym, symtab):
return _sym.global_max_pool2d(insym)
if op.type == 1:
return _sym.global_avg_pool2d(insym)
raise NotImplementedError("Only max and average pooling implemented")
raise tvm.error.OpNotImplemented(
'Operator pooling (not max or average) is not supported in frontend CoreML.')

else:
params = {'pool_size':list(op.kernelSize),
@@ -178,7 +180,8 @@ def PoolingLayerParams(op, insym, symtab):
params['padding'] = padding
params['ceil_mode'] = True
else:
raise NotImplementedError("Other convolution padding not implemented")
msg = 'Value {} in attribute PoolingPaddingType of operator Pooling is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(op.WhichOneof('PoolingPaddingType')))

# consume padding layer
if symtab.in_padding:
@@ -190,7 +193,8 @@ def PoolingLayerParams(op, insym, symtab):
return _sym.max_pool2d(insym, **params)
if op.type == 1:
return _sym.avg_pool2d(insym, **params)
raise NotImplementedError("Only max and average pooling implemented")
msg = 'Operator pooling (not max or average) is not supported in frontend CoreML.'
raise tvm.error.OpNotImplemented(msg)

def SoftmaxLayerParams(op, insym, symtab):
return _sym.softmax(_sym.flatten(insym))
@@ -229,7 +233,8 @@ def ConcatLayerParams(op, insyms, symtab):
if not isinstance(insyms, list):
insyms = [insyms]
if op.sequenceConcat:
raise NotImplementedError("Sequence Concat not supported")
raise tvm.error.OpNotImplemented(
'Operator Sequence Concat is not supported in frontend CoreML.')
ret = _sym.concatenate(*insyms, axis=1)
return ret

@@ -243,14 +248,16 @@ def PaddingLayerParams(op, insym, symtab):
if op.WhichOneof('PaddingType') == 'constant':
constant = op.constant
if constant.value != 0:
raise NotImplementedError("Padding value {} not supported.".format(constant.value))
msg = 'Value {} in attribute "padding value" of operator Padding is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(constant.value))
padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j
symtab.set_padding(padding)
else:
raise NotImplementedError("Only constant padding is supported now.")
raise tvm.error.OpNotImplemented(
'Operator "non-constant padding" is not supported in frontend CoreML.')
return insym

def PermuteLayerParams(op, insym, symtab):
Expand All @@ -259,8 +266,8 @@ def PermuteLayerParams(op, insym, symtab):

def UpsampleLayerParams(op, insym, symtab):
if op.scalingFactor[0] != op.scalingFactor[1]:
raise NotImplementedError("Upsampling only supported with same \
height and width scaling factor.")
raise tvm.error.OpAttributeInvalid(
'Height and width scaling factors of Upsample operator must be equal.')
interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
return _sym.upsampling(insym, scale=op.scalingFactor[0], method=interpolationMode)

@@ -341,7 +348,8 @@ def coreml_op_to_nnvm(op, inname, outname, symtab):
"""
classname = type(op).__name__
if classname not in _convert_map:
raise NotImplementedError("%s is not supported" % (classname))
raise tvm.error.OpNotImplemented(
'Operator {} is not supported in frontend CoreML.'.format(classname))
if isinstance(inname, string_types):
insym = symtab.get_var(inname)
else: