[CODEGEN][CONTRIB] Various update for CoreML codegen #5934

Merged · 2 commits · Jun 26, 2020
1 change: 1 addition & 0 deletions apps/ios_rpc/tvmrpc/TVMRuntime.mm
@@ -27,6 +27,7 @@
#include "../../../src/runtime/dso_library.cc"
#include "../../../src/runtime/file_util.cc"
#include "../../../src/runtime/library_module.cc"
#include "../../../src/runtime/metadata_module.cc"
#include "../../../src/runtime/module.cc"
#include "../../../src/runtime/ndarray.cc"
#include "../../../src/runtime/object.cc"
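Note: the iOS RPC app builds the TVM runtime by #include-ing its sources directly, so the new metadata_module.cc translation unit has to be listed here as well; it appears to be needed because modules produced through the external-codegen (BYOC) path are now wrapped in a metadata module.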
42 changes: 32 additions & 10 deletions python/tvm/contrib/target/coreml.py
@@ -56,6 +56,27 @@ def _convert_batch_flatten(builder, name, inputs, outputs, args, attrs):
         output_name=outputs[0]
     )

+def _convert_expand_dims(builder, name, inputs, outputs, args, attrs):
+    if attrs.axis >= 0:
+        axes = list(range(attrs.axis, attrs.axis+attrs.num_newaxis))
+    else:
+        axes = list(range(attrs.axis-attrs.num_newaxis+1, attrs.axis+1))
+
+    builder.add_expand_dims(
+        name=name,
+        input_name=inputs[0],
+        output_name=outputs[0],
+        axes=axes
+    )
+
+def _convert_relu(builder, name, inputs, outputs, args, attrs):
+    builder.add_activation(
+        name=name,
+        non_linearity='RELU',
+        input_name=inputs[0],
+        output_name=outputs[0]
+    )
+
 def _convert_softmax(builder, name, inputs, outputs, args, attrs):
     builder.add_softmax_nd(
         name=name,
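The axes arithmetic in `_convert_expand_dims` above is the subtle part: CoreML's `add_expand_dims` expects the positions of the new axes in the output tensor, so a negative Relay axis has to be shifted back by `num_newaxis - 1`. A minimal standalone sketch of that computation (the helper name here is ours for illustration, not part of the PR):

```python
def expand_dims_axes(axis, num_newaxis):
    # Mirrors the branch in _convert_expand_dims: output-relative positions
    # of the newly inserted axes.
    if axis >= 0:
        return list(range(axis, axis + num_newaxis))
    return list(range(axis - num_newaxis + 1, axis + 1))

assert expand_dims_axes(0, 1) == [0]        # relay.expand_dims(x, axis=0)
assert expand_dims_axes(1, 2) == [1, 2]     # two new axes starting at dim 1
assert expand_dims_axes(-1, 1) == [-1]      # relay.expand_dims(x, axis=-1)
assert expand_dims_axes(-1, 2) == [-2, -1]  # negative axes count from the end
```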
@@ -111,6 +132,8 @@ def _convert_global_avg_pool2d(builder, name, inputs, outputs, args, attrs):
     'add' : _convert_add,
     'multiply' : _convert_multiply,
     'clip' : _convert_clip,
+    'expand_dims' : _convert_expand_dims,
+    'nn.relu' : _convert_relu,
     'nn.batch_flatten' : _convert_batch_flatten,
     'nn.softmax' : _convert_softmax,
     'nn.conv2d' : _convert_conv2d,
@@ -207,20 +230,19 @@ def compile(self, out_dir):


 @tvm._ffi.register_func("relay.ext.coremlcompiler")
-def coreml_compiler(ref):
+def coreml_compiler(func):
     """
     Create a CoreML runtime from a Relay module.
     """
+    assert isinstance(func, tvm.relay.function.Function)
     model_dir = os.getcwd()
-    if isinstance(ref, tvm.ir.module.IRModule):
-        for var, func in ref.functions.items():
-            name = var.name_hint
-            builder = CodegenCoreML(name, func)
-            builder.visit(func.body)
-            mlmodelc_path = "{}/{}.mlmodelc".format(model_dir, name)
-            if os.path.exists(mlmodelc_path):
-                shutil.rmtree(mlmodelc_path)
-            builder.compile(model_dir)
+    name = str(func.attrs.global_symbol)
+    builder = CodegenCoreML(name, func)
+    builder.visit(func.body)
+    mlmodelc_path = "{}/{}.mlmodelc".format(model_dir, name)
+    if os.path.exists(mlmodelc_path):
+        shutil.rmtree(mlmodelc_path)
+    builder.compile(model_dir)

     ctx = tvm.cpu(0)
     return coreml_runtime.create(model_dir, ctx).module
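With this change, `relay.ext.coremlcompiler` receives one partitioned `relay.Function` at a time, named by its `global_symbol` attribute, instead of iterating over a whole `IRModule`. A sketch of the end-to-end flow, modeled on the `_construct_model` test helper added below; running it for real requires Xcode, which is why the tests mock `xcode.compile_coreml` and `coreml_runtime.create`:

```python
import tvm
from tvm import relay
from tvm.relay import transform

# Annotate and partition a trivial graph for the CoreML external codegen.
x = relay.var("x", shape=(10, 10))
mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
mod = transform.AnnotateTarget("coremlcompiler")(mod)
mod = transform.PartitionGraph()(mod)

# relay.build performs this lookup internally; calling the compiler directly
# shows the new contract: each partitioned function is passed on its own.
fcompile = tvm._ffi.get_global_func("relay.ext.coremlcompiler")
for var, func in mod.functions.items():
    if func.attrs and 'Compiler' in func.attrs and \
       func.attrs['Compiler'] == 'coremlcompiler':
        fcompile(func)  # compiles <global_symbol>.mlmodelc into os.getcwd()
```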
12 changes: 10 additions & 2 deletions src/runtime/contrib/coreml/coreml_runtime.mm
@@ -49,6 +49,9 @@
   } else if (dtype == DataType::Float(32)) {
     dataType = MLMultiArrayDataTypeFloat32;
     size *= sizeof(float);
+  } else if (dtype == DataType::Int(32)) {
+    dataType = MLMultiArrayDataTypeInt32;
+    size *= sizeof(int);
   } else {
     LOG(FATAL) << "unsupported data type " << dtype;
     return;
@@ -88,6 +91,9 @@
   } else if (data_desc.dataType == MLMultiArrayDataTypeFloat32) {
     dtype = DataType::Float(32);
     size *= sizeof(float);
+  } else if (data_desc.dataType == MLMultiArrayDataTypeInt32) {
+    dtype = DataType::Int(32);
+    size *= sizeof(int);
   } else {
     LOG(FATAL) << "unexpected data type " << data_desc.dataType;
   }
@@ -135,7 +141,7 @@
 PackedFunc CoreMLRuntime::GetFunction(const std::string& name,
                                       const ObjectPtr<Object>& sptr_to_self) {
   // Return member functions during query.
-  if (name == "invoke") {
+  if (name == "invoke" || name == "run") {
     return PackedFunc(
         [sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { GetModel("main").Invoke(); });
   } else if (name == "set_input") {
@@ -151,7 +157,7 @@
     return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
       *rv = GetModel("main").GetNumOutputs();
     });
-  } else {
+  } else if (model_map_.count(name) != 0) {
     // Return the packedfunc which executes the subgraph.
     return PackedFunc([sptr_to_self, name, this](TVMArgs args, TVMRetValue* rv) {
       CoreMLModel& model = GetModel(name);
@@ -188,6 +194,8 @@
       }
       *rv = out;
     });
+  } else {
+    return PackedFunc();
   }
 }

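On the runtime side, the effect of the `GetFunction` changes is visible from Python: `run` is now accepted as an alias of `invoke` (so generic wrappers that expect a `run` method work unchanged), and looking up a name that is neither a member function nor a known model returns a null `PackedFunc` instead of hitting `LOG(FATAL)`, letting lookups fall through to imported modules. A hedged sketch, assuming `model_dir` contains a `main.mlmodelc` produced by `coreml_compiler` and that the host actually has CoreML available:

```python
import tvm
from tvm.contrib import coreml_runtime

ctx = tvm.cpu(0)
# "model_dir" is a placeholder path; see coreml_compiler above for how the
# .mlmodelc bundle is produced there.
module = coreml_runtime.create("model_dir", ctx).module

invoke = module["invoke"]
run = module["run"]  # new alias; dispatches to the same GetModel("main").Invoke()
run()
```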
115 changes: 101 additions & 14 deletions tests/python/contrib/test_coreml_codegen.py
@@ -95,19 +95,6 @@ def test_annotate():
     assert tvm.ir.structural_equal(mod, expected, map_free_vars=True)


-@mock.patch('tvm.contrib.coreml_runtime.create')
-@mock.patch('tvm.contrib.xcode.compile_coreml')
-def test_construct_model(m1, m2):
-    mod = _create_graph_annotated()
-
-    fcompile = tvm._ffi.get_global_func("relay.ext.coremlcompiler")
-
-    for var, func in mod.functions.items():
-        if func.attrs and 'Compiler' in func.attrs and \
-           func.attrs['Compiler'] == 'coremlcompiler':
-            fcompile(tvm.IRModule.from_expr(func.body))
-
-
 @pytest.mark.skipif(not _has_xcode(), reason="Xcode is not available")
 def test_compile_and_run():
     ctx=tvm.cpu()
@@ -133,7 +120,107 @@ def test_compile_and_run():
     tvm.testing.assert_allclose(out.asnumpy(), expected, rtol=tol, atol=tol)


+@mock.patch('tvm.contrib.coreml_runtime.create')
+@mock.patch('tvm.contrib.xcode.compile_coreml')
+def _construct_model(func, m1, m2):
+    mod = tvm.IRModule()
+    mod["main"] = func
+    mod = transform.AnnotateTarget("coremlcompiler")(mod)
+    mod = transform.PartitionGraph()(mod)
+
+    fcompile = tvm._ffi.get_global_func("relay.ext.coremlcompiler")
+
+    for var, func in mod.functions.items():
+        if func.attrs and 'Compiler' in func.attrs and \
+           func.attrs['Compiler'] == 'coremlcompiler':
+            fcompile(func)
+
+
+def test_add():
+    shape = (10, 10)
+    x = relay.var('x', shape=shape)
+    y = x + x
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_multiply():
+    shape = (10, 10)
+    x = relay.var('x', shape=shape)
+    y = x * x
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_clip():
+    shape = (10, 10)
+    x = relay.var('x', shape=shape)
+    y = relay.clip(x, a_min=0.0, a_max=1.0)
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_batch_flatten():
+    shape = (10, 10, 10)
+    x = relay.var('x', shape=shape)
+    y = relay.nn.batch_flatten(x)
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_expand_dims():
+    shape = (10, 10)
+    x = relay.var('x', shape=shape)
+    y = relay.expand_dims(x, axis=0)
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+    y = relay.expand_dims(x, axis=-1)
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_relu():
+    shape = (10, 10)
+    x = relay.var('x', shape=shape)
+    y = relay.nn.relu(x)
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_softmax():
+    shape = (10, 10)
+    x = relay.var('x', shape=shape)
+    y = relay.nn.softmax(x, axis=1)
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_conv2d():
+    x = relay.var('x', shape=(1,3,224,224))
+    w = relay.const(np.zeros((16,3,3,3), dtype='float32'))
+    y = relay.nn.conv2d(x, w, strides=[2, 2], padding=[1, 1, 1, 1], kernel_size=[3, 3])
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
+def test_global_avg_pool2d():
+    shape = (10, 10, 10, 10)
+    x = relay.var('x', shape=shape)
+    y = relay.nn.global_avg_pool2d(x)
+    func = relay.Function([x], y)
+    _construct_model(func)
+
+
 if __name__ == "__main__":
     test_annotate()
-    test_construct_model()
     test_compile_and_run()
+    test_add()
+    test_multiply()
+    test_clip()
+    test_expand_dims()
+    test_relu()
+    test_batch_flatten()
+    test_softmax()
+    test_conv2d()
+    test_global_avg_pool2d()
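These per-operator tests exercise only the codegen path: the `@mock.patch` decorators on `_construct_model` stub out `xcode.compile_coreml` and `coreml_runtime.create`, so everything except `test_compile_and_run` (which is skipped without Xcode) runs on any platform, e.g. with `pytest tests/python/contrib/test_coreml_codegen.py`.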