From f4ada75e3b5db8548fa10bccc1209d54ade6cd84 Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Wed, 16 Dec 2020 21:47:31 +0000 Subject: [PATCH 1/9] Add unit tests for onnx export of transpose, expand_dims and broadcast_add operators. --- tests/python-pytest/onnx/test_operators.py | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py index 0ed1188dc9f6..1ea1391278fa 100644 --- a/tests/python-pytest/onnx/test_operators.py +++ b/tests/python-pytest/onnx/test_operators.py @@ -205,3 +205,28 @@ def test_onnx_export_fully_connected(tmp_path, dtype, num_hidden, no_bias, flatt if not no_bias: args.append(mx.nd.random.uniform(0,1,(num_hidden,))) op_export_test('FullyConnected', M, args, tmp_path) + + +@pytest.mark.parametrize('dtype', ['float32', 'float64', 'int32', 'int64']) +@pytest.mark.parametrize('axes', [None, [1,0,2]]) +def test_onnx_export_transpose(tmp_path, dtype, axes): + if axes != None: + M = def_model('transpose', axes=axes) + else: + M = def_model('transpose') + x = mx.nd.array([[[1,2],[3,4]],[[5,6],[7,8]]], dtype=dtype) + op_export_test('transpose', M, [x], tmp_path) + +@pytest.mark.parametrize('dtype', ['float32', 'float64']) +@pytest.mark.parametrize('axis', [0, 1, 2]) +def test_onnx_export_expand_dims(tmp_path, dtype, axis): + M = def_model('expand_dims', axis=axis) + x = mx.nd.random.uniform(0, 1, (2,3,4), dtype=dtype) + op_export_test('expand_dims', M, [x], tmp_path) + +@pytest.mark.parametrize('dtype', ['float32', 'float64', 'int32', 'int64']) +def test_onnx_export_broadcast_add(tmp_path, dtype): + M = def_model('broadcast_add') + x = mx.nd.array([[1,1,1],[1,1,1]], dtype=dtype) + y = mx.nd.array([[0],[1]], dtype=dtype) + op_export_test('broadcast_add', M, [x, y], tmp_path) From 3ab0ec2bc17225e88eb391da337a8d41dc10db66 Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Wed, 16 Dec 2020 22:12:10 +0000 Subject: [PATCH 2/9] Add unit test for onnx export of stack op. 
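
These single-operator tests (transpose, expand_dims, broadcast_add, stack, and the
ones that follow) all go through the def_model and op_export_test helpers defined
near the top of test_operators.py, which are not part of this diff. A rough sketch
of the pattern, with the signatures inferred from how the helpers are called here;
treat the details as an assumption, not the repo's exact implementation:

    import mxnet as mx
    from mxnet.gluon import HybridBlock

    def def_model(op_name, **params):
        """Return a HybridBlock that applies a single MXNet operator
        (e.g. 'transpose' or 'stack') with the given fixed parameters."""
        class Model(HybridBlock):
            def hybrid_forward(self, F, *inputs):
                # F is mx.nd when run imperatively and mx.sym once hybridized for export
                return getattr(F, op_name)(*inputs, **params)
        return Model

    # op_export_test(op_name, Model, inputs, tmp_path) is then expected to
    # hybridize the block, export it to an ONNX file under tmp_path, run that
    # file through onnxruntime, and assert the outputs match MXNet's.
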
--- tests/python-pytest/onnx/test_operators.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py index 1ea1391278fa..0633d366bcd7 100644 --- a/tests/python-pytest/onnx/test_operators.py +++ b/tests/python-pytest/onnx/test_operators.py @@ -217,6 +217,7 @@ def test_onnx_export_transpose(tmp_path, dtype, axes): x = mx.nd.array([[[1,2],[3,4]],[[5,6],[7,8]]], dtype=dtype) op_export_test('transpose', M, [x], tmp_path) + @pytest.mark.parametrize('dtype', ['float32', 'float64']) @pytest.mark.parametrize('axis', [0, 1, 2]) def test_onnx_export_expand_dims(tmp_path, dtype, axis): @@ -224,9 +225,21 @@ def test_onnx_export_expand_dims(tmp_path, dtype, axis): x = mx.nd.random.uniform(0, 1, (2,3,4), dtype=dtype) op_export_test('expand_dims', M, [x], tmp_path) + @pytest.mark.parametrize('dtype', ['float32', 'float64', 'int32', 'int64']) def test_onnx_export_broadcast_add(tmp_path, dtype): M = def_model('broadcast_add') x = mx.nd.array([[1,1,1],[1,1,1]], dtype=dtype) y = mx.nd.array([[0],[1]], dtype=dtype) op_export_test('broadcast_add', M, [x, y], tmp_path) + + +@pytest.mark.parametrize('dtype', ['float32', 'float64', 'int32', 'int64']) +@pytest.mark.parametrize('axis', [0, 1, -1]) +def test_onnx_export_stack(tmp_path, dtype, axis): + M = def_model('stack', axis=axis) + x = mx.nd.array([1,2,3,4], dtype=dtype) + y = mx.nd.array([5,6,7,8], dtype=dtype) + op_export_test('stack', M, [x, y], tmp_path) + + From 92cebf2b5f799358726a8b139293f61f5024f545 Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Thu, 17 Dec 2020 01:14:02 +0000 Subject: [PATCH 3/9] Add test for dropout onnx export operator. --- tests/python-pytest/onnx/test_operators.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py index 0633d366bcd7..4e63999df9ee 100644 --- a/tests/python-pytest/onnx/test_operators.py +++ b/tests/python-pytest/onnx/test_operators.py @@ -243,3 +243,9 @@ def test_onnx_export_stack(tmp_path, dtype, axis): op_export_test('stack', M, [x, y], tmp_path) +@pytest.mark.parametrize('dtype', ['float32', 'float64']) +@pytest.mark.parametrize('p', [0.1, 0.2, 0.5, 0.8]) +def test_onnx_export_dropout(tmp_path, dtype, p): + M = def_model('Dropout', p=p) + x = mx.nd.array([[3,0.5,-0.5,2,7],[2,-0.4,7,3,0.2]], dtype=dtype) + op_export_test('Dropout', M, [x], tmp_path) From 9a11a8ebb08caf664aab89f4e16dfd594582f9f1 Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Thu, 17 Dec 2020 01:48:19 +0000 Subject: [PATCH 4/9] Parameterize model parameter onnxruntime tests and use tmp_path provided by pytest. 
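
The rewrite below follows the standard pytest approach: parametrize over the model
list and let the built-in tmp_path fixture hand each test its own temporary
directory, so every model becomes a separate test case and the manual
tempfile/shutil bookkeeping disappears. A minimal, self-contained illustration of
that pattern (the model names and file name here are only for the example):

    import os
    import pytest

    @pytest.mark.parametrize('model', ['resnet18_v1', 'mobilenet1.0'])
    def test_export_one_model(tmp_path, model):
        # tmp_path is a pathlib.Path unique to this parametrized test case
        onnx_file = os.path.join(str(tmp_path), model + '.onnx')
        # ... export the model to onnx_file and run inference on it here ...
        assert onnx_file.endswith('.onnx')

A failure is then reported per model instead of stopping the whole loop.
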
--- tests/python-pytest/onnx/test_onnxruntime.py | 103 +++++++++---------- 1 file changed, 47 insertions(+), 56 deletions(-) diff --git a/tests/python-pytest/onnx/test_onnxruntime.py b/tests/python-pytest/onnx/test_onnxruntime.py index 052b24185735..25ef6058cc40 100644 --- a/tests/python-pytest/onnx/test_onnxruntime.py +++ b/tests/python-pytest/onnx/test_onnxruntime.py @@ -21,11 +21,29 @@ import json import os +import pytest import shutil -import tempfile - -def test_cv_model_inference_onnxruntime(): +# images that are tested and their accepted classes +test_images = [ + ['dog.jpg', [242,243]], + ['apron.jpg', [411,578,638,639,689,775]], + ['dolphin.jpg', [2,3,4,146,147,148,395]], + ['hammerheadshark.jpg', [3,4]], + ['lotus.jpg', [723,738,985]] +] + +test_models = [ + 'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25', + 'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25', + 'resnet18_v1', 'resnet18_v2', 'resnet34_v1', 'resnet34_v2', 'resnet50_v1', 'resnet50_v2', + 'resnet101_v1', 'resnet101_v2', 'resnet152_v1', 'resnet152_v2', + 'squeezenet1.0', 'squeezenet1.1', + 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn' +] + +@pytest.mark.parametrize('model', test_models) +def test_cv_model_inference_onnxruntime(tmp_path, model): def get_gluon_cv_model(model_name, tmp): tmpfile = os.path.join(tmp, model_name) ctx = mx.cpu(0) @@ -64,66 +82,39 @@ def softmax(x): e_x = np.exp(x - np.max(x)) return e_x / e_x.sum(axis=0) - def load_imgnet_labels(): - mx.test_utils.download('https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/doc/tutorials/onnx/image_net_labels.json') - return np.array(json.load(open('image_net_labels.json', 'r'))) - - def download_test_images(): - test_images = [ - ['dog.jpg',['boxer']], - ['apron.jpg', ['apron', 'maillot']], - ['dolphin.jpg', ['great white shark','grey whale']], - ['hammerheadshark.jpg', ['tiger shark']], - ['lotus.jpg', ['pinwheel','pot']] - ] + def load_imgnet_labels(tmpdir): + tmpfile = os.path.join(tmpdir, 'image_net_labels.json') + mx.test_utils.download('https://raw.githubusercontent.com/dmlc/web-data/master/mxnet/doc/tutorials/onnx/image_net_labels.json', + fname=tmpfile) + return np.array(json.load(open(tmpfile, 'r'))) + + def download_test_images(tmpdir): + global test_images for f,_ in test_images: mx.test_utils.download('https://github.com/dmlc/web-data/blob/master/mxnet/doc/tutorials/onnx/images/'+f+'?raw=true', - fname=f) + fname=os.path.join(tmpdir, f)) return test_images - test_models = [ - 'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25', - 'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25', - 'resnet18_v1', 'resnet18_v2', 'resnet34_v1', 'resnet34_v2', 'resnet50_v1', 'resnet50_v2', - 'resnet101_v1', 'resnet101_v2', 'resnet152_v1', 'resnet152_v2', - 'squeezenet1.0', 'squeezenet1.1', - 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn' - ] - labels = load_imgnet_labels() - test_images = download_test_images() - - for model in test_models: - tmpdir = tempfile.mkdtemp() - sym_file, params_file = get_gluon_cv_model(model, tmpdir) - onnx_file = export_model_to_onnx(sym_file, params_file) - #print("exported onnx file: ",onnx_file) - - # create onnxruntime session using the generated onnx file - ses_opt = onnxruntime.SessionOptions() - ses_opt.log_severity_level = 3 - session = onnxruntime.InferenceSession(onnx_file, ses_opt) - input_name = session.get_inputs()[0].name - - for img,classes in test_images: - 
img_data = normalize_image(img) - raw_result = session.run([], {input_name: img_data}) - res = softmax(np.array(raw_result)).tolist() - class_idx = np.argmax(res) - #print("Image top classification:",labels[class_idx]) - sort_idx = np.flip(np.squeeze(np.argsort(res))) - #print("\tTop labels: " + ",".join(labels[sort_idx[:5]])) - correct_classification = False - for label in labels[sort_idx[:5]]: - for c in classes: - if c in label: - correct_classification = True - assert correct_classification == True - - # cleanup - shutil.rmtree(tmpdir) + #labels = load_imgnet_labels(tmp_path) + test_images = download_test_images(tmp_path) + sym_file, params_file = get_gluon_cv_model(model, tmp_path) + onnx_file = export_model_to_onnx(sym_file, params_file) + + # create onnxruntime session using the generated onnx file + ses_opt = onnxruntime.SessionOptions() + ses_opt.log_severity_level = 3 + session = onnxruntime.InferenceSession(onnx_file, ses_opt) + input_name = session.get_inputs()[0].name + for img, accepted_ids in test_images: + img_data = normalize_image(os.path.join(tmp_path,img)) + raw_result = session.run([], {input_name: img_data}) + res = softmax(np.array(raw_result)).tolist() + class_idx = np.argmax(res) + assert(class_idx in accepted_ids) + shutil.rmtree(tmp_path) if __name__ == "__main__": From 183a4b7b316f43931d7239064b17a453e36a1d50 Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Thu, 17 Dec 2020 01:51:34 +0000 Subject: [PATCH 5/9] Remove main block. --- tests/python-pytest/onnx/test_onnxruntime.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/python-pytest/onnx/test_onnxruntime.py b/tests/python-pytest/onnx/test_onnxruntime.py index 25ef6058cc40..f332df85c14b 100644 --- a/tests/python-pytest/onnx/test_onnxruntime.py +++ b/tests/python-pytest/onnx/test_onnxruntime.py @@ -117,6 +117,4 @@ def download_test_images(tmpdir): shutil.rmtree(tmp_path) -if __name__ == "__main__": - test_cv_model_inference_onnxruntime() From 18c51c7065ed9ee636d59aa29600a4e20ab4deb0 Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Thu, 17 Dec 2020 06:30:49 +0000 Subject: [PATCH 6/9] Convert PosixPath to string so os.path.join works. --- tests/python-pytest/onnx/test_onnxruntime.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python-pytest/onnx/test_onnxruntime.py b/tests/python-pytest/onnx/test_onnxruntime.py index f332df85c14b..358d87658b05 100644 --- a/tests/python-pytest/onnx/test_onnxruntime.py +++ b/tests/python-pytest/onnx/test_onnxruntime.py @@ -96,6 +96,7 @@ def download_test_images(tmpdir): return test_images + tmp_path = str(tmp_path) #labels = load_imgnet_labels(tmp_path) test_images = download_test_images(tmp_path) sym_file, params_file = get_gluon_cv_model(model, tmp_path) From cbfb6f47d056dcbb6cf363bbca7435bf93601a5a Mon Sep 17 00:00:00 2001 From: Joe Evans Date: Thu, 17 Dec 2020 06:39:15 +0000 Subject: [PATCH 7/9] Expand tests for stack. 
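
The expanded parametrization below exercises every stack position for two rank-3
inputs. As a quick reference for what each axis value produces, a standalone
snippet that does not depend on the test helpers:

    import mxnet as mx

    x = mx.nd.zeros((3, 4, 5))
    y = mx.nd.ones((3, 4, 5))
    for axis in [0, 1, 2, -1]:
        out = mx.nd.stack(x, y, axis=axis)
        print(axis, out.shape)
    # expected: (2, 3, 4, 5), (3, 2, 4, 5), (3, 4, 2, 5), (3, 4, 5, 2)
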
---
 tests/python-pytest/onnx/test_operators.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/tests/python-pytest/onnx/test_operators.py b/tests/python-pytest/onnx/test_operators.py
index 4e63999df9ee..83b74d73af5a 100644
--- a/tests/python-pytest/onnx/test_operators.py
+++ b/tests/python-pytest/onnx/test_operators.py
@@ -235,11 +235,15 @@ def test_onnx_export_broadcast_add(tmp_path, dtype):
 @pytest.mark.parametrize('dtype', ['float32', 'float64', 'int32', 'int64'])
-@pytest.mark.parametrize('axis', [0, 1, -1])
+@pytest.mark.parametrize('axis', [0, 1, 2, -1])
 def test_onnx_export_stack(tmp_path, dtype, axis):
     M = def_model('stack', axis=axis)
-    x = mx.nd.array([1,2,3,4], dtype=dtype)
-    y = mx.nd.array([5,6,7,8], dtype=dtype)
+    if 'int' in dtype:
+        x = mx.nd.random.randint(0, 10*9, (3,4,5), dtype=dtype)
+        y = mx.nd.random.randint(0, 10*9, (3,4,5), dtype=dtype)
+    else:
+        x = mx.nd.random.normal(0, 10*9, (3,4,5), dtype=dtype)
+        y = mx.nd.random.normal(0, 10*9, (3,4,5), dtype=dtype)
     op_export_test('stack', M, [x, y], tmp_path)

From f5ae82a3b2aa845d6fc9cd8e027754248865d4e5 Mon Sep 17 00:00:00 2001
From: Joe Evans
Date: Thu, 17 Dec 2020 18:45:55 +0000
Subject: [PATCH 8/9] Assign name to output node in layer_norm.

---
 python/mxnet/contrib/onnx/mx2onnx/_op_translations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
index e19cbc5726f6..b2d9e9838bd6 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -2298,7 +2298,7 @@ def convert_layer_norm(node, **kwargs):
     if axes == -1:
         nodes += [
             make_node("Mul", [name+"_div0_out", input_nodes[1]], [name+"_mul0_out"]),
-            make_node("Add", [name+"_mul0_out", input_nodes[2]], [name])
+            make_node("Add", [name+"_mul0_out", input_nodes[2]], [name], name=name)
         ]
     else:
         nodes += [

From 7616a68cc1dabe94cf846de890328f15c8bae592 Mon Sep 17 00:00:00 2001
From: Joe Evans
Date: Thu, 17 Dec 2020 18:49:50 +0000
Subject: [PATCH 9/9] Refactor transpose export op and helper function, make sure we return all nodes created in export operators.
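
The refactor below follows the converter convention that each export function
returns every NodeProto it creates and gives the final node the MXNet op's name as
both node name and output name, so downstream converters can wire to it. A
standalone illustration using only onnx.helper; the node and input names are made
up for the example:

    import onnx

    name = 'dot0'  # stands in for the MXNet node name
    nodes = [
        onnx.helper.make_node('Transpose', ['a'], [name + '_a'], name=name + '_a'),
        onnx.helper.make_node('MatMul', [name + '_a', 'b'], [name], name=name),
    ]
    for n in nodes:
        print(n.name, list(n.input), '->', list(n.output))
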
---
 .../contrib/onnx/mx2onnx/_op_translations.py | 122 +++++++-----------
 1 file changed, 49 insertions(+), 73 deletions(-)

diff --git a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
index b2d9e9838bd6..f59bb4652b17 100644
--- a/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
+++ b/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -337,7 +337,7 @@ def convert_fully_connected(node, **kwargs):
     in_nodes = [input_nodes[0], input_nodes[1]]
 
     if no_bias:
-        create_const_scalar_node(name+"_bias", np.array([0], dtype=dtype), kwargs)
+        nodes.append(create_const_scalar_node(name+"_bias", np.array([0], dtype=dtype), kwargs))
         in_nodes.append(name+"_bias")
     else:
         in_nodes.append(input_nodes[2])
@@ -547,9 +547,8 @@ def convert_pad(node, **kwargs):
     return [node]
 
 
-def create_helper_trans_node(op_name, input_node, node_name):
+def create_helper_trans_node(node_name, input_node):
     """create extra transpose node for dot operator"""
-    node_name = op_name + "_" + node_name
     trans_node = onnx.helper.make_node(
         'Transpose',
         inputs=[input_node],
@@ -565,39 +564,26 @@ def convert_dot(node, **kwargs):
     MatMul and Transpose operators based on the values set for
     transpose_a, transpose_b attributes."""
     name, input_nodes, attrs = get_inputs(node, kwargs)
-    input_node_a = input_nodes[0]
-    input_node_b = input_nodes[1]
-
-    trans_a_node = None
-    trans_b_node = None
     trans_a = get_boolean_attribute_value(attrs, "transpose_a")
     trans_b = get_boolean_attribute_value(attrs, "transpose_b")
-    op_name = "transpose" + str(kwargs["idx"])
-
+    nodes = []
+    in_nodes = []
     if trans_a:
-        trans_a_node = create_helper_trans_node(op_name, input_nodes[0], 'a')
-        input_node_a = op_name+"_a"
-    if trans_b:
-        trans_b_node = create_helper_trans_node(op_name, input_nodes[1], 'b')
-        input_node_b = op_name+"_b"
-
-    matmul_node = onnx.helper.make_node(
-        'MatMul',
-        inputs=[input_node_a, input_node_b],
-        outputs=[name],
-        name=name
-    )
+        nodes.append(create_helper_trans_node(name+"_a", input_nodes[0]))
+        in_nodes.append(name+"_a")
+    else:
+        in_nodes.append(input_nodes[0])
 
-    if not trans_a and not trans_b:
-        return [matmul_node]
-    elif trans_a and not trans_b:
-        return [trans_a_node, matmul_node]
-    elif trans_b and not trans_a:
-        return [trans_b_node, matmul_node]
+    if trans_b:
+        nodes.append(create_helper_trans_node(name+"_b", input_nodes[1]))
+        in_nodes.append(name+"_b")
     else:
-        return [trans_a_node, trans_b_node, matmul_node]
+        in_nodes.append(input_nodes[1])
+
+    nodes.append(onnx.helper.make_node('MatMul', in_nodes, [name], name=name))
+    return nodes
 
 
 @mx_op.register("_linalg_gemm2")
@@ -1607,24 +1593,12 @@ def convert_cast(node, **kwargs):
     """
     name, input_nodes, attrs = get_inputs(node, kwargs)
-    dtype = attrs["dtype"]
-
-    # dtype can be mapped only with types from TensorProto
-    # float32 is mapped to float and float64 to double in onnx
-    # following tensorproto mapping https://github.com/onnx/onnx/blob/master/onnx/mapping.py
-    if dtype == 'float32':
-        dtype = 'float'
-    elif dtype == 'float64':
-        dtype = 'double'
-
-    node = onnx.helper.make_node(
-        "Cast",
-        input_nodes,
-        [name],
-        to=getattr(onnx.TensorProto, dtype.upper()),
-        name=name,
-    )
-    return [node]
+    dtype = attrs.get('dtype')
+    to_dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
+    nodes = [
+        onnx.helper.make_node("Cast", input_nodes, [name], to=to_dtype, name=name)
+    ]
+    return nodes
 
 
 @mx_op.register("slice_axis")
@@ -2277,15 +2251,15 @@ def convert_layer_norm(node, **kwargs):
     axes = 
int(attrs.get('axis', -1)) eps = attrs.get('eps', 9.99999975e-06) - create_tensor([axes], name+"_axes", kwargs["initializer"]) - create_tensor([axes+1], name+"_axes+1", kwargs["initializer"]) - create_tensor([], name+"_void", kwargs["initializer"]) - create_const_scalar_node(name+'_0_s', np.int64(0), kwargs) - create_const_scalar_node(name+'_1_s', np.int64(1), kwargs) - create_const_scalar_node(name+"_2_s", np.int64(2), kwargs) - create_const_scalar_node(name+"_eps", np.float32(eps), kwargs) nodes = [ + create_tensor([axes], name+"_axes", kwargs["initializer"]), + create_tensor([axes+1], name+"_axes+1", kwargs["initializer"]), + create_tensor([], name+"_void", kwargs["initializer"]), + create_const_scalar_node(name+'_0_s', np.int64(0), kwargs), + create_const_scalar_node(name+'_1_s', np.int64(1), kwargs), + create_const_scalar_node(name+"_2_s", np.int64(2), kwargs), + create_const_scalar_node(name+"_eps", np.float32(eps), kwargs), make_node("ReduceMean", [input_nodes[0]], [name+"_rm0_out"], axes=[axes]), make_node("Sub", [input_nodes[0], name+"_rm0_out"], [name+"_sub0_out"]), make_node("Pow", [name+"_sub0_out", name+"_2_s"], [name+"_pow0_out"]), @@ -2399,19 +2373,19 @@ def convert_contrib_interleaved_matmul_selfatt_valatt(node, **kwargs): att = input_nodes[1] num_heads = int(attrs.get('heads')) - create_tensor([num_heads], name+"_const_num_heads", kwargs["initializer"]) - create_tensor([0], name+"_const_0", kwargs["initializer"]) - create_tensor([1], name+"_const_1", kwargs["initializer"]) - create_tensor([2], name+"_const_2", kwargs["initializer"]) - create_tensor([3], name+"_const_3", kwargs["initializer"]) - create_tensor([4], name+"_const_4", kwargs["initializer"]) - create_tensor([5], name+"_const_5", kwargs["initializer"]) - create_tensor([0, 0, num_heads, 3, -1], name+"_reshape0_shape", kwargs["initializer"]) - create_tensor([0, 0, 0, 2, 0], name+"_slice_start", kwargs["initializer"]) - create_tensor([0, 0, 0, -1], name+"_reshape1_shape", kwargs["initializer"]) - create_tensor([0, 0, -1], name+"_reshape4_shape", kwargs["initializer"]) nodes = [ + create_tensor([num_heads], name+"_const_num_heads", kwargs["initializer"]), + create_tensor([0], name+"_const_0", kwargs["initializer"]), + create_tensor([1], name+"_const_1", kwargs["initializer"]), + create_tensor([2], name+"_const_2", kwargs["initializer"]), + create_tensor([3], name+"_const_3", kwargs["initializer"]), + create_tensor([4], name+"_const_4", kwargs["initializer"]), + create_tensor([5], name+"_const_5", kwargs["initializer"]), + create_tensor([0, 0, num_heads, 3, -1], name+"_reshape0_shape", kwargs["initializer"]), + create_tensor([0, 0, 0, 2, 0], name+"_slice_start", kwargs["initializer"]), + create_tensor([0, 0, 0, -1], name+"_reshape1_shape", kwargs["initializer"]), + create_tensor([0, 0, -1], name+"_reshape4_shape", kwargs["initializer"]), make_node("Shape", [qkv], [name+"_shape_qkv"]), make_node("Slice", [name+"_shape_qkv", name+"_const_0", name+"_const_1"], [name+"_qkv_d0"]), make_node("Slice", [name+"_shape_qkv", name+"_const_1", name+"_const_2"], [name+"_qkv_d1"]), @@ -2636,13 +2610,15 @@ def convert_arange_like(node, **kwargs): if repeat != 1: raise NotImplementedError("arange_like operator with repeat != 1 not yet implemented.") - create_const_scalar_node(name+"_start", np.array([start], dtype=dtype), kwargs) - create_const_scalar_node(name+"_step", np.array([step], dtype=dtype), kwargs) - create_const_scalar_node(name+"_half_step", np.array([float(step)*0.5], dtype=dtype), kwargs) - create_tensor([], 
name+'_void', kwargs["initializer"]) + nodes = [ + create_const_scalar_node(name+"_start", np.array([start], dtype=dtype), kwargs), + create_const_scalar_node(name+"_step", np.array([step], dtype=dtype), kwargs), + create_const_scalar_node(name+"_half_step", np.array([float(step)*0.5], dtype=dtype), kwargs), + create_tensor([], name+'_void', kwargs["initializer"]) + ] if axis == 'None': # output will be same shape as input - nodes = [ + nodes += [ make_node('Shape', [input_nodes[0]], [name+"_shape0_out"]), make_node("ReduceProd", [name+"_shape0_out"], [name+"_redprod0_out"]), make_node('Reshape', [name+'_redprod0_out', name+'_void'], [name+'_reshape0_out']), @@ -2655,9 +2631,9 @@ def convert_arange_like(node, **kwargs): ] else: # determine shape of axis - create_tensor([int(axis)], name+"_axis_start", kwargs["initializer"], dtype='int64') - create_tensor([int(axis)+1], name+"_axis_end", kwargs["initializer"], dtype='int64') - nodes = [ + nodes += [ + create_tensor([int(axis)], name+"_axis_start", kwargs["initializer"], dtype='int64'), + create_tensor([int(axis)+1], name+"_axis_end", kwargs["initializer"], dtype='int64'), make_node('Shape', [input_nodes[0]], [name+"_shape0_out"]), make_node('Slice', [name+"_shape0_out", name+"_axis_start", name+"_axis_end"], [name+"_slice0_out"]), make_node("ReduceProd", [name+"_slice0_out"], [name+"_reprod0_out"]),