Skip to content

Commit

Permalink
[ETHOSN] Use pytest parameterization for integration tests (#12688)
Browse files Browse the repository at this point in the history
Using pytest parameterization helps identify the particular parameter combinations that are failing for a given test. Additionally, it can be useful when parallelizing the tests. This commit makes sure that "trials" have been replaced by parameterization as well as completing a general cleanup.
  • Loading branch information
lhutton1 authored Sep 5, 2022
1 parent 28cad58 commit 5dcf622
Show file tree
Hide file tree
Showing 9 changed files with 492 additions and 465 deletions.
399 changes: 216 additions & 183 deletions tests/python/contrib/test_ethosn/test_conv2d.py

Large diffs are not rendered by default.

59 changes: 29 additions & 30 deletions tests/python/contrib/test_ethosn/test_depth_to_space.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,37 +33,35 @@ def _get_model(shape, block, dtype, layout):

@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape",
    [
        (1, 16, 16, 16),
        (1, 64, 32, 16),
    ],
)
def test_depth_to_space(dtype, shape):
    """Compare Depth To Space output with TVM."""
    np.random.seed(0)
    # Random input covering the full range of the quantized dtype.
    inputs = {
        "a": tvm.nd.array(
            np.random.randint(np.iinfo(dtype).min, np.iinfo(dtype).max + 1, size=shape, dtype=dtype)
        )
    }
    outputs = []
    # Run once on the reference CPU path (npu=False) and once offloaded to the
    # NPU (npu=True); the two results must agree within the verify tolerance.
    for npu in [False, True]:
        model = _get_model(shape, 2, dtype, "NHWC")
        mod = tei.make_module(model, {})
        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))

    tei.verify(outputs, dtype, 1)

@requires_ethosn
@pytest.mark.parametrize(
    "shape,block,dtype,layout,err_msg",
    [
        ((2, 16, 16, 16), 2, "uint8", "NHWC", "batch size=2, batch size must = 1"),
        (
            (1, 16, 16, 16),
            2,
            "int16",
            "NHWC",
            "dtype='int16', dtype must be either uint8, int8 or int32",
        ),
        ((1, 16, 16, 16), 4, "uint8", "NHWC", "Only block size of 2 is supported"),
        ((1, 16, 16, 16), 2, "uint8", "NCHW", "Input layer must be NHWC or NHWCB"),
    ],
)
def test_depth_to_space_failure(shape, block, dtype, layout, err_msg):
    """Check Depth To Space error messages."""
    # Each case is expected to be rejected by the Ethos-N partitioner with the
    # matching error message rather than compile successfully.
    model = _get_model(shape, block, dtype, layout)
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
95 changes: 48 additions & 47 deletions tests/python/contrib/test_ethosn/test_fullyconnected.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,62 +114,63 @@ def test_fullyconnected(shape, out_channels, dtype, input_zp, input_sc, kernel_z


@requires_ethosn
@pytest.mark.parametrize(
    "shape,weight_shape,err_msg",
    [
        (
            (1, 1, 1, 64),
            (1, 64),
            "Weights tensor must have I dimension equal to the number"
            " of channels of the input tensor.;",
        ),
        ((1024, 64), (1, 64), "batch size=1024, batch size must = 1;"),
    ],
)
def test_fullyconnected_failure(shape, weight_shape, err_msg):
    """Check Fully Connected error messages."""
    np.random.seed(0)

    dtype = "uint8"

    # Unit scales / zero zero-points: only shape mismatches are under test here.
    model, _ = _get_model(
        shape,
        weight_shape,
        0,
        1,
        0,
        1,
        0,
        1,
        dtype,
    )
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)


@requires_ethosn
def test_fullyconnected_scale_out_of_range():
    """Check Fully Connected out of range scale error message."""
    np.random.seed(0)

    # input_sc * kernel_sc / output_sc = 1024 * 1024 / 1 = 2^20, outside the
    # supported overall-scale range, so partitioning must fail.
    input_sc = 1024
    kernel_sc = 1024
    output_sc = 1

    model, _ = _get_model(
        (1, 64),
        (1, 64),
        0,
        input_sc,
        0,
        kernel_sc,
        0,
        output_sc,
        "uint8",
    )
    model = tei.make_ethosn_composite(model, "ethos-n.qnn_fc")
    mod = tei.make_ethosn_partition(model)
    expected_error_msg = (
        "Overall scale (of the input * weights / output) should be in the range (2^-32, 65536)"
    )
    tei.test_error(mod, {}, expected_error_msg)
77 changes: 37 additions & 40 deletions tests/python/contrib/test_ethosn/test_pooling.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,91 +38,88 @@ def _get_model(shape, typef, sizes, strides, pads, layout, dtype):

@requires_ethosn
@pytest.mark.parametrize("dtype", ["uint8", "int8"])
@pytest.mark.parametrize(
    "shape,typef,size,stride,pad",
    [
        ((1, 8, 8, 8), relay.nn.max_pool2d, (2, 2), (2, 2), (0, 0, 0, 0)),
        ((1, 9, 9, 9), relay.nn.max_pool2d, (3, 3), (2, 2), (0, 0, 0, 0)),
        ((1, 8, 8, 8), relay.nn.avg_pool2d, (3, 3), (1, 1), (1, 1, 1, 1)),
    ],
)
def test_pooling(dtype, shape, typef, size, stride, pad):
    """Compare Pooling output with TVM."""
    np.random.seed(0)

    # All supported cases use NHWC, so the layout is fixed rather than parametrized.
    layout = "NHWC"

    inputs = {
        "a": tvm.nd.array(
            np.random.randint(
                low=np.iinfo(dtype).min, high=np.iinfo(dtype).max + 1, size=shape, dtype=dtype
            )
        ),
    }
    outputs = []
    model = _get_model(shape, typef, size, stride, pad, layout, dtype)
    # Run the same module on the reference path and the NPU path and compare.
    for npu in [False, True]:
        mod = tei.make_module(model, {})
        outputs.append(tei.build_and_run(mod, inputs, 1, {}, npu=npu))

    tei.verify(outputs, dtype, 1)


@requires_ethosn
@pytest.mark.parametrize(
    "shape,size,stride,layout,dtype,err_msg",
    [
        (
            (2, 8, 8, 8),
            (2, 2),
            (2, 2),
            "NHWC",
            "uint8",
            "batch size=2, batch size must = 1",
        ),
        (
            (1, 8, 8, 8),
            (2, 2),
            (2, 2),
            "NHWC",
            "int16",
            "dtype='int16', dtype must be either uint8, int8 or int32",
        ),
        (
            (1, 8, 8, 8),
            (2, 2),
            (2, 2),
            "NCHW",
            "uint8",
            "data format=NCHW, data format must = NHWC",
        ),
        (
            (1, 8, 8, 8),
            (2, 2),
            (2, 2, 2),
            "NHWC",
            "uint8",
            "stride size=3, stride size must = 2",
        ),
        (
            (1, 8, 8, 8),
            (2, 2, 2),
            (2, 2),
            "NHWC",
            "uint8",
            "dimensions=3, dimensions must = 2",
        ),
    ],
)
def test_pooling_failure(shape, size, stride, layout, dtype, err_msg):
    """Check Pooling error messages."""
    # Pooling type and padding are not varied by any failure case, so they are
    # fixed here instead of being parametrized.
    typef = relay.nn.max_pool2d
    pad = (0, 0, 0, 0)

    model = _get_model(shape, typef, size, stride, pad, layout, dtype)
    mod = tei.make_ethosn_partition(model)
    tei.test_error(mod, {}, err_msg)
Loading

0 comments on commit 5dcf622

Please sign in to comment.