Skip to content

Commit

Permalink
[ETHOSN] Only use mock inference when whole graph is offloaded (#12296)
Browse files Browse the repository at this point in the history
The mock inference functionality is only supported when the whole
graph is offloaded to the NPU; otherwise it can result in undefined
behaviour. This patch ensures the mock inference functionality is
not run for test cases in which some parts of the graph are not
offloaded to the NPU, while still building the module as a sanity check.

Change-Id: I27052d118ff976f9adbfc3f5b5b96185318e1573
  • Loading branch information
lhutton1 authored Aug 4, 2022
1 parent 3f304c8 commit 9f360a0
Showing 1 changed file with 43 additions and 21 deletions.
64 changes: 43 additions & 21 deletions tests/python/contrib/test_ethosn/test_topologies.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
import tvm
from tvm import relay
from tvm.testing import requires_ethosn
from tvm.relay.op.contrib.ethosn import Available
from tvm.relay.op.contrib.ethosn import Available, ethosn_available
from . import infrastructure as tei


Expand Down Expand Up @@ -76,19 +76,27 @@ def get_model(input_shape, dtype, var_names):

expected_host_ops = 1 if tei.get_ethosn_api_version() == 2205 else 0
npu_partitions = 2 if tei.get_ethosn_api_version() == 2205 else 1
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,

# Mock inference is only supported when the whole graph is offloaded to the NPU
if tei.get_ethosn_api_version() == 2205 and ethosn_available() == Available.SW_ONLY:
tei.build(
mod, {}, npu=npu, expected_host_ops=expected_host_ops, npu_partitions=npu_partitions
)
else:
outputs.append(
tei.build_and_run(
mod,
inputs,
1,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
)
)
)

tei.verify(outputs, dtype, 2)
if outputs:
tei.verify(outputs, dtype, 2)


@requires_ethosn
Expand Down Expand Up @@ -118,7 +126,6 @@ def get_model(dtype):
return out

np.random.seed(0)
outputs = []
inputs = {
"x": tvm.nd.array(
np.random.randint(
Expand All @@ -128,9 +135,12 @@ def get_model(dtype):
}
model = get_model(dtype)
mod = tei.make_module(model, {})
outputs.append(

# Mock inference is only supported when the whole graph is offloaded to the NPU
if ethosn_available() == Available.SW_ONLY:
tei.build(mod, {}, npu=True, expected_host_ops=1, npu_partitions=2)
else:
tei.build_and_run(mod, inputs, 1, {}, npu=True, expected_host_ops=1, npu_partitions=2)
)


@requires_ethosn
Expand Down Expand Up @@ -218,19 +228,31 @@ def get_model(shape, dtype, splits, axis):

expected_host_ops = 1 if tei.get_ethosn_api_version() == 2205 else 0
npu_partitions = 2 if tei.get_ethosn_api_version() == 2205 else 1
outputs.append(
tei.build_and_run(

# Mock inference is only supported when the whole graph is offloaded to the NPU
if tei.get_ethosn_api_version() == 2205 and ethosn_available() == Available.SW_ONLY:
tei.build(
mod,
inputs,
2,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
)
)
else:
outputs.append(
tei.build_and_run(
mod,
inputs,
2,
{},
npu=npu,
expected_host_ops=expected_host_ops,
npu_partitions=npu_partitions,
)
)

tei.verify(outputs, dtype, 0)
if outputs:
tei.verify(outputs, dtype, 0)


@pytest.mark.skipif(
Expand Down

0 comments on commit 9f360a0

Please sign in to comment.