Merged
12 changes: 4 additions & 8 deletions backends/arm/TARGETS
@@ -22,8 +22,7 @@ runtime.python_library(
"common/debug.py",
],
deps = [
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/serializer:serializer",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/serializer:serializer",
+ "fbsource//third-party/tosa_tools:serializer",
"//caffe2:torch",
"//executorch/exir:lib",
],
@@ -37,10 +36,8 @@ runtime.python_library(
deps = [
"fbsource//third-party/pypi/flatbuffers:flatbuffers",
"fbsource//third-party/pypi/ml-dtypes:ml-dtypes",
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/serializer:serializer",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/serializer:serializer",
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/tosa:tosa",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/tosa:tosa",
+ "fbsource//third-party/tosa_tools:serializer",
+ "fbsource//third-party/tosa_tools:tosa",
":process_node",
"//executorch/exir/backend:compile_spec_schema",
"//executorch/backends/arm/operators:lib",
@@ -83,8 +80,7 @@ runtime.python_library(
name = "process_node",
srcs = ["process_node.py"],
deps = [
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/tosa:tosa",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/tosa:tosa",
+ "fbsource//third-party/tosa_tools:tosa",
"//executorch/backends/arm/operators:node_visitor",
"//executorch/backends/arm/tosa:mapping",
"//executorch/backends/arm/tosa:quant_utils",
8 changes: 5 additions & 3 deletions backends/arm/arm_vela.py
@@ -25,17 +25,19 @@
# per-io structs to simplify runtime use.
def vela_bin_pack_io(prefix, data):
vela_input_shapes = data[prefix + "_shape"]
+ # Vela input/output shape is fixed to 6D
+ vela_io_shape_dims = 6

ios = struct.pack("<i", len(vela_input_shapes))
for i in range(len(vela_input_shapes)):
io_shape = vela_input_shapes[i]
io_elem_size = data[prefix + "_elem_size"][i]
io_offset = data[prefix + "_offset"][i]
io_region = data[prefix + "_region"][i]
- assert len(io_shape) <= 4
- inp_pad = io_shape.tolist() + [0] * (4 - len(io_shape))
+ assert len(io_shape) == vela_io_shape_dims
+ inp_pad = io_shape.tolist()
io_struct = struct.pack(
- "<iiiiiii", *inp_pad, io_elem_size, io_offset, io_region
+ "<iiiiiiiii", *inp_pad, io_elem_size, io_offset, io_region
)
ios += io_struct
return ios
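For reference, a minimal sketch of what one packed IO record now looks like from the Python side, mirroring vela_bin_pack_io above. The shape and the elem_size/offset/region values are illustrative, and it assumes Vela already reports 6-D shapes, as the new assert requires:

    import struct

    shape_6d = [1, 1, 1, 1, 224, 224]    # illustrative 6-D shape reported by Vela
    elem_size, offset, region = 1, 0, 0  # illustrative values
    # 6 shape dims + elem_size + offset + region = 9 little-endian int32s per record.
    record = struct.pack("<iiiiiiiii", *shape_6d, elem_size, offset, region)
    assert len(record) == 9 * 4          # 36 bytes per IO entry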
2 changes: 1 addition & 1 deletion backends/arm/debug/TARGETS
@@ -8,7 +8,7 @@ runtime.python_library(
"schema.py",
],
deps = [
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/serializer:serializer",
+ "fbsource//third-party/tosa_tools:serializer",
"//caffe2:torch",
],
)
3 changes: 1 addition & 2 deletions backends/arm/operators/TARGETS
@@ -20,8 +20,7 @@ runtime.python_library(
name = "ops",
srcs = glob(["op_*.py", "ops_*.py"]),
deps = [
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/tosa:tosa",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/tosa:tosa",
+ "fbsource//third-party/tosa_tools:tosa",
":node_visitor",
":operator_validation_utils",
"//executorch/backends/arm/tosa:mapping",
2 changes: 1 addition & 1 deletion backends/arm/requirements-arm-ethos-u.txt
@@ -3,4 +3,4 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

- ethos-u-vela @ git+https://gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-vela@d37febc1715edf0d236c2ff555739a8a9aadcf9a
+ ethos-u-vela == 4.4.0
4 changes: 2 additions & 2 deletions backends/arm/runtime/EthosUBackend.cpp
@@ -383,8 +383,8 @@ class EthosUBackend final : public ::executorch::runtime::BackendInterface {
*tensor_count = *tensor_count * tensor.size(i);
}

- // The VelaIO type has a shape of fixed size 4
- for (int i = 0; i < 4; i++) {
+ // The VelaIO type has a shape of fixed size 6
+ for (int i = 0; i < shapeDim; i++) {
*io_count = *io_count * io->shape[i];
}
}
4 changes: 3 additions & 1 deletion backends/arm/runtime/VelaBinStream.h
@@ -34,9 +34,11 @@ typedef struct {
char data[]; // block.name specific format data
} VelaBinBlock;

+ constexpr int shapeDim = 6; // Number of dimensions in VelaIO
+
// A Vela input or output descriptor in the binary stream
typedef struct {
- int shape[4]; // Up to 4D shape of input or output
+ int shape[shapeDim]; // Shape of input or output
int elem_size; // Element sizeof in bytes
int offset; // Offset in bytes within SRAM working data
int region; // Scratch region this belongs to
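To make the new layout concrete, a small Python sketch that unpacks one VelaIO record and computes its element count the same way the EthosUBackend loop does. The helper name is hypothetical; the field layout mirrors the struct above:

    import struct

    SHAPE_DIM = 6  # mirrors shapeDim in VelaBinStream.h

    def read_vela_io(buf: bytes, pos: int = 0):
        # shape[6] + elem_size + offset + region, all little-endian int32 (36 bytes).
        fields = struct.unpack_from("<iiiiiiiii", buf, pos)
        shape, (elem_size, io_offset, region) = fields[:SHAPE_DIM], fields[SHAPE_DIM:]
        io_count = 1
        for dim in shape:
            io_count *= dim  # same product the C++ loop over shapeDim computes
        return shape, elem_size, io_offset, region, io_count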
11 changes: 4 additions & 7 deletions backends/arm/test/ops/test_logical.py
@@ -86,9 +86,6 @@ def forward(self, tensor: torch.Tensor):
#################


- xfails = {"rand_rank4": "MLBEDSW-11031: Output diff on u85 bool transpose."}
-
-
@common.parametrize("test_data", And().test_data)
def test_logical_and_tosa_FP(test_data: input_t2):
pipeline = TosaPipelineFP[input_t2](
@@ -132,7 +129,7 @@ def test_logical_and_u55_INT_not_delegated(test_data: input_t2):
pipeline.run()


- @common.parametrize("test_data", And().test_data, xfails=xfails)
+ @common.parametrize("test_data", And().test_data)
@common.XfailIfNoCorstone320
def test_logical_and_u85_INT(test_data: input_t2):
pipeline = EthosU85PipelineINT[input_t2](
@@ -226,7 +223,7 @@ def test_logical_xor_u55_INT_not_delegated(test_data: input_t2):
pipeline.run()


- @common.parametrize("test_data", Xor().test_data, xfails=xfails)
+ @common.parametrize("test_data", Xor().test_data)
@common.XfailIfNoCorstone320
def test_logical_xor_u85_INT(test_data: input_t2):
pipeline = EthosU85PipelineINT[input_t2](
@@ -320,7 +317,7 @@ def test_logical_or_u55_INT_not_delegated(test_data: input_t2):
pipeline.run()


- @common.parametrize("test_data", Or().test_data, xfails=xfails)
+ @common.parametrize("test_data", Or().test_data)
@common.XfailIfNoCorstone320
def test_logical_or_u85_INT(test_data: input_t2):
pipeline = EthosU85PipelineINT[input_t2](
@@ -414,7 +411,7 @@ def test_logical_not_u55_INT_not_delegated(test_data: input_t2):
pipeline.run()


- @common.parametrize("test_data", Not().test_data, xfails=xfails)
+ @common.parametrize("test_data", Not().test_data)
@common.XfailIfNoCorstone320
def test_logical_not_u85_INT(test_data: input_t2):
pipeline = EthosU85PipelineINT[input_t2](
8 changes: 1 addition & 7 deletions backends/arm/test/ops/test_logsoftmax.py
@@ -64,13 +64,7 @@ def test_log_softmax_tosa_INT(test_data):
pipeline.run()


- @common.parametrize(
- "test_data",
- LogSoftmax.test_data,
- xfails={
- "randn_neg_dim": "MLBEDSW-11032: ILLEGAL_OFM_BASE error: Base addresses must be aligned to brick depth on u55."
- },
- )
+ @common.parametrize("test_data", LogSoftmax.test_data)
@common.XfailIfNoCorstone300()
def test_log_softmax_u55_INT(test_data):
data, dim = test_data()
8 changes: 1 addition & 7 deletions backends/arm/test/ops/test_softmax.py
@@ -61,13 +61,7 @@ def test_softmax_tosa_INT(test_data):
pipeline.run()


- @common.parametrize(
- "test_data",
- Softmax.test_data,
- {
- "randn_neg_dim": "MLBEDSW-11032: ILLEGAL_OFM_BASE error: Base addresses must be aligned to brick depth on u55."
- },
- )
+ @common.parametrize("test_data", Softmax.test_data)
@common.XfailIfNoCorstone300
def test_softmax_u55_INT(test_data):
data, dim = test_data()
3 changes: 3 additions & 0 deletions backends/arm/test/ops/test_squeeze.py
@@ -29,6 +29,7 @@ class SqueezeDim(torch.nn.Module):
"squeeze3d_dim_neg_2": lambda: (torch.randn(1, 1, 5), -2),
"squeeze4d_dim_pos_3": lambda: (torch.randn(1, 2, 3, 1), 3),
"squeeze4d_dim_neg_2": lambda: (torch.randn(1, 5, 1, 5), -2),
+ "squeeze5d_dim_neg_2": lambda: (torch.randn(1, 1, 5, 1, 5), -2),
}

def forward(self, x: torch.Tensor, dim: int):
@@ -40,6 +41,7 @@ class SqueezeDims(torch.nn.Module):
"squeeze3d_dims_0_1": lambda: (torch.randn(1, 1, 5), (0, 1)),
"squeeze4d_dims_0_neg_1": lambda: (torch.randn(1, 5, 5, 1), (0, -1)),
"squeeze4d_dims_0_neg_2": lambda: (torch.randn(1, 5, 1, 5), (0, -2)),
+ "squeeze5d_dims_0_neg_2": lambda: (torch.randn(1, 1, 5, 1, 5), (0, -2)),
}

def forward(self, x: torch.Tensor, dims: tuple[int]):
@@ -51,6 +53,7 @@ class Squeeze(torch.nn.Module):
"squeeze3d": lambda: (torch.randn(1, 1, 5),),
"squeeze4d_dims": lambda: (torch.randn(1, 5, 5, 1),),
"squeeze3d_dims_mix": lambda: (torch.randn(1, 5, 1, 5),),
+ "squeeze4d_dims_mix": lambda: (torch.randn(1, 1, 5, 1, 5),),
}

def forward(self, x: torch.Tensor):
36 changes: 33 additions & 3 deletions backends/arm/test/ops/test_unflatten.py
@@ -9,6 +9,8 @@
import torch
from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.test_pipeline import (
+ EthosU55PipelineINT,
+ EthosU85PipelineINT,
TosaPipelineFP,
TosaPipelineINT,
VgfPipeline,
@@ -30,8 +32,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.unflatten(x, self.dim, self.sizes)

test_data: dict[str, test_data_t] = {
- "randn_4d": (lambda: (Unflatten(1, (2, 2)), (torch.randn(3, 4, 5, 1),))),
- "rand_3d": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(3, 4, 4),))),
+ "rand_3d_batch3": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(3, 4, 4),))),
+ "rand_3d_batch1": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(1, 4, 4),))),
+ "randn_4d_dim1": (lambda: (Unflatten(1, (2, 2)), (torch.randn(3, 4, 5, 1),))),
+ "randn_4d_dim3": (lambda: (Unflatten(3, (2, 2)), (torch.randn(1, 1, 5, 4),))),
}


@@ -49,7 +53,33 @@ def test_unflatten_int_tosa_FP(test_data: test_data_t):
@common.parametrize("test_data", Unflatten.test_data)
def test_unflatten_int_tosa_INT(test_data: test_data_t):
module, inputs = test_data()
- pipeline = TosaPipelineINT[input_t](
+ pipeline = TosaPipelineINT[input_t](module, inputs, Unflatten.aten_op)
+ pipeline.run()
+
+
+ xfails = {
+ "rand_3d_batch3": "Batch size > 1 currently not supported for FVP tests",
+ "randn_4d_dim1": "Batch size > 1 currently not supported for FVP tests",
+ }
+
+
+ @common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
+ @common.XfailIfNoCorstone300
+ def test_unflatten_int_u55_INT(test_data: test_data_t):
+ module, inputs = test_data()
+ pipeline = EthosU55PipelineINT[input_t](
+ module,
+ inputs,
+ Unflatten.aten_op,
+ )
+ pipeline.run()
+
+
+ @common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
+ @common.XfailIfNoCorstone320
+ def test_unflatten_int_u85_INT(test_data: test_data_t):
+ module, inputs = test_data()
+ pipeline = EthosU85PipelineINT[input_t](
module,
inputs,
Unflatten.aten_op,
2 changes: 1 addition & 1 deletion backends/arm/test/ops/test_unsqueeze.py
@@ -25,7 +25,7 @@


class Unsqueeze(torch.nn.Module):
- shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3)]
+ shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3), (1, 5, 4, 3)]
test_parameters = {}
for n in shapes:
test_parameters[f"rand_{n}"] = (torch.randn(n),)
4 changes: 4 additions & 0 deletions backends/arm/test/ops/test_view.py
@@ -51,6 +51,10 @@ class View(torch.nn.Module):
"rand_4d_4_3": lambda: (torch.rand(5, 10, 1, 1), (1, 25, 2)),
"rand_4d_4_2": lambda: (torch.rand(2, 50, 1, 1), (1, 100)),
"rand_4d_2_4_same": lambda: (torch.rand(2, 3, 2, 3), (2, 3, 3, 2)),
+ "rand_4d_5d": lambda: (torch.rand(1, 3, 4, 5), (1, 1, 4, 5, -1)),
+ "rand_5d_5d": lambda: (torch.rand(1, 1, 4, 5, 6), (1, 1, 4, -1, 6)),
+ "rand_5d_3d": lambda: (torch.rand(1, 1, 4, 5, 6), (2, 3, -1)),
+ "rand_3d_5d": lambda: (torch.rand(4, 5, 6), (1, 1, 2, -1, 3)),
}

rank_product_too_large = {
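As a quick sanity check on the new entries that use -1, torch infers that single dimension from the remaining element count; an illustrative example matching the "rand_3d_5d" case:

    import torch

    x = torch.rand(4, 5, 6)             # 120 elements
    y = x.view(1, 1, 2, -1, 3)          # -1 is inferred as 120 / (1 * 1 * 2 * 3) = 20
    assert y.shape == (1, 1, 2, 20, 3)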
9 changes: 1 addition & 8 deletions backends/arm/test/passes/test_rescale_pass.py
@@ -172,14 +172,7 @@ def test_quantized_rescale_tosa_bi(test_data: tuple[torch.Tensor, torch.Tensor])
pipeline.run()


- u55_xfails = {
- "ones": "MLBEDSW-11032: ILLEGAL_OFM_BASE error: Base addresses must be aligned to brick depth on u55.",
- "randn_ones": "MLBEDSW-11032: ILLEGAL_OFM_BASE error: Base addresses must be aligned to brick depth on u55.",
- "randn_large": "MLBEDSW-11032: ILLEGAL_OFM_BASE error: Base addresses must be aligned to brick depth on u55.",
- }
-
-
- @common.parametrize("test_data", RescaleNetwork.test_data, xfails=u55_xfails)
+ @common.parametrize("test_data", RescaleNetwork.test_data)
@common.XfailIfNoCorstone300
def test_quantized_rescale_u55(test_data: tuple[torch.Tensor, torch.Tensor]):
"""Tests a model with many ops that requires rescales. As more ops are quantized to int32 and
10 changes: 3 additions & 7 deletions backends/arm/tosa/TARGETS
@@ -6,8 +6,7 @@ runtime.python_library(
"mapping.py",
],
deps = [
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/serializer:serializer",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/serializer:serializer",
+ "fbsource//third-party/tosa_tools:serializer",
"//caffe2:torch",
":specification",
],
@@ -19,10 +18,8 @@
],
deps = [
"fbsource//third-party/pypi/numpy:numpy",
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/serializer:serializer",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/serializer:serializer",
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/tosa:tosa",
- "fbsource//third-party/tosa_tools/v1.00/serialization_lib/python/tosa:tosa",
+ "fbsource//third-party/tosa_tools:serializer",
+ "fbsource//third-party/tosa_tools:tosa",
"//executorch/backends/arm:constants",
":mapping",
"//executorch/exir/dialects:lib",
@@ -44,7 +41,6 @@
"utils.py",
],
deps = [
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/serializer:serializer",
":quant_utils",
"//executorch/backends/arm/operators:node_visitor",
],
3 changes: 1 addition & 2 deletions backends/nxp/TARGETS
@@ -50,7 +50,7 @@ runtime.python_library(
name = "neutron_sdk",
srcs = glob(["backend/**/*.py"]),
deps = [
- "fbsource//third-party/pypi/neutron_converter:neutron_converter",
+ "fbsource//third-party/pypi/neutron_converter:neutron_converter",
],
)

@@ -68,7 +68,6 @@ runtime.python_library(
":quantizer",
"fbsource//third-party/pypi/flatbuffers:flatbuffers",
"fbsource//third-party/pypi/ml-dtypes:ml-dtypes",
- "fbsource//third-party/tosa_tools/v0.80/serialization_lib/python/serializer:serializer",
"//executorch/exir:lib",
"//executorch/backends/transforms:remove_getitem_op",
"//caffe2:torch",