8 changes: 5 additions & 3 deletions backends/arm/arm_vela.py
@@ -25,17 +25,19 @@
 # per-io structs to simplify runtime use.
 def vela_bin_pack_io(prefix, data):
     vela_input_shapes = data[prefix + "_shape"]
+    # Vela input/output shape is fixed to 6D
+    vela_io_shape_dims = 6
 
     ios = struct.pack("<i", len(vela_input_shapes))
     for i in range(len(vela_input_shapes)):
         io_shape = vela_input_shapes[i]
         io_elem_size = data[prefix + "_elem_size"][i]
         io_offset = data[prefix + "_offset"][i]
         io_region = data[prefix + "_region"][i]
-        assert len(io_shape) <= 4
-        inp_pad = io_shape.tolist() + [0] * (4 - len(io_shape))
+        assert len(io_shape) == vela_io_shape_dims
+        inp_pad = io_shape.tolist()
         io_struct = struct.pack(
-            "<iiiiiii", *inp_pad, io_elem_size, io_offset, io_region
+            "<iiiiiiiii", *inp_pad, io_elem_size, io_offset, io_region
         )
         ios += io_struct
     return ios
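A minimal sketch of the record layout the updated packer emits, for reference: nine little-endian int32s per IO, a fixed 6D shape followed by elem_size, offset, and region (the shape and field values below are hypothetical):

```python
import struct

# Hypothetical 6D shape as Vela now emits it (already padded, no host-side padding).
shape_6d = [1, 1, 1, 2, 3, 4]
elem_size, offset, region = 4, 0, 1  # illustrative values

# Nine little-endian int32s: six shape dims + elem_size + offset + region.
record = struct.pack("<iiiiiiiii", *shape_6d, elem_size, offset, region)
assert len(record) == 36  # 9 fields * 4 bytes each

# Unpacking recovers the fields in the order the runtime's VelaIO reads them.
fields = struct.unpack("<iiiiiiiii", record)
assert list(fields[:6]) == shape_6d
```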
2 changes: 1 addition & 1 deletion backends/arm/requirements-arm-ethos-u.txt
@@ -3,4 +3,4 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-ethos-u-vela @ git+https://gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-vela@d37febc1715edf0d236c2ff555739a8a9aadcf9a
+ethos-u-vela @ git+https://gitlab.arm.com/artificial-intelligence/ethos-u/ethos-u-vela@9a43a1bf26bfc7588358d7e6e6bb2613b4981a34
4 changes: 2 additions & 2 deletions backends/arm/runtime/EthosUBackend.cpp
@@ -383,8 +383,8 @@ class EthosUBackend final : public ::executorch::runtime::BackendInterface {
       *tensor_count = *tensor_count * tensor.size(i);
     }
 
-    // The VelaIO type has a shape of fixed size 4
-    for (int i = 0; i < 4; i++) {
+    // The VelaIO type has a shape of fixed size 6
+    for (int i = 0; i < shapeDim; i++) {
       *io_count = *io_count * io->shape[i];
     }
   }
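With the loop now running over all six dimensions, the element count is the product of every `shape` entry; a small sketch of the same computation, under the assumption that dimensions Vela does not use are set to 1:

```python
import math

# Hypothetical 6D VelaIO shape for a logically 3D (2, 3, 4) tensor; unused
# leading dimensions are assumed to be 1 so the product stays the element count.
shape = [1, 1, 1, 2, 3, 4]
io_count = math.prod(shape)  # 24, mirroring what the C++ loop accumulates
```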
6 changes: 4 additions & 2 deletions backends/arm/runtime/VelaBinStream.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2023-2024 Arm Limited and/or its affiliates.
+ * Copyright 2023-2025 Arm Limited and/or its affiliates.
  *
  * This source code is licensed under the BSD-style license found in the
  * LICENSE file in the root directory of this source tree.
@@ -34,9 +34,11 @@ typedef struct {
   char data[]; // block.name specific format data
 } VelaBinBlock;
 
+constexpr int shapeDim = 6; // Number of dimensions in VelaIO
+
 // A Vela input or output descriptor in the binary stream
 typedef struct {
-  int shape[4]; // Up to 4D shape of input or output
+  int shape[shapeDim]; // Shape of input or output
   int elem_size; // Element sizeof in bytes
   int offset; // Offset in bytes within SRAM working data
   int region; // Scratch region this belongs to
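The header and the Python packer now share one invariant: `shapeDim` shape ints plus three metadata ints per IO. A consistency-check sketch (the `SHAPE_DIM` constant is an illustrative restatement of `shapeDim`, and it assumes the compiler inserts no padding between the plain int32 fields of `VelaIO`, which holds for this layout):

```python
import struct

SHAPE_DIM = 6  # illustrative restatement of shapeDim in VelaBinStream.h

# VelaIO is SHAPE_DIM shape ints plus elem_size, offset, and region, so the
# pack format used in arm_vela.py can be derived rather than hard-coded.
fmt = "<" + "i" * (SHAPE_DIM + 3)
assert fmt == "<iiiiiiiii"
assert struct.calcsize(fmt) == (SHAPE_DIM + 3) * 4  # 36 bytes, sizeof(VelaIO)
```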
3 changes: 3 additions & 0 deletions backends/arm/test/ops/test_squeeze.py
@@ -29,6 +29,7 @@ class SqueezeDim(torch.nn.Module):
         "squeeze3d_dim_neg_2": lambda: (torch.randn(1, 1, 5), -2),
         "squeeze4d_dim_pos_3": lambda: (torch.randn(1, 2, 3, 1), 3),
         "squeeze4d_dim_neg_2": lambda: (torch.randn(1, 5, 1, 5), -2),
+        "squeeze5d_dim_neg_2": lambda: (torch.randn(1, 1, 5, 1, 5), -2),
     }
 
     def forward(self, x: torch.Tensor, dim: int):
@@ -40,6 +41,7 @@ class SqueezeDims(torch.nn.Module):
         "squeeze3d_dims_0_1": lambda: (torch.randn(1, 1, 5), (0, 1)),
         "squeeze4d_dims_0_neg_1": lambda: (torch.randn(1, 5, 5, 1), (0, -1)),
         "squeeze4d_dims_0_neg_2": lambda: (torch.randn(1, 5, 1, 5), (0, -2)),
+        "squeeze5d_dims_0_neg_2": lambda: (torch.randn(1, 1, 5, 1, 5), (0, -2)),
     }
 
     def forward(self, x: torch.Tensor, dims: tuple[int]):
@@ -51,6 +53,7 @@ class Squeeze(torch.nn.Module):
         "squeeze3d": lambda: (torch.randn(1, 1, 5),),
         "squeeze4d_dims": lambda: (torch.randn(1, 5, 5, 1),),
         "squeeze3d_dims_mix": lambda: (torch.randn(1, 5, 1, 5),),
+        "squeeze5d_dims_mix": lambda: (torch.randn(1, 1, 5, 1, 5),),
     }
 
     def forward(self, x: torch.Tensor):
36 changes: 33 additions & 3 deletions backends/arm/test/ops/test_unflatten.py
@@ -9,6 +9,8 @@
 import torch
 from executorch.backends.arm.test import common
 from executorch.backends.arm.test.tester.test_pipeline import (
+    EthosU55PipelineINT,
+    EthosU85PipelineINT,
     TosaPipelineFP,
     TosaPipelineINT,
     VgfPipeline,
@@ -30,8 +32,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return torch.unflatten(x, self.dim, self.sizes)
 
     test_data: dict[str, test_data_t] = {
-        "randn_4d": (lambda: (Unflatten(1, (2, 2)), (torch.randn(3, 4, 5, 1),))),
-        "rand_3d": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(3, 4, 4),))),
+        "rand_3d_batch3": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(3, 4, 4),))),
+        "rand_3d_batch1": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(1, 4, 4),))),
+        "randn_4d_dim1": (lambda: (Unflatten(1, (2, 2)), (torch.randn(3, 4, 5, 1),))),
+        "randn_4d_dim3": (lambda: (Unflatten(3, (2, 2)), (torch.randn(1, 1, 5, 4),))),
     }


@@ -49,7 +53,33 @@ def test_unflatten_int_tosa_FP(test_data: test_data_t):
 @common.parametrize("test_data", Unflatten.test_data)
 def test_unflatten_int_tosa_INT(test_data: test_data_t):
     module, inputs = test_data()
-    pipeline = TosaPipelineINT[input_t](
+    pipeline = TosaPipelineINT[input_t](module, inputs, Unflatten.aten_op)
+    pipeline.run()
+
+
+xfails = {
+    "rand_3d_batch3": "Batch size > 1 currently not supported for FVP tests",
+    "randn_4d_dim1": "Batch size > 1 currently not supported for FVP tests",
+}
+
+
+@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
+@common.XfailIfNoCorstone300
+def test_unflatten_int_u55_INT(test_data: test_data_t):
+    module, inputs = test_data()
+    pipeline = EthosU55PipelineINT[input_t](
+        module,
+        inputs,
+        Unflatten.aten_op,
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
+@common.XfailIfNoCorstone320
+def test_unflatten_int_u85_INT(test_data: test_data_t):
+    module, inputs = test_data()
+    pipeline = EthosU85PipelineINT[input_t](
         module,
         inputs,
         Unflatten.aten_op,
2 changes: 1 addition & 1 deletion backends/arm/test/ops/test_unsqueeze.py
@@ -25,7 +25,7 @@
 
 
 class Unsqueeze(torch.nn.Module):
-    shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3)]
+    shapes: list[int | Sequence[int]] = [5, (5, 5), (5, 4), (5, 4, 3), (1, 5, 4, 3)]
     test_parameters = {}
     for n in shapes:
         test_parameters[f"rand_{n}"] = (torch.randn(n),)
4 changes: 4 additions & 0 deletions backends/arm/test/ops/test_view.py
@@ -44,6 +44,10 @@ class View(torch.nn.Module):
         "rand_4d_4_3": lambda: (torch.rand(5, 10, 1, 1), (1, 25, 2)),
         "rand_4d_4_2": lambda: (torch.rand(2, 50, 1, 1), (1, 100)),
         "rand_4d_2_4_same": lambda: (torch.rand(2, 3, 2, 3), (2, 3, 3, 2)),
+        "rand_4d_5d": lambda: (torch.rand(1, 3, 4, 5), (1, 1, 4, 5, -1)),
+        "rand_5d_5d": lambda: (torch.rand(1, 1, 4, 5, 6), (1, 1, 4, -1, 6)),
+        "rand_5d_3d": lambda: (torch.rand(1, 1, 4, 5, 6), (2, 3, -1)),
+        "rand_3d_5d": lambda: (torch.rand(4, 5, 6), (1, 1, 2, -1, 3)),
     }
 
     rank_product_too_large = {