Update base for Update on "use-pt-pinned-commit for test-arm-{backend,reference}-delegation"

Without this, these builds don't respect the torchgen pinned commit and thus fail with #7546.

Differential Revision: [D67996459](https://our.internmc.facebook.com/intern/diff/D67996459/)

[ghstack-poisoned]
swolchok committed Jan 28, 2025
1 parent 765115b commit ba5b236
Showing 437 changed files with 2,660 additions and 2,743 deletions.
6 changes: 5 additions & 1 deletion backends/cadence/aot/compiler.py
@@ -33,6 +33,7 @@
ExecutorchProgramManager,
to_edge,
)
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import PassResult
from executorch.exir.passes import ToOutVarPass
from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
@@ -186,14 +187,17 @@ def export_to_edge(
edge_prog_manager = to_edge(
expo_program,
compile_config=EdgeCompileConfig(
_skip_dim_order=True,
# Allow specific non-core aten ops in the IR.
_core_aten_ops_exception_list=[
torch.ops.aten._native_batch_norm_legit_functional.default,
torch.ops.aten.linear.default,
torch.ops.aten.linalg_vector_norm.default,
torch.ops.aten.unfold.default,
torch.ops.aten.angle.default,
# Cadence replaced to_dim_order_copy with _to_copy for performance,
# so skip the _to_copy op to get around the dim order check.
# This entry should be removed once Cadence supports dim order.
exir_ops.edge.aten._to_copy.default,
],
),
constant_methods=constant_methods,
73 changes: 73 additions & 0 deletions backends/cadence/aot/replace_ops.py
@@ -11,6 +11,7 @@

# pyre-unsafe

import copy
import math
from operator import neg
from typing import cast, Dict, Iterable, Sequence, Set, Tuple
@@ -35,7 +36,12 @@
from executorch.backends.cadence.aot.utils import get_edge_overload_packet
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.dialects.edge._ops import EdgeOpOverload, EdgeOpOverloadPacket
from executorch.exir.dim_order_utils import get_memory_format
from executorch.exir.pass_base import ExportPass, NodeMetadata, PassResult, ProxyValue
from executorch.exir.passes.dim_order_ops_registry import (
DimOrderOpsMap,
MemoryFormatOpsMap,
)
from torch._subclasses import FakeTensor
from torch.fx.node import Argument

@@ -1799,6 +1805,72 @@ def call_operator(
)


@register_cadence_pass(CadencePassAttribute(opt_level=0))
class ReplaceToDimOrderCopyWithToCopyPass(ExportPass):
"""
dim_order_ops::to_dim_order_copy is not supported, so this is an opt_level=0 pass.
If the dim order is sequential, we don't need the extra work with strides and
can just use to_copy.
"""

def call_operator(
self,
op,
args: Tuple[Argument, ...],
kwargs: Dict[str, Argument],
meta: NodeMetadata,
) -> ProxyValue:
if op not in DimOrderOpsMap:
return super().call_operator(op, args, kwargs, meta)

# new kwargs: dim_order will be swapped for memory_format for the new op
nkwargs = dict(copy.deepcopy(kwargs)) # orig kwargs are immutable

ndim = None

# can always get the shape, assuming rank is specialized

# pyre-ignore[16]: `None` has no attribute `to_tensor`
if isinstance(args[0], ProxyValue) and args[0].is_tensor():
# pyre-ignore[16]: `None` has no attribute `to_tensor`
ndim = args[0].to_tensor().dim()
elif isinstance(args[0], torch.Tensor):
# pyre-ignore[16]: `None` has no attribute `dim`
ndim = args[0].dim()
elif isinstance(args[0], torch.fx.immutable_collections.immutable_list):
# pyre-ignore[6]: Incompatible parameter type
ndim = len(args[0])
else:
assert 0, f"Expecting a Tensor or a ProxyValue but got {type(args[0])}"

# get the "to" memory format for the EdgeOp
contiguous_dim_order = list(range(ndim))
dim_order = nkwargs.pop("dim_order", None)

# Cadence only supports contiguous memory format
assert (
dim_order is None
# pyre-ignore[6]: Incompatible parameter type
or len(dim_order) == 0
or dim_order == contiguous_dim_order
), "Expected dim order in congituous or prevserve memory format, but got {}".format(
dim_order
)

# bring back memory format
# pyre-ignore[6]: Incompatible parameter type
nkwargs["memory_format"] = get_memory_format(dim_order)

memory_format_op = MemoryFormatOpsMap[op]

return super().call_operator(
memory_format_op,
args,
nkwargs,
meta,
)


@register_cadence_pass(CadencePassAttribute(opt_level=0))
class ReplaceFullLikeWithFullPass(ExportPass):
"""
@@ -2108,4 +2180,5 @@ class CadenceReplaceOpsInGraph:
ReplaceSingleElementTensorArgumentsFromFullOpWithScalarPass,
ReplaceAtenAvgPoolWithJarvisAvgPoolPass,
ReplaceAtenLinalgVectorNormWithCadenceLinalgVectorNormPass,
ReplaceToDimOrderCopyWithToCopyPass,
]
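
The registration above makes the new pass part of the default Cadence graph rewrites. As a rough illustration of what it does (this sketch is not part of the commit; it only assumes that get_memory_format, imported in the hunk above, maps a sequential dim order to torch.contiguous_format), the kwargs rewrite boils down to:

# Hypothetical sketch, not from the diff: the dim_order -> memory_format
# rewrite that ReplaceToDimOrderCopyWithToCopyPass performs on the kwargs
# of a to_dim_order_copy node. Example values are made up.
import torch
from executorch.exir.dim_order_utils import get_memory_format

ndim = 4
contiguous_dim_order = list(range(ndim))  # [0, 1, 2, 3]

# A sequential dim order is the contiguous layout, the only one the
# Cadence backend handles (hence the assert in the pass body).
assert get_memory_format(contiguous_dim_order) == torch.contiguous_format

# The pass drops the dim_order kwarg and restores memory_format so the
# node can be re-emitted as a plain _to_copy op.
kwargs = {"dim_order": contiguous_dim_order}
nkwargs = dict(kwargs)
dim_order = nkwargs.pop("dim_order", None)
nkwargs["memory_format"] = get_memory_format(dim_order)
print(nkwargs)  # {'memory_format': torch.contiguous_format}
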
6 changes: 3 additions & 3 deletions backends/cadence/hifi/operators/op_add.cpp
@@ -16,9 +16,9 @@
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

using executorch::aten::Scalar;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::runtime::can_cast;
using executorch::runtime::CppTypeToScalarType;
using executorch::runtime::KernelRuntimeContext;
2 changes: 1 addition & 1 deletion backends/cadence/hifi/operators/op_cat.cpp
@@ -30,7 +30,7 @@ namespace native {

Tensor& cat_out(
RuntimeContext& ctx,
executorch::aten::ArrayRef<Tensor> tensors,
exec_aten::ArrayRef<Tensor> tensors,
int64_t dim,
Tensor& out) {
if (dim < 0) {
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_clamp.cpp
@@ -51,8 +51,8 @@ namespace native {
Tensor& clamp_tensor_out(
RuntimeContext& ctx,
const Tensor& in,
const executorch::aten::optional<Tensor>& min_opt,
const executorch::aten::optional<Tensor>& max_opt,
const exec_aten::optional<Tensor>& min_opt,
const exec_aten::optional<Tensor>& max_opt,
Tensor& out) {
(void)ctx;

8 changes: 4 additions & 4 deletions backends/cadence/hifi/operators/op_div.cpp
@@ -17,10 +17,10 @@
#include <executorch/runtime/platform/assert.h>
#include <cmath>

using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::Scalar;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using torch::executor::Error;

namespace cadence {
@@ -165,7 +165,7 @@ Tensor& div_out_mode(
RuntimeContext& ctx,
const Tensor& a,
const Tensor& b,
executorch::aten::optional<executorch::aten::string_view> mode,
exec_aten::optional<exec_aten::string_view> mode,
Tensor& out) {
ET_KERNEL_CHECK(
ctx,
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_maximum.cpp
@@ -12,9 +12,9 @@
#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::can_cast;
using executorch::runtime::canCast;
using executorch::runtime::CppTypeToScalarType;
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_minimum.cpp
@@ -12,9 +12,9 @@
#include <executorch/kernels/portable/cpu/util/math_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::can_cast;
using executorch::runtime::canCast;
using executorch::runtime::CppTypeToScalarType;
6 changes: 3 additions & 3 deletions backends/cadence/hifi/operators/op_mul.cpp
@@ -15,10 +15,10 @@
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::Scalar;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::can_cast;
using executorch::runtime::CppTypeToScalarType;
using torch::executor::Error;
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_rsqrt.cpp
@@ -11,9 +11,9 @@

#include <executorch/backends/cadence/hifi/kernels/kernels.h>

using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;

namespace cadence {
namespace impl {
6 changes: 3 additions & 3 deletions backends/cadence/hifi/operators/op_sigmoid.cpp
@@ -14,17 +14,17 @@
#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using torch::executor::Error;

namespace cadence {
namespace impl {
namespace HiFi {
namespace native {

using Tensor = executorch::aten::Tensor;
using Tensor = exec_aten::Tensor;

Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
(void)ctx;
2 changes: 1 addition & 1 deletion backends/cadence/hifi/operators/op_softmax.cpp
@@ -50,7 +50,7 @@ Tensor& softmax_out(
// Adjust for negative dim
dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim;

const executorch::aten::optional<int64_t>& dim_t = dim;
const exec_aten::optional<int64_t>& dim_t = dim;
const size_t d = ET_NORMALIZE_IX(dim_t.value(), in.dim());
const size_t size = in.size(d);

6 changes: 3 additions & 3 deletions backends/cadence/hifi/operators/op_sub.cpp
@@ -16,10 +16,10 @@
#include <executorch/runtime/kernel/kernel_includes.h>
#include <executorch/runtime/platform/assert.h>

using exec_aten::Scalar;
using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::Scalar;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using executorch::runtime::can_cast;
using executorch::runtime::CppTypeToScalarType;
using torch::executor::Error;
4 changes: 2 additions & 2 deletions backends/cadence/hifi/operators/op_tanh.cpp
@@ -11,9 +11,9 @@
#include <executorch/runtime/kernel/kernel_includes.h>
#include <cmath>

using exec_aten::ScalarType;
using exec_aten::Tensor;
using executorch::aten::RuntimeContext;
using executorch::aten::ScalarType;
using executorch::aten::Tensor;
using torch::executor::Error;

namespace cadence {
8 changes: 4 additions & 4 deletions backends/cadence/hifi/operators/quantized_linear_out.cpp
@@ -219,7 +219,7 @@ void quantized_linear_out(
int64_t out_zero_point,
__ET_UNUSED const optional<Tensor>& offset,
Tensor& out) {
if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
if (out.scalar_type() == exec_aten::ScalarType::Byte) {
_quantized_linear_asym8u(
in,
weight,
@@ -231,7 +231,7 @@
out_zero_point,
offset,
out);
} else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
} else if (out.scalar_type() == exec_aten::ScalarType::Char) {
_quantized_linear_asym8s(
in,
weight,
@@ -261,7 +261,7 @@ void quantized_linear_per_tensor_out(
int64_t out_zero_point,
__ET_UNUSED const optional<Tensor>& offset,
Tensor& out) {
if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
if (out.scalar_type() == exec_aten::ScalarType::Byte) {
_quantized_linear_per_tensor_asym8u(
in,
weight,
Expand All @@ -273,7 +273,7 @@ void quantized_linear_per_tensor_out(
out_zero_point,
offset,
out);
} else if (out.scalar_type() == executorch::aten::ScalarType::Char) {
} else if (out.scalar_type() == exec_aten::ScalarType::Char) {
_quantized_linear_per_tensor_asym8s(
in,
weight,
6 changes: 3 additions & 3 deletions backends/vulkan/runtime/VulkanBackend.cpp
@@ -417,10 +417,10 @@ bool maybe_update_scalar_tensor(
executorch::aten::Tensor& scalar_tensor_src) {
const int32_t cur_val = graph->read_symint(ref);
int32_t scalar_tensor_val = 0;
executorch::aten::ScalarType dtype = scalar_tensor_src.scalar_type();
if (dtype == executorch::aten::ScalarType::Int) {
exec_aten::ScalarType dtype = scalar_tensor_src.scalar_type();
if (dtype == exec_aten::ScalarType::Int) {
scalar_tensor_val = *scalar_tensor_src.const_data_ptr<int32_t>();
} else if (dtype == executorch::aten::ScalarType::Long) {
} else if (dtype == exec_aten::ScalarType::Long) {
scalar_tensor_val = int32_t(*scalar_tensor_src.const_data_ptr<int64_t>());
}
bool was_updated = false;
7 changes: 3 additions & 4 deletions codegen/tools/gen_selected_op_variants.py
@@ -17,7 +17,7 @@
from torchgen.code_template import CodeTemplate


ops_and_dtypes_template_str = """((executorch::aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
ops_and_dtypes_template_str = """((exec_aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
ops_and_dtypes_template = CodeTemplate(ops_and_dtypes_template_str)

selected_kernel_dtypes_h_template_str = """#pragma once
@@ -27,7 +27,7 @@
inline constexpr bool should_include_kernel_dtype(
const char *operator_name,
executorch::aten::ScalarType scalar_type
exec_aten::ScalarType scalar_type
) {
return $body;
}
@@ -91,8 +91,7 @@ def write_selected_op_variants(yaml_file_path: str, output_dir: str) -> None:
dtype_set = set([x.split(";")[0] for x in tensor_meta])
dtype_list = sorted([dtype_enum_to_type[x] for x in dtype_set])
conditions = [
"scalar_type == executorch::aten::ScalarType::" + x
for x in dtype_list
"scalar_type == exec_aten::ScalarType::" + x for x in dtype_list
]
body_parts.append(
ops_and_dtypes_template.substitute(
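
For illustration only (the operator name and dtype check below are invented), this is roughly how the clause template shown above is expanded into one branch of the generated return body; the real values come from the selected-operators YAML:

# Hypothetical example, not part of the commit: expanding one clause of the
# generated should_include_kernel_dtype() body with torchgen's CodeTemplate.
from torchgen.code_template import CodeTemplate

ops_and_dtypes_template = CodeTemplate(
    """((exec_aten::string_view(operator_name).compare("$operator_name") == 0)\n && ($dtype_checks))"""
)
clause = ops_and_dtypes_template.substitute(
    operator_name="add.out",
    dtype_checks="scalar_type == exec_aten::ScalarType::Float",
)
print(clause)
# ((exec_aten::string_view(operator_name).compare("add.out") == 0)
#  && (scalar_type == exec_aten::ScalarType::Float))
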
14 changes: 7 additions & 7 deletions codegen/tools/test/test_gen_selected_op_variants.py
@@ -71,13 +71,13 @@ def test_generates_correct_header(self) -> None:
inline constexpr bool should_include_kernel_dtype(
const char *operator_name,
executorch::aten::ScalarType scalar_type
exec_aten::ScalarType scalar_type
) {
return ((executorch::aten::string_view(operator_name).compare("add.out") == 0)
&& (scalar_type == executorch::aten::ScalarType::Float || scalar_type == executorch::aten::ScalarType::Int))
|| ((executorch::aten::string_view(operator_name).compare("mul.out") == 0)
&& (scalar_type == executorch::aten::ScalarType::Float))
|| ((executorch::aten::string_view(operator_name).compare("sub.out") == 0)
return ((exec_aten::string_view(operator_name).compare("add.out") == 0)
&& (scalar_type == exec_aten::ScalarType::Float || scalar_type == exec_aten::ScalarType::Int))
|| ((exec_aten::string_view(operator_name).compare("mul.out") == 0)
&& (scalar_type == exec_aten::ScalarType::Float))
|| ((exec_aten::string_view(operator_name).compare("sub.out") == 0)
&& (true));
}
""",
@@ -124,7 +124,7 @@ def test_generates_correct_header(self) -> None:
inline constexpr bool should_include_kernel_dtype(
const char *operator_name,
executorch::aten::ScalarType scalar_type
exec_aten::ScalarType scalar_type
) {
return true;
}