From 24a3a545ec739e98ed7ebb29876d26767629567b Mon Sep 17 00:00:00 2001
From: Andrew Grebenisan
Date: Thu, 16 Oct 2025 14:03:53 -0700
Subject: [PATCH] Cadence ops: Get rid of linalg vector norm (#15140)

Summary: Not used.

Reviewed By: skrtskrtfb

Differential Revision: D84673741
---
 backends/cadence/aot/ops_registrations.py | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/backends/cadence/aot/ops_registrations.py b/backends/cadence/aot/ops_registrations.py
index 854b2137ae7..795e0a5c684 100644
--- a/backends/cadence/aot/ops_registrations.py
+++ b/backends/cadence/aot/ops_registrations.py
@@ -65,7 +65,6 @@ def _validate_ref_impl_exists() -> None:
         "cadence::dequantize_per_tensor_asym8u",
         "cadence::dequantize_per_tensor_asym32s",
         "cadence::dequantize_per_tensor_asym16u",
-        "cadence::linalg_vector_norm",
         "cadence::quantized_conv2d_nchw",  # We should only support per_tensor variant, should remove
         "cadence::quantize_per_tensor_asym32s",
         "cadence::quantized_relu",  # We should only support per_tensor variant, should remove
@@ -447,7 +446,6 @@ def register_fake(
     "im2row.per_tensor(Tensor input, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, "
     "int in_zero_point, bool channel_last=False) -> (Tensor out)"
 )
-lib.define("linalg_vector_norm(Tensor X) -> (Tensor Y)")
 lib.define(
     "linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)"
 )
@@ -603,7 +601,6 @@ def register_fake(
 lib.define(
     "fully_connected.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)"
 )
-lib.define("linalg_vector_norm.out(Tensor X, *, Tensor(a!) out) -> Tensor(a!)")
 lib.define(
     "quantized_fully_connected.out(Tensor src, Tensor weight, Tensor bias, int src_zero_point, "
     "Tensor weight_zero_point, Tensor out_multiplier, Tensor out_shift, int out_zero_point, Tensor? offset, *, Tensor(a!) out) -> Tensor(a!)"
@@ -2007,15 +2004,6 @@ def im2row_per_tensor_meta(
     return input.new_empty(output_size, dtype=input.dtype)
 
 
-# Define the abstract implementations of the operators as required
-@register_fake("cadence::linalg_vector_norm")
-def linalg_vector_norm_meta(
-    X: torch.Tensor,
-) -> torch.Tensor:
-    # Output of norm is a scalar, so we return a [] tensor
-    return X.new_empty([], dtype=X.dtype)
-
-
 @register_fake("cadence::linalg_svd")
 def linalg_svd_meta(
     A: torch.Tensor,
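
Note: for context, below is a minimal, self-contained sketch of the registration
pattern this patch removes: a torch.library schema definition plus a fake (meta)
implementation used only for shape and dtype propagation during export. The library
fragment name, namespace, and imports are illustrative assumptions, not the exact
Cadence setup.

    import torch
    from torch.library import Library, register_fake

    # Hypothetical library fragment standing in for the Cadence "lib" object.
    lib = Library("cadence_example", "DEF")

    # Schema definitions: a functional variant and an out variant.
    lib.define("linalg_vector_norm(Tensor X) -> (Tensor Y)")
    lib.define("linalg_vector_norm.out(Tensor X, *, Tensor(a!) out) -> Tensor(a!)")

    # Fake (meta) implementation: produces only the output shape/dtype, no data.
    @register_fake("cadence_example::linalg_vector_norm")
    def linalg_vector_norm_meta(X: torch.Tensor) -> torch.Tensor:
        # The norm reduces the whole tensor to a scalar, hence a [] shaped output.
        return X.new_empty([], dtype=X.dtype)

Removing the schema and the fake implementation together also keeps
_validate_ref_impl_exists consistent: once the op is no longer defined, it no longer
needs an entry in the exclusion list for ops that lack a reference implementation.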