From f7b577b8f40fe70a5acca6597f79eedfd15449d1 Mon Sep 17 00:00:00 2001
From: Justin Chu
Date: Tue, 23 Sep 2025 11:59:36 -0700
Subject: [PATCH 1/3] [torchlib] Simplify linalg_vector_norm to remove the
 redundant Abs

This happens in some of the LORA models. When we use ReduceL1/ReduceL2 or
when ord is an even number, we don't need to take Abs of the input

Signed-off-by: Justin Chu
---
 onnxscript/function_libs/torch_lib/ops/linalg.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/onnxscript/function_libs/torch_lib/ops/linalg.py b/onnxscript/function_libs/torch_lib/ops/linalg.py
index 05bac181ca..516f9c8b4c 100644
--- a/onnxscript/function_libs/torch_lib/ops/linalg.py
+++ b/onnxscript/function_libs/torch_lib/ops/linalg.py
@@ -330,8 +330,9 @@ def aten_linalg_vector_norm(
         keepdim = False
     else:
         dim = op.Reshape(dim, op.Constant(value_ints=[-1]))
-    self = op.Abs(self)
 
+    if math.isinf(ord):
+        self = op.Abs(self)
     if ord > 0:
         return op.ReduceMax(self, dim, keepdims=keepdim)
     else:
@@ -345,6 +346,9 @@ def aten_linalg_vector_norm(
     elif ord == 2.0:
         return op.ReduceL2(self, dim, keepdims=keepdim)
     else:
+        if ord % 2 != 0:
+            # Not-even integer, use abs
+            self = op.Abs(self)
         self_pow = op.Pow(self, ord)
         exp = op.CastLike(1 / ord, self)
         return op.Pow(op.ReduceSum(self_pow, dim, keepdims=keepdim), exp)

From 87f4aacda75d2087f6ade27ee5f9a11925097428 Mon Sep 17 00:00:00 2001
From: Justin Chu
Date: Tue, 23 Sep 2025 12:02:40 -0700
Subject: [PATCH 2/3] update

Signed-off-by: Justin Chu
---
 onnxscript/function_libs/torch_lib/ops/linalg.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/onnxscript/function_libs/torch_lib/ops/linalg.py b/onnxscript/function_libs/torch_lib/ops/linalg.py
index 516f9c8b4c..1504ce0cbd 100644
--- a/onnxscript/function_libs/torch_lib/ops/linalg.py
+++ b/onnxscript/function_libs/torch_lib/ops/linalg.py
@@ -346,7 +346,7 @@ def aten_linalg_vector_norm(
     elif ord == 2.0:
         return op.ReduceL2(self, dim, keepdims=keepdim)
     else:
-        if ord % 2 != 0:
+        if ord < 0 or ord % 2 != 0:
             # Not-even integer, use abs
             self = op.Abs(self)
         self_pow = op.Pow(self, ord)

From 5a8e7ee5a891670ff1287920f5e6de0390feddbe Mon Sep 17 00:00:00 2001
From: Justin Chu
Date: Tue, 23 Sep 2025 12:04:21 -0700
Subject: [PATCH 3/3] comment

Signed-off-by: Justin Chu
---
 onnxscript/function_libs/torch_lib/ops/linalg.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/onnxscript/function_libs/torch_lib/ops/linalg.py b/onnxscript/function_libs/torch_lib/ops/linalg.py
index 1504ce0cbd..c9d870bd86 100644
--- a/onnxscript/function_libs/torch_lib/ops/linalg.py
+++ b/onnxscript/function_libs/torch_lib/ops/linalg.py
@@ -347,7 +347,7 @@ def aten_linalg_vector_norm(
         return op.ReduceL2(self, dim, keepdims=keepdim)
     else:
         if ord < 0 or ord % 2 != 0:
-            # Not-even integer, use abs
+            # Not an even integer (could be odd, fractional or negative), use Abs
            self = op.Abs(self)
         self_pow = op.Pow(self, ord)
         exp = op.CastLike(1 / ord, self)