diff --git a/paddle/fluid/pir/dialect/op_generator/vjp_interface_black_list.py b/paddle/fluid/pir/dialect/op_generator/vjp_interface_black_list.py
index c63e0c4e41833..d5f1fed6cac98 100644
--- a/paddle/fluid/pir/dialect/op_generator/vjp_interface_black_list.py
+++ b/paddle/fluid/pir/dialect/op_generator/vjp_interface_black_list.py
@@ -24,7 +24,6 @@
 vjp_interface_black_list = [
-    'frobenius_norm',
     'write_to_array',
     'fused_attention',
     'fused_feedforward',
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 47eda81f5d0ca..5fccce1f2b2ac 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -246,8 +246,8 @@
   invoke : zeros_like(out_grad)

 - backward_op : frobenius_norm_grad
-  forward : frobenius_norm(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keep_dim, bool reduce_all)
+  forward : frobenius_norm(Tensor x, IntArray axis, bool keep_dim, bool reduce_all) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis, bool keep_dim, bool reduce_all)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 01acb338c987b..8b9e5ff45dc9b 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -452,10 +452,10 @@
   inplace: (x -> out)

 - op : frobenius_norm
-  args : (Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all)
+  args : (Tensor x, IntArray axis, bool keep_dim, bool reduce_all)
   output : Tensor(out)
   infer_meta :
-    func : ReduceInferMetaBase
+    func : ReduceIntArrayAxisInferMetaBase
   kernel :
     func : frobenius_norm
   backward : frobenius_norm_grad
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index 9f8def740385b..2dc2657491068 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -256,7 +256,7 @@
   args : (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
   output : Tensor(out)
   infer_meta :
-    func : ReduceInferMetaBase
+    func : ReduceIntArrayAxisInferMetaBase
   kernel :
     func : frobenius_norm
     param : [x, axis, keepdim, reduce_all]
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 1dd9355549c02..57d7c6370469f 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -3263,6 +3263,7 @@ void ReduceInferMeta(const MetaTensor& x,
   if (axis.empty()) {
     reduce_all = true;
   }
+  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out);
 }
diff --git a/paddle/phi/kernels/frobenius_norm_grad_kernel.h b/paddle/phi/kernels/frobenius_norm_grad_kernel.h
index 65db8dd9e0a10..78494c4423f7e 100644
--- a/paddle/phi/kernels/frobenius_norm_grad_kernel.h
+++ b/paddle/phi/kernels/frobenius_norm_grad_kernel.h
@@ -16,6 +16,7 @@

 #include <vector>

+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {
@@ -25,7 +26,7 @@ void FrobeniusNormGradKernel(const Context& ctx,
                              const DenseTensor& x,
                              const DenseTensor& out,
                              const DenseTensor& dout,
-                             const std::vector<int64_t>& axis,
+                             const IntArray& axis,
                              bool keep_dim,
                              bool reduce_all,
                              DenseTensor* dx);
diff --git a/paddle/phi/kernels/frobenius_norm_kernel.h b/paddle/phi/kernels/frobenius_norm_kernel.h
index 30122cb416094..45ddb6123b85d 100644
--- a/paddle/phi/kernels/frobenius_norm_kernel.h
+++ b/paddle/phi/kernels/frobenius_norm_kernel.h
@@ -16,6 +16,7 @@

 #include <vector>

+#include "paddle/phi/common/int_array.h"
 #include "paddle/phi/core/dense_tensor.h"

 namespace phi {
@@ -23,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void FrobeniusNormKernel(const Context& ctx,
                          const DenseTensor& x,
-                         const std::vector<int64_t>& axis,
+                         const IntArray& axis,
                          bool keep_dim,
                          bool reduce_all,
                          DenseTensor* out);
diff --git a/paddle/phi/kernels/gpu/frobenius_norm_kernel.cu b/paddle/phi/kernels/gpu/frobenius_norm_kernel.cu
index f2be0f073a87d..5bb59357bc976 100644
--- a/paddle/phi/kernels/gpu/frobenius_norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/frobenius_norm_kernel.cu
@@ -24,14 +24,14 @@ namespace phi {
 template <typename T, typename Context>
 void FrobeniusNormKernel(const Context& dev_ctx,
                          const DenseTensor& x,
-                         const std::vector<int64_t>& dims,
+                         const IntArray& dims,
                          bool keep_dim,
                          bool reduce_all,
                          DenseTensor* out) {
-  reduce_all = recompute_reduce_all(x, dims, reduce_all);
+  reduce_all = recompute_reduce_all(x, dims.GetData(), reduce_all);
   auto out_dtype = x.dtype();
   phi::Reduce<T, kps::AddFunctor, kps::SquareFunctor>(
-      dev_ctx, x, reduce_all, dims, keep_dim, out_dtype, out);
+      dev_ctx, x, reduce_all, dims.GetData(), keep_dim, out_dtype, out);
   SqrtKernel<T, Context>(dev_ctx, *out, out);
 }
diff --git a/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
index 385ea68e6e707..7954441f30c2b 100644
--- a/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/frobenius_norm_grad_kernel_impl.h
@@ -25,13 +25,13 @@ void FrobeniusNormGradKernel(const Context& ctx,
                              const DenseTensor& x,
                              const DenseTensor& out,
                              const DenseTensor& dout,
-                             const std::vector<int64_t>& axis,
+                             const IntArray& axis,
                              bool keep_dim,
                              bool reduce_all,
                              DenseTensor* dx) {
-  reduce_all = recompute_reduce_all(x, axis, reduce_all);
+  reduce_all = recompute_reduce_all(x, axis.GetData(), reduce_all);
   ReduceGradKernel<Context, T, funcs::FrobeniusNormGradFunctor>(
-      ctx, x, out, dout, axis, keep_dim, reduce_all, dx);
+      ctx, x, out, dout, axis.GetData(), keep_dim, reduce_all, dx);
 }

 }  // namespace phi
diff --git a/paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h b/paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h
index 7dbc3ab3af7ba..eab028a1caccf 100644
--- a/paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h
+++ b/paddle/phi/kernels/impl/frobenius_norm_kernel_impl.h
@@ -23,13 +23,13 @@ namespace phi {
 template <typename T, typename Context>
 void FrobeniusNormKernel(const Context& ctx,
                          const DenseTensor& x,
-                         const std::vector<int64_t>& axis,
+                         const IntArray& axis,
                          bool keep_dim,
                          bool reduce_all,
                          DenseTensor* out) {
-  reduce_all = recompute_reduce_all(x, axis, reduce_all);
+  reduce_all = recompute_reduce_all(x, axis.GetData(), reduce_all);
   Reduce<Context, T, funcs::FrobeniusNormFunctor>(
-      ctx, x, reduce_all, axis, keep_dim, x.dtype(), out);
+      ctx, x, reduce_all, axis.GetData(), keep_dim, x.dtype(), out);
 }

 }  // namespace phi
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 71016a2208c15..285c0dec929d3 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -382,7 +382,7 @@ def frobenius_norm(input, dim=None, keepdim=False, name=None):
                 "The dim of frobenius norm op should be None or two elements list!"
             )

-        if in_dynamic_mode():
+        if in_dynamic_or_pir_mode():
             if dim is None:
                 return _C_ops.frobenius_norm(input, [], keepdim, True)
             return _C_ops.frobenius_norm(input, dim, keepdim, False)
diff --git a/test/legacy_test/test_norm_all.py b/test/legacy_test/test_norm_all.py
index 58be677975742..86eea3a4c8eb0 100644
--- a/test/legacy_test/test_norm_all.py
+++ b/test/legacy_test/test_norm_all.py
@@ -102,10 +102,10 @@ def setUp(self):
         self.outputs = {'Out': norm}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_pir=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)

     def init_test_case(self):
         self.shape = [2, 3, 4, 5]
@@ -126,7 +126,7 @@ def init_dtype(self):
         self.dtype = "float32"

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_pir=True)


 class TestPnormOp(OpTest):
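Usage sketch (not part of the patch): a minimal Python check of the frobenius_norm path this series touches, assuming a Paddle build that includes these changes with PIR enabled; `paddle.linalg.norm` with `p='fro'` dispatches to the `_C_ops.frobenius_norm` call shown in the linalg.py hunk above.

import paddle

# Minimal sketch: exercise the frobenius_norm path touched by this patch.
# Assumes a Paddle build that includes these changes (PIR-enabled).
x = paddle.rand([2, 3, 4, 5], dtype='float32')

# Reduce over two dims; dispatches to _C_ops.frobenius_norm(input, dim, keepdim, False).
out = paddle.linalg.norm(x, p='fro', axis=[1, 2], keepdim=False)
print(out.shape)  # expected: [2, 5]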