Upgrade and align the functionality of paddle.linalg.norm - add vector_norm #61155

Merged
merged 6 commits on Jan 29, 2024
Changes from 3 commits
2 changes: 2 additions & 0 deletions python/paddle/linalg.py
@@ -40,11 +40,13 @@
solve,
svd,
triangular_solve,
vector_norm,
)

__all__ = [
'cholesky',
'norm',
'vector_norm',
'cond',
'cov',
'corrcoef',
284 changes: 228 additions & 56 deletions python/paddle/tensor/linalg.py
@@ -280,6 +280,231 @@ def __check_input(x, y):
return out


def vector_norm(x, p=2.0, axis=None, keepdim=False, name=None):
"""
Calculate the p-order vector norm along the given axis (or axes) of the Tensor `x`.
Returns the vector norm (the 1-norm, the Euclidean or 2-norm, and in general the p-norm)
of the input tensor.
Args:
x (Tensor): The input Tensor, with data type float32 or float64.
p (int|float, optional): The order of the norm. Default 2.0.
axis (int|list|tuple, optional): The axis (or axes) along which the norm is computed. If None, the norm is computed over all elements of `x`. Default None.
keepdim (bool, optional): Whether to retain the reduced dimensions in the output Tensor. If True, the result keeps the same number of dimensions as `x`. Default False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: Results of the vector_norm operation on the specified axis of the input tensor.
Its data type is the same as that of `x`, except when `p=0`, where counting non-zero elements yields an int64 result.
Examples:
.. code-block:: python

>>> import paddle
>>> import numpy as np
>>> x = paddle.arange(24, dtype="float32").reshape([2, 3, 4]) - 12
>>> print(x)
Tensor(shape=[2, 3, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[[[-12., -11., -10., -9. ],
[-8. , -7. , -6. , -5. ],
[-4. , -3. , -2. , -1. ]],
[[ 0. , 1. , 2. , 3. ],
[ 4. , 5. , 6. , 7. ],
[ 8. , 9. , 10., 11.]]])
>>> out_vector_norm = paddle.linalg.vector_norm(x=x,p=2,axis=None,keepdim=False)
>>> print(out_vector_norm)
Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
34.)
>>> out_vector_norm = paddle.linalg.vector_norm(x=x,p=0,axis=[0,1],keepdim=False)
>>> print(out_vector_norm)
Tensor(shape=[4], dtype=int64, place=Place(gpu:0), stop_gradient=True,
[5, 6, 6, 6])
>>> out_vector_norm = paddle.linalg.vector_norm(x=x,p=np.inf,axis=[1,2],keepdim=False)
>>> print(out_vector_norm)
Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[12., 11.])
>>> out_vector_norm = paddle.linalg.vector_norm(x=x,p=1,axis=1,keepdim=False)
>>> print(out_vector_norm)
Tensor(shape=[2, 4], dtype=float32, place=Place(gpu:0), stop_gradient=True,
[[24., 21., 18., 15.],
[12., 15., 18., 21.]])
"""

def zero_norm(
input, porder=None, axis=axis, keepdim=False, asvector=False, name=None
):
return paddle.count_nonzero(
input, axis=axis, keepdim=keepdim, name=name
)

def inf_norm(
input, porder=None, axis=axis, keepdim=False, asvector=False, name=None
):
if in_dynamic_mode():
out = _C_ops.abs(input)
if porder == np.float64('inf'):
return _C_ops.max(out, axis, keepdim)
else:
return _C_ops.min(out, axis, keepdim)
else:
helper = LayerHelper('inf_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
helper.append_op(
type='abs', inputs={'X': input}, outputs={'Out': out}
)
reduce_out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)
reduce_all, axis = _get_reduce_axis(axis, x)
reduce_type = (
'reduce_max' if porder == np.float64('inf') else 'reduce_min'
)
helper.append_op(
type=reduce_type,
inputs={'X': out},
outputs={'Out': reduce_out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all,
},
)

return reduce_out

def vector_norm_axis_tuple(
input, porder=2, axis=None, keepdim=False, asvector=False, name=None
):
"""
NOTE:
This function calculates the vector norm for dim >= 2.
"""
if in_dynamic_or_pir_mode():
abs_out = _C_ops.abs(input)
pow_out = _C_ops.pow(abs_out, porder)
sum_out = _C_ops.sum(pow_out, axis, None, keepdim)
out = _C_ops.pow(sum_out, float(1.0 / porder))
return out

block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(
dtype=block.input_dtype()
)
abs_out = block.create_variable_for_type_inference(
dtype=block.input_dtype()
)
block.append_op(
type='abs', inputs={'X': input}, outputs={'Out': abs_out}
)
pow_out = block.create_variable_for_type_inference(
dtype=block.input_dtype()
)

block.append_op(
type='pow',
inputs={'X': abs_out},
outputs={'Out': pow_out},
attrs={'factor': porder},
)
sum_out = block.create_variable_for_type_inference(
dtype=block.input_dtype()
)
reduce_all, axis = _get_reduce_axis(axis, x)
block.append_op(
type='reduce_sum',
inputs={'X': pow_out},
outputs={'Out': sum_out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all,
},
)
block.append_op(
type='pow',
inputs={'X': sum_out},
outputs={'Out': out},
attrs={'factor': float(1.0 / porder)},
)
return out

def vector_norm_axis_int(
input, porder=2, axis=None, keepdim=False, asvector=False, name=None
):
"""
NOTE:
This function calculates the vector norm for len(axis) == 1.
"""
if in_dynamic_or_pir_mode():
if axis is None:
axis = -1
return _C_ops.p_norm(input, porder, axis, 1e-12, keepdim, asvector)
else:
if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None:
check_type(axis, 'axis', (int), 'p_norm')
check_variable_and_dtype(
input,
'input',
['float16', 'uint16', 'float32', 'float64'],
'p_norm',
)

attrs = {
'axis': axis if axis is not None else -1,
'porder': float(porder) if porder is not None else 2.0,
'keepdim': keepdim,
'asvector': asvector,
'epsilon': 1e-12,
}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)

helper.append_op(
type='p_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs,
)
return out

if not isinstance(p, (int, float)):
raise ValueError(f"only valid p type is int and float, found {type(p)}")

asvector = False
if axis is None:
axis = -1
asvector = True

if isinstance(axis, tuple):
axis = list(axis)
if isinstance(axis, list) and len(axis) == 1:
axis = axis[0]

# when len(axis) == 1, use the original op to calculate
if isinstance(axis, int):
return vector_norm_axis_int(
x,
axis=axis,
porder=p,
keepdim=keepdim,
asvector=asvector,
name=name,
)

# when len(axis) >= 2, calculate by combining other Python APIs
elif isinstance(axis, list):
if p == np.inf or p == -np.inf:
return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
elif p == 0:
return zero_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
else:
return vector_norm_axis_tuple(
x, porder=p, axis=axis, keepdim=keepdim, name=name
)
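
For reference, the reductions above implement (sum_i |x_i|^p)^(1/p) for a finite non-zero p (vector_norm_axis_tuple); p=0 falls back to zero_norm, which counts non-zero entries, and p=+/-inf falls back to inf_norm, which takes the max/min absolute value. A minimal NumPy cross-check (a sketch, not part of this diff, assuming a recent NumPy) reproducing the values from the docstring example:

>>> import numpy as np
>>> x = np.arange(24, dtype=np.float64).reshape(2, 3, 4) - 12
>>> print(((np.abs(x) ** 2).sum()) ** 0.5)   # p=2 over all elements
34.0
>>> print(np.count_nonzero(x, axis=(0, 1)))  # p=0: count non-zero entries
[5 6 6 6]
>>> print(np.abs(x).max(axis=(1, 2)))        # p=+inf: max absolute value
[12. 11.]
>>> print(np.abs(x).sum(axis=1))             # p=1: sum of absolute values
[[24. 21. 18. 15.]
 [12. 15. 18. 21.]]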


Contributor @zhwesky2010 commented on Jan 25, 2024:

Please also delete the original internal vector_norm function as part of this change; the new API effectively exposes the previous internal vector_norm and can cover its functionality.
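
To illustrate the point (a sketch, not part of this diff): with the changes below, norm() with a numeric p delegates to the public vector_norm(), so the old internal helper becomes redundant. Assuming a Paddle build that contains this PR:

>>> import paddle
>>> x = paddle.arange(24, dtype="float32").reshape([2, 3, 4]) - 12
>>> a = paddle.linalg.norm(x, p=3, axis=-1)         # now routed through vector_norm
>>> b = paddle.linalg.vector_norm(x, p=3, axis=-1)  # the new public API, called directly
>>> print(bool(paddle.allclose(a, b)))              # expected: True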

def norm(x, p='fro', axis=None, keepdim=False, name=None):
"""

@@ -521,56 +746,6 @@ def nuclear_norm(input, axis=axis, keepdim=False, name=None):

return out

def vector_norm(
input, porder=None, axis=None, keepdim=False, asvector=False, name=None
):
"""
Calculate the p-order vector norm for certain dimension of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
porder (float, optional): None for porder=2.0. Default None.
axis (int, optional): None for last dimension. Default None.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
asvector (bool, optional): Whether keep the result as a vector, Default False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
"""
if in_dynamic_or_pir_mode():
if axis is None:
axis = -1
return _C_ops.p_norm(input, porder, axis, 1e-12, keepdim, asvector)
else:
if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None:
check_type(axis, 'axis', (int), 'p_norm')
check_variable_and_dtype(
input,
'input',
['float16', 'uint16', 'float32', 'float64'],
'p_norm',
)

attrs = {
'axis': axis if axis is not None else -1,
'porder': float(porder) if porder is not None else 2.0,
'keepdim': keepdim,
'asvector': asvector,
'epsilon': 1e-12,
}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
)

helper.append_op(
type='p_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs,
)
return out

def inf_norm(
input, porder=None, axis=axis, keepdim=False, asvector=False, name=None
):
@@ -673,10 +848,9 @@ def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None):
elif isinstance(p, (int, float)):
return vector_norm(
x,
porder=p,
p=p,
axis=axis,
keepdim=keepdim,
asvector=True,
name=name,
)
else:
@@ -695,10 +869,9 @@ def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None):
if p == "fro":
return vector_norm(
x,
porder=2,
p=2,
axis=axis,
keepdim=keepdim,
asvector=False,
name=name,
)

@@ -710,9 +883,8 @@ def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None):
return vector_norm(
x,
axis=axis,
porder=p,
p=p,
keepdim=keepdim,
asvector=False,
name=name,
)
else: