Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【SCU】【Paddle Tensor No.25】新增paddle.vecdot , paddle.linalg.vecdot #69477

Merged
merged 33 commits into from
Nov 27, 2024
Merged
Show file tree
Hide file tree
Changes from 14 commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
9acb5b0
add_vecdot
PolaKuma Nov 18, 2024
74c2c27
update vecdot
PolaKuma Nov 18, 2024
7888577
fix codestyle
PolaKuma Nov 18, 2024
7b1e0dd
fix codestyle
PolaKuma Nov 19, 2024
f714d88
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 19, 2024
ebd6c02
Update test/legacy_test/test_linalg_vecdot.py
PolaKuma Nov 19, 2024
4193810
Update python/paddle/tensor/linalg.py
PolaKuma Nov 19, 2024
abffd22
update_vecdot
PolaKuma Nov 19, 2024
ef6edd2
fix_codestyle
PolaKuma Nov 19, 2024
2696682
fix codestyle
PolaKuma Nov 19, 2024
fe9ec10
fix codestyle
PolaKuma Nov 19, 2024
e77b6ca
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 19, 2024
9e1be50
update
PolaKuma Nov 20, 2024
b851f62
Merge branch 'develop' into add_vecdot
PolaKuma Nov 20, 2024
12c2e11
skip_xpu
PolaKuma Nov 21, 2024
64a53fe
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 21, 2024
31f669f
Update test_linalg_vecdot.py
PolaKuma Nov 21, 2024
0c014c7
fix codestyle
PolaKuma Nov 21, 2024
1a63df0
fix codestyle again
PolaKuma Nov 21, 2024
b67324c
Merge branch 'develop' into add_vecdot
PolaKuma Nov 21, 2024
a970f65
fix
PolaKuma Nov 22, 2024
3034ea1
Merge branch 'add_vecdot' of https://github.com/PolaKuma/Paddle into …
PolaKuma Nov 22, 2024
77744cb
fix
PolaKuma Nov 22, 2024
50b557d
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 22, 2024
e299388
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 25, 2024
06793f5
fix
PolaKuma Nov 25, 2024
18c5ed5
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 25, 2024
314606a
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 26, 2024
0d65079
fix again
PolaKuma Nov 26, 2024
254eb8d
Merge branch 'add_vecdot' of https://github.com/PolaKuma/Paddle into …
PolaKuma Nov 26, 2024
9e90b92
change_example
PolaKuma Nov 26, 2024
0690cd1
delete
PolaKuma Nov 26, 2024
dc52bc3
Merge branch 'PaddlePaddle:develop' into add_vecdot
PolaKuma Nov 26, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions python/paddle/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -241,6 +241,7 @@
t_,
transpose,
transpose_,
vecdot,
)
from .tensor.logic import (
allclose,
Expand Down Expand Up @@ -1211,4 +1212,5 @@
'to_dlpack',
'inf',
'newaxis',
'vecdot',
]
2 changes: 2 additions & 0 deletions python/paddle/linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@
svd,
svd_lowrank,
triangular_solve,
vecdot,
vector_norm,
)

Expand All @@ -54,6 +55,7 @@
'cholesky_inverse',
'norm',
'matrix_norm',
'vecdot',
'vector_norm',
'cond',
'cov',
Expand Down
1 change: 1 addition & 0 deletions python/paddle/tensor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,7 @@
t_,
transpose,
transpose_,
vecdot,
)
from .logic import ( # noqa: F401
allclose,
Expand Down
43 changes: 43 additions & 0 deletions python/paddle/tensor/linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -1867,6 +1867,49 @@ def dot(x: Tensor, y: Tensor, name: str | None = None) -> Tensor:
return out


def vecdot(
    x: Tensor,
    y: Tensor,
    axis: int = -1,
    name: str | None = None,
) -> Tensor:
    """
    Computes the dot product of two tensors along a specified axis.

    The two tensors are multiplied element-wise (with ``x`` conjugated when it
    is complex) and the products are summed along the specified axis. Tensors
    of any dimensionality are supported, including 0-D tensors, as long as the
    shapes of `x` and `y` are broadcastable along the specified axis.

    Args:
        x (Tensor): The first input tensor. It should be a tensor with dtype of
            float32, float64, int32, int64, complex64, or complex128.
        y (Tensor): The second input tensor. Its shape must be broadcastable
            with `x` along the specified `axis`, and it must have the same
            dtype as `x`.
        axis (int, optional): The axis along which to compute the dot product.
            Default is -1, which indicates the last axis.
        name (str|None, optional): Name of the output. Default is None. It's
            used to print debug info for developers. Details:
            :ref:`api_guide_Name`

    Returns:
        Tensor: A tensor containing the dot product of `x` and `y` along the
        specified axis.

    Examples:

        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([1, 2, 3], dtype='float32')
            >>> y = paddle.to_tensor([4, 5, 6], dtype='float32')
            >>> result = paddle.linalg.vecdot(x, y)
            >>> print(result)
            Tensor(shape=[], dtype=float32, place=Place(cpu), stop_gradient=True,
            32.)

            >>> x2 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
            >>> y2 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
            >>> result2 = paddle.linalg.vecdot(x2, y2, axis=1)
            >>> print(result2)
            Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
            [14., 77.])
    """
    # Conjugating the first operand makes this a true inner product for
    # complex dtypes; for real dtypes conj() is the identity, so this is
    # simply sum(x * y) along `axis`.
    out = (x.conj() * y).sum(axis=axis)
    return out


def cov(
x: Tensor,
rowvar: bool = True,
Expand Down
261 changes: 261 additions & 0 deletions test/legacy_test/test_linalg_vecdot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,261 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import sys
import unittest

import numpy as np

import paddle

if sys.platform == 'win32':
RTOL = {'float32': 1e-02, 'float64': 1e-04}
ATOL = {'float32': 1e-02, 'float64': 1e-04}
else:
RTOL = {'float32': 1e-06, 'float64': 1e-15}
ATOL = {'float32': 1e-06, 'float64': 1e-15}


class VecDotTestCase(unittest.TestCase):
    """Check paddle.vecdot against a NumPy reference, in both dygraph and
    static-graph modes, on every available place (CPU plus CUDA when
    compiled in)."""

    def setUp(self):
        # Configuration first, then the data, then the reference result.
        self.init_config()
        self.generate_input()
        self.generate_expected_output()
        places = [paddle.CPUPlace()]
        if paddle.is_compiled_with_cuda():
            places.append(paddle.CUDAPlace(0))
        self.places = places

    def generate_input(self):
        # Fixed seed keeps runs reproducible across places and modes.
        np.random.seed(123)
        self.x = np.random.random(self.input_shape).astype(self.dtype)
        self.y = np.random.random(self.input_shape).astype(self.dtype)

    def generate_expected_output(self):
        # Reference: element-wise product reduced over the configured axis.
        self.expected_output = (self.x * self.y).sum(axis=self.axis)

    def init_config(self):
        # Subclasses override this to vary dtype, shape, and axis.
        self.dtype = 'float64'
        self.input_shape = (3, 4)
        self.axis = -1

    def test_dygraph(self):
        for place in self.places:
            paddle.disable_static(place)
            xt = paddle.to_tensor(self.x, dtype=self.dtype, place=place)
            yt = paddle.to_tensor(self.y, dtype=self.dtype, place=place)
            out = paddle.vecdot(xt, yt, axis=self.axis)
            np.testing.assert_allclose(
                out.numpy(),
                self.expected_output,
                rtol=RTOL[self.dtype],
                atol=ATOL[self.dtype],
            )

    def test_static(self):
        paddle.enable_static()
        for place in self.places:
            main_prog = paddle.static.Program()
            startup_prog = paddle.static.Program()
            with paddle.static.program_guard(main_prog, startup_prog):
                x = paddle.static.data(
                    name="x", shape=self.input_shape, dtype=self.dtype
                )
                y = paddle.static.data(
                    name="y", shape=self.input_shape, dtype=self.dtype
                )

                out = paddle.vecdot(x, y, axis=self.axis)
                exe = paddle.static.Executor(place)
                (fetched,) = exe.run(
                    feed={"x": self.x, "y": self.y},
                    fetch_list=[out],
                )

                np.testing.assert_allclose(
                    fetched,
                    self.expected_output,
                    rtol=RTOL[self.dtype],
                    atol=ATOL[self.dtype],
                )


class VecDotTestCaseFloat32(VecDotTestCase):
    """Same as the base case, but exercising the float32 dtype."""

    def init_config(self):
        self.axis = -1
        self.input_shape = (3, 4)
        self.dtype = 'float32'


class VecDotTestCaseHigherDim(VecDotTestCase):
    """3-D inputs, reduced along the trailing axis."""

    def init_config(self):
        self.axis = -1
        self.input_shape = (2, 3, 4)
        self.dtype = 'float64'


class VecDotTestCaseAxis(VecDotTestCase):
    """Reduction over a middle axis instead of the default last one."""

    def init_config(self):
        self.axis = 1
        self.input_shape = (3, 4, 5)
        self.dtype = 'float64'


PolaKuma marked this conversation as resolved.
Show resolved Hide resolved
@unittest.skipIf(
    paddle.is_compiled_with_xpu(),
    "Skip XPU for not support uniform(dtype=int)",
)
class VecDotTestCaseError(unittest.TestCase):
    """Negative cases: mismatched shapes and mismatched dtypes must raise.

    Skipped on XPU builds per review: the XPU backend raises differently for
    these inputs, which would break CI.
    """

    def test_axis_mismatch(self):
        # Last-dim sizes 4 vs 5 cannot broadcast, so vecdot must fail.
        with self.assertRaises(ValueError):
            x = paddle.rand([3, 4], dtype="float32")
            y = paddle.rand([3, 5], dtype="float32")
            paddle.vecdot(x, y, axis=-1)

    def test_dtype_mismatch(self):
        # paddle.rand rejects an integer dtype, so constructing `y` (and
        # hence mixing float32 with int32) is expected to raise TypeError.
        with self.assertRaises(TypeError):
            x = paddle.rand([3, 4], dtype="float32")
            y = paddle.rand([3, 4], dtype="int32")
            paddle.vecdot(x, y, axis=-1)
Comment on lines +128 to +132
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这个单测需要跳过XPU:

@unittest.skipIf(
    core.is_compiled_with_xpu(),
    "Skip XPU for not support uniform(dtype=int)",
)



@unittest.skipIf(
    paddle.is_compiled_with_xpu(),
    "Skip XPU for not support complex",
)
class VecDotTestCaseComplex(unittest.TestCase):
    """vecdot must conjugate the first operand for complex inputs, in both
    dygraph and static-graph modes.

    Skipped on XPU builds per review: XPU does not support complex dtypes.
    """

    def run_test_dynamic(self):
        paddle.disable_static()
        x = paddle.to_tensor(
            [[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]], dtype="complex64"
        )
        y = paddle.to_tensor(
            [[9 + 1j, 8 + 2j], [7 + 3j, 6 + 4j]], dtype="complex64"
        )
        result = paddle.vecdot(x, y, axis=-1)
        # Reference follows the inner-product definition: sum(conj(x) * y).
        expected = np.sum((x.numpy().conj() * y.numpy()), axis=-1)
        np.testing.assert_allclose(
            result.numpy(), expected, rtol=1e-5, atol=1e-5
        )

    def run_test_static(self):
        paddle.enable_static()
        place = paddle.CPUPlace()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data(name="x", shape=[2, 2], dtype="complex64")
            y = paddle.static.data(name="y", shape=[2, 2], dtype="complex64")
            result = paddle.vecdot(x, y, axis=-1)
            exe = paddle.static.Executor(place)
            output = exe.run(
                feed={
                    "x": np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]]).astype(
                        "complex64"
                    ),
                    "y": np.array([[9 + 1j, 8 + 2j], [7 + 3j, 6 + 4j]]).astype(
                        "complex64"
                    ),
                },
                fetch_list=[result],
            )[0]
            expected = np.sum(
                np.conj(np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]])).astype(
                    "complex64"
                )
                * np.array([[9 + 1j, 8 + 2j], [7 + 3j, 6 + 4j]]).astype(
                    "complex64"
                ),
                axis=-1,
            )
            np.testing.assert_allclose(output, expected, rtol=1e-5, atol=1e-5)

    def test_complex_conjugate(self):
        # Single entry point so the dygraph half runs before static mode
        # is enabled.
        self.run_test_dynamic()
        self.run_test_static()


class VecDotTestCaseTypePromotion1(unittest.TestCase):
    """Mixing float32 with float64 promotes the result to float64."""

    def test_float32_float64_promotion(self):
        paddle.disable_static()
        a = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]], dtype="float32")
        b = paddle.to_tensor([[5.0, 6.0], [7.0, 8.0]], dtype="float64")
        out = paddle.vecdot(a, b, axis=-1)

        # Reference computed entirely in float64, matching the promotion.
        ref = (a.numpy().astype("float64") * b.numpy()).sum(axis=-1)
        np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6, atol=1e-6)


@unittest.skipIf(
    paddle.is_compiled_with_xpu(),
    "Skip XPU for not support complex",
)
class VecDotTestCaseTypePromotion2(unittest.TestCase):
    """Mixing float64 with complex64 promotes to a complex result.

    Skipped on XPU builds per review: XPU does not support complex dtypes.
    """

    def test_float64_complex64_promotion(self):
        paddle.disable_static()
        x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]], dtype="float64")
        y = paddle.to_tensor(
            [[5 + 6j, 7 + 8j], [9 + 1j, 2 + 3j]], dtype="complex64"
        )
        result = paddle.vecdot(x, y, axis=-1)

        # conj() of a real tensor is a no-op, so the reference is just the
        # promoted element-wise product reduced over the last axis.
        expected = np.sum(x.numpy().astype("complex64") * y.numpy(), axis=-1)
        np.testing.assert_allclose(
            result.numpy(), expected, rtol=1e-5, atol=1e-5
        )


class VecDotTestCaseBroadcast0DTensor(unittest.TestCase):
    """vecdot of two 0-D tensors degenerates to a plain scalar product."""

    def test_0d_tensor_broadcast(self):
        paddle.disable_static()
        a = paddle.to_tensor(2.0, dtype="float32")
        b = paddle.to_tensor(3.0, dtype="float32")
        out = paddle.vecdot(a, b)

        ref = a.numpy() * b.numpy()
        np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6, atol=1e-6)


class VecDotTestCaseBroadcast1DTensor(unittest.TestCase):
    """For two 1-D tensors vecdot matches numpy.dot."""

    def test_1d_tensor_broadcast(self):
        paddle.disable_static()
        a = paddle.to_tensor([1.0, 2.0, 3.0], dtype="float32")
        b = paddle.to_tensor([4.0, 5.0, 6.0], dtype="float32")
        out = paddle.vecdot(a, b)

        ref = np.dot(a.numpy(), b.numpy())
        np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6, atol=1e-6)


class VecDotTestCaseBroadcast1DNDTensor(unittest.TestCase):
    """A 1-D tensor broadcasts against each row of a 2-D tensor."""

    def test_1d_nd_tensor_broadcast(self):
        paddle.disable_static()
        a = paddle.to_tensor([1.0, 2.0], dtype="float32")
        b = paddle.to_tensor([[3.0, 4.0], [5.0, 6.0]], dtype="float32")
        out = paddle.vecdot(a, b, axis=-1)

        ref = (a.numpy() * b.numpy()).sum(axis=-1)
        np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6, atol=1e-6)


class VecDotTestCaseBroadcastNDTensor(unittest.TestCase):
    """A 1-D tensor as the second operand broadcasts against a 2-D first
    operand."""

    def test_nd_nd_tensor_broadcast(self):
        paddle.disable_static()
        a = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]], dtype="float32")
        b = paddle.to_tensor([5.0, 6.0], dtype="float32")
        out = paddle.vecdot(a, b, axis=-1)

        ref = (a.numpy() * b.numpy()).sum(axis=-1)
        np.testing.assert_allclose(out.numpy(), ref, rtol=1e-6, atol=1e-6)


# Allow running this test file directly: `python test_linalg_vecdot.py`.
if __name__ == '__main__':
    unittest.main()