Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add linalg pinv api #35804

Merged
merged 4 commits into from
Sep 17, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions python/paddle/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@
from .tensor.linalg import matrix_power # noqa: F401
from .tensor.linalg import svd # noqa: F401
from .tensor.linalg import eigh # noqa: F401
from .tensor.linalg import pinv # noqa: F401
from .tensor.logic import equal # noqa: F401
from .tensor.logic import greater_equal # noqa: F401
from .tensor.logic import greater_than # noqa: F401
Expand Down
251 changes: 251 additions & 0 deletions python/paddle/fluid/tests/unittests/test_linalg_pinv_op.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,251 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci
from gradient_checker import grad_check
from decorator_helper import prog_scope


class LinalgPinvTestCase(unittest.TestCase):
    """Verify ``paddle.linalg.pinv`` against ``numpy.linalg.pinv``.

    Covers dygraph mode, static-graph mode, and backward gradient
    computation.  Subclasses override ``generate_input`` and/or
    ``init_config`` to vary the input shape, dtype, ``rcond`` and the
    ``hermitian`` flag.
    """

    def setUp(self):
        # Order matters: config (dtype/rcond/hermitian) is needed by
        # generate_input, whose output feeds generate_output.
        self.init_config()
        self.generate_input()
        self.generate_output()
        self.places = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(paddle.CUDAPlace(0))

    def generate_input(self):
        # Default input: a square random matrix.
        self._input_shape = (5, 5)
        self._input_data = np.random.random(self._input_shape).astype(
            self.dtype)

    def generate_output(self):
        # Reference result computed with numpy.
        self._output_data = np.linalg.pinv(self._input_data, \
            rcond=self.rcond, hermitian=self.hermitian)

    def init_config(self):
        self.dtype = 'float64'
        self.rcond = 1e-15
        self.hermitian = False

    def test_dygraph(self):
        for place in self.places:
            paddle.disable_static(place)
            x = paddle.to_tensor(self._input_data, place=place)
            out = paddle.linalg.pinv(
                x, rcond=self.rcond, hermitian=self.hermitian).numpy()
            # BUGFIX: require ALL elements to match.  The original used
            # .any(), which passed as soon as a single element happened
            # to be within tolerance.
            if not (np.abs(out - self._output_data) < 1e-6).all():
                print("EXPECTED: \n", self._output_data)
                print("GOT : \n", out)
                raise RuntimeError("Check PINV dygraph Failed")

    def test_static(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for place in places:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                x = paddle.fluid.data(
                    name="input",
                    shape=self._input_shape,
                    dtype=self._input_data.dtype)
                out = paddle.linalg.pinv(
                    x, rcond=self.rcond, hermitian=self.hermitian)
                exe = fluid.Executor(place)
                fetches = exe.run(fluid.default_main_program(),
                                  feed={"input": self._input_data},
                                  fetch_list=[out])
                # BUGFIX: .all() instead of .any() — same reason as in
                # test_dygraph.
                if not (np.abs(fetches[0] - self._output_data) < 1e-6).all():
                    print("EXPECTED: \n", self._output_data)
                    print("GOT : \n", fetches[0])
                    raise RuntimeError("Check PINV static Failed")

    def test_grad(self):
        for place in self.places:
            x = paddle.to_tensor(
                self._input_data, place=place, stop_gradient=False)
            out = paddle.linalg.pinv(
                x, rcond=self.rcond, hermitian=self.hermitian)
            try:
                out.backward()
                # Access .grad to make sure the gradient was populated.
                x_grad = x.grad
            except Exception as e:
                # Chain the original error instead of the former bare
                # `except:`, which discarded the actual failure cause.
                raise RuntimeError("Check PINV Grad Failed") from e


class LinalgPinvTestCase1(LinalgPinvTestCase):
    """Pinv of a rectangular (4 x 5) matrix."""

    def generate_input(self):
        shape = (4, 5)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)


class LinalgPinvTestCase2(LinalgPinvTestCase):
    """Pinv of a rectangular (5 x 4) matrix."""

    def generate_input(self):
        shape = (5, 4)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)


class LinalgPinvTestCaseBatch1(LinalgPinvTestCase):
    """Pinv of a batch of square matrices, shape (3, 5, 5)."""

    def generate_input(self):
        shape = (3, 5, 5)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)


class LinalgPinvTestCaseBatch2(LinalgPinvTestCase):
    """Pinv of a batch of rectangular matrices, shape (3, 4, 5)."""

    def generate_input(self):
        shape = (3, 4, 5)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)


class LinalgPinvTestCaseBatch3(LinalgPinvTestCase):
    """Pinv of a batch of rectangular matrices, shape (3, 5, 4)."""

    def generate_input(self):
        shape = (3, 5, 4)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)


class LinalgPinvTestCaseBatch4(LinalgPinvTestCase):
    """Pinv with two batch dimensions, shape (3, 6, 5, 4)."""

    def generate_input(self):
        shape = (3, 6, 5, 4)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)


class LinalgPinvTestCaseBatchBig(LinalgPinvTestCase):
    """Pinv of large batched matrices, shape (2, 200, 300)."""

    def generate_input(self):
        shape = (2, 200, 300)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)


class LinalgPinvTestCaseFP32(LinalgPinvTestCase):
    """Single-precision pinv on a batch of square matrices."""

    def generate_input(self):
        shape = (3, 5, 5)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)

    def init_config(self):
        # Same as the base case but with float32 inputs.
        self.dtype = 'float32'
        self.rcond = 1e-15
        self.hermitian = False


class LinalgPinvTestCaseRcond(LinalgPinvTestCase):
    """Pinv with a non-default singular-value cutoff (rcond)."""

    def generate_input(self):
        shape = (3, 5, 5)
        self._input_shape = shape
        self._input_data = np.random.random(shape).astype(self.dtype)

    def init_config(self):
        # Looser cutoff than the base case's 1e-15.
        self.dtype = 'float64'
        self.rcond = 1e-10
        self.hermitian = False


class LinalgPinvTestCaseHermitian1(LinalgPinvTestCase):
    """Pinv of a complex Hermitian (5 x 5) matrix with hermitian=True."""

    def generate_input(self):
        # A + A^H is Hermitian by construction.
        shape = (5, 5)
        self._input_shape = shape
        real = np.random.random(shape).astype(self.dtype)
        imag = np.random.random(shape).astype(self.dtype)
        mat = real + 1J * imag
        self._input_data = mat + mat.transpose().conj()

    def init_config(self):
        self.dtype = 'float64'
        self.rcond = 1e-15
        self.hermitian = True


class LinalgPinvTestCaseHermitian2(LinalgPinvTestCase):
    """Pinv of a batch of complex Hermitian matrices, shape (3, 5, 5)."""

    def generate_input(self):
        # A + A^H (conjugate transpose over the last two axes) is
        # Hermitian by construction.
        shape = (3, 5, 5)
        self._input_shape = shape
        real = np.random.random(shape).astype(self.dtype)
        imag = np.random.random(shape).astype(self.dtype)
        mat = real + 1J * imag
        self._input_data = mat + mat.transpose((0, 2, 1)).conj()

    def init_config(self):
        self.dtype = 'float64'
        self.rcond = 1e-15
        self.hermitian = True


class LinalgPinvTestCaseHermitian3(LinalgPinvTestCase):
    """Single-precision batched complex Hermitian pinv, shape (3, 5, 5)."""

    def generate_input(self):
        # A + A^H (conjugate transpose over the last two axes) is
        # Hermitian by construction.
        shape = (3, 5, 5)
        self._input_shape = shape
        real = np.random.random(shape).astype(self.dtype)
        imag = np.random.random(shape).astype(self.dtype)
        mat = real + 1J * imag
        self._input_data = mat + mat.transpose((0, 2, 1)).conj()

    def init_config(self):
        self.dtype = 'float32'
        self.rcond = 1e-15
        self.hermitian = True


class LinalgPinvTestCaseHermitian4(LinalgPinvTestCase):
    """Pinv of a real symmetric (5 x 5) matrix with hermitian=True."""

    def generate_input(self):
        # A + A^T is symmetric, i.e. Hermitian for real dtypes.
        shape = (5, 5)
        self._input_shape = shape
        base = np.random.random(shape).astype(self.dtype)
        self._input_data = base + base.transpose()

    def init_config(self):
        self.dtype = 'float64'
        self.rcond = 1e-15
        self.hermitian = True


class LinalgPinvTestCaseHermitian5(LinalgPinvTestCase):
    """Pinv of a batch of real symmetric matrices, shape (3, 5, 5)."""

    def generate_input(self):
        # A + A^T (transpose over the last two axes) is symmetric,
        # i.e. Hermitian for real dtypes.
        shape = (3, 5, 5)
        self._input_shape = shape
        base = np.random.random(shape).astype(self.dtype)
        self._input_data = base + base.transpose((0, 2, 1))

    def init_config(self):
        self.dtype = 'float64'
        self.rcond = 1e-15
        self.hermitian = True


class LinalgPinvTestCaseHermitianFP32(LinalgPinvTestCase):
    """Single-precision batched real symmetric pinv, shape (3, 5, 5)."""

    def generate_input(self):
        # A + A^T (transpose over the last two axes) is symmetric,
        # i.e. Hermitian for real dtypes.
        shape = (3, 5, 5)
        self._input_shape = shape
        base = np.random.random(shape).astype(self.dtype)
        self._input_data = base + base.transpose((0, 2, 1))

    def init_config(self):
        self.dtype = 'float32'
        self.rcond = 1e-15
        self.hermitian = True


# Run all test cases when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
4 changes: 3 additions & 1 deletion python/paddle/linalg.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
from .tensor.linalg import matrix_rank
from .tensor.linalg import svd
from .tensor.linalg import eigh # noqa: F401
from .tensor.linalg import pinv

__all__ = [
'cholesky', #noqa
Expand All @@ -31,5 +32,6 @@
'matrix_rank',
'svd',
'matrix_power',
'eigh'
'eigh',
'pinv'
]
1 change: 1 addition & 0 deletions python/paddle/tensor/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
from .linalg import multi_dot # noqa: F401
from .linalg import svd # noqa: F401
from .linalg import eigh # noqa: F401
from .linalg import pinv # noqa: F401
from .logic import equal # noqa: F401
from .logic import greater_equal # noqa: F401
from .logic import greater_than # noqa: F401
Expand Down
Loading