
Commit ec05c8e

use paddle.tensor instead of paddle.to_tensor
1 parent 2d2476e commit ec05c8e

File tree

3 files changed: 166 additions & 28 deletions


python/paddle/__init__.py

Lines changed: 31 additions & 0 deletions
@@ -604,6 +604,37 @@
     to_dlpack,
 )
 
+
+class _TensorMethodOrModule:
+    def __init__(self):
+        import paddle.tensor as tensor_module
+
+        from .tensor.creation import tensor as tensor_api
+
+        self.module = tensor_module
+        self.method = tensor_api
+
+    def __call__(self, *args, **kwargs):
+        return self.method(*args, **kwargs)
+
+    def __getattr__(self, name):
+        return getattr(self.module, name)
+
+    def __repr__(self):
+        return repr(self.method)
+
+    def __str__(self):
+        return str(self.method)
+
+    def __type__(self):
+        return type(self.method)
+
+    def __dir__(self):
+        return dir(self.module)
+
+
+tensor = _TensorMethodOrModule()  # noqa: F811
+
 # CINN has to set a flag to include a lib
 if is_compiled_with_cinn():
     import os
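The wrapper above lets `paddle.tensor` act both as the familiar submodule and as a tensor-construction function. A rough usage sketch, not part of the commit, assuming a Paddle build with this patch applied:

import paddle

# Called like a function, the wrapper dispatches to paddle.tensor.creation.tensor
# via __call__.
x = paddle.tensor([1.0, 2.0], requires_grad=True)
print(type(x))  # <class 'paddle.Tensor'>

# Attribute access still falls through to the paddle.tensor module via __getattr__,
# so existing code such as paddle.tensor.creation keeps working.
print(paddle.tensor.creation._warned_in_to_tensor)

# __dir__ forwards to the module, so introspection of paddle.tensor is unchanged.
print(len(dir(paddle.tensor)))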

python/paddle/tensor/creation.py

Lines changed: 125 additions & 28 deletions
@@ -64,6 +64,7 @@
 __all__ = []
 
 _warned_in_to_tensor = False
+_warned_in_use_to_tensor = False
 
 
 def _complex_to_real_dtype(dtype: DTypeLike) -> DTypeLike:
@@ -877,6 +878,121 @@ def _to_tensor_static(
     return output
 
 
+def tensor(
+    data: TensorLike | NestedNumericSequence,
+    dtype: DTypeLike | None = None,
+    device: PlaceLike | None = None,
+    requires_grad: bool = False,
+    pin_memory: bool = False,
+) -> paddle.Tensor:
+    r"""
+    Constructs a ``paddle.Tensor`` from ``data``,
+    which can be scalar, tuple, list, numpy\.ndarray, paddle\.Tensor.
+
+    If the ``data`` is already a Tensor, a copy is performed and a new tensor is returned.
+    If you only want to change the stop_gradient property, please call ``Tensor.stop_gradient = stop_gradient`` directly.
+
+    .. code-block:: text
+
+        We use the dtype conversion rules following this:
+                Keep dtype
+        np.number ───────────► paddle.Tensor
+                                (0-D Tensor)
+                       default_dtype
+        Python Number ───────────────► paddle.Tensor
+                                        (0-D Tensor)
+                Keep dtype
+        np.ndarray ───────────► paddle.Tensor
+
+    Args:
+        data(scalar|tuple|list|ndarray|Tensor): Initial data for the tensor.
+            Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor.
+        dtype(str|np.dtype, optional): The desired data type of the returned tensor. Can be 'bool', 'float16',
+            'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8',
+            'complex64', 'complex128'. Default: None, which infers the dtype from ``data``,
+            except for python float numbers, which get their dtype from ``get_default_dtype``.
+        device(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate the Tensor. Can be
+            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, which means the global place. If ``device`` is a
+            string, it can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPU.
+        requires_grad(bool, optional): Whether to enable gradient propagation in Autograd. Default: False.
+        pin_memory(bool, optional): If set, the returned tensor is allocated in pinned memory. Works only for CPU tensors. Default: False.
+
+    Returns:
+        Tensor: A Tensor constructed from ``data``.
+
+    Examples:
+        .. code-block:: python
+
+            >>> import paddle
+
+            >>> type(paddle.tensor(1))
+            <class 'paddle.Tensor'>
+
+            >>> paddle.tensor(1)
+            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
+                   1)
+
+            >>> x = paddle.tensor(1, requires_grad=True)
+            >>> print(x)
+            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=False,
+                   1)
+
+            >>> paddle.tensor(x)  # A new tensor will be created with default stop_gradient=True
+            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
+                   1)
+
+            >>> paddle.tensor([[0.1, 0.2], [0.3, 0.4]], device=paddle.CPUPlace(), requires_grad=True)
+            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
+                   [[0.10000000, 0.20000000],
+                    [0.30000001, 0.40000001]])
+
+            >>> type(paddle.tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64'))
+            <class 'paddle.Tensor'>
+
+            >>> paddle.tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64')
+            Tensor(shape=[2, 2], dtype=complex64, place=Place(cpu), stop_gradient=True,
+                   [[(1+1j), (2+0j)],
+                    [(3+2j), (4+0j)]])
+    """
+    if isinstance(device, str) and "cuda" in device:
+        device = device.replace("cuda", "gpu")
+    stop_gradient = not requires_grad
+    place = _get_paddle_place(device)
+    if place is None:
+        place = _current_expected_place_()
+    if in_dynamic_mode():
+        is_tensor = paddle.is_tensor(data)
+        if not is_tensor and hasattr(data, "__cuda_array_interface__"):
+            if not core.is_compiled_with_cuda():
+                raise RuntimeError(
+                    "PaddlePaddle is not compiled with CUDA, but trying to create a Tensor from a CUDA array."
+                )
+            tensor = core.tensor_from_cuda_array_interface(data)
+        else:
+            if is_tensor:
+                global _warned_in_to_tensor
+                if not _warned_in_to_tensor:
+                    warnings.warn(
+                        "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach(), "
+                        "rather than paddle.to_tensor(sourceTensor).",
+                        stacklevel=2,
+                    )
+                    _warned_in_to_tensor = True
+            tensor = _to_tensor_non_static(data, dtype, place, stop_gradient)
+        if pin_memory:
+            tensor = tensor.pin_memory()
+        return tensor
+    # call assign for static graph
+    else:
+        re_exp = re.compile(r'[(](.+?)[)]', re.DOTALL)
+        place_str = re.findall(re_exp, str(place))[0]
+        with paddle.static.device_guard(place_str):
+            tensor = _to_tensor_static(data, dtype, stop_gradient)
+            if pin_memory:
+                tensor = tensor.pin_memory()
+            return tensor
+
+
 @ParamAliasDecorator({"place": ["device"]})
 def to_tensor(
     data: TensorLike | NestedNumericSequence,
@@ -957,34 +1073,15 @@ def to_tensor(
                    [[(1+1j), (2+0j)],
                     [(3+2j), (4+0j)]])
     """
-    place = _get_paddle_place(place)
-    if place is None:
-        place = _current_expected_place_()
-    if in_dynamic_mode():
-        is_tensor = paddle.is_tensor(data)
-        if not is_tensor and hasattr(data, "__cuda_array_interface__"):
-            if not core.is_compiled_with_cuda():
-                raise RuntimeError(
-                    "PaddlePaddle is not compiled with CUDA, but trying to create a Tensor from a CUDA array."
-                )
-            return core.tensor_from_cuda_array_interface(data)
-        if is_tensor:
-            global _warned_in_to_tensor
-            if not _warned_in_to_tensor:
-                warnings.warn(
-                    "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach(), "
-                    "rather than paddle.to_tensor(sourceTensor).",
-                    stacklevel=2,
-                )
-                _warned_in_to_tensor = True
-        return _to_tensor_non_static(data, dtype, place, stop_gradient)
-
-    # call assign for static graph
-    else:
-        re_exp = re.compile(r'[(](.+?)[)]', re.DOTALL)
-        place_str = re.findall(re_exp, str(place))[0]
-        with paddle.static.device_guard(place_str):
-            return _to_tensor_static(data, dtype, stop_gradient)
+    global _warned_in_use_to_tensor
+    if not _warned_in_use_to_tensor:
+        warnings.warn(
+            "`paddle.to_tensor` will be deprecated. Please use `paddle.tensor` instead."
+        )
+        _warned_in_use_to_tensor = True
+    return tensor(
+        data, dtype=dtype, device=place, requires_grad=not stop_gradient
+    )
 
 
 class MmapStorage(paddle.base.core.MmapStorage):
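With the rewrite above, `to_tensor` becomes a thin wrapper that warns once per process and forwards to the new `tensor` function. A rough sketch of the resulting behaviour, not part of the diff, assuming a build that includes this change:

import warnings

import paddle

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    a = paddle.to_tensor([1, 2, 3])  # first call: emits the deprecation warning
    b = paddle.to_tensor([4, 5, 6])  # later calls stay silent, guarded by _warned_in_use_to_tensor

# If to_tensor was already called earlier in the session, _warned_in_use_to_tensor
# is already True and no message is recorded here.
print([str(w.message) for w in caught])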

test/legacy_test/test_eager_tensor.py

Lines changed: 10 additions & 0 deletions
@@ -1351,6 +1351,16 @@ def test_to_tensor_from___cuda_array_interface__(self):
             flag = paddle.tensor.creation._warned_in_to_tensor
             self.assertTrue(flag)
 
+    def test_tensor_for_use_to_tensor(self):
+        # check that the deprecation warning is raised when using to_tensor
+        with (
+            dygraph_guard(),
+            warnings.catch_warnings(record=True) as w,
+        ):
+            x = paddle.to_tensor([1, 2, 3])
+            flag = paddle.tensor.creation._warned_in_use_to_tensor
+            self.assertTrue(flag)
+
     def test_dlpack_device(self):
         """test Tensor.__dlpack_device__"""
         with dygraph_guard():
