|
64 | 64 | __all__ = [] |
65 | 65 |
|
66 | 66 | _warned_in_to_tensor = False |
| 67 | +_warned_in_use_to_tensor = False |
67 | 68 |
|
68 | 69 |
|
69 | 70 | def _complex_to_real_dtype(dtype: DTypeLike) -> DTypeLike: |
@@ -877,6 +878,121 @@ def _to_tensor_static( |
877 | 878 | return output |
878 | 879 |
|
879 | 880 |
|
| 881 | +def tensor( |
| 882 | + data: TensorLike | NestedNumericSequence, |
| 883 | + dtype: DTypeLike | None = None, |
| 884 | + device: PlaceLike | None = None, |
| 885 | + requires_grad: bool = False, |
| 886 | + pin_memory: bool = False, |
| 887 | +) -> paddle.Tensor: |
| 888 | + r""" |
| 889 | + Constructs a ``paddle.Tensor`` from ``data``, |
| 890 | + which can be a scalar, tuple, list, numpy\.ndarray, or paddle\.Tensor. |
| 891 | +
|
| 892 | + If ``data`` is already a Tensor, a copy is performed and a new tensor is returned. |
| 893 | + If you only want to change the ``stop_gradient`` property, call ``Tensor.stop_gradient = stop_gradient`` directly. |
| 894 | +
|
| 895 | + .. code-block:: text |
| 896 | +
|
| 897 | + The dtype conversion rules are as follows: |
| 898 | + Keep dtype |
| 899 | + np.number ───────────► paddle.Tensor |
| 900 | + (0-D Tensor) |
| 901 | + default_dtype |
| 902 | + Python Number ───────────────► paddle.Tensor |
| 903 | + (0-D Tensor) |
| 904 | + Keep dtype |
| 905 | + np.ndarray ───────────► paddle.Tensor |
| 906 | +
|
| 907 | + Args: |
| 908 | + data(scalar|tuple|list|ndarray|Tensor): Initial data for the tensor. |
| 909 | + Can be a scalar, list, tuple, numpy\.ndarray, or paddle\.Tensor. |
| 910 | + dtype(str|np.dtype, optional): The desired data type of the returned tensor. Can be 'bool', 'float16', |
| 911 | + 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8', |
| 912 | + 'complex64', 'complex128'. Default: None, which infers the dtype from ``data``, |
| 913 | + except for Python float numbers, which take the dtype from ``get_default_dtype``. |
| 914 | + device(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate the Tensor. Can be |
| 915 | + CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, which means the global place. If ``device`` is a |
| 916 | + string, it can be ``cpu``, ``gpu:x`` or ``gpu_pinned``, where ``x`` is the index of the GPU. |
| 917 | + requires_grad(bool, optional): Whether the returned tensor requires gradients; sets ``stop_gradient`` to ``not requires_grad``. Default: False. |
| 918 | + pin_memory(bool, optional): If set, the returned tensor is allocated in pinned memory. Works only for CPU tensors. Default: False. |
| 919 | +
|
| 920 | + Returns: |
| 921 | + Tensor: A Tensor constructed from ``data`` . |
| 922 | +
|
| 923 | + Examples: |
| 924 | + .. code-block:: python |
| 925 | +
|
| 926 | + >>> import paddle |
| 927 | +
|
| 928 | + >>> type(paddle.tensor(1)) |
| 929 | + <class 'paddle.Tensor'> |
| 930 | +
|
| 931 | + >>> paddle.tensor(1) |
| 932 | + Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True, |
| 933 | + 1) |
| 934 | +
|
| 935 | + >>> x = paddle.tensor(1, requires_grad=True) |
| 936 | + >>> print(x) |
| 937 | + Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=False, |
| 938 | + 1) |
| 939 | +
|
| 940 | + >>> paddle.tensor(x) # A new tensor will be created with default stop_gradient=True |
| 941 | + Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True, |
| 942 | + 1) |
| 943 | +
|
| 944 | + >>> paddle.tensor([[0.1, 0.2], [0.3, 0.4]], device=paddle.CPUPlace(), requires_grad=True) |
| 945 | + Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False, |
| 946 | + [[0.10000000, 0.20000000], |
| 947 | + [0.30000001, 0.40000001]]) |
| 948 | +
|
| 949 | + >>> type(paddle.tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64')) |
| 950 | + <class 'paddle.Tensor'> |
| 951 | +
|
| 952 | + >>> paddle.tensor([[1+1j, 2], [3+2j, 4]], dtype='complex64') |
| 953 | + Tensor(shape=[2, 2], dtype=complex64, place=Place(cpu), stop_gradient=True, |
| 954 | + [[(1+1j), (2+0j)], |
| 955 | + [(3+2j), (4+0j)]]) |
| 956 | + """ |
| 957 | + if isinstance(device, str) and "cuda" in device: |
| 958 | + device = device.replace("cuda", "gpu") |
| 959 | + stop_gradient = not requires_grad |
| 960 | + place = _get_paddle_place(device) |
| 961 | + if place is None: |
| 962 | + place = _current_expected_place_() |
| 963 | + if in_dynamic_mode(): |
| 964 | + is_tensor = paddle.is_tensor(data) |
| 965 | + if not is_tensor and hasattr(data, "__cuda_array_interface__"): |
| 966 | + if not core.is_compiled_with_cuda(): |
| 967 | + raise RuntimeError( |
| 968 | + "PaddlePaddle is not compiled with CUDA, but trying to create a Tensor from a CUDA array." |
| 969 | + ) |
| 970 | + tensor = core.tensor_from_cuda_array_interface(data) |
| 971 | + else: |
| 972 | + if is_tensor: |
| 973 | + global _warned_in_to_tensor |
| 974 | + if not _warned_in_to_tensor: |
| 975 | + warnings.warn( |
| 976 | + "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach(), " |
| 977 | + "rather than paddle.to_tensor(sourceTensor).", |
| 978 | + stacklevel=2, |
| 979 | + ) |
| 980 | + _warned_in_to_tensor = True |
| 981 | + tensor = _to_tensor_non_static(data, dtype, place, stop_gradient) |
| 982 | + if pin_memory: |
| 983 | + tensor = tensor.pin_memory() |
| 984 | + return tensor |
| 985 | + # call assign for static graph |
| 986 | + else: |
| 987 | + re_exp = re.compile(r'[(](.+?)[)]', re.DOTALL) |
| 988 | + place_str = re.findall(re_exp, str(place))[0] |
| 989 | + with paddle.static.device_guard(place_str): |
| 990 | + tensor = _to_tensor_static(data, dtype, stop_gradient) |
| 991 | + if pin_memory: |
| 992 | + tensor = tensor.pin_memory() |
| 993 | + return tensor |
| 994 | + |
| 995 | + |
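The dynamic-mode branch above accepts any object exposing `__cuda_array_interface__` (CuPy arrays, Numba device arrays, and the like). A minimal interop sketch, assuming a CUDA-enabled build of this branch plus CuPy installed (both are assumptions of the sketch, not requirements of the patch):

    # Assumes: CUDA build of Paddle with this patch, plus CuPy available.
    import cupy as cp
    import paddle

    cp_arr = cp.arange(4, dtype=cp.float32)  # array resident on the GPU
    x = paddle.tensor(cp_arr)                # routed through core.tensor_from_cuda_array_interface
    print(x.place)                           # e.g. Place(gpu:0)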
880 | 996 | @ParamAliasDecorator({"place": ["device"]}) |
881 | 997 | def to_tensor( |
882 | 998 | data: TensorLike | NestedNumericSequence, |
@@ -957,34 +1073,15 @@ def to_tensor( |
957 | 1073 | [[(1+1j), (2+0j)], |
958 | 1074 | [(3+2j), (4+0j)]]) |
959 | 1075 | """ |
960 | | - place = _get_paddle_place(place) |
961 | | - if place is None: |
962 | | - place = _current_expected_place_() |
963 | | - if in_dynamic_mode(): |
964 | | - is_tensor = paddle.is_tensor(data) |
965 | | - if not is_tensor and hasattr(data, "__cuda_array_interface__"): |
966 | | - if not core.is_compiled_with_cuda(): |
967 | | - raise RuntimeError( |
968 | | - "PaddlePaddle is not compiled with CUDA, but trying to create a Tensor from a CUDA array." |
969 | | - ) |
970 | | - return core.tensor_from_cuda_array_interface(data) |
971 | | - if is_tensor: |
972 | | - global _warned_in_to_tensor |
973 | | - if not _warned_in_to_tensor: |
974 | | - warnings.warn( |
975 | | - "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach(), " |
976 | | - "rather than paddle.to_tensor(sourceTensor).", |
977 | | - stacklevel=2, |
978 | | - ) |
979 | | - _warned_in_to_tensor = True |
980 | | - return _to_tensor_non_static(data, dtype, place, stop_gradient) |
981 | | - |
982 | | - # call assign for static graph |
983 | | - else: |
984 | | - re_exp = re.compile(r'[(](.+?)[)]', re.DOTALL) |
985 | | - place_str = re.findall(re_exp, str(place))[0] |
986 | | - with paddle.static.device_guard(place_str): |
987 | | - return _to_tensor_static(data, dtype, stop_gradient) |
| 1076 | + global _warned_in_use_to_tensor |
| 1077 | + if not _warned_in_use_to_tensor: |
| 1078 | + warnings.warn( |
| 1079 | + "`paddle.to_tensor` will be deprecated. Please use `paddle.tensor` instead." |
| 1080 | + ) |
| 1081 | + _warned_in_use_to_tensor = True |
| 1082 | + return tensor( |
| 1083 | + data, dtype=dtype, device=place, requires_grad=not stop_gradient |
| 1084 | + ) |
988 | 1085 |
|
989 | 1086 |
|
990 | 1087 | class MmapStorage(paddle.base.core.MmapStorage): |
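A quick behavioral sketch of the warn-once shim (assuming this branch, in a fresh interpreter where `_warned_in_use_to_tensor` is still False): the first `paddle.to_tensor` call emits the deprecation warning, after which the module-level flag silences later calls while delegation to `paddle.tensor` continues.

    import warnings
    import paddle

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        a = paddle.to_tensor([1, 2, 3])  # warns once, then sets the module-level flag
        b = paddle.to_tensor([4, 5, 6])  # silent: the flag short-circuits the warning
    print(len(caught))                   # 1 under the patch's warn-once logic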
|