7 changes: 7 additions & 0 deletions paddle/phi/core/compat/convert_utils.cc
@@ -44,6 +44,13 @@ Backend TransToPhiBackend(const phi::Place& place) {
}
case AllocationType::XPU:
return Backend::XPU;
case AllocationType::XPUPINNED: {
if (FLAGS_pinned_memory_as_cpu_backend) {
return Backend::CPU;
} else {
return Backend::XPU;
}
}
case AllocationType::IPU:
return Backend::IPU;
case AllocationType::UNDEFINED:
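The new XPUPINNED branch makes TransToPhiBackend treat pinned XPU host memory as either a CPU-backend or XPU-backend tensor, depending on FLAGS_pinned_memory_as_cpu_backend. A minimal Python sketch of that dispatch rule (the real logic is the C++ above; the enum values are written as strings purely for illustration):

def trans_to_phi_backend(allocation_type: str, pinned_as_cpu: bool) -> str:
    # Mirrors the switch in convert_utils.cc: XPUPINNED maps to the CPU
    # backend only when FLAGS_pinned_memory_as_cpu_backend is enabled.
    if allocation_type == "XPU":
        return "XPU"
    if allocation_type == "XPUPINNED":
        return "CPU" if pinned_as_cpu else "XPU"
    raise ValueError(f"unhandled allocation type: {allocation_type}")

assert trans_to_phi_backend("XPUPINNED", pinned_as_cpu=True) == "CPU"
assert trans_to_phi_backend("XPUPINNED", pinned_as_cpu=False) == "XPU"
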
6 changes: 6 additions & 0 deletions python/paddle/base/dygraph/math_op_patch.py
@@ -20,6 +20,9 @@

import paddle
from paddle import _C_ops
from paddle.utils.decorator_utils import (
size_args_decorator_patch,
)

from .. import core
from ..framework import convert_np_dtype_to_dtype_
@@ -312,6 +315,7 @@ def _new_full_(
pin_memory=pin_memory,
)

@size_args_decorator_patch
def _new_empty_(
var: Tensor,
size: ShapeLike,
@@ -334,6 +338,7 @@ def _new_empty_(
pin_memory=pin_memory,
)

@size_args_decorator_patch
def _new_ones_(
var: Tensor,
size: ShapeLike,
@@ -357,6 +362,7 @@ def _new_ones_(
pin_memory=pin_memory,
)

@size_args_decorator_patch
def _new_zeros_(
var: Tensor,
size: ShapeLike,
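With size_args_decorator_patch applied, the patched _new_empty_/_new_ones_/_new_zeros_ methods accept the size either as a list or as unpacked integers. A hedged usage sketch (assuming the methods are exposed on Tensor as new_ones etc., as the decorator's docstring further below suggests):

import paddle

x = paddle.randn([2, 2])
a = x.new_ones([3, 4], dtype=paddle.float32)  # size passed as a list
b = x.new_ones(3, 4, dtype=paddle.float32)    # size passed as *args
print(a.shape, b.shape)                       # expected: [3, 4] [3, 4]
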
10 changes: 8 additions & 2 deletions python/paddle/base/dygraph/tensor_patch_methods.py
@@ -1157,10 +1157,16 @@ def cuda(

@framework.dygraph_only
def pin_memory(self: Tensor, blocking: bool = True) -> Tensor:
if self.place.is_cuda_pinned_place():
if (
self.place.is_cuda_pinned_place()
or self.place.is_xpu_pinned_place()
):
return self
else:
res = self._copy_to(core.CUDAPinnedPlace(), blocking)
if paddle.device.is_compiled_with_xpu():
res = self._copy_to(core.XPUPinnedPlace(), blocking)
else:
res = self._copy_to(core.CUDAPinnedPlace(), blocking)
res.stop_gradient = self.stop_gradient
res.persistable = self.persistable
return res
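pin_memory() now routes the copy to XPUPinnedPlace on XPU builds, keeps the CUDAPinnedPlace path otherwise, and returns the tensor unchanged if it is already pinned. A hedged usage sketch (requires a CUDA or XPU build of Paddle):

import paddle

x = paddle.randn([2, 3])
pinned = x.pin_memory()
if paddle.device.is_compiled_with_xpu():
    assert pinned.place.is_xpu_pinned_place()
else:
    assert pinned.place.is_cuda_pinned_place()
assert pinned.pin_memory() is pinned  # already pinned: returned as-is
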
6 changes: 6 additions & 0 deletions python/paddle/pir/math_op_patch.py
@@ -25,6 +25,9 @@
from paddle import _C_ops
from paddle.base.libpaddle import DataType
from paddle.base.wrapped_decorator import wrap_decorator
from paddle.utils.decorator_utils import (
size_args_decorator_patch,
)

from . import Value

@@ -686,6 +689,7 @@ def _new_full_(
pin_memory=pin_memory,
)

@size_args_decorator_patch
def _new_empty_(
self,
size: ShapeLike,
@@ -731,6 +735,7 @@ def _new_empty_(
pin_memory=pin_memory,
)

@size_args_decorator_patch
def _new_ones_(
self,
size: ShapeLike,
@@ -777,6 +782,7 @@ def _new_ones_(
pin_memory=pin_memory,
)

@size_args_decorator_patch
def _new_zeros_(
self,
size: ShapeLike,
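The same *size handling is added to the pir.Value patch, so the new_* methods behave identically when building a static (PIR) program. A hedged sketch, assuming paddle.static program construction yields patched pir.Value objects:

import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.randn([2, 2])
    y = x.new_zeros(3, 4, dtype=paddle.float32)  # *size form on a pir.Value
    print(y.shape)                               # expected: [3, 4]
paddle.disable_static()
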
14 changes: 14 additions & 0 deletions python/paddle/tensor/creation.py
@@ -1792,6 +1792,8 @@ def _check_attr(attr, message):
)
if requires_grad is True:
tensor.stop_gradient = False
if out is not None:
out.stop_gradient = False
if pin_memory and in_dynamic_mode():
tensor = tensor.pin_memory()
return tensor
@@ -1960,6 +1962,8 @@ def full(
)
if requires_grad is True:
tensor.stop_gradient = False
if out is not None:
out.stop_gradient = False
if pin_memory and in_dynamic_mode():
tensor = tensor.pin_memory()
return tensor
@@ -2109,6 +2113,8 @@ def arange(
out=out,
)
tensor.stop_gradient = not requires_grad
if out is not None:
out.stop_gradient = not requires_grad
if pin_memory and in_dynamic_mode():
tensor = tensor.pin_memory()
return tensor
@@ -2161,6 +2167,8 @@ def arange(
out=out,
)
tensor.stop_gradient = not requires_grad
if out is not None:
out.stop_gradient = not requires_grad
if pin_memory and in_dynamic_mode():
tensor = tensor.pin_memory()
return tensor
@@ -2299,6 +2307,8 @@ def range(
out=out,
)
tensor.stop_gradient = not requires_grad
if out is not None:
out.stop_gradient = not requires_grad
return tensor

if not isinstance(start, (Variable, paddle.pir.Value)):
Expand Down Expand Up @@ -2332,6 +2342,8 @@ def range(
out=out,
)
tensor.stop_gradient = not requires_grad
if out is not None:
out.stop_gradient = not requires_grad
return tensor


@@ -3013,6 +3025,8 @@ def empty(
tensor = tensor.pin_memory()
if requires_grad is True:
tensor.stop_gradient = False
if out is not None:
out.stop_gradient = False
return tensor
else:
helper = LayerHelper("empty", **locals())
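Throughout creation.py, when a preallocated out tensor is supplied its stop_gradient flag is now kept in sync with requires_grad, matching the returned tensor. A hedged sketch of the intended behavior (the out and requires_grad parameters are taken from the diff; API names are unchanged):

import paddle

out = paddle.empty([2, 3])
t = paddle.full([2, 3], 1.0, out=out, requires_grad=True)
print(t.stop_gradient, out.stop_gradient)  # expected: False False
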
29 changes: 29 additions & 0 deletions python/paddle/utils/decorator_utils.py
@@ -295,6 +295,35 @@ def wrapped_func(*args: Any, **kwargs: Any) -> Any:
return wrapped_func


def size_args_decorator_patch(method: Callable) -> Callable:
"""
A decorator that allows *size arguments for methods patched onto Tensor,
e.g. Tensor.method(*size, *, ...).

Usage Example:

paddle.randn([]).new_ones(1, dtype=paddle.float32)
paddle.randn([]).new_ones(1, 2, 3, dtype=paddle.float32)
paddle.randn([]).new_ones([1, 2, 3], dtype=paddle.float32)
paddle.randn([]).new_ones(size=[1, 2, 3], dtype=paddle.float32)
paddle.randn([]).new_ones([1, 2, 3], paddle.float32)
"""

@functools.wraps(method)
def wrapped_func(*args: Any, **kwargs: Any) -> Any:
if len(args) >= 2 and isinstance(args[1], int):
# args[0]: Tensor
# args[1:]: *size
kwargs['size'] = list(args[1:])
args = (args[0],)

return method(*args, **kwargs)

wrapped_func.__signature__ = inspect.signature(method)

return wrapped_func


class VariableArgsDecorator(DecoratorBase):
def __init__(self, var: str) -> None:
super().__init__()
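The decorator folds trailing positional integers into the size keyword before delegating to the original method, and copies the original signature so introspection still reports size as a named parameter. A self-contained demo of that rewriting, using a stand-in class instead of a real Tensor (FakeTensor is purely illustrative):

import functools
import inspect
from typing import Any, Callable

def size_args_decorator_patch(method: Callable) -> Callable:
    # Same logic as the decorator added above, reproduced here so the demo
    # runs without Paddle installed.
    @functools.wraps(method)
    def wrapped_func(*args: Any, **kwargs: Any) -> Any:
        if len(args) >= 2 and isinstance(args[1], int):
            kwargs['size'] = list(args[1:])  # fold *size into the keyword
            args = (args[0],)                # keep only the receiver
        return method(*args, **kwargs)

    wrapped_func.__signature__ = inspect.signature(method)
    return wrapped_func

class FakeTensor:
    @size_args_decorator_patch
    def new_ones(self, size, dtype=None):
        return ('ones', list(size), dtype)

t = FakeTensor()
assert t.new_ones(1, 2, 3) == t.new_ones([1, 2, 3]) == t.new_ones(size=[1, 2, 3])
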