[Typing][C-60] Add type annotations for python/paddle/incubate/autograd/primapi.py #66016

Merged
merged 3 commits on Jul 14, 2024
Changes from all commits
35 changes: 27 additions & 8 deletions python/paddle/incubate/autograd/primapi.py
@@ -12,17 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import logging
import typing
from typing import TYPE_CHECKING, Sequence, TypeVar

import paddle
from paddle.base import backward, core, framework
from paddle.base.core import prim_config
from paddle.incubate.autograd import primx, utils

if TYPE_CHECKING:
    from paddle import Tensor
    from paddle.base.framework import Block

    _TensorOrTensorsT = TypeVar("_TensorOrTensorsT", Tensor, Sequence[Tensor])


@framework.static_only
def forward_grad(outputs, inputs, grad_inputs=None):
def forward_grad(
outputs: _TensorOrTensorsT,
inputs: _TensorOrTensorsT,
grad_inputs: _TensorOrTensorsT | None = None,
) -> _TensorOrTensorsT:
"""Forward mode of automatic differentiation.

Note:
@@ -42,6 +55,7 @@ def forward_grad(outputs, inputs, grad_inputs=None):

.. code-block:: python

>>> # doctest: +SKIP('Transform NOT has linearize')
>>> import numpy as np
>>> import paddle
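The changes above use the standard type-only import pattern: names imported under `if TYPE_CHECKING:` (here `Tensor` and `Block`) are visible only to static type checkers, and `from __future__ import annotations` keeps every annotation as a plain string, so the annotated signatures can reference those names without importing them at module load time. A minimal, self-contained sketch of that mechanism, using a placeholder import rather than Paddle's types:

```python
from __future__ import annotations  # annotations stay as strings, evaluated lazily

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers; never executed at runtime, so
    # heavy or circular imports stay out of the module's import path.
    from decimal import Decimal  # placeholder for a type-only dependency


def as_float(value: Decimal) -> float:
    # At runtime the annotation is just the string "Decimal", so the fact
    # that Decimal was never imported does not raise a NameError.
    return float(value)


print(as_float.__annotations__)  # {'value': 'Decimal', 'return': 'float'}
```

The same reasoning is why the constrained TypeVar `_TensorOrTensorsT` can sit inside the `TYPE_CHECKING` block: with postponed evaluation, nothing needs it at runtime.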

@@ -113,7 +127,11 @@ def forward_grad(outputs, inputs, grad_inputs=None):


@framework.static_only
def grad(outputs, inputs, grad_outputs=None):
def grad(
outputs: _TensorOrTensorsT,
inputs: _TensorOrTensorsT,
grad_outputs: _TensorOrTensorsT | None = None,
) -> _TensorOrTensorsT:
"""Reverse mode of automatic differentiation.

Note:
@@ -133,6 +151,7 @@ def grad(outputs, inputs, grad_outputs=None):

.. code-block:: python

>>> # doctest: +SKIP('Transform NOT has linearize')
>>> import numpy as np
>>> import paddle
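Because `outputs`, `inputs`, and the optional `grad_outputs` share one constrained TypeVar, a type checker resolves each call to `grad` (and likewise `forward_grad`) to a single form, either all single `Tensor`s or all `Sequence[Tensor]`s, with the return type mirroring that choice. A small sketch of that contract using a hypothetical stand-in function and `Tensor` class, not Paddle's real API:

```python
from __future__ import annotations

from typing import Sequence, TypeVar


class Tensor:  # stand-in for paddle.Tensor, only to exercise the annotation
    ...


# Constrained TypeVar: resolves to exactly one of the listed forms per call.
_TensorOrTensorsT = TypeVar("_TensorOrTensorsT", Tensor, Sequence[Tensor])


def fake_grad(
    outputs: _TensorOrTensorsT,
    inputs: _TensorOrTensorsT,
    grad_outputs: _TensorOrTensorsT | None = None,
) -> _TensorOrTensorsT:
    """Hypothetical stand-in that mirrors the annotated signature of grad."""
    return inputs


t = Tensor()
dx = fake_grad(t, t)            # resolves to Tensor, so the result is a Tensor
dxs = fake_grad([t], [t], [t])  # resolves to Sequence[Tensor], result is a sequence
# fake_grad(t, [t])             # a type checker rejects this mix of forms
```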

@@ -234,12 +253,12 @@ def grad(outputs, inputs, grad_outputs=None):

@framework.static_only
def to_prim(
blocks,
blacklist=frozenset(),
whitelist=frozenset(),
start_idx=-1,
backward_length=-1,
):
blocks: Block | Sequence[Block],
blacklist: set[str] | frozenset[str] = frozenset(),
whitelist: set[str] | frozenset[str] = frozenset(),
start_idx: int = -1,
backward_length: int = -1,
) -> None:
"""Search nonbasic ops which have be registered composite rules and replace them with primitive ops.
The operators in blacklist will be excluded from program when lowering into primitives, and only the
operators in whitelist will be lowering. The priority of blacklist is higher than whitelist, it means
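To make the blacklist/whitelist precedence described in the docstring concrete, here is a hedged usage sketch, assuming the legacy static-graph IR and purely illustrative op names; whether a given op is actually lowered depends on which composite rules are registered in the installed Paddle build:

```python
import paddle
from paddle.incubate.autograd import primapi

paddle.enable_static()

main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data(name="x", shape=[2, 3], dtype="float32")
    y = paddle.nn.functional.softmax(paddle.nn.functional.gelu(x))

# Lower ops with composite rules in the program's blocks into primitive ops,
# but leave softmax untouched: an op on the blacklist stays even if it also
# appears on the whitelist, since the blacklist has the higher priority.
primapi.to_prim(
    main.blocks,
    blacklist={"softmax"},
    whitelist={"softmax", "gelu"},
)
```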