[Typing][A-17] Add type annotations for conv layers #65183

Merged 8 commits on Jun 28, 2024
Changes from 1 commit
213 changes: 116 additions & 97 deletions python/paddle/nn/layer/conv.py
@@ -14,7 +14,26 @@

# TODO: define classes of convolutional neural network

from __future__ import annotations

from typing import Any, Literal, Sequence

from typing_extensions import TypeAlias

import numpy as np
import paddle
from paddle import ParamAttr, get_flags

from ..._typing import (
    DataLayout1D,
    DataLayout2D,
    DataLayout3D,
    IntSequence,
    ShapeLike,
)

PaddingSizeStr: TypeAlias = Literal["valid", "same"]
PaddingMode: TypeAlias = Literal["zeros", "reflect", "replicate", "circular"]
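As a quick aside (not part of the diff), the two Literal aliases let a type checker reject invalid strings at the call site. A small hedged illustration, with hypothetical variable names:

pad: PaddingSizeStr = "same"        # OK: one of "valid" | "same"
mode: PaddingMode = "reflect"       # OK: one of the four padding modes
# mode = "mirror"                   # mypy/pyright would reject this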

@@ -32,13 +51,13 @@
__all__ = []


def _get_default_param_initializer(num_channels, filter_size):
def _get_default_param_initializer(num_channels: int, filter_size: int) -> Normal:
filter_elem_num = num_channels * np.prod(filter_size)
std = (2.0 / filter_elem_num) ** 0.5
return Normal(0.0, std)


def _reverse_repeat_list(t, n):
def _reverse_repeat_list(t: Sequence[int], n: int) -> list:
Contributor:

The `t` here shouldn't be limited to `int`; try a TypeVar:

def _reverse_repeat_list(t: Sequence[_T], n: int) -> list[_T]:
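A minimal runnable sketch of that suggestion, assuming a module-level _T; the body is paraphrased from the helper's docstring rather than copied from the PR:

from __future__ import annotations

from typing import Sequence, TypeVar

_T = TypeVar("_T")


def _reverse_repeat_list(t: Sequence[_T], n: int) -> list[_T]:
    # Reverse `t` and repeat each element `n` times,
    # e.g. t=(1, 2), n=2 gives [2, 2, 1, 1].
    return [x for x in reversed(t) for _ in range(n)]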

"""Reverse the order of `t` and repeat each element for `n` times.
This can be used to translate padding arg used by Conv and Pooling modules
to the ones used by `F.pad`.
@@ -49,21 +68,21 @@ def _reverse_repeat_list(t, n):
class _ConvNd(Layer):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
transposed,
dims,
stride=1,
padding=0,
padding_mode='zeros',
output_padding=0,
dilation=1,
groups=1,
weight_attr=None,
bias_attr=None,
data_format="NCHW",
):
in_channels: Any,
out_channels: Any,
kernel_size: Any,
transposed: Any,
dims: Any,
stride: int | IntSequence = 1,
padding: int | IntSequence = 0,
padding_mode: str = 'zeros',
output_padding: int | IntSequence = 0,
dilation: int | IntSequence = 1,
groups: int = 1,
weight_attr: Any | None = None,
bias_attr: Any | None = None,
Contributor:

No need for `Any` here.

data_format: str = "NCHW",
) -> None:
Contributor:

Try to avoid `Any`. Also, `data_format` should have a shared type in the `_typing` module.

Member:

Don't copy directly from paddlepaddle-stubs; think it through. `_ConvNd` is clearly not annotated.
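Putting the review comments together, one hedged sketch of a fully annotated `_ConvNd.__init__` with no `Any`; `DataLayoutND` is a hypothetical stand-in for whatever shared layout alias `_typing` actually exposes:

def __init__(
    self,
    in_channels: int,
    out_channels: int,
    kernel_size: int | IntSequence,
    transposed: bool,
    dims: int,
    stride: int | IntSequence = 1,
    padding: int | IntSequence | PaddingSizeStr = 0,
    padding_mode: PaddingMode = 'zeros',
    output_padding: int | IntSequence = 0,
    dilation: int | IntSequence = 1,
    groups: int = 1,
    weight_attr: ParamAttr | None = None,
    bias_attr: ParamAttr | bool | None = None,
    data_format: DataLayoutND = "NCHW",  # hypothetical shared alias
) -> None:
    ...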

super().__init__()
assert (
weight_attr is not False
@@ -142,7 +161,7 @@ def __init__(
in_channels // groups,
] + self._kernel_size

def _get_default_param_initializer():
def _get_default_param_initializer() -> Normal | None:
if transposed:
return None
filter_elem_num = np.prod(self._kernel_size) * self._in_channels
@@ -186,7 +205,7 @@ def _get_default_param_initializer():
):
self._use_cudnn = False

def extra_repr(self):
def extra_repr(self) -> str:
main_str = '{_in_channels}, {_out_channels}, kernel_size={_kernel_size}'
if self._stride != [1] * len(self._stride):
main_str += ', stride={_stride}'
@@ -328,18 +347,18 @@ class Conv1D(_ConvNd):

def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode='zeros',
weight_attr=None,
bias_attr=None,
data_format="NCL",
):
in_channels: int,
out_channels: int,
kernel_size: int | IntSequence,
stride: int | IntSequence = 1,
padding: int | IntSequence | PaddingSizeStr = 0,
dilation: int | IntSequence = 1,
Contributor:

Depending on the Conv variant, the input accepted here may differ; see #65191. The same goes for the next few parameters.
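One way to tighten these per variant, sketched with hypothetical fixed-arity aliases in the spirit of #65191 (the names Size1/Size2/Size3 are illustrative, not taken from that PR):

from typing import Union

from typing_extensions import TypeAlias

# One alias per spatial arity: Conv1D -> Size1, Conv2D -> Size2, Conv3D -> Size3.
Size1: TypeAlias = Union[int, tuple[int], list[int]]
Size2: TypeAlias = Union[int, tuple[int, int], list[int]]
Size3: TypeAlias = Union[int, tuple[int, int, int], list[int]]

Conv2D would then declare kernel_size: Size2, stride: Size2 = 1, and so on.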

groups: int = 1,
padding_mode: PaddingMode = 'zeros',
weight_attr: ParamAttr | None = None,
bias_attr: ParamAttr | bool | None = None,
data_format: DataLayout1D = "NCL",
) -> None:
super().__init__(
in_channels,
out_channels,
@@ -356,7 +375,7 @@ def __init__(
data_format=data_format,
)

def forward(self, x):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
padding = 0
if self._padding_mode != "zeros":
x = F.pad(
@@ -515,18 +534,18 @@ class Conv1DTranspose(_ConvNd):

def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
groups=1,
dilation=1,
weight_attr=None,
bias_attr=None,
data_format="NCL",
):
in_channels: int,
out_channels: int,
kernel_size: int | IntSequence,
stride: int | IntSequence = 1,
padding: int | IntSequence | PaddingSizeStr = 0,
output_padding: int | IntSequence = 0,
groups: int = 1,
dilation: int | IntSequence = 1,
weight_attr: ParamAttr | None = None,
bias_attr: ParamAttr | bool | None = None,
data_format: DataLayout1D = "NCL",
) -> None:
super().__init__(
in_channels,
out_channels,
@@ -543,7 +562,7 @@ def __init__(
data_format=data_format,
)

def forward(self, x, output_size=None):
def forward(self, x: paddle.Tensor, output_size: ShapeLike | None = None) -> paddle.Tensor:
out = F.conv1d_transpose(
x,
self.weight,
@@ -671,18 +690,18 @@ class Conv2D(_ConvNd):

def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode='zeros',
weight_attr=None,
bias_attr=None,
data_format="NCHW",
):
in_channels: int,
out_channels: int,
kernel_size: int | IntSequence,
stride: int | IntSequence = 1,
padding: int | IntSequence | PaddingSizeStr = 0,
dilation: int | IntSequence = 1,
groups: int = 1,
padding_mode: PaddingMode = 'zeros',
weight_attr: ParamAttr | None = None,
bias_attr: ParamAttr | bool | None = None,
data_format: DataLayout2D = "NCHW",
) -> None:
super().__init__(
in_channels,
out_channels,
@@ -699,7 +718,7 @@ def __init__(
data_format=data_format,
)

def forward(self, x):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
if self._padding_mode != 'zeros':
x = F.pad(
x,
@@ -846,18 +865,18 @@ class Conv2DTranspose(_ConvNd):

def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
dilation=1,
groups=1,
weight_attr=None,
bias_attr=None,
data_format="NCHW",
):
in_channels: int,
out_channels: int,
kernel_size: int | IntSequence,
stride: int | IntSequence = 1,
padding: int | IntSequence | PaddingSizeStr = 0,
output_padding: int | IntSequence = 0,
dilation: int | IntSequence = 1,
groups: int = 1,
weight_attr: ParamAttr | None = None,
bias_attr: ParamAttr | bool | None = None,
data_format: DataLayout2D = "NCHW",
) -> None:
super().__init__(
in_channels,
out_channels,
@@ -874,7 +893,7 @@ def __init__(
data_format=data_format,
)

def forward(self, x, output_size=None):
def forward(self, x: paddle.Tensor, output_size: ShapeLike | None = None) -> paddle.Tensor:
if output_size is None:
output_padding = self.output_padding
else:
@@ -1003,18 +1022,18 @@ class Conv3D(_ConvNd):

def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
padding_mode='zeros',
weight_attr=None,
bias_attr=None,
data_format="NCDHW",
):
in_channels: int,
out_channels: int,
kernel_size: int | IntSequence,
stride: int | IntSequence = 1,
padding: int | IntSequence | PaddingSizeStr = 0,
dilation: int | IntSequence = 1,
groups: int = 1,
padding_mode: PaddingMode = 'zeros',
weight_attr: ParamAttr | None = None,
bias_attr: ParamAttr | bool | None = None,
data_format: DataLayout3D = "NCDHW",
) -> None:
super().__init__(
in_channels,
out_channels,
@@ -1031,7 +1050,7 @@ def __init__(
data_format=data_format,
)

def forward(self, x):
def forward(self, x: paddle.Tensor) -> paddle.Tensor:
if self._padding_mode != 'zeros':
x = F.pad(
x,
@@ -1182,18 +1201,18 @@ class Conv3DTranspose(_ConvNd):

def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
output_padding=0,
dilation=1,
groups=1,
weight_attr=None,
bias_attr=None,
data_format="NCDHW",
):
in_channels: int,
out_channels: int,
kernel_size: int | IntSequence,
stride: int | IntSequence = 1,
padding: int | IntSequence | PaddingSizeStr = 0,
output_padding: int | IntSequence = 0,
dilation: int | IntSequence = 1,
groups: int = 1,
weight_attr: ParamAttr | None = None,
bias_attr: ParamAttr | bool | None = None,
data_format: DataLayout3D = "NCDHW",
) -> None:
super().__init__(
in_channels,
out_channels,
@@ -1210,7 +1229,7 @@ def __init__(
data_format=data_format,
)

def forward(self, x, output_size=None):
def forward(self, x: paddle.Tensor, output_size: ShapeLike | None = None) -> paddle.Tensor:
if output_size is None:
output_padding = self.output_padding
else: