Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[Typing][B-02,B-04] Add type annotations for python/paddle/distribution/{bernoulli, binomial}.py #65727

Merged
merged 3 commits into from
Jul 13, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 21 additions & 10 deletions python/paddle/distribution/bernoulli.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

from typing import TYPE_CHECKING, Sequence

import numpy as np

Expand All @@ -26,6 +28,10 @@
softplus,
)

if TYPE_CHECKING:
from paddle import Tensor
from paddle._typing.dtype_like import _DTypeLiteral

# Smallest representable number
EPS = {
'float32': paddle.finfo(paddle.float32).eps,
Expand Down Expand Up @@ -91,7 +97,12 @@ class Bernoulli(exponential_family.ExponentialFamily):
0.61086434)
"""

def __init__(self, probs, name=None):
name: str
probs: Tensor
logits: Tensor
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

看看 dtype 是不是也可以写下?

dtype: _DTypeLiteral

def __init__(self, probs: float | Tensor, name: str | None = None) -> None:
self.name = name or 'Bernoulli'
if not in_dynamic_mode():
check_type(
Expand All @@ -116,7 +127,7 @@ def __init__(self, probs, name=None):
super().__init__(batch_shape=self.probs.shape, event_shape=())

@property
def mean(self):
def mean(self) -> Tensor:
"""Mean of Bernoulli distribution.

Returns:
Expand All @@ -125,15 +136,15 @@ def mean(self):
return self.probs

@property
def variance(self):
def variance(self) -> Tensor:
"""Variance of Bernoulli distribution.

Returns:
Tensor: Variance value of distribution.
"""
return paddle.multiply(self.probs, (1 - self.probs))

def sample(self, shape):
def sample(self, shape: Sequence[int]) -> Tensor:
"""Sample from Bernoulli distribution.

Args:
Expand Down Expand Up @@ -180,7 +191,7 @@ def sample(self, shape):
with paddle.no_grad():
return paddle.bernoulli(self.probs.expand(shape), name=name)

def rsample(self, shape, temperature=1.0):
def rsample(self, shape: Sequence[int], temperature: float = 1.0) -> Tensor:
"""Sample from Bernoulli distribution (reparameterized).

The `rsample` is a continuously approximate of Bernoulli distribution reparameterized sample method.
Expand Down Expand Up @@ -275,7 +286,7 @@ def rsample(self, shape, temperature=1.0):
temperature,
)

def cdf(self, value):
def cdf(self, value: Tensor) -> Tensor:
r"""Cumulative distribution function(CDF) evaluated at value.

.. math::
Expand Down Expand Up @@ -322,7 +333,7 @@ def cdf(self, value):
name=name,
)

def log_prob(self, value):
def log_prob(self, value: Tensor) -> Tensor:
"""Log of probability density function.

Args:
Expand Down Expand Up @@ -353,7 +364,7 @@ def log_prob(self, value):
logits, value, reduction='none', name=name
)

def prob(self, value):
def prob(self, value: Tensor) -> Tensor:
r"""Probability density function(PDF) evaluated at value.

.. math::
Expand Down Expand Up @@ -388,7 +399,7 @@ def prob(self, value):

return self.log_prob(value).exp(name=name)

def entropy(self):
def entropy(self) -> Tensor:
r"""Entropy of Bernoulli distribution.

.. math::
Expand Down Expand Up @@ -418,7 +429,7 @@ def entropy(self):
self.logits, self.probs, reduction='none', name=name
)

def kl_divergence(self, other):
def kl_divergence(self, other: Bernoulli) -> Tensor:
r"""The KL-divergence between two Bernoulli distributions.

.. math::
Expand Down
5 changes: 3 additions & 2 deletions python/paddle/distribution/beta.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from __future__ import annotations

import numbers
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Sequence

import paddle
from paddle.distribution import dirichlet, exponential_family
Expand Down Expand Up @@ -91,6 +91,7 @@ class Beta(exponential_family.ExponentialFamily):
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
[-1.91923141, -0.38095081])
"""

alpha: Tensor
beta: Tensor

Expand Down Expand Up @@ -142,7 +143,7 @@ def log_prob(self, value: Tensor) -> Tensor:
"""
return self._dirichlet.log_prob(paddle.stack([value, 1.0 - value], -1))

def sample(self, shape=()):
def sample(self, shape: Sequence[int] = ()) -> Tensor:
"""Sample from beta distribution with sample shape.

Args:
Expand Down
41 changes: 28 additions & 13 deletions python/paddle/distribution/binomial.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from collections.abc import Sequence
from typing import TYPE_CHECKING

import paddle
from paddle.distribution import distribution

if TYPE_CHECKING:
from paddle import Tensor
from paddle._typing.dtype_like import _DTypeLiteral


class Binomial(distribution.Distribution):
r"""
Expand Down Expand Up @@ -67,7 +74,13 @@ class Binomial(distribution.Distribution):
[2.94053698, 3.00781751, 2.51124287])
"""

def __init__(self, total_count, probs):
dtype: _DTypeLiteral
total_count: Tensor
probs: Tensor

def __init__(
self, total_count: int | Tensor, probs: float | Tensor
) -> None:
self.dtype = paddle.get_default_dtype()
self.total_count, self.probs = self._to_tensor(total_count, probs)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

下面的 _to_tensor 可以顺手标一下,文档里都给出类型了

另外我目测了下,返回值应该是 list[Tensor]?虽然确实是两个元素就是了,文档里写的可能并不准确


Expand All @@ -77,11 +90,13 @@ def __init__(self, total_count, probs):
batch_shape = self.total_count.shape
super().__init__(batch_shape)

def _to_tensor(self, total_count, probs):
def _to_tensor(
self, total_count: int | Tensor, probs: float | Tensor
) -> list[Tensor]:
"""Convert the input parameters into Tensors if they were not and broadcast them

Returns:
Tuple[Tensor, Tensor]: converted total_count and probs.
list[Tensor]: converted total_count and probs.
"""
# convert type
if isinstance(probs, float):
Expand All @@ -97,7 +112,7 @@ def _to_tensor(self, total_count, probs):
return paddle.broadcast_tensors([total_count, probs])

@property
def mean(self):
def mean(self) -> Tensor:
"""Mean of binomial distribution.

Returns:
Expand All @@ -106,15 +121,15 @@ def mean(self):
return self.total_count * self.probs

@property
def variance(self):
def variance(self) -> Tensor:
"""Variance of binomial distribution.

Returns:
Tensor: variance value.
"""
return self.total_count * self.probs * (1 - self.probs)

def sample(self, shape=()):
def sample(self, shape: Sequence[int] = ()) -> Tensor:
"""Generate binomial samples of the specified shape. The final shape would be ``shape+batch_shape`` .

Args:
Expand All @@ -139,7 +154,7 @@ def sample(self, shape=()):
)
return paddle.cast(sample, self.dtype)

def entropy(self):
def entropy(self) -> Tensor:
r"""Shannon entropy in nats.

The entropy is
Expand All @@ -159,7 +174,7 @@ def entropy(self):
log_prob = self.log_prob(values)
return -(paddle.exp(log_prob) * log_prob).sum(0)

def _enumerate_support(self):
def _enumerate_support(self) -> Tensor:
"""Return the support of binomial distribution [0, 1, ... ,n]

Returns:
Expand All @@ -171,14 +186,14 @@ def _enumerate_support(self):
values = values.reshape((-1,) + (1,) * len(self.batch_shape))
return values

def log_prob(self, value):
def log_prob(self, value: Tensor) -> Tensor:
"""Log probability density/mass function.

Args:
value (Tensor): The input tensor.
value (Tensor): The input tensor.

Returns:
Tensor: log probability. The data type is the same as `probs`.
Tensor: log probability. The data type is the same as `probs`.
"""
value = paddle.cast(value, dtype=self.dtype)

Expand All @@ -200,7 +215,7 @@ def log_prob(self, value):
neginf=-eps,
)

def prob(self, value):
def prob(self, value: Tensor) -> Tensor:
"""Probability density/mass function.

Args:
Expand All @@ -211,7 +226,7 @@ def prob(self, value):
"""
return paddle.exp(self.log_prob(value))

def kl_divergence(self, other):
def kl_divergence(self, other: Binomial) -> Tensor:
r"""The KL-divergence between two binomial distributions with the same :attr:`total_count`.

The probability density function (pdf) is
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/distribution/transformed_distribution.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def __init__(self, base, transforms):
)
if chain._domain.event_rank > len(base.event_shape):
base = independent.Independent(
(base, chain._domain.event_rank - len(base.event_shape))
base, chain._domain.event_rank - len(base.event_shape)
)

transformed_shape = chain.forward_shape(
Expand Down