doc(whl): polish doc for loss, compression helper and bfs helper. (#747)
* init commit

* polish comments
kxzxvbk authored Dec 20, 2023
1 parent 1e6f351 commit 9116ba6
Showing 5 changed files with 157 additions and 76 deletions.
55 changes: 38 additions & 17 deletions ding/torch_utils/loss/contrastive_loss.py
@@ -8,10 +8,11 @@

class ContrastiveLoss(nn.Module):
"""
-    The class for contrastive learning losses.
-    Only InfoNCE loss supported currently.
-    Code Reference: https://github.com/rdevon/DIM.
-    paper: https://arxiv.org/abs/1808.06670.
+    Overview:
+        The class for contrastive learning losses. Only InfoNCE loss is supported currently. \
+        Code Reference: https://github.com/rdevon/DIM. Paper Reference: https://arxiv.org/abs/1808.06670.
+    Interfaces:
+        __init__, forward.
"""

def __init__(
@@ -24,13 +25,18 @@ def __init__(
temperature: float = 1.0,
) -> None:
"""
-        Args:
-            x_size: input shape for x, both the obs shape and the encoding shape are supported.
-            y_size: input shape for y, both the obs shape and the encoding shape are supported.
-            heads: a list of 2 int elems, heads[0] for x and head[1] for y.
+        Overview:
+            Initialize the ContrastiveLoss object using the given arguments.
+        Arguments:
+            - x_size (:obj:`Union[int, SequenceType]`): Input shape for x, both the obs shape and the encoding \
+                shape are supported.
+            - y_size (:obj:`Union[int, SequenceType]`): Input shape for y, both the obs shape and the encoding \
+                shape are supported.
+            - heads (:obj:`SequenceType`): A list of 2 int elems, ``heads[0]`` for x and ``heads[1]`` for y. \
+                Used in the multi-head, global-local, local-local MI maximization process.
-            loss_type: only the InfoNCE loss is available now.
-            temperature: the parameter to adjust the log_softmax.
+            - encode_shape (:obj:`Union[int, SequenceType]`): The dimension of the encoder hidden state.
+            - loss_type (:obj:`str`): Only the InfoNCE loss is available now.
+            - temperature (:obj:`float`): The parameter to adjust the ``log_softmax``.
"""
super(ContrastiveLoss, self).__init__()
assert len(heads) == 2, "Expected length of 2, but got: {}".format(len(heads))
@@ -43,7 +49,7 @@ def __init__(
self._y_encoder = self._get_encoder(y_size, heads[1])
self._temperature = temperature

-    def _get_encoder(self, obs: Union[int, SequenceType], heads: int):
+    def _get_encoder(self, obs: Union[int, SequenceType], heads: int) -> nn.Module:
from ding.model import ConvEncoder, FCEncoder

if isinstance(obs, int):
@@ -61,14 +67,29 @@ def _get_encoder(self, obs: Union[int, SequenceType], heads: int):
encoder = ConvEncoder(obs, hidden_size_list, kernel_size=[4, 3, 2], stride=[2, 1, 1])
return encoder

-    def forward(self, x: torch.Tensor, y: torch.Tensor):
+    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
-        Computes the noise contrastive estimation-based loss, a.k.a. infoNCE.
-        Args:
-            x: the input x, both raw obs and encoding are supported.
-            y: the input y, both raw obs and encoding are supported.
+        Overview:
+            Computes the noise contrastive estimation-based loss, a.k.a. InfoNCE.
+        Arguments:
+            - x (:obj:`torch.Tensor`): The input x, both raw obs and encoding are supported.
+            - y (:obj:`torch.Tensor`): The input y, both raw obs and encoding are supported.
        Returns:
-            torch.Tensor: loss value.
+            - loss (:obj:`torch.Tensor`): The calculated loss value.
+        Examples:
+            >>> dims = 16
+            >>> x_dim = [3, 16]
+            >>> encode_shape = 16
+            >>> x = np.random.normal(0, 1, size=x_dim)
+            >>> y = x ** 2 + 0.01 * np.random.normal(0, 1, size=x_dim)
+            >>> estimator = ContrastiveLoss(dims, dims, encode_shape=encode_shape)
+            >>> loss = estimator.forward(torch.Tensor(x), torch.Tensor(y))
+        Examples:
+            >>> dims = [1, 16, 16]
+            >>> x_dim = [3, 1, 16, 16]
+            >>> encode_shape = 16
+            >>> x = np.random.normal(0, 1, size=x_dim)
+            >>> y = x ** 2 + 0.01 * np.random.normal(0, 1, size=x_dim)
+            >>> estimator = ContrastiveLoss(dims, dims, encode_shape=encode_shape)
+            >>> loss = estimator.forward(torch.Tensor(x), torch.Tensor(y))
"""

N = x.size(0)
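
To make the InfoNCE objective concrete, here is a minimal, self-contained sketch of the same estimation idea on a batch of paired encodings. It is an illustration under assumptions (the info_nce helper, the L2-normalized encodings, and the diagonal-positive scheme are choices made for this example), not the actual internals of ContrastiveLoss:

import torch
import torch.nn.functional as F

def info_nce(x_enc: torch.Tensor, y_enc: torch.Tensor, temperature: float = 1.0) -> torch.Tensor:
    # x_enc, y_enc: (N, D) paired encodings; pair (x_i, y_i) is the positive.
    logits = x_enc @ y_enc.t() / temperature  # (N, N) similarity matrix
    targets = torch.arange(x_enc.size(0))     # positives lie on the diagonal
    return F.cross_entropy(logits, targets)

x = F.normalize(torch.randn(8, 16), dim=1)
y = F.normalize(x + 0.05 * torch.randn(8, 16), dim=1)
loss = info_nce(x, y, temperature=0.5)  # scalar; smaller when pairs align

A lower temperature sharpens the softmax over negatives, which is what the temperature argument documented above adjusts through the log_softmax.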
36 changes: 29 additions & 7 deletions ding/torch_utils/loss/cross_entropy_loss.py
@@ -5,19 +5,26 @@


class LabelSmoothCELoss(nn.Module):
r"""
"""
Overview:
Label smooth cross entropy loss.
Interfaces:
forward
__init__, forward.
"""

def __init__(self, ratio: float) -> None:
"""
Overview:
Initialize the LabelSmoothCELoss object using the given arguments.
Arguments:
- ratio (:obj:`float`): The ratio of label-smoothing (the value is in 0-1). If the ratio is larger, the \
extent of label smoothing is larger.
"""
super().__init__()
self.ratio = ratio

def forward(self, logits: torch.Tensor, labels: torch.LongTensor) -> torch.Tensor:
r"""
"""
Overview:
Calculate label smooth cross entropy loss.
Arguments:
@@ -35,16 +42,31 @@ def forward(self, logits: torch.Tensor, labels: torch.LongTensor) -> torch.Tensor:


class SoftFocalLoss(nn.Module):
r"""
"""
Overview:
Soft focal loss.
Interfaces:
forward
__init__, forward.
"""

def __init__(
self, gamma: int = 2, weight: Any = None, size_average: bool = True, reduce: Optional[bool] = None
) -> None:
"""
Overview:
Initialize the SoftFocalLoss object using the given arguments.
Arguments:
- gamma (:obj:`int`): The extent of focus on hard samples. A smaller ``gamma`` will lead to more focus on \
easy samples, while a larger ``gamma`` will lead to more focus on hard samples.
- weight (:obj:`Any`): The weight for loss of each class.
- size_average (:obj:`bool`): By default, the losses are averaged over each loss element in the batch. \
Note that for some losses, there are multiple elements per sample. If the field ``size_average`` is \
set to ``False``, the losses are instead summed for each minibatch. Ignored when ``reduce`` is \
``False``.
- reduce (:obj:`Optional[bool]`): By default, the losses are averaged or summed over observations for \
each minibatch depending on size_average. When ``reduce`` is ``False``, returns a loss for each batch \
element instead and ignores ``size_average``.
"""
super().__init__()
self.gamma = gamma
self.nll_loss = torch.nn.NLLLoss2d(weight, size_average, reduce=reduce)
@@ -63,9 +85,9 @@ def forward(self, inputs: torch.Tensor, targets: torch.LongTensor) -> torch.Tensor:
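
The gamma docstring above maps directly onto the usual focal formulation. As a hedged sketch (the class's exact reduction and per-class weighting may differ; soft_focal_ce is a name invented for this example), the core computation is:

import torch
import torch.nn.functional as F

def soft_focal_ce(logits: torch.Tensor, targets: torch.LongTensor, gamma: float = 2.0) -> torch.Tensor:
    log_probs = F.log_softmax(logits, dim=1)
    probs = log_probs.exp()
    # Down-weight easy samples (high predicted probability) by (1 - p) ** gamma.
    return F.nll_loss((1.0 - probs) ** gamma * log_probs, targets)

loss = soft_focal_ce(torch.randn(4, 10), torch.randint(0, 10, (4,)))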


def build_ce_criterion(cfg: dict) -> nn.Module:
r"""
"""
Overview:
-        Get a cross enntropy loss instance according to given config.
+        Get a cross entropy loss instance according to the given config.
Arguments:
- cfg (:obj:`dict`)
Returns:
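
To make the ratio parameter of LabelSmoothCELoss concrete, here is a small sketch of the textbook label-smoothing computation the docstring describes. It is written under assumptions (label_smooth_ce is an illustrative helper, not necessarily the class's exact implementation):

import torch
import torch.nn.functional as F

def label_smooth_ce(logits: torch.Tensor, labels: torch.LongTensor, ratio: float) -> torch.Tensor:
    num_classes = logits.size(1)
    log_probs = F.log_softmax(logits, dim=1)
    # Every class receives ratio / C probability mass; the true class keeps the rest.
    smoothed = torch.full_like(log_probs, ratio / num_classes)
    smoothed.scatter_(1, labels.unsqueeze(1), 1.0 - ratio + ratio / num_classes)
    return -(smoothed * log_probs).sum(dim=1).mean()

loss = label_smooth_ce(torch.randn(4, 10), torch.randint(0, 10, (4,)), ratio=0.1)

With ratio = 0 this reduces to ordinary cross entropy; larger ratios pull the target distribution toward uniform, matching the "extent of label smoothing" wording above.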
27 changes: 10 additions & 17 deletions ding/torch_utils/loss/multi_logits_loss.py
@@ -6,36 +6,29 @@
from ding.torch_utils.network import one_hot


-def get_distance_matrix(lx, ly, mat, M: int) -> np.ndarray:
+def get_distance_matrix(lx: np.ndarray, ly: np.ndarray, mat: np.ndarray, M: int) -> np.ndarray:
nlx = np.broadcast_to(lx, [M, M]).T
nly = np.broadcast_to(ly, [M, M])
nret = nlx + nly - mat

-    # ret = []
-    # for i in range(M):
-    #     ret.append(lx[i] + ly - mat[i])
-    # ret = np.stack(ret)
-    # assert ret.shape == (M, M)
-    # assert np.all(nret == ret)
return nret
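
The vectorized body above is equivalent to the commented-out loop it replaced, i.e. nret[i][j] = lx[i] + ly[j] - mat[i][j]. A quick numeric check (the values are arbitrary):

import numpy as np

M = 3
lx = np.array([1.0, 2.0, 3.0])
ly = np.array([10.0, 20.0, 30.0])
mat = np.ones((M, M))
nret = np.broadcast_to(lx, (M, M)).T + np.broadcast_to(ly, (M, M)) - mat
assert nret[1, 2] == lx[1] + ly[2] - mat[1, 2]  # 2.0 + 30.0 - 1.0 == 31.0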


class MultiLogitsLoss(nn.Module):
-    '''
+    """
    Overview:
        Base class for supervised learning on linklink, including basic processes.
    Interface:
-        forward
-    '''
+        __init__, forward.
+    """

def __init__(self, criterion: str = None, smooth_ratio: float = 0.1) -> None:
-        '''
+        """
        Overview:
-            initialization method, use cross_entropy as default criterion
+            Initialization method, use cross_entropy as default criterion.
        Arguments:
-            - criterion (:obj:`str`): criterion type, supports ['cross_entropy', 'label_smooth_ce']
-            - smooth_ratio (:obs:`float`): smooth_ratio for label smooth
-        '''
+            - criterion (:obj:`str`): Criterion type, supports ['cross_entropy', 'label_smooth_ce'].
+            - smooth_ratio (:obj:`float`): Smoothing ratio for label smoothing.
+        """
super(MultiLogitsLoss, self).__init__()
if criterion is None:
criterion = 'cross_entropy'
@@ -109,7 +102,7 @@ def has_augmented_path(t, binary_distance_matrix):
return index

def forward(self, logits: torch.Tensor, labels: torch.LongTensor) -> torch.Tensor:
r"""
"""
Overview:
Calculate multiple logits loss.
Arguments:
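
A hedged usage sketch for MultiLogitsLoss, based only on the signatures visible in this diff (the expected shapes are an assumption: M logit rows matched against M labels, with the best assignment found internally):

import torch
from ding.torch_utils.loss.multi_logits_loss import MultiLogitsLoss

criterion = MultiLogitsLoss(criterion='label_smooth_ce', smooth_ratio=0.1)
logits = torch.randn(4, 8)               # 4 candidate outputs over 8 classes
labels = torch.LongTensor([0, 3, 2, 5])  # one target class per output
loss = criterion(logits, labels)         # scalar loss under the best matching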
17 changes: 14 additions & 3 deletions ding/utils/bfs_helper.py
@@ -1,10 +1,21 @@
import numpy as np
import torch
+from gym import Env
+from typing import Tuple, List


# BFS algorithm
-def get_vi_sequence(env, observation):
-    """Returns [L, W, W] optimal actions."""
+def get_vi_sequence(env: Env, observation: np.ndarray) -> Tuple[np.ndarray, List]:
+    """
+    Overview:
+        Given an instance of the maze environment and the current observation, use the Breadth-First-Search (BFS) \
+        algorithm to plan an optimal path and record the result.
+    Arguments:
+        - env (:obj:`Env`): The instance of the maze environment.
+        - observation (:obj:`np.ndarray`): The current observation.
+    Returns:
+        - output (:obj:`Tuple[np.ndarray, List]`): The BFS result. ``output[0]`` contains the BFS map after each \
+            iteration and ``output[1]`` contains the optimal actions before reaching the finishing point.
+    """
xy = np.where(observation[Ellipsis, -1] == 1)
start_x, start_y = xy[0][0], xy[1][0]
target_location = env.target_location
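
Finally, to make the BFS planning idea concrete without the maze environment, here is a self-contained sketch. The grid encoding, the four-action move set, and the bfs_actions helper are hypothetical stand-ins for what get_vi_sequence does with env and observation:

from collections import deque
from typing import Dict, List, Tuple

def bfs_actions(walls: List[List[int]], target: Tuple[int, int]) -> Dict[Tuple[int, int], int]:
    # walls[x][y] == 1 marks a blocked cell. Expanding outward from the target,
    # each reachable cell records the first action of a shortest path to it.
    h, w = len(walls), len(walls[0])
    moves = [(-1, 0), (1, 0), (0, -1), (0, 1)]  # action ids 0..3
    best: Dict[Tuple[int, int], int] = {}
    queue = deque([target])
    seen = {target}
    while queue:
        x, y = queue.popleft()
        for a, (dx, dy) in enumerate(moves):
            px, py = x - dx, y - dy  # the cell that reaches (x, y) via action a
            if 0 <= px < h and 0 <= py < w and walls[px][py] == 0 and (px, py) not in seen:
                seen.add((px, py))
                best[(px, py)] = a
                queue.append((px, py))
    return best

plan = bfs_actions([[0, 0, 0], [0, 1, 0], [0, 0, 0]], target=(2, 2))
# plan maps each reachable cell to its optimal first action, analogous to the
# per-cell optimal actions accumulated in get_vi_sequence's result.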