model.py
from classification import SequenceClassificationLayer, TokenClassificationLayer
from mixer import Mixer
from omegaconf.dictconfig import DictConfig
import torch
import torch.nn as nn


class PnlpMixerSeqCls(nn.Module):
    # pNLP-Mixer backbone followed by a sequence-level classification head.
    def __init__(
        self,
        bottleneck_cfg: DictConfig,
        mixer_cfg: DictConfig,
        seq_cls_cfg: DictConfig,
        **kwargs
    ):
        super(PnlpMixerSeqCls, self).__init__(**kwargs)
        self.pnlp_mixer = PnlpMixer(bottleneck_cfg, mixer_cfg)
        self.seq_cls = SequenceClassificationLayer(**seq_cls_cfg)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # Encode the projected token features, then classify the whole sequence.
        reprs = self.pnlp_mixer(inputs)
        seq_logits = self.seq_cls(reprs)
        return seq_logits


class PnlpMixerTokenCls(nn.Module):
    # pNLP-Mixer backbone followed by a per-token classification head.
    def __init__(
        self,
        bottleneck_cfg: DictConfig,
        mixer_cfg: DictConfig,
        token_cls_cfg: DictConfig,
        **kwargs
    ):
        super(PnlpMixerTokenCls, self).__init__(**kwargs)
        self.pnlp_mixer = PnlpMixer(bottleneck_cfg, mixer_cfg)
        self.token_cls = TokenClassificationLayer(**token_cls_cfg)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        # Encode the projected token features, then classify every token.
        reprs = self.pnlp_mixer(inputs)
        token_logits = self.token_cls(reprs)
        return token_logits


class PnlpMixer(nn.Module):
    # Task-agnostic encoder: a linear bottleneck over the windowed projection
    # features, followed by the MLP-Mixer stack defined in mixer.py.
    def __init__(
        self,
        bottleneck_cfg: DictConfig,
        mixer_cfg: DictConfig,
        **kwargs
    ):
        super(PnlpMixer, self).__init__(**kwargs)
        # Each position's input concatenates the projection features of the token
        # itself and window_size neighbours on each side, hence
        # (2 * window_size + 1) * feature_size input features per token.
        self.bottleneck = nn.Linear((2 * bottleneck_cfg.window_size + 1) * bottleneck_cfg.feature_size, bottleneck_cfg.hidden_dim)
        self.mixer = Mixer(**mixer_cfg)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        features = self.bottleneck(inputs)
        reprs = self.mixer(features)
        return reprs
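

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file). A minimal illustration of how
# these modules might be wired together, assuming only the OmegaConf fields
# read above (window_size, feature_size, hidden_dim). The kwargs expected by
# Mixer and the classification heads are project-specific and shown only as
# placeholders here; the real values live in the project's config files.
#
#   from omegaconf import OmegaConf
#
#   bottleneck_cfg = OmegaConf.create({'window_size': 1, 'feature_size': 1024, 'hidden_dim': 256})
#   mixer_cfg = OmegaConf.create(...)      # forwarded verbatim to Mixer(**mixer_cfg)
#   seq_cls_cfg = OmegaConf.create(...)    # forwarded verbatim to SequenceClassificationLayer(**seq_cls_cfg)
#
#   model = PnlpMixerSeqCls(bottleneck_cfg, mixer_cfg, seq_cls_cfg)
#   inputs = torch.rand(8, 64, (2 * 1 + 1) * 1024)   # (batch, seq_len, windowed projection features)
#   logits = model(inputs)
# ---------------------------------------------------------------------------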