model.py
import torch
import torch.nn as nn

from module import SharedMLP, LinearMLP


class PointVAE(nn.Module):
    def __init__(self, in_dim, z_dim):
        super().__init__()
        self.z_dim = z_dim
        self.encoder = Encoder(in_dim, z_dim)
        self.decoder = Decoder(z_dim)

    def forward(self, x):
        B, C, N = x.shape
        # ==================
        # Commented-out variant: decode a deterministic global feature
        # (no reparameterized latent).
        # global_feature = self.encoder(x)
        # out = self.decoder(global_feature)
        # out = out.view(B, 2000, C)
        # return global_feature, out
        # ==================
        point_feature, mu, log_var, z = self.encoder(x)
        out = self.decoder(z)
        out = out.view(B, 2000, C)
        return mu, log_var, z, out
        # ==================
        # Alternative point-wise decoder path; unreachable after the return
        # above, so it is kept here commented out for reference.
        # z_clone = z.clone().detach()
        # z = z.view(B, self.z_dim, 1).repeat(1, 1, N)
        # features = torch.cat([point_feature, z], dim=1)
        # out = self.decoder(features)
        # out = out.view(B, C, N)
        # return mu, log_var, z_clone, out
        # ==================
class Encoder(nn.Module):
    def __init__(self, in_dim, z_dim):
        super().__init__()
        self.MLP1 = nn.Sequential(
            SharedMLP(in_dim, 64)
        )
        self.MLP2 = nn.Sequential(
            SharedMLP(64, 64),
            SharedMLP(64, 128),
            SharedMLP(128, 256),
            SharedMLP(256, 512),
        )
        self.fc_mu = nn.Sequential(
            LinearMLP(512, z_dim),
            nn.Linear(z_dim, z_dim)
        )
        self.fc_var = nn.Sequential(
            LinearMLP(512, z_dim),
            nn.Linear(z_dim, z_dim)
        )
        self.fc_global = nn.Sequential(
            LinearMLP(512, z_dim),
            nn.Linear(z_dim, z_dim)
        )

    def forward(self, x):
        # get per-point features: (B, in_dim, N) -> (B, 64, N)
        point_feature = self.MLP1(x)
        # get global feature by max-pooling over points: (B, 512, N) -> (B, 512)
        global_feature = self.MLP2(point_feature)
        global_feature = torch.max(global_feature, dim=2)[0]
        # ==================
        # Commented-out variant: return a deterministic global feature only.
        # global_feature = self.fc_global(global_feature)
        # return global_feature
        # ==================
        # get mean and log-variance of the approximate posterior
        mu = self.fc_mu(global_feature)
        log_var = self.fc_var(global_feature)
        # reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, I)
        eps = torch.randn_like(log_var)
        z = mu + torch.exp(0.5 * log_var) * eps
        return point_feature, mu, log_var, z
class Decoder(nn.Module):
    def __init__(self, z_dim):
        super().__init__()
        # Point-wise (shared-MLP) decoder variant; currently unused by forward().
        self.SharedMLP = nn.Sequential(
            # SharedMLP(64 + z_dim, 256),
            # SharedMLP(256, 512),
            # SharedMLP(512, 1024),
            # SharedMLP(1024, 256),
            # SharedMLP(256, 64),
            # SharedMLP(64, 3),
            # nn.Conv1d(3, 3, 1)
            SharedMLP(1 + z_dim, 64),
            SharedMLP(64, 32),
            SharedMLP(32, 16),
            SharedMLP(16, 3),
            nn.Conv1d(3, 3, 1)
        )
        # Fully-connected decoder: maps the latent code to 6000 values,
        # reshaped by the caller into 2000 points x 3 coordinates.
        self.fc = nn.Sequential(
            LinearMLP(z_dim, 128),
            LinearMLP(128, 512),
            LinearMLP(512, 1024),
            LinearMLP(1024, 6000),
            nn.Linear(6000, 6000)
        )

    def forward(self, x):
        out = self.fc(x)
        return out
if __name__ == "__main__":
    # quick shape check: batch of 100 clouds, 3 coordinates, 90 points each
    x = torch.randn(100, 3, 90)
    net = PointVAE(in_dim=3, z_dim=256)
    mu, log_var, z, out = net(x)
    print(z.shape)    # torch.Size([100, 256])
    print(out.shape)  # torch.Size([100, 2000, 3])
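    # ------------------------------------------------------------------
    # A minimal sketch (an addition to this script, not the original
    # training code) of two standard VAE pieces that this model's outputs
    # support directly:
    #   * the closed-form KL divergence between the approximate posterior
    #     N(mu, diag(exp(log_var))) and the standard-normal prior;
    #   * sampling new point clouds by decoding draws from that prior.
    # The reconstruction term (e.g. a Chamfer distance between `out` and
    # the input cloud) is not defined in this file and is left out here.
    # ------------------------------------------------------------------
    kld = -0.5 * torch.mean(torch.sum(1 + log_var - mu.pow(2) - log_var.exp(), dim=1))
    print(kld.item())

    with torch.no_grad():
        z_prior = torch.randn(4, 256)                    # 4 samples from N(0, I)
        samples = net.decoder(z_prior).view(4, 2000, 3)  # decoded point clouds
    print(samples.shape)  # torch.Size([4, 2000, 3])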