myNet.py
# -*- coding: utf-8 -*-
'''
Created on 18/10/2022
@author: Carlos
'''
import torch
import torch.nn as nn
class mynet1d(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, gpu: list):
        super(mynet1d, self).__init__()
        # Fully connected stack: in -> 128 -> 512 -> 32 -> out, Tanh output
        self.net = nn.Sequential(
            nn.Linear(in_channels, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 32),
            nn.ReLU(inplace=True),
            nn.Linear(32, out_channels),
            nn.Tanh()
        )
        self.gpus = []
        if len(gpu) > 0 and torch.cuda.is_available():
            self.gpus = gpu
            # Parameters must live on the first GPU so that inputs moved
            # there in set_input (and data_parallel replication) match
            self.net = self.net.cuda(self.gpus[0])
    def set_input(self, input):
        # input is a pair of tensors: (inputA, inputB)
        inputA = input[0].float()
        inputB = input[1].float()
        if len(self.gpus) > 0:
            inputA = inputA.cuda(self.gpus[0])
            inputB = inputB.cuda(self.gpus[0])
        self.inputA = inputA
        self.inputB = inputB
    def forward(self):
        # Variable wrappers are a no-op since PyTorch 0.4, so tensors are used directly
        self.realA = self.inputA
        if len(self.gpus) > 0:
            # Split the batch across all configured GPUs
            self.fakeB = nn.parallel.data_parallel(self.net, self.realA, self.gpus)
        else:
            self.fakeB = self.net(self.realA)
        self.realB = self.inputB
    def test(self):
        # Same forward pass, but without gradient tracking (inference)
        with torch.no_grad():
            self.realA = self.inputA
            self.fakeB = self.net(self.realA)
            self.realB = self.inputB
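
A minimal usage sketch, not part of the original file: the batch size, feature
sizes, and the CPU-only gpu=[] setting below are assumptions for illustration.

if __name__ == '__main__':
    # Hypothetical example: batch of 8 samples, 16 input features, 4 outputs
    model = mynet1d(in_channels=16, out_channels=4, gpu=[])  # gpu=[] -> run on CPU
    a = torch.randn(8, 16)    # inputA: source samples
    b = torch.randn(8, 4)     # inputB: target samples
    model.set_input((a, b))
    model.forward()           # populates model.fakeB = net(inputA)
    model.test()              # same pass without gradient tracking
    print(model.fakeB.shape)  # torch.Size([8, 4])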