"""Train an LSTM text classifier and evaluate it on a held-out test set."""
import os
import copy

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable  # no-op wrapper in PyTorch >= 0.4, kept for compatibility
from torch.utils.data import DataLoader

import utils.DataProcessing as DP
import utils.LSTMClassifier as LSTMC

use_plot = True
use_save = True
if use_save:
    import pickle
    from datetime import datetime
DATA_DIR = 'data'
TRAIN_DIR = 'train_txt'
TEST_DIR = 'test_txt'
TRAIN_FILE = 'train_txt.txt'
TEST_FILE = 'test_txt.txt'
TRAIN_LABEL = 'train_label.txt'
TEST_LABEL = 'test_label.txt'
## parameter setting
epochs = 50
batch_size = 5
use_gpu = torch.cuda.is_available()
learning_rate = 0.01
def adjust_learning_rate(optimizer, epoch):
    # Step decay: scale the base learning rate by 0.1 every 10 epochs.
    lr = learning_rate * (0.1 ** (epoch // 10))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return optimizer
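
# The manual decay above matches what torch.optim.lr_scheduler.StepLR
# provides out of the box; a minimal sketch, assuming PyTorch >= 1.0:
#
#     scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
#     for epoch in range(epochs):
#         ...  # train one epoch
#         scheduler.step()  # decays the lr by 10x every 10 epochs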
if __name__ == '__main__':
    ### parameter setting
    embedding_dim = 100
    hidden_dim = 50
    sentence_len = 32

    # Build the corpus over the train and test file lists together, so the
    # dictionary covers every token the model will see.
    train_file = os.path.join(DATA_DIR, TRAIN_FILE)
    test_file = os.path.join(DATA_DIR, TEST_FILE)
    with open(train_file, 'r') as fp_train:
        train_filenames = [os.path.join(TRAIN_DIR, line.strip()) for line in fp_train]
    filenames = copy.deepcopy(train_filenames)
    with open(test_file, 'r') as fp_test:
        test_filenames = [os.path.join(TEST_DIR, line.strip()) for line in fp_test]
    filenames.extend(test_filenames)
    corpus = DP.Corpus(DATA_DIR, filenames)
    nlabel = 8
    ### create model
    model = LSTMC.LSTMClassifier(embedding_dim=embedding_dim, hidden_dim=hidden_dim,
                                 vocab_size=len(corpus.dictionary), label_size=nlabel,
                                 batch_size=batch_size, use_gpu=use_gpu)
    if use_gpu:
        model = model.cuda()
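
    # As used below, LSTMClassifier is assumed to expose a mutable
    # `batch_size` attribute, an `init_hidden()` method returning a fresh
    # hidden state, and a forward pass over (sentence_len, batch)
    # word-index tensors.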
    ### data processing
    dtrain_set = DP.TxtDatasetProcessing(DATA_DIR, TRAIN_DIR, TRAIN_FILE, TRAIN_LABEL, sentence_len, corpus)
    train_loader = DataLoader(dtrain_set,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)
    dtest_set = DP.TxtDatasetProcessing(DATA_DIR, TEST_DIR, TEST_FILE, TEST_LABEL, sentence_len, corpus)
    test_loader = DataLoader(dtest_set,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=4)
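
    # num_workers=4 loads batches in worker subprocesses, which is why the
    # script keeps its top-level logic under the __main__ guard (required
    # on platforms that spawn rather than fork worker processes).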
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    # CrossEntropyLoss expects raw logits and integer class indices.
    loss_function = nn.CrossEntropyLoss()

    # per-epoch history for plotting / saving
    train_loss_ = []
    test_loss_ = []
    train_acc_ = []
    test_acc_ = []
    ### training procedure
    for epoch in range(epochs):
        optimizer = adjust_learning_rate(optimizer, epoch)

        ## training epoch
        total_acc = 0.0
        total_loss = 0.0
        total = 0.0
        for traindata in train_loader:
            train_inputs, train_labels = traindata
            train_labels = torch.squeeze(train_labels)
            if use_gpu:
                train_inputs, train_labels = Variable(train_inputs.cuda()), train_labels.cuda()
            else:
                train_inputs = Variable(train_inputs)

            model.zero_grad()
            # The final batch may be smaller than batch_size, so reset the
            # model's batch size before re-initialising the hidden state.
            model.batch_size = len(train_labels)
            model.hidden = model.init_hidden()
            # The classifier consumes (sentence_len, batch) input, hence .t().
            output = model(train_inputs.t())

            loss = loss_function(output, Variable(train_labels))
            loss.backward()
            optimizer.step()

            # calc training acc
            _, predicted = torch.max(output.data, 1)
            total_acc += (predicted == train_labels).sum().item()
            total += len(train_labels)
            # CrossEntropyLoss returns the batch mean; weight by batch size
            # so the epoch average below is a true per-sample loss.
            total_loss += loss.item() * len(train_labels)
        train_loss_.append(total_loss / total)
        train_acc_.append(total_acc / total)
        ## testing epoch
        total_acc = 0.0
        total_loss = 0.0
        total = 0.0
        with torch.no_grad():  # no gradients needed during evaluation
            for testdata in test_loader:
                test_inputs, test_labels = testdata
                test_labels = torch.squeeze(test_labels)
                if use_gpu:
                    test_inputs, test_labels = Variable(test_inputs.cuda()), test_labels.cuda()
                else:
                    test_inputs = Variable(test_inputs)

                model.batch_size = len(test_labels)
                model.hidden = model.init_hidden()
                output = model(test_inputs.t())
                loss = loss_function(output, Variable(test_labels))

                # calc testing acc
                _, predicted = torch.max(output.data, 1)
                total_acc += (predicted == test_labels).sum().item()
                total += len(test_labels)
                total_loss += loss.item() * len(test_labels)
        test_loss_.append(total_loss / total)
        test_acc_.append(total_acc / total)

        print('[Epoch: %3d/%3d] Training Loss: %.3f, Testing Loss: %.3f, Training Acc: %.3f, Testing Acc: %.3f'
              % (epoch + 1, epochs, train_loss_[epoch], test_loss_[epoch], train_acc_[epoch], test_acc_[epoch]))
    param = {}
    param['lr'] = learning_rate
    param['batch size'] = batch_size
    param['embedding dim'] = embedding_dim
    param['hidden dim'] = hidden_dim
    param['sentence len'] = sentence_len

    result = {}
    result['train loss'] = train_loss_
    result['test loss'] = test_loss_
    result['train acc'] = train_acc_
    result['test acc'] = test_acc_
    result['param'] = param
    if use_plot:
        import PlotFigure as PF
        PF.PlotFigure(result, use_save)

    if use_save:
        os.makedirs('log', exist_ok=True)  # make sure the log directory exists
        # timestamped name, e.g. LSTM_classifier_05-Mar-14-30-59.pkl
        filename = 'log/LSTM_classifier_' + datetime.now().strftime('%d-%b-%H-%M-%S') + '.pkl'
        result['filename'] = filename
        with open(filename, 'wb') as fp:
            pickle.dump(result, fp)
        print('File %s is saved.' % filename)
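
# A saved run can be inspected later with a minimal sketch like this
# (the timestamped path below is an example, not a fixed name):
#
#     import pickle
#     with open('log/LSTM_classifier_<timestamp>.pkl', 'rb') as fp:
#         result = pickle.load(fp)
#     print(result['param'], 'final test acc:', result['test acc'][-1])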