-
Notifications
You must be signed in to change notification settings - Fork 1
/
train.py
47 lines (41 loc) · 1.74 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import os
from torch.backends import cudnn
from config import Config
from processor.processor import do_train_fer2013
from utils.logger import setup_logger
from datasets import make_dataloader
from model import make_model
from solver import make_optimizer, WarmupMultiStepLR
from loss import make_loss
from processor import do_train
if __name__ == '__main__':
    cfg = Config()

    # makedirs + exist_ok avoids the exists()/mkdir() check-then-create race
    # and also creates any missing parent directories (plain os.mkdir would
    # raise FileNotFoundError if a parent of LOG_DIR does not exist).
    os.makedirs(cfg.LOG_DIR, exist_ok=True)

    logger = setup_logger('{}'.format(cfg.PROJECT_NAME), cfg.LOG_DIR, cfg)
    logger.info("Running with config:\n{}".format(cfg.CFG_NAME))

    # Restrict which GPUs torch can see; must happen before any CUDA
    # context is created by the dataloader/model construction below.
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.DEVICE_ID
    # Enable the inbuilt cudnn auto-tuner to pick the fastest convolution
    # algorithms for this hardware (beneficial when input sizes are fixed).
    cudnn.benchmark = True

    train_loader, val_loader, test_loader, num_query, num_classes = make_dataloader(cfg)
    model = make_model(cfg, num_class=num_classes)
    loss_func, center_criterion = make_loss(cfg, num_classes=num_classes, feature_dim=model.in_planes)
    optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)
    scheduler = WarmupMultiStepLR(optimizer, cfg.STEPS, cfg.GAMMA,
                                  cfg.WARMUP_FACTOR,
                                  cfg.WARMUP_EPOCHS, cfg.WARMUP_METHOD)

    # Facial-expression datasets use a simpler training loop (no center
    # loss, no re-id query evaluation); everything else goes through the
    # full do_train pipeline.
    # NOTE(review): "ref-db" looks like it may be a typo for "raf-db" —
    # confirm against the dataset registry before relying on this branch.
    if cfg.DATASET_NAME in ("fer2013", "ck+", "ref-db"):
        do_train_fer2013(cfg, model, train_loader, loss_func, optimizer,
                         test_loader, scheduler)
    else:
        do_train(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
        )