This repository has been archived by the owner on Jul 25, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 109
/
evaluate.py
135 lines (113 loc) · 4.49 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
import argparse
import sys
import torch
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
from torchpack import distributed as dist
from torchpack.callbacks import Callbacks, SaverRestore
from torchpack.environ import auto_set_run_dir, set_run_dir
from torchpack.utils.config import configs
from torchpack.utils.logging import logger
from tqdm import tqdm
from core import builder
from core.callbacks import MeanIoU
from core.trainers import SemanticKITTITrainer
from model_zoo import minkunet, spvcnn, spvnas_specialized
import torchsparse.nn.functional as F
# Global torchsparse convolution algorithm selector; must run before any model
# is built. NOTE(review): the exact semantics of mode 2 come from torchsparse's
# conv backend — confirm against the installed torchsparse version's docs.
F.set_conv_mode(2)
def main() -> None:
    """Evaluate a pretrained model (SPVNAS / SPVCNN / MinkUNet) on the
    SemanticKITTI test split and report mean IoU.

    Command line: a positional config file, an optional ``--run-dir``, and
    ``--name`` selecting which pretrained model to load from the model zoo.
    Metrics are accumulated by the ``MeanIoU`` callback through
    ``trainer.after_step``; nothing is returned.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    parser.add_argument('--name', type=str, help='model name')
    args, opts = parser.parse_known_args()

    # Unrecognized CLI options (`opts`) override values loaded from the config.
    configs.load(args.config, recursive=True)
    configs.update(opts)

    if configs.distributed:
        dist.init()
    torch.backends.cudnn.benchmark = True
    torch.cuda.set_device(dist.local_rank())

    if args.run_dir is None:
        args.run_dir = auto_set_run_dir()
    else:
        set_run_dir(args.run_dir)
    logger.info(' '.join([sys.executable] + sys.argv))
    logger.info(f'Experiment started: "{args.run_dir}".' + '\n' + f'{configs}')

    dataset = builder.make_dataset()
    dataflow = {}
    for split in dataset:
        sampler = torch.utils.data.distributed.DistributedSampler(
            dataset[split],
            num_replicas=dist.size(),
            rank=dist.rank(),
            shuffle=(split == 'train'))
        dataflow[split] = torch.utils.data.DataLoader(
            dataset[split],
            # Evaluation runs one scene per batch so predictions can be mapped
            # back to the original (un-voxelized) points scene by scene below.
            batch_size=configs.batch_size if split == 'train' else 1,
            sampler=sampler,
            num_workers=configs.workers_per_gpu,
            pin_memory=True,
            collate_fn=dataset[split].collate_fn)

    # Fail early with a clear message instead of an AttributeError on
    # `None.lower()` when --name is omitted.
    if not args.name:
        raise ValueError('--name is required to select a pretrained model')
    name = args.name.lower()
    if 'spvnas' in name:
        model = spvnas_specialized(args.name)
    elif 'spvcnn' in name:
        model = spvcnn(args.name)
    elif 'mink' in name:
        model = minkunet(args.name)
    else:
        raise NotImplementedError(f'unrecognized model name: {args.name}')
    if configs.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model.cuda(),
            device_ids=[dist.local_rank()],
            find_unused_parameters=True)

    # The trainer requires these even though no optimization step runs here.
    criterion = builder.make_criterion()
    optimizer = builder.make_optimizer(model)
    scheduler = builder.make_scheduler(optimizer)

    trainer = SemanticKITTITrainer(model=model,
                                   criterion=criterion,
                                   optimizer=optimizer,
                                   scheduler=scheduler,
                                   num_workers=configs.workers_per_gpu,
                                   seed=configs.train.seed)
    callbacks = Callbacks([
        SaverRestore(),
        MeanIoU(configs.data.num_classes, configs.data.ignore_label)
    ])
    callbacks._set_trainer(trainer)
    trainer.callbacks = callbacks
    trainer.dataflow = dataflow['test']

    # before_train fires SaverRestore, loading the checkpoint before inference.
    trainer.before_train()
    trainer.before_epoch()

    model.eval()  # original called this twice; once before the loop suffices
    with torch.no_grad():
        for feed_dict in tqdm(dataflow['test'], desc='eval'):
            # Move tensor entries to the GPU; keys containing 'name' hold
            # string metadata and are skipped.
            _inputs = {}
            for key, value in feed_dict.items():
                if 'name' not in key:
                    _inputs[key] = value.cuda()

            inputs = _inputs['lidar']
            outputs = model(inputs)

            invs = feed_dict['inverse_map']
            all_labels = feed_dict['targets_mapped']
            _outputs = []
            _targets = []
            # Scatter voxel-level predictions back to full point clouds, one
            # scene at a time (coordinate column 0 indexes the scene here).
            for idx in range(invs.C[:, 0].max() + 1):
                cur_scene_pts = (inputs.C[:, 0] == idx).cpu().numpy()
                cur_inv = invs.F[invs.C[:, 0] == idx].cpu().numpy()
                cur_label = (all_labels.C[:, 0] == idx).cpu().numpy()
                outputs_mapped = outputs[cur_scene_pts][cur_inv].argmax(1)
                targets_mapped = all_labels.F[cur_label]
                _outputs.append(outputs_mapped)
                _targets.append(targets_mapped)
            # NOTE: the original also computed a per-batch
            # `targets = feed_dict['targets'].F.long().cuda(...)` that was
            # overwritten here before use — removed (dead code + GPU copy).
            outputs = torch.cat(_outputs, 0)
            targets = torch.cat(_targets, 0)
            output_dict = {'outputs': outputs, 'targets': targets}
            # MeanIoU accumulates confusion statistics on each step.
            trainer.after_step(output_dict)
    trainer.after_epoch()
# Script entry point: run evaluation when executed directly.
if __name__ == '__main__':
    main()