quicksrnet_quanteval.py
#!/usr/bin/env python3
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022 of Qualcomm Innovation Center, Inc. All rights reserved.
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" AIMET evaluation code for QuickSRNet """
import argparse
from aimet_zoo_torch.quicksrnet import QuickSRNet
from aimet_zoo_torch.quicksrnet.model.helpers import evaluate_average_psnr
from aimet_zoo_torch.quicksrnet.dataloader.utils import (
    load_dataset,
    pass_calibration_data,
)
from aimet_zoo_torch.quicksrnet.model.inference import run_model
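
# Helper roles, as used below: load_dataset returns paired low-/high-resolution
# images, pass_calibration_data feeds calibration samples to a QuantSim,
# run_model produces super-resolved outputs, and evaluate_average_psnr scores them.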


# add arguments
def arguments(raw_args):
    """parses command line arguments"""
    parser = argparse.ArgumentParser(description="Arguments for evaluating model")
    parser.add_argument(
        "--dataset-path", help="path to image evaluation dataset", type=str
    )
    parser.add_argument(
        "--model-config", help="model configuration to be tested", type=str
    )
    parser.add_argument(
        "--default-output-bw",
        help="Default output bitwidth for quantization.",
        type=int,
        default=8,
    )
    parser.add_argument(
        "--default-param-bw",
        help="Default parameter bitwidth for quantization.",
        type=int,
        default=8,
    )
    parser.add_argument(
        "--batch-size", help="batch_size for loading data", type=int, default=16
    )
    parser.add_argument(
        "--use-cuda", help="Run evaluation on GPU", type=bool, default=True
    )
    args = parser.parse_args(raw_args)
    return args
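
# Example invocation (illustrative only: the dataset path and model-config name
# below are placeholders; use a config name from the QuickSRNet model cards):
#   python quicksrnet_quanteval.py \
#       --dataset-path /path/to/eval/images \
#       --model-config <quicksrnet_model_config> \
#       --default-param-bw 8 --default-output-bw 8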


def main(raw_args=None):
    """executes evaluation"""
    args = arguments(raw_args)
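    # Build two variants: the original model and the quantization-optimized model,
    # each wrapped in an AIMET QuantSim for simulated INT8 evaluation.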
    model_fp32 = QuickSRNet(model_config=args.model_config)
    model_fp32.from_pretrained(quantized=False)
    sim_fp32 = model_fp32.get_quantsim(quantized=False)

    model_int8 = QuickSRNet(model_config=args.model_config)
    model_int8.from_pretrained(quantized=True)
    sim_int8 = model_int8.get_quantsim(quantized=True)

    IMAGES_LR, IMAGES_HR = load_dataset(args.dataset_path, model_fp32.scaling_factor)
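
    # Calibrate both QuantSims: compute_encodings runs the calibration callback over
    # the low-resolution images to collect activation/parameter quantization ranges.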
    sim_fp32.compute_encodings(
        forward_pass_callback=pass_calibration_data,
        forward_pass_callback_args=(IMAGES_LR, args.use_cuda),
    )
    sim_int8.compute_encodings(
        forward_pass_callback=pass_calibration_data,
        forward_pass_callback_args=(IMAGES_LR, args.use_cuda),
    )
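
    # Four configurations are evaluated: original vs. optimized weights, each run
    # natively in FP32 and inside its INT8 QuantSim environment.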
    # Run model inference on test images and get super-resolved images
    IMAGES_SR_original_fp32 = run_model(model_fp32, IMAGES_LR, args.use_cuda)
    IMAGES_SR_original_int8 = run_model(sim_fp32.model, IMAGES_LR, args.use_cuda)
    IMAGES_SR_optimized_fp32 = run_model(model_int8, IMAGES_LR, args.use_cuda)
    IMAGES_SR_optimized_int8 = run_model(sim_int8.model, IMAGES_LR, args.use_cuda)

    # Get the average PSNR for all test-images
    avg_psnr = evaluate_average_psnr(IMAGES_SR_original_fp32, IMAGES_HR)
    print(f"Original Model | FP32 Environment | Avg. PSNR: {avg_psnr:.3f}")
    avg_psnr = evaluate_average_psnr(IMAGES_SR_original_int8, IMAGES_HR)
    print(f"Original Model | Quantized Environment | Avg. PSNR: {avg_psnr:.3f}")
    avg_psnr = evaluate_average_psnr(IMAGES_SR_optimized_fp32, IMAGES_HR)
    print(f"Optimized Model | FP32 Environment | Avg. PSNR: {avg_psnr:.3f}")
    avg_psnr = evaluate_average_psnr(IMAGES_SR_optimized_int8, IMAGES_HR)
    print(f"Optimized Model | Quantized Environment | Avg. PSNR: {avg_psnr:.3f}")


if __name__ == "__main__":
    main()
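
# Programmatic usage sketch (the dataset path and config name are placeholders):
#   main(["--dataset-path", "/path/to/eval/images",
#         "--model-config", "<quicksrnet_model_config>"])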