# config_parser.py
import configparser

from activation_functions import linear, relu, sigmoid, tanh
import datagen
from loss_functions import cross_entropy, mse
import neural_network


class ConfigParser:
    """
    Configuration file parser
    """

    def __init__(self, config_file) -> None:
        self.config_file = config_file
        self.config = configparser.ConfigParser()
        self.config.read(config_file)

    def parse_act_func(self, act_func):
        """
        Parses a string and returns the corresponding activation function
        """
        if act_func == "sigmoid":
            return sigmoid
        elif act_func == "relu":
            return relu
        elif act_func == "linear":
            return linear
        elif act_func == "tanh":
            return tanh

    def parse_loss_func(self, loss_func):
        """
        Parses a string and returns the corresponding loss function
        """
        if loss_func == "cross_entropy":
            return cross_entropy
        elif loss_func == "mse":
            return mse

    def create_nn(self):
        """
        Parses the config file, creates the data generator and neural network,
        and returns them together with the number of epochs and the batch size
        """
        # Parsing global variables
        loss_func = self.parse_loss_func(self.config["globals"]["loss"])
        include_softmax = self.config["globals"]["include_softmax"].lower() == "true"
        num_classes = int(self.config["globals"]["num_classes"])
        regularizer = self.config["globals"]["regularizer"].lower()
        reg_rate = float(self.config["globals"]["reg_rate"])
        epochs = int(self.config["globals"]["epochs"])
        batch_size = int(self.config["globals"]["batch_size"])

        # Parsing data generator variables
        image_dimension = int(self.config["data_generator"]["image_dimension"])
        num_features = image_dimension**2
        dataset_size = int(self.config["data_generator"]["dataset_size"])
        l_lower_frac = float(self.config["data_generator"]["l_lower_frac"])
        l_higher_frac = float(self.config["data_generator"]["l_higher_frac"])
        width_lower_frac = float(self.config["data_generator"]["width_lower_frac"])
        width_higher_frac = float(self.config["data_generator"]["width_higher_frac"])
        centering = self.config["data_generator"]["centering"].lower() == "true"
        noise_percentage = float(self.config["data_generator"]["noise_percentage"])
        train_frac = float(self.config["data_generator"]["train_frac"])
        valid_frac = float(self.config["data_generator"]["valid_frac"])
        test_frac = float(self.config["data_generator"]["test_frac"])

        # Parsing the layer variables for each layer
        layers = []
        for section in self.config.sections()[2:]:
            neurons = int(self.config[section]["neurons"])
            layer_act_func = self.parse_act_func(
                self.config[section]["activation_function"])
            layer_wr_lower = float(self.config[section]["wr_lower"])
            layer_wr_higher = float(self.config[section]["wr_higher"])
            layer_lr = float(self.config[section]["lr"])
            layers.append((neurons, layer_act_func, layer_wr_lower,
                           layer_wr_higher, layer_lr))

        # Creating the data generator and neural network
        dg = datagen.DataGenerator(image_dimension, dataset_size, l_lower_frac,
                                   l_higher_frac, width_lower_frac,
                                   width_higher_frac, centering,
                                   noise_percentage, train_frac, valid_frac,
                                   test_frac)
        nn = neural_network.NeuralNetwork(num_features, layers, loss_func,
                                          num_classes, regularizer, reg_rate,
                                          False, include_softmax)
        return dg, nn, epochs, batch_size
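

# --- Usage sketch (illustrative, not part of the original module) -----------
# The block below assumes a config file named "config.ini"; both the filename
# and the concrete values are assumptions for illustration only. The layout,
# however, follows what create_nn() actually reads: a [globals] section, a
# [data_generator] section, and then one section per layer (all sections after
# the first two are treated as layers).
#
#   [globals]
#   loss = cross_entropy
#   include_softmax = true
#   num_classes = 4
#   regularizer = l2
#   reg_rate = 0.001
#   epochs = 50
#   batch_size = 32
#
#   [data_generator]
#   image_dimension = 20
#   dataset_size = 1000
#   l_lower_frac = 0.2
#   l_higher_frac = 0.8
#   width_lower_frac = 0.1
#   width_higher_frac = 0.5
#   centering = true
#   noise_percentage = 0.01
#   train_frac = 0.7
#   valid_frac = 0.2
#   test_frac = 0.1
#
#   [layer_1]
#   neurons = 64
#   activation_function = relu
#   wr_lower = -0.1
#   wr_higher = 0.1
#   lr = 0.01
if __name__ == "__main__":
    # Parse the config and build the data generator and neural network.
    parser = ConfigParser("config.ini")
    dg, nn, epochs, batch_size = parser.create_nn()
    print(f"Training for {epochs} epochs with batch size {batch_size}")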