# Testing.py
from generator_block import Generator_block
from CustomGenerator import CustomGenerator
from CustomDiscriminator import CustomDiscriminator
from BCEWithLogitsLoss import BCEWithLogitsLoss
from AdamOptimizer import SimpleAdamOptimizer
import numpy as np
import torch

def test_gen_block(in_features: int = 25, out_features: int = 12, num_test=1000):
    # Create an instance of the Generator_block class
    Genblock = Generator_block(in_features, out_features)
    # Loop through the specified number of tests
    for i in range(num_test):
        # Get a generator block from the instance of Generator_block
        gen_block = Genblock.get_generator_block()
        # Test that the generator block contains three parts
        assert len(gen_block) == 3, "The generator block should contain three parts."
        # Test the first part of the generator block, which should be a linear layer
        linear_layer = gen_block[0]
        assert linear_layer[0] == 'linear', "The first part of the generator block should be a linear layer."
        assert linear_layer[1].shape == torch.Size([out_features, in_features]), "The shape of the weight matrix in the linear layer is incorrect."
        assert linear_layer[2].shape == torch.Size([out_features]), "The shape of the bias vector in the linear layer is incorrect."
        # Test the second part of the generator block, which should be a batch normalization layer
        batchnorm_layer = gen_block[1]
        assert batchnorm_layer[0] == 'batchnorm', "The second part of the generator block should be a batch normalization layer."
        assert batchnorm_layer[1] == out_features, "The number of features in the batch normalization layer is incorrect."
        # Test the third part of the generator block, which should be a ReLU activation function
        relu_layer = gen_block[2]
        assert relu_layer[0] == 'relu', "The third part of the generator block should be a ReLU activation function."
    # Print a message indicating that all tests have passed
    print("All generator block tests pass.")

def test_get_dense_block(input_dim=15, output_dim=28, activation="relu"):
    gen = CustomGenerator(input_dim, output_dim, [])
    # Call the get_dense_block method
    block = gen.get_dense_block(input_dim, output_dim, activation)
    # Check that the output is a list with length 1 or 2
    assert isinstance(block, list) and len(block) in [1, 2]
    # Check that the first element of the output is a tuple
    assert isinstance(block[0], tuple)
    # Check that the tuple has the correct length
    assert len(block[0]) == 3
    # Check that the first element of the tuple is a string
    assert isinstance(block[0][0], str)
    # Check that the second and third elements of the tuple are tensors with the correct shape
    assert block[0][1].shape == (output_dim, input_dim)
    assert block[0][2].shape == (output_dim,)
    # If activation is 'relu', check that the second element of the output is a tuple with the string 'relu'
    if activation == 'relu':
        assert len(block) == 2
        assert isinstance(block[1], tuple)
        assert block[1][0] == 'relu'
    # If activation is 'sigmoid', check that the second element of the output is a tuple with the string 'sigmoid'
    elif activation == 'sigmoid':
        assert len(block) == 2
        assert isinstance(block[1], tuple)
        assert block[1][0] == 'sigmoid'
    print("All get_dense_block tests passed!")

def test_forward():
    # Define the input, output, and hidden dimensions for the generator
    input_dim = 10
    output_dim = 5
    hidden_dims = [32, 64, 128]
    # Create an instance of the CustomGenerator class
    gen = CustomGenerator(input_dim, output_dim, hidden_dims)
    # Generate random input for testing
    test_input = torch.randn(100, input_dim)
    # Call the forward method of the generator with the test input
    test_output = gen.forward(test_input)
    # Test that the output has the correct shape
    assert test_output.shape == (100, output_dim), "The output shape is incorrect."
    # Test that the output values are between 0 and 1 (since the generator uses a sigmoid activation)
    assert test_output.max() <= 1, "Output values should be less than or equal to 1 for sigmoid activation."
    assert test_output.min() >= 0, "Output values should be greater than or equal to 0 for sigmoid activation."
    # Print a message indicating that all tests have passed
    print("All forward tests pass.")

def test_custom_generator():
    input_dim = 10
    output_dim = 5
    hidden_dims = [32, 64, 128]
    gen = CustomGenerator(input_dim, output_dim, hidden_dims)
    layers = gen.layers
    # Check the correct number of layers
    assert len(layers) == 2 * len(hidden_dims) + 2, "The number of layers in the custom generator is incorrect."
    # Check that the last layer is a sigmoid activation
    assert layers[-1][0] == 'sigmoid', "The last layer should be a sigmoid activation function."
    print("All custom generator tests pass.")

def test_get_dense_block_discriminator():
    dis = CustomDiscriminator(10, [32, 64, 128], "relu")
    # Test ReLU activation
    dense_block = dis.get_dense_block(10, 32, 'relu')
    assert len(dense_block) == 2, "ReLU dense block should contain two parts."
    assert dense_block[0][0] == 'linear', "The first part of the ReLU dense block should be a linear layer."
    assert dense_block[1][0] == 'relu', "The second part of the ReLU dense block should be a ReLU activation function."
    # Test None activation (no activation after the linear layer)
    dense_block = dis.get_dense_block(10, 1, None)
    assert len(dense_block) == 1, "A dense block with no activation should contain one part."
    assert dense_block[0][0] == 'linear', "The first part of the activation-free dense block should be a linear layer."
    print("All get_dense_block_discriminator tests pass.")

def test_forward_discriminator():
    input_dim = 5
    hidden_dims = [32, 64, 128]
    dis = CustomDiscriminator(input_dim, hidden_dims, "relu")
    test_input = torch.randn(100, input_dim)
    test_output = dis.forward(test_input)
    assert test_output.shape == (100, 1), "The output shape is incorrect."
    print("All forward_discriminator tests pass.")

def test_custom_discriminator():
    input_dim = 5
    hidden_dims = [32, 64, 128]
    dis = CustomDiscriminator(input_dim, hidden_dims, "relu")
    layers = dis.layers
    # Check the correct number of layers (only the length of each dense block matters here)
    total_layers = sum(
        len(dis.get_dense_block(input_dim, hidden_dims[i], "relu"))
        for i in range(len(hidden_dims))
    ) + len(dis.get_dense_block(hidden_dims[-1], 1, None))
    assert len(layers) == total_layers, f"The number of layers in the custom discriminator is incorrect. Expected {total_layers}, got {len(layers)}."
    # Check that the last layer does not have an activation function
    assert layers[-1][0] == 'linear', "The last layer should be a linear layer without an activation function."
    print("All custom discriminator tests pass.")

def test_bce_with_logits_loss():
    # Create a dummy prediction (logits) tensor with a shape of (2, 3)
    predictions = np.array([[1.5, -0.5, 0.5], [-1.0, 2.0, -1.5]])
    predictions_tensor = torch.from_numpy(predictions)
    # Create a dummy target tensor with the same shape as the prediction tensor
    targets = np.array([[1, 0, 1], [0, 1, 0]])
    targets_tensor = torch.from_numpy(targets)
    # Compute the expected loss using the binary cross-entropy with logits formula:
    # apply the sigmoid to the logits, then take the mean binary cross-entropy.
    # A small epsilon inside the logs guards against log(0).
    epsilon = 1e-12
    probs = 1 / (1 + torch.exp(-predictions_tensor))
    expected_loss = -(
        targets_tensor * torch.log(probs + epsilon)
        + (1 - targets_tensor) * torch.log(1 - probs + epsilon)
    ).mean()
    # Create a BCEWithLogitsLoss object and compute the actual loss
    criterion = BCEWithLogitsLoss()
    actual_loss = criterion(predictions_tensor, targets_tensor)
    # Compare the expected and actual losses
    assert torch.allclose(actual_loss, expected_loss), f"The actual loss ({actual_loss}) is not close to the expected loss ({expected_loss})."
    print("All BCEWithLogitsLoss tests pass.")

def test_simple_adam_optimizer():
    # Dummy parameters (PyTorch tensors)
    param1 = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
    param2 = torch.tensor([10.0, 20.0], requires_grad=True)
    # Dummy gradients (PyTorch tensors)
    grad1 = torch.tensor([[-0.1, -0.2], [-0.3, -0.4]])
    grad2 = torch.tensor([-1.0, -2.0])
    # Attach the dummy gradients to the parameters, assuming the optimizer
    # follows the PyTorch convention of reading them from param.grad
    param1.grad = grad1
    param2.grad = grad2
    # Initialize the optimizer
    optimizer = SimpleAdamOptimizer([param1, param2], lr=0.001)
    # Check the initial parameter values
    assert torch.allclose(param1, torch.tensor([[1.0, 2.0], [3.0, 4.0]]))
    assert torch.allclose(param2, torch.tensor([10.0, 20.0]))
    # Perform an optimization step
    optimizer.step()
    # Print a message indicating that the test has passed
    print("All SimpleAdamOptimizer tests pass.")

if __name__ == "__main__":
    # Run all tests when the file is executed directly
    test_forward_discriminator()
    test_custom_discriminator()
    test_get_dense_block_discriminator()
    test_gen_block()
    test_gen_block(15, 28)
    test_get_dense_block()
    test_forward()
    test_custom_generator()
    test_bce_with_logits_loss()
    test_simple_adam_optimizer()