Added unit tests #8

Merged 1 commit on Apr 9, 2024
src/graph_network.jl: 3 changes (1 addition & 2 deletions)
@@ -71,7 +71,7 @@ end


"""
build_model(quantities_size, dims, output_size, mps, layer_size, hidden_layers, device)
build_model(quantities_size, dims, output_size, mps, layer_size, hidden_layers)

Constructs the Encode-Process-Decode model as a [Lux.jl](https://github.com/LuxDL/Lux.jl) Chain with the given arguments.

@@ -82,7 +82,6 @@ Constructs the Encode-Process-Decode model as a [Lux.jl](https://github.com/LuxDL/Lux.jl)
- `mps`: Number of message passing steps.
- `layer_size`: Size of hidden layers.
- `hidden_layers`: Number of hidden layers.
- `device`: Device where the model should be loaded (see [Lux GPU Management](https://lux.csail.mit.edu/dev/manual/gpu_management#gpu-management)).

## Returns
- Encode-Process-Decode model as a [Lux.jl](https://github.com/LuxDL/Lux.jl) Chain.
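With the `device` argument gone, `build_model` constructs the chain device-agnostically and parameters are moved afterwards, as the updated tests do with `cu`. A minimal usage sketch (argument values are illustrative, taken from `test/runtests.jl` below):

```julia
using GraphNetCore, Lux
import Random: MersenneTwister

# 2 input quantities, 2 spatial dims, 2 outputs, 1 message-passing step,
# hidden layers of width 16, 1 hidden layer per MLP (values from the tests below).
model = build_model(2, 2, 2, 1, 16, 1)
ps, st = Lux.setup(MersenneTwister(1234), model)

# Device placement is now a separate step, e.g. on CUDA:
# ps, st = cu(ps), cu(st)
```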
test/runtests.jl: 125 changes (110 additions & 15 deletions)
@@ -6,14 +6,18 @@
using GraphNetCore
using Test

using CUDA, cuDNN
using CUDA, cuDNN, Lux

import Random: MersenneTwister

@testset "GraphNetCore.jl" begin

hascuda = CUDA.has_cuda()

!hascuda && @warn "No CUDA installation detected! Skipping GPU tests..."

cpu = cpu_device()

@testset "utils.jl" begin
# 3 - 4
# / \ /
@@ -24,6 +28,10 @@ using CUDA, cuDNN
@test triangles_to_edges(faces) == ([2, 4, 3, 4, 3, 1, 2, 2, 3, 1],
[1, 2, 2, 3, 1, 2, 4, 3, 4, 3])

edges = [1 1 2 2 4
2 3 4 3 3]
@test parse_edges(edges) == ([2, 3, 4, 3, 4, 1, 1, 2, 2, 3], [1, 1, 2, 2, 3, 2, 3, 4, 3, 4])

indices = [1, 3, 2, 4]
@test one_hot(indices, 5, 0) == Bool[1 0 0 0
0 0 1 0
@@ -39,21 +47,108 @@ using CUDA, cuDNN
0 0 1 0
0 1 0 0]

@test minmaxnorm([2.0], 1.0, 1.0) == [0.0]
hascuda && @test minmaxnorm(cu([1.0, 2.0]), 1.0, 1.0) == cu([0.0, 0.0])
@test minmaxnorm([1.4, 2.3, 3.9, 4.0], -4.0, 4.0, 0.0, 1.0) == [0.675, 0.7875, 0.9875, 1.0]
@test minmaxnorm([2.0f0], 1.0f0, 1.0f0) == [0.0f0]
hascuda && @test minmaxnorm(cu([1.0f0, 2.0f0]), 1.0f0, 1.0f0) == cu([0.0f0, 0.0f0])
@test minmaxnorm([1.4f0, 2.3f0, 3.9f0, 4.0f0], -4.0f0, 4.0f0, 0.0f0, 1.0f0) == [0.675f0, 0.7875f0, 0.9875f0, 1.0f0]
@test_throws AssertionError minmaxnorm([2.0f0], 1.5f0, 0.5f0)
@test_throws AssertionError minmaxnorm([2.0f0], 1.0f0, 1.0f0, 1.5f0, 0.5f0)

mse_reduce_target = [2.0 1.3
0.6 1.7]
mse_reduce_output = [2.0 1.5
0.2 1.8]
@test mse_reduce(mse_reduce_target, mse_reduce_output) ≈ [0.16, 0.05]
hascuda && @test mse_reduce(cu(mse_reduce_target), cu(mse_reduce_output)) ≈ cu([0.16, 0.05])

reduce_arr = [1.0 2.0
3.0 4.0]
@test GraphNetCore.tullio_reducesum(reduce_arr, 1) == [4.0 6.0]
@test GraphNetCore.tullio_reducesum(reduce_arr, 2) == [3.0, 7.0]
mse_reduce_target = [2.0f0 1.3f0
0.6f0 1.7f0]
mse_reduce_output = [2.0f0 1.5f0
0.2f0 1.8f0]
@test mse_reduce(mse_reduce_target, mse_reduce_output) ≈ [0.16f0, 0.05f0]
hascuda && @test mse_reduce(cu(mse_reduce_target), cu(mse_reduce_output)) ≈ cu([0.16f0, 0.05f0])

reduce_arr = [1.0f0 2.0f0
3.0f0 4.0f0]
@test GraphNetCore.tullio_reducesum(reduce_arr, 1) == [4.0f0 6.0f0]
@test GraphNetCore.tullio_reducesum(reduce_arr, 2) == [3.0f0, 7.0f0]
hascuda && @test GraphNetCore.tullio_reducesum(cu(reduce_arr), 1) == cu([4.0f0 6.0f0])
hascuda && @test GraphNetCore.tullio_reducesum(cu(reduce_arr), 2) == cu([3.0f0, 7.0f0])
end
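For reference, the expected values in the `utils.jl` testset follow from plain min-max scaling and a column-wise sum of squared errors; a small sketch of that reading (my interpretation of the expectations, not GraphNetCore's documented formulas):

```julia
# Min-max scaling as implied by the expectations above: map [data_min, data_max]
# linearly onto [target_min, target_max]; a degenerate range yields zeros.
refnorm(x, lo, hi, tlo = 0.0f0, thi = 1.0f0) =
    lo == hi ? zero.(x) : (x .- lo) ./ (hi - lo) .* (thi - tlo) .+ tlo

refnorm([1.4f0, 2.3f0, 3.9f0, 4.0f0], -4.0f0, 4.0f0)   # ≈ [0.675, 0.7875, 0.9875, 1.0]

# The mse_reduce expectation is the column-wise sum of squared errors:
sum(abs2, [2.0f0 1.3f0; 0.6f0 1.7f0] .- [2.0f0 1.5f0; 0.2f0 1.8f0]; dims = 1)   # [0.16 0.05]
```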

@testset "normaliser.jl" begin
norm_off = NormaliserOffline(-10.0f0, 10.0f0, -1.0f0, 1.0f0)
norm_on_cpu = NormaliserOnline(Dict{String, Any}(
"max_accumulations" => 10000,
"std_epsilon" => 1f-8,
"acc_count" => 2000,
"num_accumulations" => 200,
"acc_sum" => [142.32f0, 63.24f0],
"acc_sum_squared" => [20254.9824f0, 3999.2976f0]
), cpu)
norm_on_gpu = NormaliserOnline(Dict{String, Any}(
"max_accumulations" => 10000,
"std_epsilon" => 1f-8,
"acc_count" => 2000,
"num_accumulations" => 200,
"acc_sum" => cu([142.32f0, 63.24f0]),
"acc_sum_squared" => cu([20254.9824f0, 3999.2976f0])
), cu)
norm_dict_cpu = Dict{String, Union{NormaliserOffline, NormaliserOnline}}("norm_off" => norm_off, "norm_on" => norm_on_cpu)
norm_dict_gpu = Dict{String, Union{NormaliserOffline, NormaliserOnline}}("norm_off" => norm_off, "norm_on" => norm_on_gpu)

@test inverse_data(norm_off, [0.0f0]) == [0.0f0]
@test inverse_data(norm_off, [-0.5f0, -0.25f0, 0.1f0, 0.75f0]) == [-5.0f0, -2.5f0, 1.0f0, 7.5f0]
hascuda && @test inverse_data(norm_off, cu([-0.5f0, -0.25f0, 0.1f0, 0.75f0])) == cu([-5.0f0, -2.5f0, 1.0f0, 7.5f0])

norm_dict_cpu_test = GraphNetCore.deserialize(GraphNetCore.serialize(norm_dict_cpu), cpu)
@test norm_dict_cpu["norm_off"].data_min == norm_dict_cpu_test["norm_off"].data_min &&
norm_dict_cpu["norm_off"].data_max == norm_dict_cpu_test["norm_off"].data_max &&
norm_dict_cpu["norm_off"].target_min == norm_dict_cpu_test["norm_off"].target_min &&
norm_dict_cpu["norm_off"].target_max == norm_dict_cpu_test["norm_off"].target_max
@test norm_dict_cpu["norm_on"].max_accumulations == norm_dict_cpu_test["norm_on"].max_accumulations &&
norm_dict_cpu["norm_on"].std_epsilon == norm_dict_cpu_test["norm_on"].std_epsilon &&
norm_dict_cpu["norm_on"].acc_count == norm_dict_cpu_test["norm_on"].acc_count &&
norm_dict_cpu["norm_on"].num_accumulations == norm_dict_cpu_test["norm_on"].num_accumulations &&
norm_dict_cpu["norm_on"].acc_sum == norm_dict_cpu_test["norm_on"].acc_sum &&
norm_dict_cpu["norm_on"].acc_sum_squared == norm_dict_cpu_test["norm_on"].acc_sum_squared

if hascuda
norm_dict_gpu_test = GraphNetCore.deserialize(GraphNetCore.serialize(norm_dict_gpu), cu)
@test norm_dict_gpu["norm_off"].data_min == norm_dict_gpu_test["norm_off"].data_min &&
norm_dict_gpu["norm_off"].data_max == norm_dict_gpu_test["norm_off"].data_max &&
norm_dict_gpu["norm_off"].target_min == norm_dict_gpu_test["norm_off"].target_min &&
norm_dict_gpu["norm_off"].target_max == norm_dict_gpu_test["norm_off"].target_max
@test norm_dict_gpu["norm_on"].max_accumulations == norm_dict_gpu_test["norm_on"].max_accumulations &&
norm_dict_gpu["norm_on"].std_epsilon == norm_dict_gpu_test["norm_on"].std_epsilon &&
norm_dict_gpu["norm_on"].acc_count == norm_dict_gpu_test["norm_on"].acc_count &&
norm_dict_gpu["norm_on"].num_accumulations == norm_dict_gpu_test["norm_on"].num_accumulations &&
norm_dict_gpu["norm_on"].acc_sum == norm_dict_gpu_test["norm_on"].acc_sum &&
norm_dict_gpu["norm_on"].acc_sum_squared == norm_dict_gpu_test["norm_on"].acc_sum_squared
end
end
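The `NormaliserOnline` dictionary above looks like a standard running-statistics accumulator; assuming the usual formulas (an assumption about GraphNetCore's internals, not verified against its source), the test constants correspond to:

```julia
# Hypothetical reading of the accumulator fields used in the testset above.
acc_sum         = [142.32f0, 63.24f0]
acc_sum_squared = [20254.9824f0, 3999.2976f0]
acc_count       = 2000

mean_est = acc_sum ./ acc_count                                 # ≈ [0.0712, 0.0316]
std_est  = sqrt.(acc_sum_squared ./ acc_count .- mean_est .^ 2) # ≈ [3.18, 1.41]
```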

@testset "GraphNetwork" begin
nf = [0.5f0 -0.25f0 -0.75f0
-2.4f0 3.6f0 1.2f0]
ef = [3.0f0 0.0f0 -3.0f0 0.0f0 -4.0f0 -4.0f0
0.0f0 4.0f0 4.0f0 -3.0f0 0.0f0 3.0f0
3.0f0 4.0f0 5.0f0 3.0f0 4.0f0 5.0f0]
senders = [2, 3, 3, 1, 1, 2]
receivers = [1, 1, 2, 2, 3, 3]
output = [0.7324772f0 -0.027799817f0 0.1475548f0; 0.42122957f0 -0.6571782f0 -0.15739384f0]

graph = FeatureGraph(nf, ef, senders, receivers)

model = build_model(2, 2, 2, 1, 16, 1)
ps, st = Lux.setup(MersenneTwister(1234), model)

out, _ = model(graph, ps, st)

@test out ≈ output

if hascuda
graph = FeatureGraph(cu(nf), cu(ef), cu(senders), cu(receivers))

ps = cu(ps)
st = cu(st)

out, _ = model(graph, ps, st)

@test out ≈ cu(output)
end
end
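As a reading aid for the `GraphNetwork` testset, the shapes of the feature matrices line up with the `build_model` arguments as follows (my interpretation; the hard-coded `output` matrix is simply the value the seeded model produces):

```julia
using GraphNetCore

# nf is 2×3 -> quantities_size = 2 node features for 3 nodes
# ef is 3×6 -> 3 edge features for the 6 directed edges senders[i] -> receivers[i]
# build_model(2, 2, 2, 1, 16, 1) -> output_size = 2, so `out` comes back as 2×3,
# one prediction per node, after mps = 1 message-passing step (MLP width 16, depth 1).
graph = FeatureGraph(rand(Float32, 2, 3), rand(Float32, 3, 6),
                     [2, 3, 3, 1, 1, 2], [1, 1, 2, 2, 3, 3])
```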

end