diff --git a/GNNGraphs/Project.toml b/GNNGraphs/Project.toml new file mode 100644 index 000000000..fa1a28ef5 --- /dev/null +++ b/GNNGraphs/Project.toml @@ -0,0 +1,67 @@ +name = "GNNGraphs" +uuid = "aed8fd31-079b-4b5a-b342-a13352159b8c" +authors = ["Carlo Lucibello and contributors"] +version = "0.1.0" + +[deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" +Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196" +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" +KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +LuxDeviceUtils = "34f89e08-e1d5-43b4-8944-0b49ac560553" +MLUtils = "f1d291b0-491e-4a28-83b9-f70985020b54" +MacroTools = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +NNlib = "872c559c-99b0-510c-b3b7-b6c96a88d5cd" +NearestNeighbors = "b8a86587-4115-5ab1-83bc-aa920d37bbce" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" +Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" +StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" + +[weakdeps] +CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" +SimpleWeightedGraphs = "47aef6b3-ad0c-573a-a1e2-d07658019622" + +[extensions] +GNNGraphsCUDAExt = "CUDA" +GNNGraphsSimpleWeightedGraphsExt = "SimpleWeightedGraphs" + +[compat] +Adapt = "4" +CUDA = "5" +ChainRulesCore = "1" +Functors = "0.4.1" +Graphs = "1.4" +KrylovKit = "0.8" +LinearAlgebra = "1" +LuxDeviceUtils = "0.1.24" +MLDatasets = "0.7" +MLUtils = "0.4" +NNlib = "0.9" +NearestNeighbors = "0.4" +Random = "1" +SimpleWeightedGraphs = "1.4.0" +SparseArrays = "1" +Statistics = "1" +StatsBase = "0.34" +cuDNN = "1" +julia = "1.9" + +[extras] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" +ChainRulesTestUtils = "cdddcdb0-9152-4a09-a978-84456f9df70a" +DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" +FiniteDifferences = "26cc04aa-876d-5657-8c51-4c34ba976000" +InlineStrings = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48" +MLDatasets = "eb30cadb-4394-5ae3-aed4-317e484a6458" +SimpleWeightedGraphs = "47aef6b3-ad0c-573a-a1e2-d07658019622" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" +cuDNN = "02a925ec-e4fe-4b08-9a7e-0d78e3d38ccd" + +[targets] +test = ["Test", "Adapt", "DataFrames", "InlineStrings", "SimpleWeightedGraphs", "Zygote", "FiniteDifferences", "ChainRulesTestUtils", "MLDatasets", "CUDA", "cuDNN"] diff --git a/GNNGraphs/ext/GNNGraphsCUDAExt/GNNGraphsCUDAExt.jl b/GNNGraphs/ext/GNNGraphsCUDAExt/GNNGraphsCUDAExt.jl new file mode 100644 index 000000000..f7a053b97 --- /dev/null +++ b/GNNGraphs/ext/GNNGraphsCUDAExt/GNNGraphsCUDAExt.jl @@ -0,0 +1,14 @@ +module GNNGraphsCUDAExt + +using CUDA +using Random, Statistics, LinearAlgebra +using GNNGraphs +using GNNGraphs: COO_T, ADJMAT_T, SPARSE_T + +const CUMAT_T = Union{CUDA.AnyCuMatrix, CUDA.CUSPARSE.CuSparseMatrix} + +include("query.jl") +include("transform.jl") +include("utils.jl") + +end #module diff --git a/GNNlib/ext/GNNlibCUDAExt/GNNGraphs/query.jl b/GNNGraphs/ext/GNNGraphsCUDAExt/query.jl similarity index 100% rename from GNNlib/ext/GNNlibCUDAExt/GNNGraphs/query.jl rename to GNNGraphs/ext/GNNGraphsCUDAExt/query.jl diff --git a/GNNlib/ext/GNNlibCUDAExt/GNNGraphs/transform.jl b/GNNGraphs/ext/GNNGraphsCUDAExt/transform.jl similarity index 100% rename from GNNlib/ext/GNNlibCUDAExt/GNNGraphs/transform.jl rename to GNNGraphs/ext/GNNGraphsCUDAExt/transform.jl diff 
--git a/GNNlib/ext/GNNlibCUDAExt/GNNGraphs/utils.jl b/GNNGraphs/ext/GNNGraphsCUDAExt/utils.jl similarity index 55% rename from GNNlib/ext/GNNlibCUDAExt/GNNGraphs/utils.jl rename to GNNGraphs/ext/GNNGraphsCUDAExt/utils.jl index c3d78e9c1..0083d1db5 100644 --- a/GNNlib/ext/GNNlibCUDAExt/GNNGraphs/utils.jl +++ b/GNNGraphs/ext/GNNGraphsCUDAExt/utils.jl @@ -3,6 +3,9 @@ GNNGraphs.iscuarray(x::AnyCuArray) = true function sort_edge_index(u::AnyCuArray, v::AnyCuArray) + dev = get_device(u) + cdev = cpu_device() + u, v = u |> cdev, v |> cdev #TODO proper cuda friendly implementation - sort_edge_index(u |> Flux.cpu, v |> Flux.cpu) |> Flux.gpu -end \ No newline at end of file + sort_edge_index(u, v) |> dev +end diff --git a/GNNlib/ext/GNNlibSimpleWeightedGraphsExt/GNNlibSimpleWeightedGraphsExt.jl b/GNNGraphs/ext/GNNGraphsSimpleWeightedGraphsExt/GNNGraphsSimpleWeightedGraphsExt.jl similarity index 61% rename from GNNlib/ext/GNNlibSimpleWeightedGraphsExt/GNNlibSimpleWeightedGraphsExt.jl rename to GNNGraphs/ext/GNNGraphsSimpleWeightedGraphsExt/GNNGraphsSimpleWeightedGraphsExt.jl index 48c8f0f3b..f53cbdfd3 100644 --- a/GNNlib/ext/GNNlibSimpleWeightedGraphsExt/GNNlibSimpleWeightedGraphsExt.jl +++ b/GNNGraphs/ext/GNNGraphsSimpleWeightedGraphsExt/GNNGraphsSimpleWeightedGraphsExt.jl @@ -1,10 +1,10 @@ -module GNNlibSimpleWeightedGraphsExt +module GNNGraphsSimpleWeightedGraphsExt -using GNNlib using Graphs +using GNNGraphs using SimpleWeightedGraphs -function GNNlib.GNNGraph(g::T; kws...) where +function GNNGraphs.GNNGraph(g::T; kws...) where {T <: Union{SimpleWeightedGraph, SimpleWeightedDiGraph}} return GNNGraph(g.weights; kws...) end diff --git a/GNNlib/src/GNNGraphs/GNNGraphs.jl b/GNNGraphs/src/GNNGraphs.jl similarity index 90% rename from GNNlib/src/GNNGraphs/GNNGraphs.jl rename to GNNGraphs/src/GNNGraphs.jl index 0fbf871d0..15cd88e99 100644 --- a/GNNlib/src/GNNGraphs/GNNGraphs.jl +++ b/GNNGraphs/src/GNNGraphs.jl @@ -5,8 +5,6 @@ using Functors: @functor import Graphs using Graphs: AbstractGraph, outneighbors, inneighbors, adjacency_matrix, degree, has_self_loops, is_directed -import MLUtils -using MLUtils: getobs, numobs, ones_like, zeros_like, batch import NearestNeighbors import NNlib import StatsBase @@ -14,7 +12,9 @@ import KrylovKit using ChainRulesCore using LinearAlgebra, Random, Statistics import MLUtils +using MLUtils: getobs, numobs, ones_like, zeros_like, chunk, batch import Functors +using LuxDeviceUtils: get_device, cpu_device, LuxCPUDevice include("chainrules.jl") # hacks for differentiability @@ -78,7 +78,9 @@ export add_nodes, to_unidirected, random_walk_pe, remove_nodes, -# from Flux + ppr_diffusion, + drop_nodes, +# from MLUtils batch, unbatch, # from SparseArrays @@ -101,8 +103,7 @@ include("operators.jl") include("convert.jl") include("utils.jl") -export sort_edge_index, - color_refinement +export sort_edge_index, color_refinement include("gatherscatter.jl") # _gather, _scatter diff --git a/GNNlib/src/GNNGraphs/abstracttypes.jl b/GNNGraphs/src/abstracttypes.jl similarity index 100% rename from GNNlib/src/GNNGraphs/abstracttypes.jl rename to GNNGraphs/src/abstracttypes.jl diff --git a/GNNlib/src/GNNGraphs/chainrules.jl b/GNNGraphs/src/chainrules.jl similarity index 100% rename from GNNlib/src/GNNGraphs/chainrules.jl rename to GNNGraphs/src/chainrules.jl diff --git a/GNNlib/src/GNNGraphs/convert.jl b/GNNGraphs/src/convert.jl similarity index 100% rename from GNNlib/src/GNNGraphs/convert.jl rename to GNNGraphs/src/convert.jl diff --git a/GNNlib/src/GNNGraphs/datastore.jl
b/GNNGraphs/src/datastore.jl similarity index 87% rename from GNNlib/src/GNNGraphs/datastore.jl rename to GNNGraphs/src/datastore.jl index 5fe80b5f0..c39dfa64a 100644 --- a/GNNlib/src/GNNGraphs/datastore.jl +++ b/GNNGraphs/src/datastore.jl @@ -9,44 +9,44 @@ At construction time, the `data` can be provided as any iterables of pairs of symbols and arrays or as keyword arguments: ```jldoctest -julia> ds = DataStore(3, x = rand(Float32, 2, 3), y = rand(Float32, 3)) +julia> ds = DataStore(3, x = rand(2, 3), y = rand(3)) DataStore(3) with 2 elements: - y = 3-element Vector{Float32} - x = 2×3 Matrix{Float32} + y = 3-element Vector{Float64} + x = 2×3 Matrix{Float64} -julia> ds = DataStore(3, Dict(:x => rand(Float32, 2, 3), :y => rand(Float32, 3))); # equivalent to above +julia> ds = DataStore(3, Dict(:x => rand(2, 3), :y => rand(3))); # equivalent to above -julia> ds = DataStore(3, (x = rand(Float32, 2, 3), y = rand(Float32, 30))) +julia> ds = DataStore(3, (x = rand(2, 3), y = rand(30))) ERROR: AssertionError: DataStore: data[y] has 30 observations, but n = 3 Stacktrace: [1] DataStore(n::Int64, data::Dict{Symbol, Any}) - @ GNNlib.GNNGraphs ~/.julia/dev/GNNlib/src/GNNGraphs/datastore.jl:54 - [2] DataStore(n::Int64, data::NamedTuple{(:x, :y), Tuple{Matrix{Float32}, Vector{Float32}}}) - @ GNNlib.GNNGraphs ~/.julia/dev/GNNlib/src/GNNGraphs/datastore.jl:73 + @ GraphNeuralNetworks.GNNGraphs ~/.julia/dev/GraphNeuralNetworks/src/GNNGraphs/datastore.jl:54 + [2] DataStore(n::Int64, data::NamedTuple{(:x, :y), Tuple{Matrix{Float64}, Vector{Float64}}}) + @ GraphNeuralNetworks.GNNGraphs ~/.julia/dev/GraphNeuralNetworks/src/GNNGraphs/datastore.jl:73 [3] top-level scope @ REPL[13]:1 -julia> ds = DataStore(x = randFloat32, 2, 3), y = rand(Float32, 30)) # no checks +julia> ds = DataStore(x = rand(2, 3), y = rand(30)) # no checks DataStore() with 2 elements: - y = 30-element Vector{Float32} - x = 2×3 Matrix{Float32} + y = 30-element Vector{Float64} + x = 2×3 Matrix{Float64} ``` The `DataStore` has an interface similar to both dictionaries and named tuples. Arrays can be accessed and added using either the indexing or the property syntax: ```jldoctest -julia> ds = DataStore(x = ones(Float32, 2, 3), y = zeros(Float32, 3)) +julia> ds = DataStore(x = ones(2, 3), y = zeros(3)) DataStore() with 2 elements: - y = 3-element Vector{Float32} - x = 2×3 Matrix{Float32} + y = 3-element Vector{Float64} + x = 2×3 Matrix{Float64} julia> ds.x # same as `ds[:x]` -2×3 Matrix{Float32}: +2×3 Matrix{Float64}: 1.0 1.0 1.0 1.0 1.0 1.0 -julia> ds.z = zeros(Float32, 3) # Add new feature array `z`. Same as `ds[:z] = rand(Float32, 3)` +julia> ds.z = zeros(3) # Add new feature array `z`. Same as `ds[:z] = zeros(3)` 3-element Vector{Float64}: 0.0 0.0 diff --git a/GNNlib/src/GNNGraphs/gatherscatter.jl b/GNNGraphs/src/gatherscatter.jl similarity index 100% rename from GNNlib/src/GNNGraphs/gatherscatter.jl rename to GNNGraphs/src/gatherscatter.jl diff --git a/GNNlib/src/GNNGraphs/generate.jl b/GNNGraphs/src/generate.jl similarity index 99% rename from GNNlib/src/GNNGraphs/generate.jl rename to GNNGraphs/src/generate.jl index 07a31c0da..bb0317548 100644 --- a/GNNlib/src/GNNGraphs/generate.jl +++ b/GNNGraphs/src/generate.jl @@ -26,7 +26,7 @@ julia> edge_index(g) ([1, 3, 3, 4], [5, 4, 5, 2]) # In the bidirected case, edge data will be duplicated on the reverse edges if needed.
-julia> g = rand_graph(5, 4, edata=rand(Float32, 16, 2)) +julia> g = rand_graph(5, 4, edata=rand(16, 2)) GNNGraph: num_nodes = 5 num_edges = 4 @@ -173,7 +173,7 @@ to its `k` closest `points`. ```jldoctest julia> n, k = 10, 3; -julia> x = rand(Float32, 3, n); +julia> x = rand(3, n); julia> g = knn_graph(x, k) GNNGraph: @@ -254,7 +254,7 @@ to its neighbors within a given distance `r`. ```jldoctest julia> n, r = 10, 0.75; -julia> x = Float32, 3, n); +julia> x = rand(3, n); julia> g = radius_graph(x, r) GNNGraph: diff --git a/GNNlib/src/GNNGraphs/gnngraph.jl b/GNNGraphs/src/gnngraph.jl similarity index 97% rename from GNNlib/src/GNNGraphs/gnngraph.jl rename to GNNGraphs/src/gnngraph.jl index a26652d94..64fd32aad 100644 --- a/GNNlib/src/GNNGraphs/gnngraph.jl +++ b/GNNGraphs/src/gnngraph.jl @@ -66,7 +66,7 @@ functionality from that library. # Examples ```julia -using Flux, GraphNeuralNetworks, CUDA +using GraphNeuralNetworks # Construct from adjacency list representation data = [[2,3], [1,4,5], [1], [2,5], [2,4]] @@ -86,24 +86,27 @@ g = GNNGraph(s, t) g = GNNGraph(erdos_renyi(100, 20)) # Add 2 node feature arrays at creation time -g = GNNGraph(g, ndata = (x=rand(Float32,100,g.num_nodes), y=rand(Float32,g.num_nodes))) +g = GNNGraph(g, ndata = (x=rand(100, g.num_nodes), y=rand(g.num_nodes))) # Add 1 edge feature array, after the graph creation -g.edata.z = rand(Float32,16,g.num_edges) +g.edata.z = rand(16, g.num_edges) # Add node features and edge features with default names `x` and `e` -g = GNNGraph(g, ndata = rand(Float32,100,g.num_nodes), edata = rand(Float32,16,g.num_edges)) +g = GNNGraph(g, ndata = rand(100, g.num_nodes), edata = rand(16, g.num_edges)) g.ndata.x # or just g.x g.edata.e # or just g.e -# Send to gpu -g = g |> gpu - # Collect edges' source and target nodes. # Both source and target are vectors of length num_edges source, target = edge_index(g) ``` +A `GNNGraph` can be sent to the GPU using e.g. 
Flux's `gpu` function: +``` +# Send to gpu +using Flux, CUDA +g = g |> Flux.gpu +``` """ struct GNNGraph{T <: Union{COO_T, ADJMAT_T}} <: AbstractGNNGraph{T} graph::T diff --git a/GNNlib/src/GNNGraphs/gnnheterograph.jl b/GNNGraphs/src/gnnheterograph.jl similarity index 100% rename from GNNlib/src/GNNGraphs/gnnheterograph.jl rename to GNNGraphs/src/gnnheterograph.jl diff --git a/GNNlib/src/GNNGraphs/operators.jl b/GNNGraphs/src/operators.jl similarity index 100% rename from GNNlib/src/GNNGraphs/operators.jl rename to GNNGraphs/src/operators.jl diff --git a/GNNlib/src/GNNGraphs/query.jl b/GNNGraphs/src/query.jl similarity index 100% rename from GNNlib/src/GNNGraphs/query.jl rename to GNNGraphs/src/query.jl diff --git a/GNNlib/src/GNNGraphs/sampling.jl b/GNNGraphs/src/sampling.jl similarity index 100% rename from GNNlib/src/GNNGraphs/sampling.jl rename to GNNGraphs/src/sampling.jl diff --git a/GNNlib/src/GNNGraphs/temporalsnapshotsgnngraph.jl b/GNNGraphs/src/temporalsnapshotsgnngraph.jl similarity index 100% rename from GNNlib/src/GNNGraphs/temporalsnapshotsgnngraph.jl rename to GNNGraphs/src/temporalsnapshotsgnngraph.jl diff --git a/GNNlib/src/GNNGraphs/transform.jl b/GNNGraphs/src/transform.jl similarity index 91% rename from GNNlib/src/GNNGraphs/transform.jl rename to GNNGraphs/src/transform.jl index 0c8e7d74b..1ced9be53 100644 --- a/GNNlib/src/GNNGraphs/transform.jl +++ b/GNNGraphs/src/transform.jl @@ -306,6 +306,38 @@ function remove_nodes(g::GNNGraph{<:COO_T}, nodes_to_remove::AbstractVector) ndata, edata, g.gdata) end +""" + drop_nodes(g::GNNGraph{<:COO_T}, p) + +Randomly drop nodes (and their associated edges) from a GNNGraph based on a given probability. +Dropping nodes is a technique for graph data augmentation, following the paper [DropNode](https://arxiv.org/pdf/2008.12578.pdf). + +# Arguments +- `g`: The input graph from which nodes (and their associated edges) will be dropped. +- `p`: The probability of dropping each node. Default value is `0.5`. + +# Returns +A modified GNNGraph with nodes (and their associated edges) dropped based on the given probability. + +# Example +```julia +using GraphNeuralNetworks +# Construct a GNNGraph +g = GNNGraph([1, 1, 2, 2, 3], [2, 3, 1, 3, 1], num_nodes=3) +# Drop nodes with a probability of 0.5 +g_new = drop_nodes(g, 0.5) +println(g_new) +``` +""" +function drop_nodes(g::GNNGraph{<:COO_T}, p = 0.5) + num_nodes = g.num_nodes + nodes_to_remove = filter(_ -> rand() < p, 1:num_nodes) + + new_g = remove_nodes(g, nodes_to_remove) + + return new_g +end + """ add_edges(g::GNNGraph, s::AbstractVector, t::AbstractVector; [edata]) add_edges(g::GNNGraph, (s, t); [edata]) @@ -1028,6 +1060,9 @@ function negative_sample(g::GNNGraph; s, t = edge_index(g) n = g.num_nodes + dev = get_device(s) + cdev = cpu_device() + s, t = s |> cdev, t |> cdev idx_pos, maxid = edge_encoding(s, t, n) if bidirected num_neg_edges = num_neg_edges ÷ 2 @@ -1051,6 +1086,7 @@ function negative_sample(g::GNNGraph; if bidirected s_neg, t_neg = [s_neg; t_neg], [t_neg; s_neg] end + s_neg, t_neg = s_neg |> dev, t_neg |> dev return GNNGraph(s_neg, t_neg, num_nodes = n) end @@ -1129,3 +1165,49 @@ ci2t(ci::AbstractVector{<:CartesianIndex}, dims) = ntuple(i -> map(x -> x[i], ci @non_differentiable remove_self_loops(x...) # TODO this is wrong, since g carries feature arrays, needs rrule @non_differentiable dense_zeros_like(x...) +""" + ppr_diffusion(g::GNNGraph{<:COO_T}; alpha = 0.85f0) -> GNNGraph + +Calculates the Personalized PageRank (PPR) diffusion from the edge weight matrix of a GNNGraph and updates the graph with new edge weights derived from the PPR matrix. +Based on the paper [The PageRank citation ranking: Bringing order to the web](http://ilpubs.stanford.edu:8090/422). + +The function performs the following steps: +1. Constructs a sparse adjacency matrix `A` from the graph's edge weights. +2. Forms the matrix `I + (α - 1) * A`, with `α` the damping factor (`alpha`) and `I` the identity matrix. +3. Applies the PPR formula `α * (I + (α - 1) * A)^-1` to compute the diffusion matrix. +4. Assigns each edge a new weight taken from the corresponding entry of the PPR matrix. + +# Arguments +- `g::GNNGraph`: The input graph for which PPR diffusion is to be calculated. Unweighted graphs are treated as having unit edge weights. +- `alpha`: The damping factor used in the PPR calculation, controlling the teleport probability in the random walk. Defaults to `0.85f0`. + +# Returns +- A new `GNNGraph` instance with the same structure as `g` but with updated edge weights according to the PPR diffusion calculation. +""" +function ppr_diffusion(g::GNNGraph{<:COO_T}; alpha = 0.85f0) + s, t = edge_index(g) + w = get_edge_weight(g) + if isnothing(w) + w = ones(Float32, g.num_edges) + end + + N = g.num_nodes + + initial_A = sparse(t, s, w, N, N) + scaled_A = (Float32(alpha) - 1) * initial_A + + I_sparse = sparse(Diagonal(ones(Float32, N))) + A_sparse = I_sparse + scaled_A + + A_dense = Matrix(A_sparse) + + PPR = alpha * inv(A_dense) + + new_w = [PPR[dst, src] for (src, dst) in zip(s, t)] + + return GNNGraph((s, t, new_w), + g.num_nodes, length(s), g.num_graphs, + g.graph_indicator, + g.ndata, g.edata, g.gdata) +end
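A quick usage sketch for the two transforms added above. The toy graph is made up for illustration, and `Random.seed!` only makes the node drop reproducible:

```julia
using GNNGraphs, Random

Random.seed!(42)  # reproducible node drop

# Small weighted graph: 4 nodes, 4 directed edges.
s, t, w = [1, 2, 3, 4], [2, 3, 4, 1], Float32[0.5, 1.0, 1.5, 2.0]
g = GNNGraph(s, t, w)

# Each node is dropped independently with probability p;
# edges incident to dropped nodes are removed as well.
g_dropped = drop_nodes(g, 0.3)
@show g_dropped.num_nodes

# PPR diffusion keeps the topology but replaces each edge weight
# with the corresponding entry of α * (I + (α - 1)A)⁻¹.
g_ppr = ppr_diffusion(g; alpha = 0.85f0)
@show get_edge_weight(g_ppr)
```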
+""" + ppr_diffusion(g::GNNGraph{<:COO_T}, alpha =0.85f0) -> GNNGraph + +Calculates the Personalized PageRank (PPR) diffusion based on the edge weight matrix of a GNNGraph and updates the graph with new edge weights derived from the PPR matrix. +References paper: [The pagerank citation ranking: Bringing order to the web](http://ilpubs.stanford.edu:8090/422) + + +The function performs the following steps: +1. Constructs a modified adjacency matrix `A` using the graph's edge weights, where `A` is adjusted by `(α - 1) * A + I`, with `α` being the damping factor (`alpha_f32`) and `I` the identity matrix. +2. Normalizes `A` to ensure each column sums to 1, representing transition probabilities. +3. Applies the PPR formula `α * (I + (α - 1) * A)^-1` to compute the diffusion matrix. +4. Updates the original edge weights of the graph based on the PPR diffusion matrix, assigning new weights for each edge from the PPR matrix. + +# Arguments +- `g::GNNGraph`: The input graph for which PPR diffusion is to be calculated. It should have edge weights available. +- `alpha_f32::Float32`: The damping factor used in PPR calculation, controlling the teleport probability in the random walk. Defaults to `0.85f0`. + +# Returns +- A new `GNNGraph` instance with the same structure as `g` but with updated edge weights according to the PPR diffusion calculation. +""" +function ppr_diffusion(g::GNNGraph{<:COO_T}; alpha = 0.85f0) + s, t = edge_index(g) + w = get_edge_weight(g) + if isnothing(w) + w = ones(Float32, g.num_edges) + end + + N = g.num_nodes + + initial_A = sparse(t, s, w, N, N) + scaled_A = (Float32(alpha) - 1) * initial_A + + I_sparse = sparse(Diagonal(ones(Float32, N))) + A_sparse = I_sparse + scaled_A + + A_dense = Matrix(A_sparse) + + PPR = alpha * inv(A_dense) + + new_w = [PPR[dst, src] for (src, dst) in zip(s, t)] + + return GNNGraph((s, t, new_w), + g.num_nodes, length(s), g.num_graphs, + g.graph_indicator, + g.ndata, g.edata, g.gdata) +end diff --git a/GNNlib/src/GNNGraphs/utils.jl b/GNNGraphs/src/utils.jl similarity index 99% rename from GNNlib/src/GNNGraphs/utils.jl rename to GNNGraphs/src/utils.jl index f6e25dc80..4bba304ef 100644 --- a/GNNlib/src/GNNGraphs/utils.jl +++ b/GNNGraphs/src/utils.jl @@ -65,7 +65,6 @@ function sort_edge_index(u, v) end - cat_features(x1::Nothing, x2::Nothing) = nothing cat_features(x1::AbstractArray, x2::AbstractArray) = cat(x1, x2, dims = ndims(x1)) function cat_features(x1::Union{Number, AbstractVector}, x2::Union{Number, AbstractVector}) diff --git a/test/GNNGraphs/chainrules.jl b/GNNGraphs/test/chainrules.jl similarity index 100% rename from test/GNNGraphs/chainrules.jl rename to GNNGraphs/test/chainrules.jl diff --git a/test/GNNGraphs/convert.jl b/GNNGraphs/test/convert.jl similarity index 100% rename from test/GNNGraphs/convert.jl rename to GNNGraphs/test/convert.jl diff --git a/test/GNNGraphs/datastore.jl b/GNNGraphs/test/datastore.jl similarity index 100% rename from test/GNNGraphs/datastore.jl rename to GNNGraphs/test/datastore.jl diff --git a/test/ext/GraphNeuralNetworksSimpleWeightedGraphsExt/GraphNeuralNetworksSimpleWeightedGraphsExt.jl b/GNNGraphs/test/ext/SimpleWeightedGraphs/SimpleWeightedGraphs.jl similarity index 100% rename from test/ext/GraphNeuralNetworksSimpleWeightedGraphsExt/GraphNeuralNetworksSimpleWeightedGraphsExt.jl rename to GNNGraphs/test/ext/SimpleWeightedGraphs/SimpleWeightedGraphs.jl diff --git a/test/GNNGraphs/generate.jl b/GNNGraphs/test/generate.jl similarity index 93% rename from test/GNNGraphs/generate.jl rename to 
GNNGraphs/test/generate.jl index d9f281fb2..867fec399 100644 --- a/test/GNNGraphs/generate.jl +++ b/GNNGraphs/test/generate.jl @@ -107,8 +107,8 @@ end end @testset "rand_temporal_hyperbolic_graph" begin - @test GraphNeuralNetworks.GNNGraphs._hyperbolic_distance([1.0,1.0],[1.0,1.0];ζ=1)==0 - @test GraphNeuralNetworks.GNNGraphs._hyperbolic_distance([0.23,0.11],[0.98,0.55];ζ=1)==GraphNeuralNetworks.GNNGraphs._hyperbolic_distance([0.98,0.55],[0.23,0.11];ζ=1) + @test GNNGraphs._hyperbolic_distance([1.0,1.0],[1.0,1.0];ζ=1)==0 + @test GNNGraphs._hyperbolic_distance([0.23,0.11],[0.98,0.55];ζ=1) == GNNGraphs._hyperbolic_distance([0.98,0.55],[0.23,0.11];ζ=1) number_nodes = 30 number_snapshots = 5 α, R, speed, ζ = 1, 1, 0.1, 1 diff --git a/test/GNNGraphs/gnngraph.jl b/GNNGraphs/test/gnngraph.jl similarity index 99% rename from test/GNNGraphs/gnngraph.jl rename to GNNGraphs/test/gnngraph.jl index f1c952cb1..6f298db23 100644 --- a/test/GNNGraphs/gnngraph.jl +++ b/GNNGraphs/test/gnngraph.jl @@ -275,14 +275,14 @@ end U = rand(10, 1) data = [rand_graph(n, m, ndata = X, edata = E, gdata = U, graph_type = GRAPH_T) for _ in 1:num_graphs] - g = Flux.batch(data) + g = MLUtils.batch(data) @testset "batch then pass to dataloader" begin @test MLUtils.getobs(g, 3) == getgraph(g, 3) @test MLUtils.getobs(g, 3:5) == getgraph(g, 3:5) @test MLUtils.numobs(g) == g.num_graphs - d = Flux.DataLoader(g, batchsize = 2, shuffle = false) + d = MLUtils.DataLoader(g, batchsize = 2, shuffle = false) @test first(d) == getgraph(g, 1:2) end @@ -292,7 +292,7 @@ end @test MLUtils.getobs(data, 3:5) == [data[3], data[4], data[5]] @test MLUtils.numobs(data) == g.num_graphs - d = Flux.DataLoader(data, batchsize = 2, shuffle = false) + d = MLUtils.DataLoader(data, batchsize = 2, shuffle = false) @test first(d) == [data[1], data[2]] end end diff --git a/test/GNNGraphs/gnnheterograph.jl b/GNNGraphs/test/gnnheterograph.jl similarity index 97% rename from test/GNNGraphs/gnnheterograph.jl rename to GNNGraphs/test/gnnheterograph.jl index e17159dc0..6764b7814 100644 --- a/test/GNNGraphs/gnnheterograph.jl +++ b/GNNGraphs/test/gnnheterograph.jl @@ -195,9 +195,9 @@ end # eindex = ((:A, :rel1, :B) => edges1, (:B, :rel2, :A) => edges2) # ndata = Dict(:A => (x = rand(2, num_nodes[:A]), y = rand(3, num_nodes[:A])),:B => rand(10, num_nodes[:B])) # edata= Dict((:A, :rel1, :B) => (x = rand(2, 20), y = rand(3, 20)),(:B, :rel2, :A) => rand(10, 30)) -# hg1 = GraphNeuralNetworks.GNNHeteroGraph(eindex; num_nodes) -# hg2 = GraphNeuralNetworks.GNNHeteroGraph(eindex; num_nodes, ndata,edata) -# hg3 = GraphNeuralNetworks.GNNHeteroGraph(eindex; num_nodes, ndata) +# hg1 = GNNHeteroGraph(eindex; num_nodes) +# hg2 = GNNHeteroGraph(eindex; num_nodes, ndata,edata) +# hg3 = GNNHeteroGraph(eindex; num_nodes, ndata) # @test sprint(show, hg1) == "GNNHeteroGraph(Dict(:A => 10, :B => 20), Dict((:A, :rel1, :B) => 20, (:B, :rel2, :A) => 30))" # @test sprint(show, hg2) == sprint(show, hg1) # @test sprint(show, MIME("text/plain"), hg1; context=:compact => true) == "GNNHeteroGraph(Dict(:A => 10, :B => 20), Dict((:A, :rel1, :B) => 20, (:B, :rel2, :A) => 30))" diff --git a/test/GNNGraphs/operators.jl b/GNNGraphs/test/operators.jl similarity index 100% rename from test/GNNGraphs/operators.jl rename to GNNGraphs/test/operators.jl diff --git a/test/GNNGraphs/query.jl b/GNNGraphs/test/query.jl similarity index 99% rename from test/GNNGraphs/query.jl rename to GNNGraphs/test/query.jl index b0f03a262..62d6a7e78 100644 --- a/test/GNNGraphs/query.jl +++ b/GNNGraphs/test/query.jl @@ -185,10 
+185,10 @@ end g = GNNGraph(s, t) @test laplacian_lambda_max(g) ≈ Float32(1.809017) data1 = [g for i in 1:5] - gall1 = Flux.batch(data1) + gall1 = MLUtils.batch(data1) @test laplacian_lambda_max(gall1) ≈ [Float32(1.809017) for i in 1:5] data2 = [rand_graph(10, 20) for i in 1:3] - gall2 = Flux.batch(data2) + gall2 = MLUtils.batch(data2) @test length(laplacian_lambda_max(gall2, add_self_loops=true)) == 3 end diff --git a/GNNGraphs/test/runtests.jl b/GNNGraphs/test/runtests.jl new file mode 100644 index 000000000..5c334a7fa --- /dev/null +++ b/GNNGraphs/test/runtests.jl @@ -0,0 +1,53 @@ +using CUDA +using GNNGraphs +using GNNGraphs: getn, getdata +using Functors +using LinearAlgebra, Statistics, Random +using NNlib +import MLUtils +import StatsBase +using SparseArrays +using Graphs +using Zygote +using Test +using MLDatasets +using InlineStrings # not used but with the import we test #98 and #104 +using SimpleWeightedGraphs + +CUDA.allowscalar(false) + +const ACUMatrix{T} = Union{CuMatrix{T}, CUDA.CUSPARSE.CuSparseMatrix{T}} + +ENV["DATADEPS_ALWAYS_ACCEPT"] = true # for MLDatasets + +include("test_utils.jl") + +tests = [ + "chainrules", + "datastore", + "gnngraph", + "convert", + "transform", + "operators", + "generate", + "query", + "sampling", + "gnnheterograph", + "temporalsnapshotsgnngraph", + "ext/SimpleWeightedGraphs/SimpleWeightedGraphs" +] + +!CUDA.functional() && @warn("CUDA unavailable, not testing GPU support") + +for graph_type in (:coo, :dense, :sparse) + @info "Testing graph format :$graph_type" + global GRAPH_T = graph_type + global TEST_GPU = CUDA.functional() && (GRAPH_T != :sparse) + # global GRAPH_T = :sparse + # global TEST_GPU = false + + @testset "$t" for t in tests + t == "sampling" && GRAPH_T != :coo && continue + include("$t.jl") + end +end diff --git a/test/GNNGraphs/sampling.jl b/GNNGraphs/test/sampling.jl similarity index 100% rename from test/GNNGraphs/sampling.jl rename to GNNGraphs/test/sampling.jl diff --git a/test/GNNGraphs/temporalsnapshotsgnngraph.jl b/GNNGraphs/test/temporalsnapshotsgnngraph.jl similarity index 98% rename from test/GNNGraphs/temporalsnapshotsgnngraph.jl rename to GNNGraphs/test/temporalsnapshotsgnngraph.jl index 90ddeafbf..7eccc6bde 100644 --- a/test/GNNGraphs/temporalsnapshotsgnngraph.jl +++ b/GNNGraphs/test/temporalsnapshotsgnngraph.jl @@ -107,7 +107,8 @@ if TEST_GPU snapshots = [rand_graph(10, 20; ndata = rand(5,10)) for i in 1:5] tsg = TemporalSnapshotsGNNGraph(snapshots) tsg.tgdata.x = rand(5) - tsg = Flux.gpu(tsg) + dev = gpu_device() + tsg = tsg |> dev @test tsg.snapshots[1].ndata.x isa CuArray @test tsg.snapshots[end].ndata.x isa CuArray @test tsg.tgdata.x isa CuArray
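The new test harness below validates layer gradients by comparing Zygote's reverse-mode results against finite differences. A minimal sketch of that pattern, detached from the package (the quadratic loss and the tolerances are illustrative):

```julia
using FiniteDifferences, Zygote

f(x) = sum(abs2, x)   # illustrative differentiable loss
x = randn(3)

grad_ad = Zygote.gradient(f, x)[1]                            # reverse-mode AD
grad_fd = FiniteDifferences.grad(central_fdm(5, 1), f, x)[1]  # 5-point central differences

@assert isapprox(grad_ad, grad_fd; atol = 1e-5, rtol = 1e-5)
```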
diff --git a/GNNGraphs/test/test_utils.jl b/GNNGraphs/test/test_utils.jl new file mode 100644 index 000000000..fe8f9a997 --- /dev/null +++ b/GNNGraphs/test/test_utils.jl @@ -0,0 +1,228 @@ +using ChainRulesTestUtils, FiniteDifferences, Zygote, Adapt, CUDA +CUDA.allowscalar(false) + +function ngradient(f, x...) + fdm = central_fdm(5, 1) + return FiniteDifferences.grad(fdm, f, x...) +end + +const rule_config = Zygote.ZygoteRuleConfig() + +# Using this until https://github.com/JuliaDiff/FiniteDifferences.jl/issues/188 is fixed +function FiniteDifferences.to_vec(x::Integer) + Integer_from_vec(v) = x + return Int[x], Integer_from_vec +end + +# Test that the forward pass on cpu and gpu gives the same results. +# Also test gradients on cpu and gpu, comparing them with +# finite-difference methods. +# Test gradients with respect to layer weights and to the input. +# If `g` has edge features, it is assumed that the layer can +# use them in the forward pass as `l(g, x, e)`. +# Also test the gradient with respect to `e`. +function test_layer(l, g::GNNGraph; atol = 1e-5, rtol = 1e-5, + exclude_grad_fields = [], + verbose = false, + test_gpu = TEST_GPU, + outsize = nothing, + outtype = :node) + + # TODO these give errors, probably some bugs in ChainRulesTestUtils + # test_rrule(rule_config, x -> l(g, x), x; rrule_f=rrule_via_ad, check_inferred=false) + # test_rrule(rule_config, l -> l(g, x), l; rrule_f=rrule_via_ad, check_inferred=false) + + isnothing(node_features(g)) && error("Please add node data to the input graph") + fdm = central_fdm(5, 1) + + x = node_features(g) + e = edge_features(g) + use_edge_feat = !isnothing(e) + + x64, e64, l64, g64 = to64.([x, e, l, g]) # needed for accurate FiniteDifferences' grad + xgpu, egpu, lgpu, ggpu = gpu.([x, e, l, g]) + + f(l, g::GNNGraph) = l(g) + f(l, g::GNNGraph, x, e) = use_edge_feat ? l(g, x, e) : l(g, x) + + loss(l, g::GNNGraph) = + if outtype == :node + sum(node_features(f(l, g))) + elseif outtype == :edge + sum(edge_features(f(l, g))) + elseif outtype == :graph + sum(graph_features(f(l, g))) + elseif outtype == :node_edge + gnew = f(l, g) + sum(node_features(gnew)) + sum(edge_features(gnew)) + end + + function loss(l, g::GNNGraph, x, e) + y = f(l, g, x, e) + if outtype == :node_edge + return sum(y[1]) + sum(y[2]) + else + return sum(y) + end + end + + # TEST OUTPUT + y = f(l, g, x, e) + if outtype == :node_edge + @assert y isa Tuple + @test eltype(y[1]) == eltype(x) + @test eltype(y[2]) == eltype(e) + @test all(isfinite, y[1]) + @test all(isfinite, y[2]) + if !isnothing(outsize) + @test size(y[1]) == outsize[1] + @test size(y[2]) == outsize[2] + end + else + @test eltype(y) == eltype(x) + @test all(isfinite, y) + if !isnothing(outsize) + @test size(y) == outsize + end + end + + # test same output on different graph formats + gcoo = GNNGraph(g, graph_type = :coo) + ycoo = f(l, gcoo, x, e) + if outtype == :node_edge + @test ycoo[1] ≈ y[1] + @test ycoo[2] ≈ y[2] + else + @test ycoo ≈ y + end + + g′ = f(l, g) + if outtype == :node + @test g′.ndata.x ≈ y + elseif outtype == :edge + @test g′.edata.e ≈ y + elseif outtype == :graph + @test g′.gdata.u ≈ y + elseif outtype == :node_edge + @test g′.ndata.x ≈ y[1] + @test g′.edata.e ≈ y[2] + else + @error "wrong outtype $outtype" + end + if test_gpu + ygpu = f(lgpu, ggpu, xgpu, egpu) + if outtype == :node_edge + @test ygpu[1] isa CuArray + @test eltype(ygpu[1]) == eltype(xgpu) + @test Array(ygpu[1]) ≈ y[1] + @test ygpu[2] isa CuArray + @test eltype(ygpu[2]) == eltype(xgpu) + @test Array(ygpu[2]) ≈ y[2] + else + @test ygpu isa CuArray + @test eltype(ygpu) == eltype(xgpu) + @test Array(ygpu) ≈ y + end + end + + # TEST x INPUT GRADIENT + x̄ = gradient(x -> loss(l, g, x, e), x)[1] + x̄_fd = FiniteDifferences.grad(fdm, x64 -> loss(l64, g64, x64, e64), x64)[1] + @test eltype(x̄) == eltype(x) + @test x̄≈x̄_fd atol=atol rtol=rtol + + if test_gpu + x̄gpu = gradient(xgpu -> loss(lgpu, ggpu, xgpu, egpu), xgpu)[1] + @test x̄gpu isa CuArray + @test eltype(x̄gpu) == eltype(x) + @test Array(x̄gpu)≈x̄ atol=atol rtol=rtol + end + + # TEST e INPUT GRADIENT + if e !== nothing + verbose && println("Test e gradient cpu") + ē = gradient(e -> loss(l, g, x, e), e)[1] + ē_fd = FiniteDifferences.grad(fdm, e64 -> loss(l64, g64, x64, e64), e64)[1] + @test eltype(ē) == eltype(e) + @test ē≈ē_fd atol=atol rtol=rtol + + if test_gpu + verbose && println("Test e gradient gpu") + ēgpu = gradient(egpu -> loss(lgpu, ggpu, xgpu, egpu), egpu)[1]
+ @test ēgpu isa CuArray + @test eltype(ēgpu) == eltype(ē) + @test Array(ēgpu)≈ē atol=atol rtol=rtol + end + end + + # TEST LAYER GRADIENT - l(g, x, e) + l̄ = gradient(l -> loss(l, g, x, e), l)[1] + l̄_fd = FiniteDifferences.grad(fdm, l64 -> loss(l64, g64, x64, e64), l64)[1] + test_approx_structs(l, l̄, l̄_fd; atol, rtol, exclude_grad_fields, verbose) + + if test_gpu + l̄gpu = gradient(lgpu -> loss(lgpu, ggpu, xgpu, egpu), lgpu)[1] + test_approx_structs(lgpu, l̄gpu, l̄; atol, rtol, exclude_grad_fields, verbose) + end + + # TEST LAYER GRADIENT - l(g) + l̄ = gradient(l -> loss(l, g), l)[1] + test_approx_structs(l, l̄, l̄_fd; atol, rtol, exclude_grad_fields, verbose) + + return true +end + +function test_approx_structs(l, l̄, l̄fd; atol = 1e-5, rtol = 1e-5, + exclude_grad_fields = [], + verbose = false) + l̄ = l̄ isa Base.RefValue ? l̄[] : l̄ # Zygote wraps gradient of mutables in RefValue + l̄fd = l̄fd isa Base.RefValue ? l̄fd[] : l̄fd # Zygote wraps gradient of mutables in RefValue + + for f in fieldnames(typeof(l)) + f ∈ exclude_grad_fields && continue + verbose && println("Test gradient of field $f...") + x, g, gfd = getfield(l, f), getfield(l̄, f), getfield(l̄fd, f) + test_approx_structs(x, g, gfd; atol, rtol, exclude_grad_fields, verbose) + verbose && println("... field $f done!") + end + return true +end + +function test_approx_structs(x, g::Nothing, gfd; atol, rtol, kws...) + # finite difference gradients have to be zero if present + @test !(gfd isa AbstractArray) || isapprox(gfd, fill!(similar(gfd), 0); atol, rtol) +end + +function test_approx_structs(x::Union{AbstractArray, Number}, + g::Union{AbstractArray, Number}, gfd; atol, rtol, kws...) + @test eltype(g) == eltype(x) + if x isa CuArray + @test g isa CuArray + g = Array(g) + end + @test g≈gfd atol=atol rtol=rtol +end + +""" + to32(m) + +Convert the `eltype` of the model's float parameters to `Float32`. +Preserves integer arrays. +""" +to32(m) = _paramtype(Float32, m) + +""" + to64(m) + +Convert the `eltype` of the model's float parameters to `Float64`. +Preserves integer arrays.
+""" +to64(m) = _paramtype(Float64, m) + +struct GNNEltypeAdaptor{T} end + +Adapt.adapt_storage(::GNNEltypeAdaptor{T}, x::AbstractArray{<:AbstractFloat}) where T = convert(AbstractArray{T}, x) +Adapt.adapt_storage(::GNNEltypeAdaptor{T}, x::AbstractArray{<:Integer}) where T = x +Adapt.adapt_storage(::GNNEltypeAdaptor{T}, x::AbstractArray{<:Number}) where T = convert(AbstractArray{T}, x) + +_paramtype(::Type{T}, m) where T = fmap(adapt(GNNEltypeAdaptor{T}()), m) diff --git a/test/GNNGraphs/transform.jl b/GNNGraphs/test/transform.jl similarity index 97% rename from test/GNNGraphs/transform.jl rename to GNNGraphs/test/transform.jl index af414bbd1..e3ea23e17 100644 --- a/test/GNNGraphs/transform.jl +++ b/GNNGraphs/test/transform.jl @@ -22,15 +22,15 @@ end g2 = GNNGraph(random_regular_graph(4, 2), ndata = rand(16, 4), graph_type = GRAPH_T) g3 = GNNGraph(random_regular_graph(7, 2), ndata = rand(16, 7), graph_type = GRAPH_T) - g12 = Flux.batch([g1, g2]) + g12 = MLUtils.batch([g1, g2]) g12b = blockdiag(g1, g2) @test g12 == g12b - g123 = Flux.batch([g1, g2, g3]) + g123 = MLUtils.batch([g1, g2, g3]) @test g123.graph_indicator == [fill(1, 10); fill(2, 4); fill(3, 7)] # Allow wider eltype - g123 = Flux.batch(GNNGraph[g1, g2, g3]) + g123 = MLUtils.batch(GNNGraph[g1, g2, g3]) @test g123.graph_indicator == [fill(1, 10); fill(2, 4); fill(3, 7)] @@ -43,11 +43,11 @@ end g1 = GNNGraph(g1, gdata = rand()) g2 = GNNGraph(g2, gdata = rand()) g3 = GNNGraph(g3, gdata = rand()) - g123 = Flux.batch([g1, g2, g3]) + g123 = MLUtils.batch([g1, g2, g3]) @test g123.gdata.u == [g1.gdata.u, g2.gdata.u, g3.gdata.u] # Batch of batches - g123123 = Flux.batch([g123, g123]) + g123123 = MLUtils.batch([g123, g123]) @test g123123.graph_indicator == [fill(1, 10); fill(2, 4); fill(3, 7); fill(4, 10); fill(5, 4); fill(6, 7)] @test g123123.num_graphs == 6 @@ -56,8 +56,8 @@ end @testset "unbatch" begin g1 = rand_graph(10, 20, graph_type = GRAPH_T) g2 = rand_graph(5, 10, graph_type = GRAPH_T) - g12 = Flux.batch([g1, g2]) - gs = Flux.unbatch([g1, g2]) + g12 = MLUtils.batch([g1, g2]) + gs = MLUtils.unbatch([g1, g2]) @test length(gs) == 2 @test gs[1].num_nodes == 10 @test gs[1].num_edges == 20 @@ -74,8 +74,8 @@ end gs = [rand_graph(n, c * n, ndata = rand(2, n), edata = rand(3, c * n), graph_type = GRAPH_T) for _ in 1:ngraphs] - gall = Flux.batch(gs) - gs2 = Flux.unbatch(gall) + gall = MLUtils.batch(gs) + gs2 = MLUtils.unbatch(gall) @test gs2[1] == gs[1] @test gs2[end] == gs[end] end @@ -85,7 +85,7 @@ end graph_type = GRAPH_T) g2 = GNNGraph(random_regular_graph(4, 2), ndata = rand(16, 4), graph_type = GRAPH_T) g3 = GNNGraph(random_regular_graph(7, 2), ndata = rand(16, 7), graph_type = GRAPH_T) - g = Flux.batch([g1, g2, g3]) + g = MLUtils.batch([g1, g2, g3]) g2b, nodemap = getgraph(g, 2, nmap = true) s, t = edge_index(g2b) @@ -427,7 +427,7 @@ end @testset "HeteroGraphs" begin @testset "batch" begin gs = [rand_bipartite_heterograph((10, 15), 20) for _ in 1:5] - g = Flux.batch(gs) + g = MLUtils.batch(gs) @test g.num_nodes[:A] == 50 @test g.num_nodes[:B] == 75 @test g.num_edges[(:A,:to,:B)] == 100 @@ -442,14 +442,14 @@ end gi.edata[(:A,:to,:B)].e = fill(2, 20) gi.gdata.u = 7 end - g = Flux.batch(gs) + g = MLUtils.batch(gs) @test g.ndata[:A].x == ones(2, 50) @test g.ndata[:A].y == zeros(50) @test g.edata[(:A,:to,:B)].e == fill(2, 100) @test g.gdata.u == fill(7, 5) # Allow for wider eltype - g = Flux.batch(GNNHeteroGraph[g for g in gs]) + g = MLUtils.batch(GNNHeteroGraph[g for g in gs]) @test g.ndata[:A].x == ones(2, 50) @test g.ndata[:A].y == 
zeros(50) @test g.edata[(:A,:to,:B)].e == fill(2, 100) @@ -463,7 +463,7 @@ end rand_heterograph((:A => 10, :B => 10, :C => 10), ((:A, :to1, :C) => 5, (:A, :to1, :B) => 5)), rand_heterograph((:C => 20), ((:C, :to3, :C) => 10)) ] - g = Flux.batch(gs) + g = MLUtils.batch(gs) @test g.num_nodes[:A] == 10 + 10 + 10 @test g.num_nodes[:B] == 14 + 15 + 15 + 10 @@ -501,7 +501,7 @@ end gi.gdata.u = 7 end - g = Flux.batch(gs) + g = MLUtils.batch(gs) @test g.ndata[:A].x == reduce(hcat, fill(0, 10 + 10 + 10)) @test g.ndata[:A].y == ones(2, 10 + 10 + 10) @@ -513,7 +513,7 @@ end @test g.gdata.u == fill(7, 5) # Allow for wider eltype - g = Flux.batch(GNNHeteroGraph[g for g in gs]) + g = MLUtils.batch(GNNHeteroGraph[g for g in gs]) @test g.ndata[:A].x == reduce(hcat, fill(0, 10 + 10 + 10)) @test g.ndata[:A].y == ones(2, 10 + 10 + 10) @test g.ndata[:B].x == ones(3, 14 + 15 + 15 + 10) diff --git a/test/GNNGraphs/utils.jl b/GNNGraphs/test/utils.jl similarity index 100% rename from test/GNNGraphs/utils.jl rename to GNNGraphs/test/utils.jl diff --git a/GNNlib/Project.toml b/GNNlib/Project.toml index e8195c7df..0607cb813 100644 --- a/GNNlib/Project.toml +++ b/GNNlib/Project.toml @@ -23,11 +23,9 @@ StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" [weakdeps] CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -SimpleWeightedGraphs = "47aef6b3-ad0c-573a-a1e2-d07658019622" [extensions] GNNlibCUDAExt = "CUDA" -GNNlibSimpleWeightedGraphsExt = "SimpleWeightedGraphs" [compat] Adapt = "3, 4" @@ -45,7 +43,6 @@ NNlib = "0.9" NearestNeighbors = "0.4" Random = "1" Reexport = "1" -SimpleWeightedGraphs = "1.4.0" SparseArrays = "1" Statistics = "1" StatsBase = "0.34" diff --git a/GNNlib/ext/GNNlibCUDAExt/GNNlibCUDAExt.jl b/GNNlib/ext/GNNlibCUDAExt/GNNlibCUDAExt.jl index bd3c919ae..600449422 100644 --- a/GNNlib/ext/GNNlibCUDAExt/GNNlibCUDAExt.jl +++ b/GNNlib/ext/GNNlibCUDAExt/GNNlibCUDAExt.jl @@ -2,16 +2,10 @@ module GNNlibCUDAExt using CUDA using Random, Statistics, LinearAlgebra -using GNNlib -using GNNlib.GNNGraphs -using GNNlib.GNNGraphs: COO_T, ADJMAT_T, SPARSE_T import GNNlib: propagate const CUMAT_T = Union{CUDA.AnyCuMatrix, CUDA.CUSPARSE.CuSparseMatrix} -include("GNNGraphs/query.jl") -include("GNNGraphs/transform.jl") -include("GNNGraphs/utils.jl") include("msgpass.jl") end #module diff --git a/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/query.jl b/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/query.jl deleted file mode 100644 index 0e74f725e..000000000 --- a/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/query.jl +++ /dev/null @@ -1,2 +0,0 @@ - -GNNGraphs._rand_dense_vector(A::CUMAT_T) = CUDA.randn(size(A, 1)) diff --git a/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/transform.jl b/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/transform.jl deleted file mode 100644 index d2ee417fc..000000000 --- a/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/transform.jl +++ /dev/null @@ -1,2 +0,0 @@ - -GNNGraphs.dense_zeros_like(a::CUMAT_T, T::Type, sz = size(a)) = CUDA.zeros(T, sz) diff --git a/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/utils.jl b/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/utils.jl deleted file mode 100644 index c3d78e9c1..000000000 --- a/ext/GraphNeuralNetworksCUDAExt/GNNGraphs/utils.jl +++ /dev/null @@ -1,8 +0,0 @@ - -GNNGraphs.iscuarray(x::AnyCuArray) = true - - -function sort_edge_index(u::AnyCuArray, v::AnyCuArray) - #TODO proper cuda friendly implementation - sort_edge_index(u |> Flux.cpu, v |> Flux.cpu) |> Flux.gpu -end \ No newline at end of file diff --git a/ext/GraphNeuralNetworksCUDAExt/GraphNeuralNetworksCUDAExt.jl 
b/ext/GraphNeuralNetworksCUDAExt/GraphNeuralNetworksCUDAExt.jl index 5e0132890..538083043 100644 --- a/ext/GraphNeuralNetworksCUDAExt/GraphNeuralNetworksCUDAExt.jl +++ b/ext/GraphNeuralNetworksCUDAExt/GraphNeuralNetworksCUDAExt.jl @@ -9,9 +9,6 @@ import GraphNeuralNetworks: propagate const CUMAT_T = Union{CUDA.AnyCuMatrix, CUDA.CUSPARSE.CuSparseMatrix} -include("GNNGraphs/query.jl") -include("GNNGraphs/transform.jl") -include("GNNGraphs/utils.jl") include("msgpass.jl") end #module diff --git a/ext/GraphNeuralNetworksSimpleWeightedGraphsExt/GraphNeuralNetworksSimpleWeightedGraphsExt.jl b/ext/GraphNeuralNetworksSimpleWeightedGraphsExt/GraphNeuralNetworksSimpleWeightedGraphsExt.jl deleted file mode 100644 index aabc13443..000000000 --- a/ext/GraphNeuralNetworksSimpleWeightedGraphsExt/GraphNeuralNetworksSimpleWeightedGraphsExt.jl +++ /dev/null @@ -1,12 +0,0 @@ -module GraphNeuralNetworksSimpleWeightedGraphsExt - -using GraphNeuralNetworks -using Graphs -using SimpleWeightedGraphs - -function GraphNeuralNetworks.GNNGraph(g::T; kws...) where - {T <: Union{SimpleWeightedGraph, SimpleWeightedDiGraph}} - return GNNGraph(g.weights, kws...) -end - -end #module \ No newline at end of file diff --git a/src/GNNGraphs/gnngraph.jl b/src/GNNGraphs/gnngraph.jl index e9b7e2414..44ebe59a7 100644 --- a/src/GNNGraphs/gnngraph.jl +++ b/src/GNNGraphs/gnngraph.jl @@ -27,7 +27,7 @@ as well, unless explicitly set by the keyword arguments `ndata`, `edata`, and `gdata`. A `GNNGraph` can also represent multiple graphs batched together -(see [`Flux.batch`](@ref) or [`SparseArrays.blockdiag`](@ref)). +(see [`MLUtils.batch`](@ref) or [`SparseArrays.blockdiag`](@ref)). The field `g.graph_indicator` contains the graph membership of each node. diff --git a/test/runtests.jl b/test/runtests.jl index 85e26ac38..cc7a93d0f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -26,17 +26,6 @@ ENV["DATADEPS_ALWAYS_ACCEPT"] = true # for MLDatasets include("test_utils.jl") tests = [ - "GNNGraphs/chainrules", - "GNNGraphs/datastore", - "GNNGraphs/gnngraph", - "GNNGraphs/convert", - "GNNGraphs/transform", - "GNNGraphs/operators", - "GNNGraphs/generate", - "GNNGraphs/query", - "GNNGraphs/sampling", - "GNNGraphs/gnnheterograph", - "GNNGraphs/temporalsnapshotsgnngraph", "utils", "msgpass", "layers/basic", @@ -47,7 +36,6 @@ tests = [ "mldatasets", "examples/node_classification_cora", "deprecations", - "ext/GraphNeuralNetworksSimpleWeightedGraphsExt/GraphNeuralNetworksSimpleWeightedGraphsExt" ] !CUDA.functional() && @warn("CUDA unavailable, not testing GPU support")
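The pattern adopted throughout this PR for operations without a GPU kernel is: record the input's device with LuxDeviceUtils, compute on the CPU, and move the result back (see `sort_edge_index` and `negative_sample` above). A minimal sketch under those assumptions; `cpu_fallback_sort` and its sorting body are illustrative, not the package's implementation:

```julia
using LuxDeviceUtils: get_device, cpu_device

# Fall back to the CPU for an operation lacking a GPU implementation,
# then return the result on whatever device the input came from.
function cpu_fallback_sort(u::AbstractVector, v::AbstractVector)
    dev = get_device(u)       # device the caller's data lives on
    cdev = cpu_device()
    u, v = u |> cdev, v |> cdev
    p = sortperm(u)           # stand-in for the real edge-index sort
    return u[p] |> dev, v[p] |> dev
end
```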