Merge pull request #274 from pyt-team/hypergraph_models_input_logic
Update the Input Logic of Hypergraph Models
ninamiolane authored Apr 12, 2024
2 parents c349941 + 74b77e4 commit 2267768
Showing 22 changed files with 336 additions and 61 deletions.
18 changes: 11 additions & 7 deletions topomodelx/nn/hypergraph/allset.py
@@ -16,18 +16,20 @@ class AllSet(torch.nn.Module):
Dimension of the input features.
hidden_channels : int
Dimension of the hidden features.
n_layers : int, default: 2
n_layers : int, default = 2
Number of AllSet layers in the network.
layer_dropout: float, default: 0.2
layer_dropout : float, default = 0.2
Dropout probability for the AllSet layer.
mlp_num_layers : int, default: 2
mlp_num_layers : int, default = 2
Number of layers in the MLP.
mlp_dropout : float, default: 0.0
Dropout probability for the MLP.
mlp_activation : torch.nn.Module, default: None
mlp_activation : torch.nn.Module, default = None
Activation function in the MLP.
mlp_norm : bool, default: False
mlp_dropout : float, default = 0.0
Dropout probability for the MLP.
mlp_norm : bool, default = False
Whether to apply input normalization in the MLP.
**kwargs : optional
Additional arguments for the inner layers.
References
----------
@@ -47,6 +49,7 @@ def __init__(
mlp_activation=None,
mlp_dropout=0.0,
mlp_norm=None,
**kwargs,
):
super().__init__()

@@ -59,6 +62,7 @@ def __init__(
mlp_activation=mlp_activation,
mlp_dropout=mlp_dropout,
mlp_norm=mlp_norm,
**kwargs,
)
for i in range(n_layers)
)
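The change above is the pattern this PR applies across the hypergraph models: the model now accepts **kwargs and forwards them unchanged to every inner layer it builds. Below is a minimal, self-contained sketch of that pattern only, assuming plain PyTorch; DemoLayer and DemoModel are hypothetical stand-ins for a layer/model pair such as AllSetLayer/AllSet, not the library's classes.

    import torch
    from torch import nn

    class DemoLayer(nn.Module):
        # Hypothetical stand-in for an inner layer such as AllSetLayer.
        def __init__(self, in_channels, hidden_channels, mlp_dropout=0.0, **kwargs):
            super().__init__()
            self.linear = nn.Linear(in_channels, hidden_channels)
            self.dropout = nn.Dropout(mlp_dropout)

    class DemoModel(nn.Module):
        # Mirrors the updated input logic: keyword arguments given to the model
        # are forwarded unchanged to every inner layer it builds.
        def __init__(self, in_channels, hidden_channels, n_layers=2, **kwargs):
            super().__init__()
            self.layers = nn.ModuleList(
                DemoLayer(
                    in_channels=in_channels if i == 0 else hidden_channels,
                    hidden_channels=hidden_channels,
                    **kwargs,
                )
                for i in range(n_layers)
            )

    model = DemoModel(in_channels=4, hidden_channels=8, n_layers=2, mlp_dropout=0.1)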
18 changes: 13 additions & 5 deletions topomodelx/nn/hypergraph/allset_layer.py
@@ -27,6 +27,8 @@ class AllSetLayer(nn.Module):
Dropout probability in the MLP.
mlp_norm : str or None, optional
Type of layer normalization in the MLP.
**kwargs : optional
Additional arguments for the layer modules.
References
----------
@@ -45,6 +47,7 @@ def __init__(
mlp_activation=nn.ReLU,
mlp_dropout: float = 0.0,
mlp_norm=None,
**kwargs,
) -> None:
super().__init__()

@@ -60,6 +63,7 @@ def __init__(
mlp_activation=mlp_activation,
mlp_dropout=mlp_dropout,
mlp_norm=mlp_norm,
**kwargs,
)

self.edge2vertex = AllSetBlock(
@@ -70,6 +74,7 @@ def __init__(
mlp_activation=mlp_activation,
mlp_dropout=mlp_dropout,
mlp_norm=mlp_norm,
**kwargs,
)

def reset_parameters(self) -> None:
@@ -103,7 +108,7 @@ def forward(self, x_0, incidence_1):
Parameters
----------
x : torch.Tensor, shape = (n_nodes, channels)
x_0 : torch.Tensor, shape = (n_nodes, channels)
Node input features.
incidence_1 : torch.sparse, shape = (n_nodes, n_hyperedges)
Incidence matrix :math:`B_1` mapping hyperedges to nodes.
@@ -200,6 +205,8 @@ class AllSetBlock(nn.Module):
Dropout probability in the MLP.
mlp_norm : callable or None, optional
Type of layer normalization in the MLP.
**kwargs : optional
Additional arguments for the block modules.
"""

encoder: MLP | nn.Identity
@@ -214,6 +221,7 @@ def __init__(
mlp_activation=nn.ReLU,
mlp_dropout: float = 0.0,
mlp_norm=None,
**kwargs,
) -> None:
super().__init__()

@@ -255,23 +263,23 @@ def reset_parameters(self) -> None:
self.decoder.reset_parameters()
self.conv.reset_parameters()

def forward(self, x, incidence):
def forward(self, x_0, incidence_1):
"""
Forward computation.
Parameters
----------
x_0 : torch.Tensor
Input node features.
incidence : torch.sparse
incidence_1 : torch.sparse
Incidence matrix between node/hyperedges.
Returns
-------
torch.Tensor
Output features.
"""
x = F.relu(self.encoder(x))
x = F.relu(self.encoder(x_0))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv(x, incidence)
x = self.conv(x, incidence_1)
return F.relu(self.decoder(x))
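The renamed arguments x_0 and incidence_1 match the two-stage message passing this layer performs: vertex2edge lifts node features onto hyperedges through the incidence matrix B_1, and edge2vertex sends the result back to the nodes. A toy sketch of just those two propagation directions follows; it leaves out the MLP encoder/decoder, dropout, and normalization that the real blocks apply, and keeps the incidence matrix dense for brevity (the layer itself expects a torch.sparse tensor).

    import torch

    # Toy incidence matrix B1 (4 nodes, 2 hyperedges); entry (i, e) = 1 if node i belongs to hyperedge e.
    incidence_1 = torch.tensor([[1., 0.],
                                [1., 1.],
                                [0., 1.],
                                [1., 0.]])
    x_0 = torch.randn(4, 3)  # node features

    x_1 = incidence_1.t() @ x_0   # vertex2edge: aggregate node features per hyperedge, shape (2, 3)
    x_0_out = incidence_1 @ x_1   # edge2vertex: push hyperedge features back to the nodes, shape (4, 3)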
4 changes: 4 additions & 0 deletions topomodelx/nn/hypergraph/allset_transformer.py
@@ -26,6 +26,8 @@ class AllSetTransformer(torch.nn.Module):
Number of layers in the MLP.
mlp_dropout : float, default: 0.2
Dropout probability in the MLP.
**kwargs : optional
Additional arguments for the inner layers.
References
----------
@@ -44,6 +46,7 @@ def __init__(
dropout=0.2,
mlp_num_layers=2,
mlp_dropout=0.2,
**kwargs,
):
super().__init__()

@@ -55,6 +58,7 @@ def __init__(
heads=heads,
mlp_num_layers=mlp_num_layers,
mlp_dropout=mlp_dropout,
**kwargs,
)
for i in range(n_layers)
)
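A hedged construction example for the updated model. The parameter names below come from the __init__ hunks shown above and from the sibling models in this PR; in_channels and hidden_channels are not visible in these hunks and are assumptions, as are all the concrete values.

    from topomodelx.nn.hypergraph.allset_transformer import AllSetTransformer

    model = AllSetTransformer(
        in_channels=32,      # assumed, by analogy with AllSet above
        hidden_channels=64,  # assumed
        heads=4,
        n_layers=1,
        dropout=0.2,
        mlp_num_layers=2,
        mlp_dropout=0.2,
    )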
3 changes: 3 additions & 0 deletions topomodelx/nn/hypergraph/allset_transformer_layer.py
@@ -32,6 +32,8 @@ class AllSetTransformerLayer(nn.Module):
Dropout probability in the MLP.
mlp_norm : str or None, optional
Type of layer normalization in the MLP.
**kwargs : optional
Additional arguments for the layer modules.
References
----------
@@ -52,6 +54,7 @@ def __init__(
mlp_activation=nn.ReLU,
mlp_dropout: float = 0.0,
mlp_norm=None,
**kwargs,
) -> None:
super().__init__()

6 changes: 5 additions & 1 deletion topomodelx/nn/hypergraph/dhgcn.py
@@ -16,8 +16,10 @@ class DHGCN(torch.nn.Module):
Dimension of the input features.
hidden_channels : int
Dimension of the hidden features.
n_layer : int, default = 2
n_layers : int, default = 2
Amount of message passing layers.
**kwargs : optional
Additional arguments for the inner layers.
References
----------
@@ -31,6 +33,7 @@ def __init__(
in_channels,
hidden_channels,
n_layers=1,
**kwargs,
):
super().__init__()

@@ -39,6 +42,7 @@ def __init__(
in_channels=in_channels if i == 0 else hidden_channels,
intermediate_channels=hidden_channels,
out_channels=hidden_channels,
**kwargs,
)
for i in range(n_layers)
)
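With **kwargs in place, layer-level options that DHGCNLayer exposes (documented in the next file, e.g. k_neighbours and k_centroids) can be passed straight through the model constructor. A small hedged example; the concrete values are made up:

    from topomodelx.nn.hypergraph.dhgcn import DHGCN

    # k_centroids is forwarded via **kwargs to every DHGCNLayer.
    model = DHGCN(in_channels=32, hidden_channels=64, n_layers=2, k_centroids=8)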
27 changes: 18 additions & 9 deletions topomodelx/nn/hypergraph/dhgcn_layer.py
@@ -18,6 +18,14 @@ class DHGCNLayer(torch.nn.Module):
Dimension of intermediate features.
out_channels : int
Dimension of output features.
k_neighbours : int, default=3
Number of neighbours to consider in the local topology.
k_centroids : int, default=4
Number of centroids to consider in the global topology.
device : str, default="cpu"
Device to store the tensors.
**kwargs : optional
Additional arguments for the layer modules.
References
----------
@@ -40,6 +48,7 @@ def __init__(
k_neighbours: int = 3,
k_centroids: int = 4,
device: str = "cpu",
**kwargs,
) -> None:
super().__init__()

@@ -69,7 +78,7 @@ def kmeans_graph(x, k, flow: str = "source_to_target"):
x : torch.Tensor, shape = (n_nodes, node_features)
Input features on the nodes of the simplicial complex.
k : int
Number of clusters/centroids
Number of clusters/centroids.
flow : str
If this parameter has value "source_to_target", the output will have the shape
[n_nodes, n_hyperedges = k_centroids].
@@ -80,11 +89,9 @@ def kmeans_graph(x, k, flow: str = "source_to_target"):
Returns
-------
hyperedge_index : torch.Tensor, shape = (n_nodes, 2)
Indices of the on-zero values in the feature matrix of hypergraph
convolutional network.
The order of dimensions of the matrix is defined by the value of the flow
parameter.
torch.Tensor
Indices of the on-zero values in the feature matrix of hypergraph convolutional network.
The order of dimensions of the matrix is defined by the value of the flow parameter.
"""
assert flow in ["source_to_target", "target_to_source"]
device = x.device
@@ -142,10 +149,12 @@ def kmeans(self, x_0, k=None):
----------
x_0 : torch.Tensor, shape = (n_nodes, node_features)
Input features on the nodes of the simplicial complex.
k : int
Number of clusters/centroids.
Returns
-------
hyperedge_index : torch.Tensor, shape = (n_nodes, 2)
torch.Tensor
Indices of the on-zero values in the feature matrix of hypergraph convolutional network.
"""
if k is None:
@@ -166,8 +175,8 @@ def get_dynamic_topology(self, x_0_features):
Returns
-------
hyperedge_incidence_matrix : torch.Tensor, shape = (n_nodes, n_nodes + k_centroids)
Incidence matrix mapping edges to nodes.
torch.Tensor
Incidence matrix mapping edges to nodes, shape = (n_nodes, n_nodes + k_centroids).
"""
device = x_0_features.device
n_nodes = x_0_features.size(0)
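To make the documented shapes concrete: the dynamic topology combines one local hyperedge per node (built from its k_neighbours nearest neighbours) with k_centroids global hyperedges (the k-means clusters), so the incidence matrix has n_nodes + k_centroids columns. The sketch below is a rough, hypothetical illustration of those shapes only; it substitutes a random cluster assignment for the actual k-means step and is not the library's implementation.

    import torch
    import torch.nn.functional as F

    n_nodes, k_neighbours, k_centroids = 6, 3, 4
    x_0 = torch.randn(n_nodes, 8)  # node features

    # Local hyperedges: column j contains node j and its k nearest neighbours.
    dist = torch.cdist(x_0, x_0)
    knn = dist.topk(k_neighbours + 1, largest=False).indices  # includes the node itself (distance 0)
    local = torch.zeros(n_nodes, n_nodes)
    for j in range(n_nodes):
        local[knn[j], j] = 1.0

    # Global hyperedges: placeholder cluster assignment standing in for k-means.
    assignment = torch.randint(0, k_centroids, (n_nodes,))
    global_part = F.one_hot(assignment, k_centroids).float()

    incidence = torch.cat([local, global_part], dim=1)
    print(incidence.shape)  # torch.Size([6, 10]) == (n_nodes, n_nodes + k_centroids)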
12 changes: 7 additions & 5 deletions topomodelx/nn/hypergraph/hmpnn.py
@@ -12,18 +12,18 @@ class HMPNN(torch.nn.Module):
Parameters
----------
in_channels : int
Dimension of input features
Dimension of input features.
hidden_channels : Tuple[int]
A tuple of hidden feature dimensions to gradually reduce node/hyperedge representations feature
dimension from in_features to the last item in the tuple.
num_classes: int
Number of classes
n_layers : int, default = 2
Number of HMPNNLayer layers.
adjacency_dropout_rate: int, default = 0.7
adjacency_dropout_rate : int, default = 0.7
Adjacency dropout rate.
regular_dropout_rate : int, default = 0.5
Regular dropout rate applied on features.
**kwargs : optional
Additional arguments for the inner layers.
References
----------
@@ -40,6 +40,7 @@ def __init__(
n_layers=2,
adjacency_dropout_rate=0.7,
regular_dropout_rate=0.5,
**kwargs,
):
super().__init__()

@@ -52,6 +53,7 @@ def __init__(
hidden_channels,
adjacency_dropout=adjacency_dropout_rate,
updating_dropout=regular_dropout_rate,
**kwargs,
)
for _ in range(n_layers)
]
@@ -66,7 +68,7 @@ def forward(self, x_0, x_1, incidence_1):
Node features.
x_1 : torch.Tensor, shape = (n_hyperedges, in_features)
Hyperedge features.
incidence_1: torch.sparse.Tensor, shape = (n_nodes, n_hyperedges)
incidence_1 : torch.sparse.Tensor, shape = (n_nodes, n_hyperedges)
Incidence matrix (B1).
Returns
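A small sketch of inputs matching the shapes documented for the updated forward signature. The tensors are toy values, and the model call is only indicated in a comment since the full constructor is not shown in these hunks.

    import torch

    n_nodes, n_hyperedges, in_features = 6, 3, 8
    x_0 = torch.randn(n_nodes, in_features)       # node features
    x_1 = torch.randn(n_hyperedges, in_features)  # hyperedge features
    incidence_1 = torch.randint(0, 2, (n_nodes, n_hyperedges)).float().to_sparse()  # B1

    # Assuming a model constructed from the HMPNN class above:
    # out = model(x_0, x_1, incidence_1)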
(The remaining 15 changed files are not shown.)