From b935c8880138dfdbee0549fc40010d8a7ff72fb9 Mon Sep 17 00:00:00 2001 From: Massimiliano Lupo Pasini Date: Wed, 27 Nov 2024 12:26:32 -0500 Subject: [PATCH] edge_dim added as argument to GATv2Conv (#310) * edge_dim added as argument to GATv2Conv * GAT added in edge_models * added model_type as optional input argument to qm9 and md17 examples * edge_dim passed into GATv2Conv stack inside create method * architectural arguments added to vectoroutput CI test * num_samples variable moved outside main function scope in qm9 example * SAGE, GIN, and MFC removed from examples where there are edge features * Correct management of node degree on GPUs * split examples test based on whether the model needs to use data.pos or not * model_type overwrite in config moved to right location in the code * comment for allowed stacks in LJ force_grad * Add MACE to test * black formatting --------- Co-authored-by: Rylie Weaver --- examples/md17/md17.json | 12 ++ examples/md17/md17.py | 182 ++++++++++-------- examples/qm9/qm9.json | 12 ++ examples/qm9/qm9.py | 172 +++++++++-------- hydragnn/models/Base.py | 10 +- hydragnn/models/GATStack.py | 3 + hydragnn/models/PNAEqStack.py | 15 +- hydragnn/models/create.py | 1 + .../input_config_parsing/config_utils.py | 3 +- hydragnn/utils/model/model.py | 20 +- tests/inputs/ci_vectoroutput.json | 8 + tests/test_examples.py | 52 ++++- tests/test_graphs.py | 15 +- 13 files changed, 316 insertions(+), 189 deletions(-) diff --git a/examples/md17/md17.json b/examples/md17/md17.json index ca5bff255..62537c134 100644 --- a/examples/md17/md17.json +++ b/examples/md17/md17.json @@ -11,6 +11,18 @@ "periodic_boundary_conditions": false, "hidden_dim": 5, "num_conv_layers": 6, + "int_emb_size": 32, + "out_emb_size": 16, + "basis_emb_size": 8, + "num_gaussians": 10, + "num_filters": 8, + "num_before_skip": 1, + "num_after_skip": 1, + "envelope_exponent": 5, + "max_ell": 1, + "node_max_ell": 1, + "num_radial": 5, + "num_spherical": 2, "output_heads": {
"graph":{ "num_sharedlayers": 2, diff --git a/examples/md17/md17.py b/examples/md17/md17.py index 83695df9c..3705528d1 100644 --- a/examples/md17/md17.py +++ b/examples/md17/md17.py @@ -1,23 +1,20 @@ -import os, json - +import os +import json import torch - -# FIX random seed -random_state = 0 -torch.manual_seed(random_state) - import torch_geometric +import argparse # deprecated in torch_geometric 2.0 try: from torch_geometric.loader import DataLoader -except: +except ImportError: from torch_geometric.data import DataLoader import hydragnn + # Update each sample prior to loading. -def md17_pre_transform(data): +def md17_pre_transform(data, compute_edges): # Set descriptor as element type. data.x = data.z.float().view(-1, 1) # Only predict energy (index 0 of 2 properties) for this run. @@ -33,78 +30,95 @@ def md17_pre_filter(data): return torch.rand(1) < 0.25 -# Set this path for output. -try: - os.environ["SERIALIZED_DATA_PATH"] -except: - os.environ["SERIALIZED_DATA_PATH"] = os.getcwd() - -# Configurable run choices (JSON file that accompanies this example script). -filename = os.path.join(os.path.dirname(__file__), "md17.json") -with open(filename, "r") as f: - config = json.load(f) -verbosity = config["Verbosity"]["level"] -arch_config = config["NeuralNetwork"]["Architecture"] -var_config = config["NeuralNetwork"]["Variables_of_interest"] - -# Always initialize for multi-rank training. -world_size, world_rank = hydragnn.utils.distributed.setup_ddp() - -log_name = "md17_test" -# Enable print to log file. -hydragnn.utils.print.print_utils.setup_log(log_name) - -# Use built-in torch_geometric datasets. -# Filter function above used to run quick example. -# NOTE: data is moved to the device in the pre-transform. -# NOTE: transforms/filters will NOT be re-run unless the qm9/processed/ directory is removed. 
-compute_edges = hydragnn.preprocess.get_radius_graph_config(arch_config) - -# Fix for MD17 datasets -torch_geometric.datasets.MD17.file_names["uracil"] = "md17_uracil.npz" - -dataset = torch_geometric.datasets.MD17( - root="dataset/md17", - name="uracil", - pre_transform=md17_pre_transform, - pre_filter=md17_pre_filter, -) -train, val, test = hydragnn.preprocess.split_dataset( - dataset, config["NeuralNetwork"]["Training"]["perc_train"], False -) -(train_loader, val_loader, test_loader,) = hydragnn.preprocess.create_dataloaders( - train, val, test, config["NeuralNetwork"]["Training"]["batch_size"] -) - -config = hydragnn.utils.input_config_parsing.update_config( - config, train_loader, val_loader, test_loader -) - -model = hydragnn.models.create_model_config( - config=config["NeuralNetwork"], - verbosity=verbosity, -) -model = hydragnn.utils.distributed.get_distributed_model(model, verbosity) - -learning_rate = config["NeuralNetwork"]["Training"]["Optimizer"]["learning_rate"] -optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate) -scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, mode="min", factor=0.5, patience=5, min_lr=0.00001 -) - -# Run training with the given model and md17 dataset. -writer = hydragnn.utils.model.model.get_summary_writer(log_name) -hydragnn.utils.input_config_parsing.save_config(config, log_name) - -hydragnn.train.train_validate_test( - model, - optimizer, - train_loader, - val_loader, - test_loader, - writer, - scheduler, - config["NeuralNetwork"], - log_name, - verbosity, -) +def main(model_type=None): + # FIX random seed + random_state = 0 + torch.manual_seed(random_state) + + # Set this path for output. + os.environ.setdefault("SERIALIZED_DATA_PATH", os.getcwd()) + + # Configurable run choices (JSON file that accompanies this example script). 
+ filename = os.path.join(os.path.dirname(__file__), "md17.json") + with open(filename, "r") as f: + config = json.load(f) + + verbosity = config["Verbosity"]["level"] + arch_config = config["NeuralNetwork"]["Architecture"] + + # If a model type is provided, update the configuration + if model_type: + config["NeuralNetwork"]["Architecture"]["model_type"] = model_type + + # Always initialize for multi-rank training. + world_size, world_rank = hydragnn.utils.distributed.setup_ddp() + + log_name = f"md17_test_{model_type}" if model_type else "md17_test" + # Enable print to log file. + hydragnn.utils.print.print_utils.setup_log(log_name) + + # Preprocess configurations for edge computation + compute_edges = hydragnn.preprocess.get_radius_graph_config(arch_config) + + # Fix for MD17 datasets + torch_geometric.datasets.MD17.file_names["uracil"] = "md17_uracil.npz" + + dataset = torch_geometric.datasets.MD17( + root="dataset/md17", + name="uracil", + pre_transform=lambda data: md17_pre_transform(data, compute_edges), + pre_filter=md17_pre_filter, + ) + train, val, test = hydragnn.preprocess.split_dataset( + dataset, config["NeuralNetwork"]["Training"]["perc_train"], False + ) + (train_loader, val_loader, test_loader,) = hydragnn.preprocess.create_dataloaders( + train, val, test, config["NeuralNetwork"]["Training"]["batch_size"] + ) + + config = hydragnn.utils.input_config_parsing.update_config( + config, train_loader, val_loader, test_loader + ) + + model = hydragnn.models.create_model_config( + config=config["NeuralNetwork"], + verbosity=verbosity, + ) + model = hydragnn.utils.distributed.get_distributed_model(model, verbosity) + + learning_rate = config["NeuralNetwork"]["Training"]["Optimizer"]["learning_rate"] + optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate) + scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer, mode="min", factor=0.5, patience=5, min_lr=0.00001 + ) + + # Run training with the given model and md17 dataset. 
+ writer = hydragnn.utils.model.model.get_summary_writer(log_name) + hydragnn.utils.input_config_parsing.save_config(config, log_name) + + hydragnn.train.train_validate_test( + model, + optimizer, + train_loader, + val_loader, + test_loader, + writer, + scheduler, + config["NeuralNetwork"], + log_name, + verbosity, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Run MD17 example with an optional model type." + ) + parser.add_argument( + "--model_type", + type=str, + default=None, + help="Specify the model type for training (default: None).", + ) + args = parser.parse_args() + main(model_type=args.model_type) diff --git a/examples/qm9/qm9.json b/examples/qm9/qm9.json index 78b5c7c96..45b3b4522 100644 --- a/examples/qm9/qm9.json +++ b/examples/qm9/qm9.json @@ -11,6 +11,18 @@ "periodic_boundary_conditions": false, "hidden_dim": 5, "num_conv_layers": 6, + "int_emb_size": 32, + "out_emb_size": 16, + "basis_emb_size": 8, + "num_gaussians": 10, + "num_filters": 8, + "num_before_skip": 1, + "num_after_skip": 1, + "envelope_exponent": 5, + "max_ell": 1, + "node_max_ell": 1, + "num_radial": 5, + "num_spherical": 2, "output_heads": { "graph":{ "num_sharedlayers": 2, diff --git a/examples/qm9/qm9.py b/examples/qm9/qm9.py index 1be651f97..2e5892151 100644 --- a/examples/qm9/qm9.py +++ b/examples/qm9/qm9.py @@ -1,21 +1,19 @@ -import os, json - +import os +import json import torch - -# FIX random seed -random_state = 0 -torch.manual_seed(random_state) - import torch_geometric +import argparse # deprecated in torch_geometric 2.0 try: from torch_geometric.loader import DataLoader -except: +except ImportError: from torch_geometric.data import DataLoader import hydragnn +num_samples = 1000 + # Update each sample prior to loading. def qm9_pre_transform(data): # Set descriptor as element type. @@ -31,71 +29,93 @@ def qm9_pre_filter(data): return data.idx < num_samples -# Set this path for output. 
-try: - os.environ["SERIALIZED_DATA_PATH"] -except: - os.environ["SERIALIZED_DATA_PATH"] = os.getcwd() - -num_samples = 1000 - -# Configurable run choices (JSON file that accompanies this example script). -filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "qm9.json") -with open(filename, "r") as f: - config = json.load(f) -verbosity = config["Verbosity"]["level"] -var_config = config["NeuralNetwork"]["Variables_of_interest"] - -# Always initialize for multi-rank training. -world_size, world_rank = hydragnn.utils.distributed.setup_ddp() - -log_name = "qm9_test" -# Enable print to log file. -hydragnn.utils.print.print_utils.setup_log(log_name) - -# Use built-in torch_geometric datasets. -# Filter function above used to run quick example. -# NOTE: data is moved to the device in the pre-transform. -# NOTE: transforms/filters will NOT be re-run unless the qm9/processed/ directory is removed. -dataset = torch_geometric.datasets.QM9( - root="dataset/qm9", pre_transform=qm9_pre_transform, pre_filter=qm9_pre_filter -) -train, val, test = hydragnn.preprocess.split_dataset( - dataset, config["NeuralNetwork"]["Training"]["perc_train"], False -) -(train_loader, val_loader, test_loader,) = hydragnn.preprocess.create_dataloaders( - train, val, test, config["NeuralNetwork"]["Training"]["batch_size"] -) - -config = hydragnn.utils.input_config_parsing.update_config( - config, train_loader, val_loader, test_loader -) - -model = hydragnn.models.create_model_config( - config=config["NeuralNetwork"], - verbosity=verbosity, -) -model = hydragnn.utils.distributed.get_distributed_model(model, verbosity) - -learning_rate = config["NeuralNetwork"]["Training"]["Optimizer"]["learning_rate"] -optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate) -scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, mode="min", factor=0.5, patience=5, min_lr=0.00001 -) - -# Run training with the given model and qm9 datasets. 
-writer = hydragnn.utils.model.model.get_summary_writer(log_name) -hydragnn.utils.input_config_parsing.save_config(config, log_name) - -hydragnn.train.train_validate_test( - model, - optimizer, - train_loader, - val_loader, - test_loader, - writer, - scheduler, - config["NeuralNetwork"], - log_name, - verbosity, -) +def main(model_type=None): + # FIX random seed + random_state = 0 + torch.manual_seed(random_state) + + # Set this path for output. + try: + os.environ["SERIALIZED_DATA_PATH"] + except KeyError: + os.environ["SERIALIZED_DATA_PATH"] = os.getcwd() + + # Configurable run choices (JSON file that accompanies this example script). + filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "qm9.json") + with open(filename, "r") as f: + config = json.load(f) + + # If a model type is provided, update the configuration accordingly. + if model_type: + config["NeuralNetwork"]["Architecture"]["model_type"] = model_type + + verbosity = config["Verbosity"]["level"] + var_config = config["NeuralNetwork"]["Variables_of_interest"] + + # Always initialize for multi-rank training. + world_size, world_rank = hydragnn.utils.distributed.setup_ddp() + + log_name = f"qm9_test_{model_type}" if model_type else "qm9_test" + # Enable print to log file. + hydragnn.utils.print.print_utils.setup_log(log_name) + + # Use built-in torch_geometric datasets. + # Filter function above used to run quick example. + # NOTE: data is moved to the device in the pre-transform. + # NOTE: transforms/filters will NOT be re-run unless the qm9/processed/ directory is removed. 
+ dataset = torch_geometric.datasets.QM9( + root="dataset/qm9", pre_transform=qm9_pre_transform, pre_filter=qm9_pre_filter + ) + train, val, test = hydragnn.preprocess.split_dataset( + dataset, config["NeuralNetwork"]["Training"]["perc_train"], False + ) + (train_loader, val_loader, test_loader,) = hydragnn.preprocess.create_dataloaders( + train, val, test, config["NeuralNetwork"]["Training"]["batch_size"] + ) + + config = hydragnn.utils.input_config_parsing.update_config( + config, train_loader, val_loader, test_loader + ) + + model = hydragnn.models.create_model_config( + config=config["NeuralNetwork"], + verbosity=verbosity, + ) + model = hydragnn.utils.distributed.get_distributed_model(model, verbosity) + + learning_rate = config["NeuralNetwork"]["Training"]["Optimizer"]["learning_rate"] + optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate) + scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer, mode="min", factor=0.5, patience=5, min_lr=0.00001 + ) + + # Run training with the given model and qm9 datasets. + writer = hydragnn.utils.model.model.get_summary_writer(log_name) + hydragnn.utils.input_config_parsing.save_config(config, log_name) + + hydragnn.train.train_validate_test( + model, + optimizer, + train_loader, + val_loader, + test_loader, + writer, + scheduler, + config["NeuralNetwork"], + log_name, + verbosity, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Run the QM9 example with optional model type." 
+ ) + parser.add_argument( + "--model_type", + type=str, + default=None, + help="Specify the model type for training (default: None).", + ) + args = parser.parse_args() + main(model_type=args.model_type) diff --git a/hydragnn/models/Base.py b/hydragnn/models/Base.py index e51a96516..4887054f4 100644 --- a/hydragnn/models/Base.py +++ b/hydragnn/models/Base.py @@ -399,10 +399,12 @@ def energy_force_loss(self, pred, data): tasks_loss = [] # Energies node_energy_pred = pred[0] - graph_energy_pred = torch_scatter.scatter_add( - node_energy_pred, data.batch, dim=0 - ).float() - graph_energy_true = data.energy + graph_energy_pred = ( + torch_scatter.scatter_add(node_energy_pred, data.batch, dim=0) + .squeeze() + .float() + ) + graph_energy_true = data.energy.squeeze().float() energy_loss_weight = self.loss_weights[ 0 ] # There should only be one loss-weight for energy diff --git a/hydragnn/models/GATStack.py b/hydragnn/models/GATStack.py index adeb0ad76..3a4743ff3 100644 --- a/hydragnn/models/GATStack.py +++ b/hydragnn/models/GATStack.py @@ -25,12 +25,14 @@ def __init__( conv_args, heads: int, negative_slope: float, + edge_dim: int, *args, **kwargs, ): # note that self.heads is a parameter in GATConv, not the num_heads in the output part self.heads = heads self.negative_slope = negative_slope + self.edge_dim = edge_dim super().__init__(input_args, conv_args, *args, **kwargs) @@ -98,6 +100,7 @@ def get_conv(self, input_dim, output_dim, concat): negative_slope=self.negative_slope, dropout=self.dropout, add_self_loops=True, + edge_dim=self.edge_dim, concat=concat, ) diff --git a/hydragnn/models/PNAEqStack.py b/hydragnn/models/PNAEqStack.py index c20bea646..5daa00751 100644 --- a/hydragnn/models/PNAEqStack.py +++ b/hydragnn/models/PNAEqStack.py @@ -210,14 +210,15 @@ def __init__( **kwargs, ): - super().__init__() + degree_scaler_aggregation = DegreeScalerAggregation( + aggr=x_aggregators, scaler=x_scalers, deg=deg + ) + + super().__init__(aggr=degree_scaler_aggregation, 
node_dim=0, **kwargs) assert node_size % towers == 0 self.node_size = node_size # We keep input and output dim the same here because of the skip connection - self.x_aggregators = x_aggregators - self.x_scalers = x_scalers - self.deg = deg self.num_radial = num_radial self.edge_dim = edge_dim @@ -334,11 +335,7 @@ def forward( message_vector = message_vector + edge_vector # Aggregate and scale message_scalar - # message_scalar = aggregate_and_scale(self.x_aggregators, self.x_scalers, message_scalar, src, self.deg) - degree_scaler_aggregation = DegreeScalerAggregation( - aggr=self.x_aggregators, scaler=self.x_scalers, deg=self.deg - ) - message_scalar = degree_scaler_aggregation( + message_scalar = self.aggr_module( message_scalar.squeeze(1), index=src, dim_size=x.shape[0] ).unsqueeze( 1 diff --git a/hydragnn/models/create.py b/hydragnn/models/create.py index 7c6949d55..ef24a3a9c 100644 --- a/hydragnn/models/create.py +++ b/hydragnn/models/create.py @@ -205,6 +205,7 @@ def create_model( "inv_node_feat, edge_index", heads, negative_slope, + edge_dim, input_dim, hidden_dim, output_dim, diff --git a/hydragnn/utils/input_config_parsing/config_utils.py b/hydragnn/utils/input_config_parsing/config_utils.py index 57eaab8d8..735264e2b 100644 --- a/hydragnn/utils/input_config_parsing/config_utils.py +++ b/hydragnn/utils/input_config_parsing/config_utils.py @@ -149,6 +149,7 @@ def update_config_equivariance(config): def update_config_edge_dim(config): config["edge_dim"] = None edge_models = [ + "GAT", "PNA", "PNAPlus", "PNAEq", @@ -161,7 +162,7 @@ def update_config_edge_dim(config): if "edge_features" in config and config["edge_features"]: assert ( config["model_type"] in edge_models - ), "Edge features can only be used with DimeNet, MACE, EGNN, SchNet, PNA, PNAPlus, PNAEq, and CGCNN." 
+ ), "Edge features can only be used with GAT, PNA, PNAPlus, PNAEq, CGCNN, SchNet, EGNN, DimeNet, MACE" config["edge_dim"] = len(config["edge_features"]) elif config["model_type"] == "CGCNN": # CG always needs an integer edge_dim diff --git a/hydragnn/utils/model/model.py b/hydragnn/utils/model/model.py index 7e3251e08..2176d0e3c 100644 --- a/hydragnn/utils/model/model.py +++ b/hydragnn/utils/model/model.py @@ -146,8 +146,8 @@ def calculate_avg_deg(loader): elif backend == "mpi": return calculate_avg_deg_mpi(loader) else: - deg = 0 - counter = 0 + deg = torch.zeros(1, dtype=torch.long) + counter = torch.zeros(1, dtype=torch.long) for data in iterate_tqdm(loader, 2, desc="Calculate avg degree"): d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long) deg += d.sum() @@ -169,14 +169,14 @@ def calculate_PNA_degree_dist(loader, max_neighbours): def calculate_avg_deg_dist(loader): assert dist.is_initialized() - deg = 0 - counter = 0 + deg = torch.zeros(1, dtype=torch.long) + counter = torch.zeros(1, dtype=torch.long) for data in iterate_tqdm(loader, 2, desc="Calculate avg degree"): d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long) deg += d.sum() counter += d.size(0) - deg = torch.tensor(deg) - counter = torch.tensor(counter) + deg = deg.to(get_device()) + counter = counter.to(get_device()) dist.all_reduce(deg, op=dist.ReduceOp.SUM) dist.all_reduce(counter, op=dist.ReduceOp.SUM) deg = deg.detach().cpu() @@ -198,16 +198,16 @@ def calculate_PNA_degree_mpi(loader, max_neighbours): def calculate_avg_deg_mpi(loader): assert dist.is_initialized() - deg = 0 - counter = 0 + deg = torch.zeros(1, dtype=torch.long) + counter = torch.zeros(1, dtype=torch.long) for data in iterate_tqdm(loader, 2, desc="Calculate avg degree"): d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long) deg += d.sum() counter += d.size(0) from mpi4py import MPI - deg = MPI.COMM_WORLD.allreduce(deg, op=MPI.SUM) - counter = 
MPI.COMM_WORLD.allreduce(counter, op=MPI.SUM) + deg = MPI.COMM_WORLD.allreduce(deg.numpy(), op=MPI.SUM) + counter = MPI.COMM_WORLD.allreduce(counter.numpy(), op=MPI.SUM) return deg / counter diff --git a/tests/inputs/ci_vectoroutput.json b/tests/inputs/ci_vectoroutput.json index ddb616615..f1fc8fd46 100644 --- a/tests/inputs/ci_vectoroutput.json +++ b/tests/inputs/ci_vectoroutput.json @@ -26,8 +26,16 @@ "model_type": "PNA", "radius": 2.0, "max_neighbours": 100, + "num_gaussians": 50, "envelope_exponent": 5, + "int_emb_size": 64, + "basis_emb_size": 8, + "out_emb_size": 128, + "num_after_skip": 2, + "num_before_skip": 1, "num_radial": 6, + "num_spherical": 7, + "num_filters": 126, "max_ell": 1, "node_max_ell": 1, "periodic_boundary_conditions": false, diff --git a/tests/test_examples.py b/tests/test_examples.py index 8a82fa18d..117f6a554 100644 --- a/tests/test_examples.py +++ b/tests/test_examples.py @@ -15,12 +15,58 @@ import subprocess -@pytest.mark.parametrize("example", ["qm9", "md17", "LennardJones"]) +@pytest.mark.parametrize( + "model_type", + [ + "SAGE", + "GIN", + "GAT", + "MFC", + "PNA", + "PNAPlus", + "SchNet", + "DimeNet", + "EGNN", + "PNAEq", + "PAINN", + ], +) +@pytest.mark.parametrize("example", ["qm9", "md17"]) @pytest.mark.mpi_skip() -def pytest_examples(example): +def pytest_examples_energy(example, model_type): path = os.path.join(os.path.dirname(__file__), "..", "examples", example) file_path = os.path.join(path, example + ".py") - return_code = subprocess.call(["python", file_path]) + + # Add the --model_type argument for the subprocess call + return_code = subprocess.call(["python", file_path, "--model_type", model_type]) + + # Check the file ran without error. + assert return_code == 0 + + +# NOTE the grad forces example with LennardJones requires +# there to be a positional gradient via using +# positions in torch operations for message-passing. 
+@pytest.mark.parametrize( + "model_type", + [ + "PNAPlus", + "SchNet", + "DimeNet", + "EGNN", + "PNAEq", + "PAINN", + "MACE", + ], +) +@pytest.mark.parametrize("example", ["LennardJones"]) +@pytest.mark.mpi_skip() +def pytest_examples_grad_forces(example, model_type): + path = os.path.join(os.path.dirname(__file__), "..", "examples", example) + file_path = os.path.join(path, example + ".py") + + # Add the --model_type argument for the subprocess call + return_code = subprocess.call(["python", file_path, "--model_type", model_type]) # Check the file ran without error. assert return_code == 0 diff --git a/tests/test_graphs.py b/tests/test_graphs.py index 177cd11c8..971e273d4 100755 --- a/tests/test_graphs.py +++ b/tests/test_graphs.py @@ -221,7 +221,7 @@ def pytest_train_model(model_type, ci_input, overwrite_data=False): # Test only models @pytest.mark.parametrize( - "model_type", ["PNA", "PNAPlus", "CGCNN", "SchNet", "EGNN", "MACE"] + "model_type", ["GAT", "PNA", "PNAPlus", "CGCNN", "SchNet", "EGNN", "MACE"] ) def pytest_train_model_lengths(model_type, overwrite_data=False): unittest_train_model(model_type, "ci.json", True, overwrite_data) @@ -234,7 +234,18 @@ def pytest_train_equivariant_model(model_type, overwrite_data=False): # Test vector output -@pytest.mark.parametrize("model_type", ["PNA", "PNAPlus", "MACE"]) +@pytest.mark.parametrize( + "model_type", + [ + "GAT", + "PNA", + "PNAPlus", + "SchNet", + "DimeNet", + "EGNN", + "PNAEq", + ], +) def pytest_train_model_vectoroutput(model_type, overwrite_data=False): unittest_train_model(model_type, "ci_vectoroutput.json", True, overwrite_data)