forked from mlelarge/graph_neural_net
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: config_symmetric.yml
67 lines (62 loc) · 1.99 KB
/
config_symmetric.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
---
# Experiment configuration for symmetric graph-matching classification.
# Booleans are written as canonical `true`/`false` (not `Yes`/`No`) so the
# file parses identically under YAML 1.1 (PyYAML) and YAML 1.2 loaders —
# in 1.2, bare `No`/`Yes` would load as the *strings* "No"/"Yes".
name: Reg-ER-100-nodes-classification
cpu: false  # run on GPU when available
root_dir: .
load_data: true
train_data: # Train data related parameters
  num_examples_train: 20000
  num_examples_val: 1000
  graph_1:
    generative_model: ErdosRenyi # so far ErdosRenyi, Regular or BarabasiAlbert
    edge_density: 0.5
    n_vertices: 25
    vertex_proba: 1.0 # Parameter of the binomial distribution of vertices
  graph_2:
    generative_model: ErdosRenyi # so far ErdosRenyi, Regular or BarabasiAlbert
    edge_density: 0.5
    n_vertices: 25
    vertex_proba: 1.0 # Parameter of the binomial distribution of vertices
  merge_arg:
    generative_model: ErdosRenyi # so far ErdosRenyi, Regular or BarabasiAlbert
    edge_density: 0.2
  path_dataset: dataset # Path where datasets are stored
test_data: # Test data related parameters
  num_examples_test: 1000
  graph_1:
    generative_model: ErdosRenyi # so far ErdosRenyi, Regular or BarabasiAlbert
    edge_density: 0.5
    n_vertices: 25
    vertex_proba: 1.0 # Parameter of the binomial distribution of vertices
  graph_2:
    generative_model: Symmetric # so far ErdosRenyi, Regular or BarabasiAlbert
    edge_density: 0.009 # if symmetric, this means noise!
    n_vertices: 25
    vertex_proba: 1.0 # Parameter of the binomial distribution of vertices
  merge_arg:
    generative_model: Symmetric # so far ErdosRenyi, Regular or BarabasiAlbert
    edge_density: 0.2
  path_dataset: dataset # Path where datasets are stored
train: # Training parameters
  epoch: 10
  batch_size: 16
  lr: !!float 1e-4  # explicit tag: bare `1e-4` is a string under YAML 1.1 loaders
  scheduler_step: 5
  scheduler_decay: 0.9
  print_freq: 100
  # How to reduce the loss over several examples:
  # mean, mean_of_mean
  loss_reduction: mean
arch: # Architecture and model
  arch: Similarity_Model
  # arch: Simple_Node_Embedding
  model_name: Simple_Node_Embedding
  num_blocks: 2
  original_features_num: 2
  in_features: 64
  out_features: 64
  depth_of_mlp: 3
  freeze_mlp: [0, 0, 0] # number of mlp blocks to freeze in each regular block
observers:
  neptune:
    enable: false
    project: null  # explicit null — fill in with a Neptune project name to enable