-
Notifications
You must be signed in to change notification settings - Fork 0
/
synthetic.py
139 lines (128 loc) · 6.38 KB
/
synthetic.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from utils import sgc_precompute, set_seed, stack_feat, load_donuts
from models import get_model
from metrics import accuracy
import pickle as pkl
from args import get_syn_args
from time import perf_counter
from noise import zero_idx, gaussian
from train import train_regression, test_regression,\
train_gcn, test_gcn,\
train_kgcn, test_kgcn
# ---------------------------------------------------------------------------
# Experiment setup: parse CLI arguments, seed every RNG, and build the
# synthetic "donuts" dataset (presumably sklearn-style concentric circles —
# confirm in utils.load_donuts) together with its graph.
# ---------------------------------------------------------------------------
# Arguments
args = get_syn_args()
# setting random seeds
set_seed(args.seed, args.cuda)
# load_donuts returns the (normalized) adjacency, node features, labels,
# train/val/test index tensors, and mesh data (used when args.gen_mesh is
# set — presumably for decision-boundary plotting; verify against utils).
# Arguments are positional: keep this order in sync with utils.load_donuts.
adj, features, labels, idx_train,\
idx_val, idx_test, mesh_pack = load_donuts(args.gen_num_samples,
                                           args.gen_noise,
                                           args.gen_factor,
                                           args.gen_test_size,
                                           args.gen_num_neigh,
                                           args.normalization,
                                           args.cuda,
                                           args.invlap_alpha,
                                           args.gen_mesh,
                                           args.gen_mesh_step)
### NOISE TO FEATURES (original note: only use "zero" noise here).
# Two modes: "gaussian" perturbs every feature entry; "zero_test" zeroes
# the feature rows of the test nodes.  The original code tested
# `args.noise != "None"` twice (once to convert to NumPy, once to convert
# back); the two guards are merged into one.  The redundant `.float()`
# after `torch.FloatTensor(...)` (already float32) is dropped.
# NOTE(review): `features.numpy()` assumes the tensor is on CPU at this
# point — confirm load_donuts does not move features to CUDA when
# args.cuda is set.
if args.noise != "None":
    features = features.numpy()
    if args.noise == "gaussian":
        features = gaussian(features,
                            mean=args.gaussian_opt[0],
                            std=args.gaussian_opt[1])
    if args.noise == "zero_test":
        idx_test = idx_test.numpy()
        features = zero_idx(features, idx_test)
        idx_test = torch.LongTensor(idx_test)
        if args.cuda:
            idx_test = idx_test.cuda()
    # Back to a torch tensor (and onto the GPU when requested).
    features = torch.FloatTensor(features)
    if args.cuda:
        features = features.cuda()
### END NOISE TO FEATURES
# Input-width fix-up for SLG: it consumes features stacked over the
# propagation degree (see the stack_feat call in the SLG branch below),
# so its input is `degree` times wider than the raw feature matrix.
nfeat = features.size(1) * (args.degree if args.model == "SLG" else 1)
# Number of classes is inferred from the label tensor (labels are assumed
# to be 0-based integer class ids).
model = get_model(model_opt=args.model,
                  nfeat=nfeat,
                  nclass=labels.max().item() + 1,
                  nhid=args.hidden,
                  dropout=args.dropout,
                  cuda=args.cuda,
                  degree=args.degree)
if args.model in ("SGC", "SGCMLP"):
    # SGC-style models: graph propagation happens once, up front
    # (sgc_precompute); training is then a plain regression on the
    # smoothed features.
    features, precompute_time = sgc_precompute(features, adj, args.degree)
    print("{:.4f}s".format(precompute_time))
    model, val_acc, train_time = train_regression(
        model,
        features[idx_train], labels[idx_train],
        features[idx_val], labels[idx_val],
        args.epochs, args.weight_decay, args.lr, args.dropout)
    test_acc = test_regression(model, features[idx_test], labels[idx_test])
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(val_acc,
                                                                     test_acc))
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
if args.model == "SLG":
    # Stacked Logistic Regression: stack_feat concatenates the features
    # propagated to each power of the adjacency (hence the widened nfeat
    # computed above), then a plain regression head is trained on them.
    features, precompute_time = stack_feat(features, adj, args.degree)
    # stack_feat apparently returns a NumPy array (it was wrapped in
    # FloatTensor originally); torch.FloatTensor already yields float32,
    # so the original's extra `.float()` cast was redundant and is dropped.
    features = torch.FloatTensor(features)
    if args.cuda:
        features = features.cuda()
    print("{:.4f}s".format(precompute_time))
    model, acc_val, train_time = train_regression(model,
                                                  features[idx_train],
                                                  labels[idx_train],
                                                  features[idx_val],
                                                  labels[idx_val],
                                                  args.epochs,
                                                  args.weight_decay,
                                                  args.lr,
                                                  args.dropout)
    acc_test = test_regression(model, features[idx_test], labels[idx_test])
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val,
                                                                     acc_test))
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
if args.model == "GCN":
    # Full GCN: no feature precomputation step, so the precompute time is
    # zero.  Assigned up front (the original set it between the two prints,
    # unlike the KGCN branch) — purely a consistency fix, the first print
    # does not read it.
    precompute_time = 0
    model, acc_val, train_time = train_gcn(model,
                                           adj,
                                           features,
                                           labels,
                                           idx_train,
                                           idx_val,
                                           args.epochs,
                                           args.weight_decay,
                                           args.lr,
                                           args.dropout)
    acc_test = test_gcn(model, adj, features, labels, idx_test)
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(acc_val,
                                                                     acc_test))
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))
if args.model == "KGCN":
    # KGCN variant: trained and evaluated end-to-end like the GCN branch,
    # via its dedicated train/test helpers.
    model, val_acc, train_time = train_kgcn(
        model, adj, features, labels, idx_train, idx_val,
        args.epochs, args.weight_decay, args.lr, args.dropout)
    test_acc = test_kgcn(model, adj, features, labels, idx_test)
    precompute_time = 0  # no precomputation step for this model
    print("Validation Accuracy: {:.4f} Test Accuracy: {:.4f}".format(val_acc,
                                                                     test_acc))
    print("Pre-compute time: {:.4f}s, train time: {:.4f}s, total: {:.4f}s".format(precompute_time, train_time, precompute_time+train_time))