-
Notifications
You must be signed in to change notification settings - Fork 3
/
nodeclassification.py
106 lines (80 loc) · 3.15 KB
/
nodeclassification.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.data
import dgl.nn as dglnn
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import os
# NOTE(review): this absolute Windows path is assigned but never used anywhere
# below — presumably left over from a local experiment layout; confirm before
# keeping it.
path_project = os.path.abspath('C:/Users/Y/Desktop/EXPERIMENT/GNN/Icoinexper')
# TensorBoard writer; event files go to the default ./runs directory.
logger = SummaryWriter()
# Cora citation-network dataset: one graph, node features + class labels.
dataset = dgl.data.CoraGraphDataset()
print('Number of categories:', dataset.num_classes)
g = dataset[0]  # the dataset holds exactly one graph
# Inspect the node/edge feature schemes of the loaded graph.
print('Node features')
print(g.ndata)
print('Edge features')
print(g.edata)
class GNN(nn.Module):
    """Two-layer GraphSAGE network producing per-node class logits.

    Args:
        in_feats: dimensionality of the input node features.
        hid_feats: width of the hidden layer.
        out_feats: number of output classes (logit dimension).
        aggregator_type: neighbour aggregation used by both SAGE layers.
            Defaults to 'mean', preserving the original behaviour.
    """

    def __init__(self, in_feats, hid_feats, out_feats, aggregator_type='mean'):
        super().__init__()
        self.conv1 = dglnn.SAGEConv(
            in_feats=in_feats, out_feats=hid_feats, aggregator_type=aggregator_type)
        self.conv2 = dglnn.SAGEConv(
            in_feats=hid_feats, out_feats=out_feats, aggregator_type=aggregator_type)

    def forward(self, graph, inputs):
        """Return per-node logits for ``graph`` given node features ``inputs``."""
        h = self.conv1(graph, inputs)
        h = F.relu(h)  # non-linearity only between the two layers; raw logits out
        h = self.conv2(graph, h)
        return h
# class GCN(nn.Module):
# def __init__(self, in_feats, h_feats, num_classes):
# super(GCN, self).__init__()
# self.conv1 = GraphConv(in_feats, h_feats)
# self.conv2 = GraphConv(h_feats, num_classes)
# def forward(self, g, in_feat):
# h = self.conv1(g, in_feat)
# h = F.relu(h)
# h = self.conv2(g, h)
# return h
# NOTE: a GNN instance used to be constructed here as well, but it was dead
# code — `model` is rebound by an identical construction immediately before
# train() at the bottom of the file, so the duplicate was removed.
def train(g, model, epochs=150, lr=0.01):
    """Full-batch training loop for node classification on graph ``g``.

    Trains ``model`` with Adam + cross-entropy on the nodes selected by
    ``g.ndata['train_mask']``, evaluating validation/test accuracy every
    epoch and logging loss/test accuracy to the module-level TensorBoard
    ``logger``.

    Args:
        g: DGL graph whose ``ndata`` holds 'feat', 'label' and boolean
            'train_mask' / 'val_mask' / 'test_mask' tensors.
        model: module mapping (graph, node features) -> per-node logits.
        epochs: number of full-batch epochs (default 150, as before).
        lr: Adam learning rate (default 0.01, as before).

    Returns:
        (best_val_acc, best_test_acc): the best validation accuracy seen
        and the test accuracy at that same epoch, as Python floats.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    best_val_acc = 0.0
    best_test_acc = 0.0
    features = g.ndata['feat']
    labels = g.ndata['label']
    train_mask = g.ndata['train_mask']
    val_mask = g.ndata['val_mask']
    test_mask = g.ndata['test_mask']
    for e in range(epochs):
        # Forward pass over the whole graph (full-batch training).
        logits = model(g, features)
        pred = logits.argmax(1)
        # Loss is computed only on the training nodes.
        loss = F.cross_entropy(logits[train_mask], labels[train_mask])
        loss_val = loss.item()  # detach to a Python float for logging/printing
        # Accuracy on the held-out splits (the unused train accuracy that was
        # computed here previously has been dropped as dead work).
        val_acc = (pred[val_mask] == labels[val_mask]).float().mean().item()
        test_acc = (pred[test_mask] == labels[test_mask]).float().mean().item()
        # Track the best validation accuracy and its matching test accuracy.
        if best_val_acc < val_acc:
            best_val_acc = val_acc
            best_test_acc = test_acc
        # Log plain Python scalars, not tensors, to TensorBoard.
        logger.add_scalar('Loss/loss', loss_val, e)
        logger.add_scalar('Acc/Acc', test_acc, e)
        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if e % 5 == 0:
            print('In epoch {}, loss: {:.3f}, val acc: {:.3f} (best {:.3f}), test acc: {:.3f} (best {:.3f})'.format(
                e, loss_val, val_acc, best_val_acc, test_acc, best_test_acc))
    return best_val_acc, best_test_acc
# Build the model sized to the Cora feature dimension and class count,
# run the training loop, and flush the TensorBoard writer.
model = GNN(g.ndata['feat'].shape[1], 16, dataset.num_classes)
train(g, model)
logger.close()