"""
Dataset class definition for syn-cora
ref: https://github.com/GemsLab/H2GCN
"""
import os.path as osp
import numpy as np
import scipy.sparse as sp
class Dataset():
"""Dataset class contains four citation network datasets "cora", "cora-ml", "citeseer" and "pubmed",
and one blog dataset "Polblogs". Datasets "ACM", "BlogCatalog", "Flickr", "UAI",
"Flickr" are also available. See more details in https://github.com/DSE-MSU/DeepRobust/tree/master/deeprobust/graph#supported-datasets.
The 'cora', 'cora-ml', 'polblogs' and 'citeseer' are downloaded from https://github.com/danielzuegner/gnn-meta-attack/tree/master/data, and 'pubmed' is from https://github.com/tkipf/gcn/tree/master/gcn/data.
Parameters
----------
root : string
root directory where the dataset should be saved.
name : string
dataset name, it can be chosen from ['cora', 'citeseer', 'cora_ml', 'polblogs',
'pubmed', 'acm', 'blogcatalog', 'uai', 'flickr']
    setting : string
        there are three data split settings, chosen from ['nettack', 'gcn', 'prognn'].
        The 'nettack' setting follows the nettack paper, selecting the largest
        connected component of the graph and using 10%/10%/80% of the nodes for
        training/validation/test. The 'gcn' setting follows the gcn paper, using the
        full graph with 20 samples per class for training, 500 nodes for validation,
        and 1000 nodes for test. The 'prognn' setting uses the fixed splits provided
        by Pro-GNN. (Note that the 'nettack' and 'gcn' settings do not provide a
        fixed split, i.e., different random seeds return different data splits.)
    seed : int
        random seed for splitting training/validation/test.
    require_mask : bool
        set require_mask to True to get the training, validation and test masks
        (self.train_mask, self.val_mask, self.test_mask)
Examples
--------
We can first create an instance of the Dataset class and then take out its attributes.
>>> from deeprobust.graph.data import Dataset
>>> data = Dataset(root='/tmp/', name='cora', seed=15)
>>> adj, features, labels = data.adj, data.features, data.labels
>>> idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
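    If masks are also needed, a sketch following the upstream DeepRobust API
    (assumes the split indices have been built, which this trimmed copy omits):
    >>> data = Dataset(root='/tmp/', name='cora', seed=15, require_mask=True)
    >>> train_mask, val_mask, test_mask = data.train_mask, data.val_mask, data.test_mask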
"""
def __init__(self, root, name, setting='nettack', seed=None, require_mask=False):
self.name = name.lower()
self.setting = setting.lower()
        assert self.name in ['cora', 'citeseer', 'cora_ml', 'polblogs',
                             'pubmed', 'acm', 'blogcatalog', 'uai', 'flickr'], \
            'Currently only support cora, citeseer, cora_ml, ' + \
            'polblogs, pubmed, acm, blogcatalog, uai, flickr'
        assert self.setting in ['gcn', 'nettack', 'prognn'], "Settings should be" + \
            " chosen from ['gcn', 'nettack', 'prognn']"
self.seed = seed
# self.url = 'https://raw.githubusercontent.com/danielzuegner/nettack/master/data/%s.npz' % self.name
self.url = 'https://raw.githubusercontent.com/danielzuegner/gnn-meta-attack/master/data/%s.npz' % self.name
self.root = osp.expanduser(osp.normpath(root))
self.data_folder = osp.join(root, self.name)
self.data_filename = self.data_folder + '.npz'
self.require_mask = require_mask
        self.require_lcc = self.setting != 'gcn'
self.adj, self.features, self.labels = self.load_data()
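        # NOTE: the upstream DeepRobust implementation additionally builds the
        # train/val/test split indices (self.idx_train / self.idx_val / self.idx_test)
        # and calls self.get_mask() when require_mask is set; that logic is not
        # reproduced in this trimmed copy.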
    def load_data(self):
        # print('Loading {} dataset...'.format(self.name))
        # load_pubmed(), load_zip() and download_npz() are not defined in this
        # trimmed copy; see the upstream DeepRobust dataset.py for them.
        if self.name == 'pubmed':
            return self.load_pubmed()
        if self.name in ['acm', 'blogcatalog', 'uai', 'flickr']:
            return self.load_zip()
        if not osp.exists(self.data_filename):
            self.download_npz()
        adj, features, labels = self.get_adj()
        return adj, features, labels
def get_adj(self):
        adj, features, labels = self.load_npz(self.data_filename)
        adj = adj + adj.T   # symmetrize the adjacency matrix
        adj = adj.tolil()
        adj[adj > 1] = 1    # binarize: collapse duplicate edges to weight 1
if self.require_lcc:
lcc = self.largest_connected_components(adj)
adj = adj[lcc][:, lcc]
features = features[lcc]
labels = labels[lcc]
assert adj.sum(0).A1.min() > 0, "Graph contains singleton nodes"
        adj.setdiag(0)      # remove self-loops
adj = adj.astype("float32").tocsr()
adj.eliminate_zeros()
assert np.abs(adj - adj.T).sum() == 0, "Input graph is not symmetric"
assert adj.max() == 1 and len(np.unique(adj[adj.nonzero()].A1)) == 1, "Graph must be unweighted"
return adj, features, labels
def load_npz(self, file_name, is_sparse=True):
with np.load(file_name) as loader:
# loader = dict(loader)
if is_sparse:
adj = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],
loader['adj_indptr']), shape=loader['adj_shape'])
if 'attr_data' in loader:
features = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],
loader['attr_indptr']), shape=loader['attr_shape'])
else:
features = None
labels = loader.get('labels')
else:
adj = loader['adj_data']
if 'attr_data' in loader:
features = loader['attr_data']
else:
features = None
labels = loader.get('labels')
        if features is None:
            # no stored node attributes: fall back to one-hot (identity) features
            features = np.eye(adj.shape[0])
        features = sp.csr_matrix(features, dtype=np.float32)
return adj, features, labels
def largest_connected_components(self, adj, n_components=1):
"""Select k largest connected components.
Parameters
----------
adj : scipy.sparse.csr_matrix
input adjacency matrix
n_components : int
n largest connected components we want to select
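        Returns
        -------
        list
            indices of the nodes that belong to the selected components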
"""
_, component_indices = sp.csgraph.connected_components(adj)
component_sizes = np.bincount(component_indices)
components_to_keep = np.argsort(component_sizes)[::-1][:n_components] # reverse order to sort descending
nodes_to_keep = [
idx for (idx, component) in enumerate(component_indices) if component in components_to_keep]
print("Selecting {0} largest connected components".format(n_components))
return nodes_to_keep
def __repr__(self):
return '{0}(adj_shape={1}, feature_shape={2})'.format(self.name, self.adj.shape, self.features.shape)
    def get_mask(self):
        # NOTE: assumes self.idx_train / self.idx_val / self.idx_test have already
        # been set (the upstream DeepRobust implementation sets them via
        # get_train_val_test(), which is not reproduced in this file).
        idx_train, idx_val, idx_test = self.idx_train, self.idx_val, self.idx_test
        labels = self.onehot(self.labels)
        def get_mask(idx):
            mask = np.zeros(labels.shape[0], dtype=bool)  # np.bool was removed in NumPy >= 1.24
            mask[idx] = 1
            return mask
def get_y(idx):
mx = np.zeros(labels.shape)
mx[idx] = labels[idx]
return mx
self.train_mask = get_mask(self.idx_train)
self.val_mask = get_mask(self.idx_val)
self.test_mask = get_mask(self.idx_test)
self.y_train, self.y_val, self.y_test = get_y(idx_train), get_y(idx_val), get_y(idx_test)
def onehot(self, labels):
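        """Convert an integer label vector of shape (n,) into a one-hot matrix of
        shape (n, n_classes), e.g. [0, 2] -> [[1., 0., 0.], [0., 0., 1.]]."""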
eye = np.identity(labels.max() + 1)
onehot_mx = eye[labels]
return onehot_mx
def parse_index_file(filename):
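    """Parse an index file with one integer per line into a list of ints
    (used by the pubmed loader in the upstream DeepRobust implementation)."""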
    index = []
    with open(filename) as f:   # close the file handle when done
        for line in f:
            index.append(int(line.strip()))
    return index
class CustomDataset(Dataset):
def __init__(self, root, name, setting='gcn', seed=None, require_mask=False):
        '''
        Adapted from https://github.com/DSE-MSU/DeepRobust/blob/master/deeprobust/graph/data/dataset.py
        '''
self.name = name.lower()
self.setting = setting.lower()
self.seed = seed
self.url = None
self.root = osp.expanduser(osp.normpath(root))
self.data_folder = osp.join(root, self.name)
self.data_filename = self.data_folder + '.npz'
# Make sure dataset file exists
assert osp.exists(self.data_filename), f"{self.data_filename} does not exist!"
self.require_mask = require_mask
        self.require_lcc = self.setting == 'nettack'
self.adj, self.features, self.labels = self.load_data()
if self.require_mask:
self.get_mask()
def get_adj(self):
        adj, features, labels = self.load_npz(self.data_filename)
        adj = adj + adj.T   # symmetrize the adjacency matrix
        adj = adj.tolil()
        adj[adj > 1] = 1    # binarize: collapse duplicate edges to weight 1
        if self.require_lcc:
            lcc = self.largest_connected_components(adj)
            # Equivalent to adj = adj[lcc][:, lcc]; the column slice is taken in
            # CSC format because slicing columns of a LIL matrix is very slow.
            adj_row = adj[lcc]
            adj_csc = adj_row.tocsc()
            adj_col = adj_csc[:, lcc]
            adj = adj_col.tolil()
features = features[lcc]
labels = labels[lcc]
assert adj.sum(0).A1.min() > 0, "Graph contains singleton nodes"
        adj.setdiag(0)      # remove self-loops
adj = adj.astype("float32").tocsr()
adj.eliminate_zeros()
assert np.abs(adj - adj.T).sum() == 0, "Input graph is not symmetric"
assert adj.max() == 1 and len(np.unique(adj[adj.nonzero()].A1)) == 1, "Graph must be unweighted"
return adj, features, labels
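

# A minimal usage sketch, assuming a syn-cora graph file (hypothetical name
# 'h0.00-r1.npz', e.g. as produced by the H2GCN syn-cora generator) has already
# been placed under root; adjust root and name to match your local files.
if __name__ == '__main__':
    dataset = CustomDataset(root='./syn-cora', name='h0.00-r1', setting='gcn', seed=15)
    adj, features, labels = dataset.adj, dataset.features, dataset.labels
    print(dataset)  # e.g. h0.00-r1(adj_shape=(n, n), feature_shape=(n, d))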