pubdb_links.py
#! /usr/bin/env python3
"""Build links between PubDB publications and CAIDA dataset ids.

Collects resource URLs from the JSON files under sources/, scans the
PANDA paper/presentation dumps for linked PDFs, converts each PDF to
text, and records a [publication_id, dataset_id] pair for every known
dataset URL found in the text. Results are written to
data/pubdb_links.json.
"""
__author__ = "Bradley Huffaker"
__email__ = "<bradley@caida.org>"

import json
import re
import os
import sys
import subprocess
import lib.utils as utils
import urllib.request, urllib.error
import traceback
# Known dataset URLs (scheme and trailing punctuation stripped) mapped
# to their dataset ids. More entries are added in main().
url_id = {
    "www.caida.org/data/active/ipv4_prefix_probing_dataset.xml": "dataset:ipv4_routed_24_topology_dataset",
    "www.caida.org/data/as-or": "dataset:as_organizations",
    "www.caida.org/data/as-relatio": "dataset:as_relationships_serial_1",
    "www.caida.org/data/active/as-relationships": "dataset:as_relationships"
}

# Dataset ids that should never be linked to.
invalid_id = set([
    "dataset:overview",
    "dataset:passive"
])

pubdb_links_file = "data/pubdb_links.json"
def url_cleaner(url):
    # Drop the scheme and strip trailing slashes, commas, periods, and parens.
    return re.sub(r"https?://", "", re.sub(r"[/,.)]+$", "", url))
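# A quick sketch of the normalization (the input URL is illustrative):
#   url_cleaner("https://www.caida.org/data/as-or/")  ->  "www.caida.org/data/as-or"
# which matches a key in url_id above.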
def main():
    links = set()

    # Assumption: the original script never defines files_dir; the loop
    # below reads from sources/, so we use that here.
    files_dir = "sources"
    if not os.path.exists(files_dir):
        print("error:", files_dir, "does not exist", file=sys.stderr)
        sys.exit(1)

    # Pass 1: harvest resource URLs from the per-type JSON files under
    # sources/ and add them to url_id.
    for type_ in os.listdir(files_dir):
        p = files_dir + "/" + type_
        if os.path.isdir(p):
            for fname in os.listdir(p):
                fname = p + "/" + fname
                if re.search("json$", fname) and "__" not in fname:
                    try:
                        with open(fname, "r") as f:
                            obj = json.load(f)
                        id_ = utils.id_create(fname, type_, obj["id"])
                        if "resources" in obj:
                            for resource in obj["resources"]:
                                if "url" in resource and len(resource["url"]) > 10:
                                    url = url_cleaner(resource["url"])
                                    # Media entries only count when the
                                    # resource is the pdf itself.
                                    if "media" not in id_ or resource["name"] == "pdf":
                                        url_id[url] = id_
                    except ValueError as e:
                        print(fname)
                        raise e
    # Pass 2: scan the PANDA paper/presentation dumps for links to PDFs,
    # convert each PDF to text, and match the URLs inside against url_id.
    # Assumption: the original script never defines data_dir; paper PDFs
    # are assumed to live under data/papers (presentations fall back to
    # data/presentations below).
    data_dir = "data/papers"
    for type_, filename in [["media", "data/PANDA-Presentations-json.pl.json"],
                            ["paper", "data/PANDA-Papers-json.pl.json"]]:
        with open(filename, "r") as f:
            objects = json.load(f)
        for obj in objects:
            # Skip publications whose links were already resolved upstream.
            if "linkedObjects" in obj and len(obj["linkedObjects"]) > 0:
                continue
            id_ = utils.id_create(filename, type_, obj["id"])
            if "links" in obj:
                for link in obj["links"]:
                    if "to" in link:
                        m = re.search(r"(\d\d\d\d/[^/]+/[^/]+\.pdf$)", link["to"])
                        if m:
                            fname = data_dir + "/" + m.group(1)
                            found = None
                            if os.path.exists(fname):
                                found = fname
                            else:
                                fname = "data/presentations/" + m.group(1)
                                if os.path.exists(fname):
                                    found = fname
                            if found:
                                fname_txt = re.sub(r"\.pdf$", ".txt", fname)
                                if not os.path.exists(fname_txt):
                                    # pdftotext writes foo.txt next to foo.pdf.
                                    subprocess.run(["pdftotext", found])
                                with open(fname_txt, "r") as f:
                                    for line in f:
                                        m = re.search(r"(http\S+)", line)
                                        if m:
                                            url = url_cleaner(m.group(1))
                                            if url in url_id:
                                                pair = [id_, url_id[url]]
                                                links.add(json.dumps(pair))
                                            else:
                                                m = re.search(r"www\.caida\.org/data/([^/]+)", url)
                                                if m:
                                                    i = utils.id_create("", "dataset", m.group(1))
                                                    #if i not in invalid_id:
                                                    #    print(" ", url, " ", id_, " ", i)
                                                    #filename = "data/www_caida_org/" + re.sub("[^a-z]+", "_", url) + ".html"
                                                    #if not os.path.exists(filename):
                                                    #    download("http://" + url, filename)

    with open(pubdb_links_file, "w") as f:
        print("writing", pubdb_links_file)
        json.dump(list(links), f, indent=4)
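# Note on the output shape: each element of links is itself a
# JSON-encoded ["<publication_id>", "<dataset_id>"] pair, so
# pubdb_links.json is a JSON array of strings, e.g. (ids illustrative):
#   [ "[\"paper:example2020\", \"dataset:as_organizations\"]" ]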
def download(url, filename):
    print("downloading", url)
    try:
        response = urllib.request.urlopen(url, timeout=5)
        html = response.read()
        with open(filename, "wb") as f:
            f.write(html)
    except urllib.error.HTTPError as e:
        traceback.print_stack()
        print("HTTPError = " + str(e.code))
        # Leave an empty placeholder file so the same URL is not retried.
        with open(filename, "w") as f:
            pass
    except Exception:
        traceback.print_stack()
        print("generic exception: " + traceback.format_exc())
        sys.exit()
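# download() is only reached from the commented-out block in main(); a
# hypothetical invocation (URL and path are illustrative) would be:
#   download("http://www.caida.org/data/as-or",
#            "data/www_caida_org/www_caida_org_data_as_or.html")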
if __name__ == "__main__":
    main()