Crawl.py
import urllib.request
import urllib.parse
from bs4 import BeautifulSoup


# Crawls a single page: collects its links and forms into self.crawled as
# {"method": ..., "url": ..., ...} entries.
class Crawl:
    # Fetch a URL with GET or POST and return the response body as text
    # ("" on any request error).
    def getHTML(self, method, url, argParamData="", argHeaders=None):
        headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; "
                                 "rv:53.0) Gecko/20100101 Firefox/53.0"}
        paramData = ""
        if argHeaders is not None:
            headers.update(argHeaders)
        if argParamData != "":
            paramData = urllib.parse.urlencode(argParamData)
        if method == "POST":
            # POST sends the encoded parameters as the request body.
            paramData = paramData.encode("utf-8")
        else:
            # GET appends the parameters to the query string, but only when
            # there are any (the original appended a bare "?" regardless).
            if paramData != "":
                url += "?" + paramData
            paramData = None
        try:
            req = urllib.request.Request(url, data=paramData, headers=headers)
            read = urllib.request.urlopen(req).read()
            try:
                read = read.decode("utf-8")
            except UnicodeDecodeError:
                pass  # keep raw bytes if the body is not UTF-8
        except Exception:
            read = ""
        return read
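    # Usage sketch (illustration only; the URLs and parameter names below are
    # made up, not from the original source):
    #     crawl.getHTML("GET", "http://example.com/search", {"q": "test"})
    #     crawl.getHTML("POST", "http://example.com/login.php",
    #                   {"id": "user", "pw": "pass"})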
    # URL extraction and validity checking
    # Author : JTJ
    # Last fix : 2016.09.14. 23:04
    # Returns the normalized URL, or False when the tag carries no usable one.
    def checkAndGet(self, tag):
        url = tag.get("href")
        if url is None:
            url = tag.get("src")
        if url is None:
            url = tag.get("link")
        if url is None:
            url = tag.get("action")
        if url is None or url == "" or url == '#':
            return False
        # Reject fragments, scripts, and non-HTTP schemes.
        if url.count("?") > 1:
            return False
        if url.find('#') != -1:
            return False
        if url.find('.js') != -1:
            return False
        if url[0:7] == "mailto:":
            return False
        if url[0:4] == "tel:":
            return False
        # Normalize relative and protocol-relative URLs against rootUrl.
        if url[0:2] == "./":
            url = url.replace("./", "", 1)
        if url[0:2] == "//":
            url = "http:" + url
        if url[0:4] != "http" and url[0:3] != "ftp":
            if url[0] == "/":
                url = self.rootUrl + url
            else:
                url = self.rootUrl + "/" + url
        return url
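    # Normalization sketch (example values, not from the original source):
    #     href="./page.php"        -> rootUrl + "/page.php"
    #     src="//cdn.host/x.png"   -> "http://cdn.host/x.png"
    #     href="mailto:a@b.com"    -> False (rejected)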
    # Turn a <form> tag into a crawl entry. Every form is recorded as POST,
    # as in the original.
    def parseForm(self, tag):
        action = tag.get("action", "")
        if action[0:2] == "./":
            action = action.replace("./", "", 1)
        if action[0:2] == "//":
            action = "http:" + action
        if action[0:4] != "http" and action[0:3] != "ftp":
            if action == "" or action[0] == "/":
                action = self.rootUrl + action
            else:
                action = self.rootUrl + "/" + action
        # Collect the names of the form's input fields, skipping unnamed ones.
        param = []
        for ipt in self.getInput(tag):
            name = ipt.get("name")
            if name is not None:
                param.append(name)
        ret = {
            "method": "POST",
            "url": action,
            "param": param
        }
        return ret
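    # Form sketch (hypothetical markup):
    #     <form action="/login.php"><input name="id"><input name="pw"></form>
    # parses to:
    #     {"method": "POST", "url": rootUrl + "/login.php",
    #      "param": ["id", "pw"]}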
    # Return every <input> nested under the tag. find_all() already searches
    # recursively, so this replaces the original hand-rolled recursion, which
    # returned [] for any form whose first childless tag was not an <input>.
    def getInput(self, inputTag):
        return inputTag.find_all("input")
    def __init__(self, url):
        # Reduce the start URL to protocol + domain, e.g.
        # "https://host/path" -> "https://host"; a scheme ("//") is required.
        protocol = url.split("//")[0]
        domain = url.split("//")[1].split("/")[0]
        url = protocol + "//" + domain
        if url[-1] == "/":
            url = url[0:-1]
        self.rootUrl = url
        # Per-instance state; a class-level list (as in the original) would be
        # shared between instances.
        self.crawled = []
        # Fetch the root page and walk every tag for links and forms.
        html = self.getHTML("GET", self.rootUrl)
        soup = BeautifulSoup(html, "lxml")
        for tag in soup.find_all():
            if tag.name == "form":
                self.crawled.append(self.parseForm(tag))
            else:
                try:
                    url = self.checkAndGet(tag)
                    if url is False:
                        continue
                    payload = {
                        "method": "GET",
                        "url": url
                    }
                    if payload not in self.crawled:  # skip duplicates
                        self.crawled.append(payload)
                except Exception:
                    pass
        # Absolutize any root-relative URLs that slipped through.
        for entry in self.crawled:
            if entry['url'][0:1] == "/":
                entry['url'] = self.rootUrl + entry['url']
    # Keep only entries whose domain matches rootUrl. Builds a new list rather
    # than pop()-ing while iterating, which skipped entries in the original.
    def filter(self):
        crawled = []
        rootDomain = self.rootUrl.split("//")[1]
        for entry in self.crawled:
            try:
                domain = entry['url'].split("//")[1].split("/")[0]
                if domain == rootDomain:
                    crawled.append(entry)
            except IndexError:
                pass
        self.crawled = crawled
    # Return the list of crawled entries.
    def get(self):
        return self.crawled
if __name__ == '__main__':
    crawl = Crawl("https://ecampus.kookmin.ac.kr/login.php")
    crawled = crawl.get()
    for i in crawled:
        print(i)
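# Sketch of also applying filter() to drop off-domain results (not exercised
# by the demo above; shown for illustration):
#     crawl = Crawl("https://ecampus.kookmin.ac.kr/login.php")
#     crawl.filter()
#     for entry in crawl.get():
#         print(entry["method"], entry["url"])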