wordrain.py
# -*- coding: utf-8 -*-
import logging

from bs4 import Tag

from lncrawl.core.crawler import Crawler

logger = logging.getLogger(__name__)

search_url = "https://wordrain69.com/?s=%s"
post_chapter_url = "https://wordrain69.com/wp-admin/admin-ajax.php"
class WordRain(Crawler):
    base_url = "https://wordrain69.com"

    def initialize(self):
        # Strip links, sub-headings and inline scripts from chapter bodies.
        self.cleaner.bad_tags.update(
            [
                "a",
                "h3",
                "script",
            ]
        )
        # Drop ad blocks and share widgets.
        self.cleaner.bad_css.update(
            [
                ".code-block",
                ".adsbygoogle",
                ".adsense-code",
                ".sharedaddy",
            ]
        )
        # Remove the translator's support notes embedded in chapter text.
        self.cleaner.bad_text_regex.update(
            [
                "[The translation belongs to Wordrain. Support us by comments, ,"
                + " or buy Miao a coffee (*´ェ`*)っ旦~]",
                "1 ko-Fi = extra chapter",
                "[Thanks to everyone who’s reading this on wordrain. This translation "
                + "belongs to us. (•̀o•́)ง Support us by comments, , or buy Miao a coffee (´ェ`)っ旦~]",
                "1 ko-fi= 1 bonus chapter.",
                "[The translation belongs to Wordrain. Support us by comments, ,"
                + " or buy Miao a coffee (*´ェ`*)っ~]",
                "1 ko fi = 1 extra chapter",
                "[The translation belongs to Wordrain. Support us by comments, ,"
                + " or buy Miao a coffee (´ェ`)っ旦~]",
                "[The translation belongs to Wordrain . Support us by comments, ,"
                + " or buy Miao a coffee (´ェ`)っ旦~]",
                "[Thanks to everyone who are reading this on the site wordrain ."
                + " (•̀o•́)ง Support us by comments, , or buy Miao a coffee (´ェ`)っ旦~]",
                "[Thanks to everyone who’s reading this on wordrain . "
                + "This translation belongs to us. (•̀o•́)ง Support us by comments, ,"
                + " or buy Miao a coffee (´ェ`)っ旦~]",
                "[Thanks to everyone who’s reading this on wordrain ."
                + " This translation belongs to us. ( •̀o•́)ง Support us by comments, ,"
                + " or buy Miao a coffee ( ´ェ`)っ旦~]",
            ]
        )
    # NOTE: Site search doesn't work. So this won't work.
    """
    def search_novel(self, query):
        query = query.lower().replace(' ', '+')
        soup = self.get_soup(search_url % query)
        results = []
        for tab in soup.select('.c-tabs-item__content'):
            a = tab.select_one('.post-title h3 a')
            latest = tab.select_one('.latest-chap .chapter a').text
            votes = tab.select_one('.rating .total_votes').text
            results.append({
                'title': a.text.strip(),
                'url': self.absolute_url(a['href']),
                'info': '%s | Rating: %s' % (latest, votes),
            })
        return results
    """
    def read_novel_info(self):
        logger.debug("Visiting %s", self.novel_url)
        soup = self.get_soup(self.novel_url)

        possible_title = soup.select_one('meta[property="og:title"]')
        assert isinstance(possible_title, Tag), "No novel title"
        self.novel_title = possible_title["content"]
        logger.info("Novel title: %s", self.novel_title)

        possible_image = soup.select_one('meta[property="og:image"]')
        if isinstance(possible_image, Tag):
            self.novel_cover = possible_image["content"]
        logger.info("Novel cover: %s", self.novel_cover)

        self.novel_author = " ".join(
            [
                a.text.strip()
                for a in soup.select('.author-content a[href*="manga-translator"]')
            ]
        )
        logger.info("Novel author: %s", self.novel_author)

        # The post id is needed to request the chapter list via admin-ajax.
        self.novel_id = soup.select_one(
            ".wp-manga-action-button[data-action=bookmark]"
        )["data-post"]
        logger.info("Novel id: %s", self.novel_id)

        logger.info("Sending post request to %s", post_chapter_url)
        response = self.submit_form(
            post_chapter_url,
            data={"action": "manga_get_chapters", "manga": int(self.novel_id)},
        )
        soup = self.make_soup(response)

        # Chapter links come newest first; iterate in reverse for reading order
        # and group chapters into volumes of roughly 100 chapters each.
        for a in reversed(soup.select(".wp-manga-chapter > a")):
            chap_id = len(self.chapters) + 1
            vol_id = chap_id // 100 + 1
            if len(self.chapters) % 100 == 0:
                self.volumes.append({"id": vol_id})
            self.chapters.append(
                {
                    "id": chap_id,
                    "volume": vol_id,
                    "title": a.text.strip(),
                    "url": self.absolute_url(a["href"]),
                }
            )
    def download_chapter_body(self, chapter):
        soup = self.get_soup(chapter["url"])
        contents = soup.select_one("div.text-left")
        return self.cleaner.extract_contents(contents)