xiainovel.py
# -*- coding: utf-8 -*-
import logging

from bs4 import Comment

from lncrawl.core.crawler import Crawler

logger = logging.getLogger(__name__)


class YukiNovelCrawler(Crawler):
    base_url = "https://www.xiainovel.com/"

    def read_novel_info(self):
        logger.debug("Visiting %s", self.novel_url)
        soup = self.get_soup(self.novel_url)

        possible_title = soup.select_one("div.page-header h1")
        assert possible_title, "No novel title"
        self.novel_title = possible_title.text
        logger.info("Novel title: %s", self.novel_title)

        # No author is listed on the page, so credit the translator site instead.
        self.novel_author = "Translated by XiaiNovel"
        logger.info("Novel author: %s", self.novel_author)

        # NOTE: Can't fetch the cover url, as it's embedded as base64 data.
        # self.novel_cover = self.absolute_url(
        #     soup.select_one('div.col-md-6 img')
        # logger.info('Novel cover: %s', self.novel_cover)

        # Extract volume-wise chapter entries: reverse the scraped list so
        # chapters are numbered in reading order, and bucket every 100
        # chapters into one volume.
        chapters = soup.select("ul.list-group li a")
        chapters.reverse()
        for a in chapters:
            chap_id = len(self.chapters) + 1
            vol_id = 1 + len(self.chapters) // 100
            if len(self.volumes) < vol_id:
                self.volumes.append({"id": vol_id})
            self.chapters.append(
                {
                    "id": chap_id,
                    "volume": vol_id,
                    "url": self.absolute_url(a["href"]),
                    "title": a.text.strip() or ("Chapter %d" % chap_id),
                }
            )

    def download_chapter_body(self, chapter):
        soup = self.get_soup(chapter["url"])
        contents = soup.select_one("section#StoryContent")
        # Strip nested <div> elements and HTML comments from the story
        # container, keeping only the chapter text.
        for d in contents.find_all("div"):
            d.extract()
        for comment in contents.find_all(string=lambda text: isinstance(text, Comment)):
            comment.extract()
        return str(contents)
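
For context, a minimal sketch of how this crawler might be driven directly, outside the lncrawl app. It assumes the Crawler base class sets up its HTTP session in the constructor and exposes a writable novel_url attribute (as the methods above rely on); the import path and the novel URL below are placeholders, not part of the source.

# sketch.py -- hypothetical standalone driver for YukiNovelCrawler
from xiainovel import YukiNovelCrawler  # import path is an assumption

crawler = YukiNovelCrawler()
# Hypothetical novel page on the supported site.
crawler.novel_url = "https://www.xiainovel.com/novel/some-novel"
crawler.read_novel_info()

print(crawler.novel_title)
print(len(crawler.chapters), "chapters in", len(crawler.volumes), "volumes")

# Fetch the cleaned HTML body of the first chapter entry.
if crawler.chapters:
    body = crawler.download_chapter_body(crawler.chapters[0])
    print(body[:200])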