-
-
Notifications
You must be signed in to change notification settings - Fork 305
/
fictionpress.py
98 lines (82 loc) · 3.48 KB
/
fictionpress.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
# -*- coding: utf-8 -*-
import logging
import re
from urllib.parse import urlparse
from bs4.element import Tag
from lncrawl.core.crawler import Crawler
logger = logging.getLogger(__name__)
# URL template: filled with (novel id, chapter number) to get a chapter page.
chapter_url = "https://www.fictionpress.com/s/%s/%s"
# Title-search endpoint; %s is the '+'-joined, lower-cased query string.
search_url = "https://www.fictionpress.com/search/?keywords=%s&type=story&match=title&ready=1&categoryid=202"
class FictionPressCrawler(Crawler):
    """Crawler for fictionpress.com stories (search, metadata, chapter text)."""

    base_url = "https://www.fictionpress.com/"

    def search_novel(self, query):
        """Search fictionpress.com by title and return up to 25 matches.

        Returns a list of dicts with "title", "url" and "info" keys.
        """
        query = query.lower().replace(" ", "+")
        soup = self.get_soup(search_url % query)

        results = []
        for div in soup.select("#content_wrapper .z-list")[:25]:
            a = div.select_one("a.stitle")
            if not isinstance(a, Tag):
                continue  # malformed listing; skip instead of crashing

            # Remove the cover thumbnail so a.text holds only the title.
            # Guard: some listings have no <img>, and .extract() on None
            # would raise AttributeError.
            img = a.select_one("img")
            if isinstance(img, Tag):
                img.extract()

            info = div.select_one(".xgray").text.strip()

            # "Chapters: N" can be absent for one-shot stories; fall back
            # instead of letting findall(...)[0] raise IndexError.
            chapters_found = re.findall(r"Chapters: \d+", info)
            chapters = chapters_found[0] if chapters_found else "Chapters: 1"

            # Everything before "Rated:"; [:-9] strips the trailing
            # " - Rated:" separator (9 characters).
            rated_found = re.findall(r"^.+Rated:", info)
            origin_book = rated_found[0][:-9] if rated_found else ""

            writer = div.select_one('a[href*="/u/"]').text.strip()
            results.append(
                {
                    "title": a.text.strip(),
                    "url": self.absolute_url(a["href"]),
                    "info": "%s | %s | By, %s" % (origin_book, chapters, writer),
                }
            )

        return results

    def read_novel_info(self):
        """Populate title, cover, author, volumes and chapters from novel_url."""
        logger.debug("Visiting %s", self.novel_url)
        soup = self.get_soup(self.novel_url)

        possible_title = soup.select_one("#profile_top b.xcontrast_txt, #content b")
        assert possible_title, "No novel title"
        self.novel_title = possible_title.text.strip()
        logger.info("Novel title: %s", self.novel_title)

        possible_image = soup.select_one("#profile_top img.cimage")
        if possible_image:
            self.novel_cover = self.absolute_url(possible_image["src"])
        logger.info("Novel cover: %s", self.novel_cover)

        # Author link lives inside either the profile box or #content.
        possible_author = soup.select_one("#profile_top, #content")
        if isinstance(possible_author, Tag):
            possible_author = possible_author.select_one('a[href*="/u/"]')
        if isinstance(possible_author, Tag):
            self.novel_author = possible_author.text.strip()
        logger.info("Novel author: %s", self.novel_author)

        # Story id is the second path segment: /s/<id>/<chapter>/...
        self.novel_id = urlparse(self.novel_url).path.split("/")[2]
        logger.info("Novel id: %s", self.novel_id)

        # Single volume; titled after the origin/category breadcrumb if present.
        if soup.select_one("#pre_story_links"):
            origin_book = soup.select("#pre_story_links a")[-1]
            self.volumes.append(
                {
                    "id": 1,
                    "title": origin_book.text.strip(),
                }
            )
        else:
            self.volumes.append({"id": 1})

        # Multi-chapter stories expose a chapter <select>; one-shots do not.
        chapter_select = soup.select_one("#chap_select, select#jump")
        if chapter_select:
            for option in chapter_select.select("option"):
                self.chapters.append(
                    {
                        "volume": 1,
                        "id": int(option["value"]),
                        "title": option.text.strip(),
                        "url": chapter_url % (self.novel_id, option["value"]),
                    }
                )
        else:
            self.chapters.append(
                {
                    "id": 1,
                    "volume": 1,
                    "url": self.novel_url,
                }
            )

    def download_chapter_body(self, chapter):
        """Fetch a chapter page and return its story text as an HTML string."""
        soup = self.get_soup(chapter["url"])
        contents = soup.select_one("#storytext, #storycontent")
        # Guard against a missing container: str(None) would otherwise leak
        # the literal text "None" into the generated book.
        return str(contents) if contents else ""