Skip to content

Commit

Permalink
Added support for Mangahere.co and more
Browse files Browse the repository at this point in the history
Support for mangahere.co added and sorting order enabled. Check the
Changelog and ReadMe for more detailed, up-to-date information.
  • Loading branch information
Xonshiz committed Feb 23, 2017
1 parent 38a13df commit 4338816
Show file tree
Hide file tree
Showing 18 changed files with 542 additions and 151 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@ $RECYCLE.BIN/
*.msi
*.msm
*.msp
*.exe
*.pyc
*.pypirc

# Windows shortcuts
*.lnk
Expand Down
403 changes: 302 additions & 101 deletions .idea/workspace.xml

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions Changelog.md
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,5 @@
- Fixed chapter count error in Kissmanga [2017.01.22]
- Fixed #4 [2017.02.16]
- Optimized Imports [2017.02.16]
- Site support for mangahere.co [2017.02.23]
- Added `Sorting Order` a.k.a `Download Order` [2017.02.23]
1 change: 1 addition & 0 deletions ReadMe.md
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,7 @@ Currently, the script supports these arguments :
-u,--username Indicates username for a website.
-p,--password Indicates password for a website.
-v,--verbose Enables Verbose logging.
--sorting Sorts the download order. (VALUES = asc, ascending, old, oldest, desc, descending, latest, new)
```
#### Note :
Some websites like bato.to don't let you view some pages if you're not logged in. You'll have to create an account and pass the login information to the script via `-p` and `-u` arguments.
Expand Down
1 change: 1 addition & 0 deletions Supported_Sites.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,4 @@
* [Comic Naver](http://comic.naver.com/index.nhn)
* [Readcomiconline.to](http://readcomiconline.to/)
* [kisscomic.us](http://kisscomic.us/)
* [mangahere.co](http://mangahere.co/)
5 changes: 4 additions & 1 deletion comic_dl/comic_dl.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ def usage():
print('{:^80}'.format("-a,--about : Shows the info about this script and exits."))
print('{:^80}'.format("-u,--username : Indicates username for a website."))
print('{:^80}'.format("-p,--password : Indicates password for a website."))
print('{:^80}'.format("--sorting : Sorts the download order.(VALUES = asc, ascending,old,new,desc,descending,latest,new)"))


def main(argv):
Expand All @@ -50,6 +51,7 @@ def main(argv):
parser.add_argument('-p','--password',nargs=1,help='Indicates password for a website',default='None')
parser.add_argument('-u','--username',nargs=1,help='Indicates username for a website',default='None')
parser.add_argument("-v", "--verbose", help="Prints important debugging messages on screen.", action="store_true")
parser.add_argument("--sorting", nargs=1, help="Sorts the download order.")

logger = "False"
args = parser.parse_args()
Expand All @@ -73,7 +75,8 @@ def main(argv):
input_url = str(args.input[0]).strip()
User_Password = str(args.password[0].strip())
User_Name = str(args.username[0].strip())
url_checker(input_url, current_directory, User_Name, User_Password, logger=logger)
sortingOrder = str(args.sorting[0].strip())
url_checker(input_url, current_directory, User_Name, User_Password, logger, sortingOrder)
sys.exit()

if __name__ == "__main__":
Expand Down
34 changes: 19 additions & 15 deletions comic_dl/honcho.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,13 +22,14 @@
from sites.comic_naver import comic_naver_Url_Check
from sites.readcomic import readcomic_Url_Check
from sites.kisscomicus import kissmcomicus_Url_Check
from sites.mangahere import mangahere_Url_Check
from downloader import universal,cookies_required
from urllib.parse import urlparse




def url_checker(input_url, current_directory, User_Name, User_Password, logger):
def url_checker(input_url, current_directory, User_Name, User_Password, logger, sortingOrder):

if logger == "True":
logging.basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=logging.DEBUG)
Expand All @@ -37,29 +38,32 @@ def url_checker(input_url, current_directory, User_Name, User_Password, logger):
domain = urlparse(input_url).netloc
logging.debug("Domain : %s" % domain)

if domain in ['mangafox.me']:
if domain in ['mangafox.me', 'www.mangafox.me']:
mangafox_Url_Check(input_url, current_directory, logger)

elif domain in ['yomanga.co']:
yomanga_Url_Check(input_url, current_directory, logger)
elif domain in ['yomanga.co', 'www.yomanga.co']:
yomanga_Url_Check(input_url, current_directory, logger, sortingOrder)

elif domain in ['gomanga.co']:
gomanga_Url_Check(input_url, current_directory, logger)
elif domain in ['gomanga.co', 'www.gomanga.co']:
gomanga_Url_Check(input_url, current_directory, logger, sortingOrder)

elif domain in ['bato.to']:
elif domain in ['bato.to', 'www.bato.to']:
batoto_Url_Check(input_url, current_directory, User_Name, User_Password, logger)

elif domain in ['kissmanga.com']:
kissmanga_Url_Check(input_url, current_directory, logger)
elif domain in ['kissmanga.com', 'www.kissmanga.com']:
kissmanga_Url_Check(input_url, current_directory, logger, sortingOrder)

elif domain in ['comic.naver.com']:
comic_naver_Url_Check(input_url, current_directory, logger)
elif domain in ['comic.naver.com', 'www.comic.naver.com']:
comic_naver_Url_Check(input_url, current_directory, logger, sortingOrder)

elif domain in ['readcomiconline.to']:
readcomic_Url_Check(input_url, current_directory, logger)
elif domain in ['readcomiconline.to', 'www.readcomiconline.to']:
readcomic_Url_Check(input_url, current_directory, logger, sortingOrder)

elif domain in ['kisscomic.us']:
kissmcomicus_Url_Check(input_url, current_directory, logger)
elif domain in ['kisscomic.us', 'www.kisscomic.us']:
kissmcomicus_Url_Check(input_url, current_directory, logger, sortingOrder)

elif domain in ['mangahere.co','www.mangahere.co']:
mangahere_Url_Check(input_url, current_directory, logger, sortingOrder)

elif domain in ['']:
print('You need to specify at least 1 URL. Please run : comic-dl -h')
Expand Down
19 changes: 14 additions & 5 deletions comic_dl/sites/comic_naver.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def single_chapter(url,current_directory, logger):



def whole_series(url, current_directory, logger):
def whole_series(url, current_directory, logger, sortingOrder):



Expand All @@ -83,15 +83,24 @@ def whole_series(url, current_directory, logger):
if not first_link:
print("You failed to enter the last chapter count. Script will exit now.")
exit()

all_links = []
for x in range(1,int(first_link)):
Chapter_Url = "http://comic.naver.com/webtoon/detail.nhn?titleId=%s&no=%s" %(titleId,x)
debug("Chapter URL : %s" % Chapter_Url)
single_chapter(Chapter_Url,current_directory, logger)
all_links.append(Chapter_Url)
# print(all_links)
if str(sortingOrder).lower() in ['new','desc','descending','latest']:
for chapLink in all_links[::-1]:
single_chapter(chapLink, current_directory, logger)
elif str(sortingOrder).lower() in ['old','asc','ascending','oldest']:
# print("Running this")
for chapLink in all_links:
single_chapter(chapLink, current_directory, logger)
print("Finished Downloading")



def comic_naver_Url_Check(input_url, current_directory, logger):
def comic_naver_Url_Check(input_url, current_directory, logger, sortingOrder):
if logger == "True":
basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)

Expand All @@ -117,6 +126,6 @@ def comic_naver_Url_Check(input_url, current_directory, logger):
match = found.groupdict()
if match['list']:
url = str(input_url)
whole_series(url, current_directory, logger)
whole_series(url, current_directory, logger, sortingOrder)
else:
pass
21 changes: 17 additions & 4 deletions comic_dl/sites/gomanga.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ def single_chapter(url,current_directory, logger):
print('\n')
print("Completed downloading ",Series_Name)

def whole_series(url,current_directory, logger):
def whole_series(url,current_directory, logger, sortingOrder):
if not url:
print("Couldn't get the URL. Please report it on Github Repository.")

Expand All @@ -91,15 +91,28 @@ def whole_series(url,current_directory, logger):
soup = BeautifulSoup(Page_source, 'html.parser')

chapter_text = soup.findAll('div',{'class':'title'})
all_links = []


for link in chapter_text:
x = link.findAll('a')
for a in x:
url = a['href']
debug("Final URL : %s" % url)
single_chapter(url,current_directory, logger)
all_links.append(url)

# print(all_links)

if str(sortingOrder).lower() in ['new','desc','descending','latest']:
for chapLink in all_links:
single_chapter(chapLink, current_directory, logger)
elif str(sortingOrder).lower() in ['old','asc','ascending','oldest']:
# print("Running this")
for chapLink in all_links[::-1]:
single_chapter(chapLink, current_directory, logger)
print("Finished Downloading")

def gomanga_Url_Check(input_url,current_directory, logger):
def gomanga_Url_Check(input_url,current_directory, logger, sortingOrder):
if logger == "True":
basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)

Expand All @@ -124,7 +137,7 @@ def gomanga_Url_Check(input_url,current_directory, logger):
match = found.groupdict()
if match['comic']:
url = str(input_url)
whole_series(url,current_directory, logger)
whole_series(url,current_directory, logger, sortingOrder)
else:
pass

Expand Down
16 changes: 12 additions & 4 deletions comic_dl/sites/kisscomicus.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,26 +60,34 @@ def single_chapter(url, directory, logger):
print('\n')
print("Completed downloading %s" % Series_Name)

def whole_series(url, directory, logger):
def whole_series(url, directory, logger, sortingOrder):

scraper = create_scraper()
connection = scraper.get(url).content

soup = BeautifulSoup(connection, "html.parser")
all_links = soup.findAll('div', {'class': 'list-chapter mCustomScrollbar'})

chapterLinks = []
for link in all_links:
x = link.findAll('a')
for a in x:
# print(a['href'])
url = "http://kisscomic.us" + a['href']
debug("Chapter URL : %s" % url)
chapterLinks.append(url)

if str(sortingOrder).lower() in ['new', 'desc', 'descending', 'latest']:
for url in chapterLinks:
single_chapter(url, directory, logger)
elif str(sortingOrder).lower() in ['old', 'asc', 'ascending', 'oldest']:
print("Running This")
for url in chapterLinks[::-1]:
single_chapter(url, directory, logger)
print("Finished Downloading")



def kissmcomicus_Url_Check(input_url, current_directory, logger):
def kissmcomicus_Url_Check(input_url, current_directory, logger, sortingOrder):
if logger == "True":
basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)
kissmcomicus_single_regex = compile('https?://(?P<host>[^/]+)/chapters/(?P<comic>[\d\w-]+)(?:/Issue-)?')
Expand All @@ -101,6 +109,6 @@ def kissmcomicus_Url_Check(input_url, current_directory, logger):
match = found.groupdict()
if match['comic_name']:
url = str(input_url)
whole_series(url, current_directory, logger)
whole_series(url, current_directory, logger, sortingOrder)
else:
pass
25 changes: 16 additions & 9 deletions comic_dl/sites/kissmanga.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
from os import path,makedirs
from sys import exit
from bs4 import BeautifulSoup
from downloader.universal import main as FileDownloader
from downloader.cookies_required import with_referer as FileDownloader
from cfscrape import create_scraper
from logging import debug, basicConfig, DEBUG

Expand All @@ -18,6 +18,7 @@ def single_chapter(url, current_directory, logger):
scraper = create_scraper()

Page_Source = scraper.get(str(url)).content
cookies = scraper.cookies

formatted = BeautifulSoup(Page_Source, "lxml")

Expand Down Expand Up @@ -98,13 +99,13 @@ def single_chapter(url, current_directory, logger):
debug("Error inside Error : %s" % e)
File_Name_Final = str(ddl_image[-6:])
# print(File_Name_Final)
FileDownloader(File_Name_Final, Directory_path, ddl_image, logger)
FileDownloader(File_Name_Final, Directory_path, cookies, ddl_image, url, logger)

print('\n')
print("Completed downloading ", Series_Name, ' - ', chapter_number)


def whole_series(url, current_directory, logger):
def whole_series(url, current_directory, logger, sortingOrder):

scraper = create_scraper()

Expand Down Expand Up @@ -132,13 +133,19 @@ def whole_series(url, current_directory, logger):

print("Total Chapters To Download : ", len(link_list))

for item in link_list:
url = str(item)
debug("Chapter Links : %s" % url)
single_chapter(url, current_directory, logger)
if str(sortingOrder).lower() in ['new', 'desc', 'descending', 'latest']:
for item in link_list:
url = str(item)
debug("Chapter Links : %s" % url)
single_chapter(url, current_directory, logger)
elif str(sortingOrder).lower() in ['old', 'asc', 'ascending', 'oldest']:
for item in link_list[::-1]:
url = str(item)
debug("Chapter Links : %s" % url)
single_chapter(url, current_directory, logger)


def kissmanga_Url_Check(input_url, current_directory, logger):
def kissmanga_Url_Check(input_url, current_directory, logger, sortingOrder):
if logger == "True":
basicConfig(format='%(levelname)s: %(message)s', filename="Error Log.log", level=DEBUG)

Expand Down Expand Up @@ -168,6 +175,6 @@ def kissmanga_Url_Check(input_url, current_directory, logger):
match = found.groupdict()
if match['comic']:
url = str(input_url)
whole_series(url, current_directory, logger)
whole_series(url, current_directory, logger, sortingOrder)
else:
pass
Loading

0 comments on commit 4338816

Please sign in to comment.