Changing the return of get_quotes to tuple in brainyquote.py #22

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 2 additions & 5 deletions pyquotes/brainyquote.py
@@ -51,17 +51,14 @@ def get_quotes(person, category):
     # Getting the quote of the related author
     get_quote = soup_author.find_all('a', attrs={'title': 'view quote'})
     quote_list = []
-    big_list = []
     for i in range(count):
-        quote_list.append(get_quote[i].text)
-        big_list.append(quote_list)
+        quote_list.append((get_quote[i].text, person))
 
     if len(quote_list) == 0:
         return('''Oops! It seems that there are no quotes of the author of that
         category.
         \nYou may consider changing the category or the author ''')
-    quote_list.append(person)
 
+    quote_list = tuple(quote_list)
     return(quote_list)
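With this change, get_quotes pairs each quote with its author as it collects them, so the function now returns a tuple of (quote, person) pairs rather than a list of bare strings with the author appended at the end. A minimal usage sketch, assuming the module is importable as pyquotes.brainyquote and that the person and category arguments shown are purely illustrative:

from pyquotes import brainyquote

# Illustrative arguments; the accepted formats are defined in brainyquote.py.
quotes = brainyquote.get_quotes('albert einstein', 'motivational')

if isinstance(quotes, str):
    # No quotes found: get_quotes returns the "Oops!" message string instead.
    print(quotes)
else:
    # Each element is now a (quote_text, person) pair inside an outer tuple.
    for quote_text, person in quotes:
        print(f'"{quote_text}" - {person}')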


44 changes: 44 additions & 0 deletions pyquotes/wikiscrap.py
@@ -0,0 +1,44 @@
from bs4 import BeautifulSoup
import requests

source = requests.get("https://en.wikiquote.org/wiki/Main_Page").text
soup = BeautifulSoup(source, "lxml")
# names is a list of all linked author names on the main page of Wikiquote
names_path = soup.find('div', class_='mw-parser-output').find_all('div')[11]
names = names_path.find_all('p')[1].find_all('a')


def get_quotes(person):
    quotes_by_author = list()
    for name in names:
        if (person == name.text.lower()):
            link = "https://en.wikiquote.org" + name['href']
            link = requests.get(link).text
            soup_for_indiv = BeautifulSoup(link, "lxml")
            q = soup_for_indiv.find_all('div', class_='mw-parser-output')[0]
            quotes = q.find_all('ul')
            for quote in quotes:
                try:
                    if quote.li.b is None:
                        continue
                    elif quote.li.b.text.isdigit():
                        continue
                    elif len(quote.li.b.text.split(' ')) < 2:
                        continue
                    else:
                        temp = [quote.li.b.text, name.text]
                        quotes_by_author.append(tuple(temp))
                except:
                    # Skip list items without the expected <li><b> structure
                    continue
        else:
            continue
    return quotes_by_author


# Scraping the quote of the day from the main page
p = soup.find_all('table')[2].find_all('tbody')[2].find_all('tr')
quote_of_the_day = p[0].td.text
author_for_quote_of_the_day = p[1].td.a.text


def get_quote_of_the_day():
    return (quote_of_the_day.rstrip(), author_for_quote_of_the_day)
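
For context, a short sketch of how the new module might be called, assuming it is importable as pyquotes.wikiscrap; the author name is illustrative and must match the lowercased link text on the Wikiquote main page, since get_quotes compares against name.text.lower():

from pyquotes import wikiscrap

# Illustrative author; only names linked on the Wikiquote main page will match.
for quote, author in wikiscrap.get_quotes('albert einstein'):
    print(f'"{quote}" - {author}')

# The quote of the day is scraped once at import time, so this returns the
# same (quote, author) pair for the lifetime of the process.
daily_quote, daily_author = wikiscrap.get_quote_of_the_day()
print(f'"{daily_quote}" - {daily_author}')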