Skip to content
This repository was archived by the owner on Jun 29, 2024. It is now read-only.

medium level #62

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 17 additions & 0 deletions task1.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import requests

# NOTE(review): API keys should not be hard-coded in source; load from an
# environment variable or config file instead.
api_key = '78c68b12139482d7e0784209bd15f555'

user_input = input("Enter city: ")

# Fetch current weather for the given city (imperial units -> Fahrenheit).
# A timeout prevents the script from hanging forever on a stalled connection.
weather_data = requests.get(
    f"https://api.openweathermap.org/data/2.5/weather?q={user_input}&units=imperial&APPID={api_key}",
    timeout=10)

# Parse the JSON body once instead of re-parsing it on every access.
payload = weather_data.json()

# OpenWeatherMap reports an unknown city with cod == '404' (a string).
if payload['cod'] == '404':
    print("No City Found")
else:
    weather = payload['weather'][0]['main']
    temp = round(payload['main']['temp'])

    print(f"The weather in {user_input} is: {weather}")
    print(f"The temperature in {user_input} is: {temp}ºF")
28 changes: 28 additions & 0 deletions task2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import requests
from bs4 import BeautifulSoup

# Get the HTML text. A timeout avoids hanging on a dead server, and
# raise_for_status fails loudly on HTTP errors instead of silently
# parsing an error page.
url = "https://quotes.toscrape.com/"
response = requests.get(url, timeout=10)
response.raise_for_status()
text = response.text

# Parse the text with Beautiful Soup
soup = BeautifulSoup(text, "lxml")

# Extract the (deduplicated) set of author names on the page.
authors = soup.find_all("small", class_="author")
author_set = set(author.text.strip() for author in authors)

# Extract the quote texts, in page order.
quotes = soup.find_all("span", class_="text")
quote_list = [quote.text.strip() for quote in quotes]

# Extract the top ten tags. soup.find returns None when the tags box is
# absent, so guard against that to get an empty list rather than an
# AttributeError.
top_tags = soup.find("div", class_="tags-box")
tags = top_tags.find_all("a") if top_tags is not None else []
tag_list = [tag.text.strip() for tag in tags]
# Loop through all pages to get unique authors (if applicable)
def get_page_authors(page_url):
    """Return the set of author names found on a single quotes page.

    Fetches *page_url*, parses it, and extracts every
    ``<small class="author">`` element, mirroring the extraction logic
    used for the landing page above.
    """
    resp = requests.get(page_url, timeout=10)
    resp.raise_for_status()
    page_soup = BeautifulSoup(resp.text, "lxml")
    return {a.text.strip() for a in page_soup.find_all("small", class_="author")}
32 changes: 32 additions & 0 deletions task3.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import nltk
from nltk.tokenize import word_tokenize

# Download the tokenizer models word_tokenize needs.
# NOTE(review): this runs on every import/startup; it is a no-op when the
# data is already present, but requires network access the first time.
nltk.download('punkt')

# Canned chatbot replies, keyed by the trigger phrase they answer.
responses = {
    "hi": "Hello! How can I assist you?",
    "how are you": "I'm just a program, so I don't have feelings, but I'm here to help!",
    # Add more predefined responses here
}

def preprocess_input(user_input):
    """Lowercase *user_input* and split it into word tokens."""
    normalized = user_input.lower()
    # Additional preprocessing (stemming, stop-word removal, ...) could go here.
    return word_tokenize(normalized)

def chatbot_response(user_input):
    """Return the canned reply whose trigger shares a word with *user_input*.

    The original code tested ``token in query``, a substring match, so a
    one-letter token such as "i" matched the key "hi" and produced the
    wrong reply. Matching is now done on whole words of the trigger phrase.

    Falls back to a generic prompt when nothing matches. NOTE(review):
    iteration order of ``responses`` decides ties between triggers.
    """
    tokens = preprocess_input(user_input)
    for query, response in responses.items():
        query_words = set(query.split())
        if any(token in query_words for token in tokens):
            return response
    return "I didn't understand. Can you please rephrase?"

if __name__ == "__main__":
    # Simple REPL: keep answering until the user types "exit".
    while True:
        user_query = input("You: ")
        if user_query.lower() != "exit":
            print("Chatbot:", chatbot_response(user_query))
            continue
        print("Chatbot: Goodbye!")
        break
17 changes: 17 additions & 0 deletions task4.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from PyPDF2 import PdfFileReader, PdfFileWriter

def split_pdf(input_pdf, output_folder):
    """Split *input_pdf* into one single-page PDF per page in *output_folder*.

    Output files are named ``page_1.pdf``, ``page_2.pdf``, ... The output
    folder is created if it does not exist (the original crashed on
    ``open`` when the directory was missing).

    NOTE(review): PdfFileReader/PdfFileWriter are the legacy PyPDF2 1.x
    API, removed in PyPDF2 3.x (which uses PdfReader/PdfWriter) — confirm
    the pinned PyPDF2 version.
    """
    import os

    # Ensure the destination exists so open(..., "wb") below cannot fail
    # with FileNotFoundError.
    os.makedirs(output_folder, exist_ok=True)

    pdf_reader = PdfFileReader(input_pdf)
    total_pages = pdf_reader.numPages

    for page_num in range(total_pages):
        # A fresh writer per page yields one single-page output file.
        pdf_writer = PdfFileWriter()
        pdf_writer.addPage(pdf_reader.getPage(page_num))

        output_file = f"{output_folder}/page_{page_num + 1}.pdf"
        with open(output_file, "wb") as output_pdf:
            pdf_writer.write(output_pdf)

        print(f"Page {page_num + 1} saved as {output_file}")