Commit 99999d0

feat: add ai for analyzing the resume

hungdtrn committed Mar 23, 2024
1 parent 773b222 commit 99999d0
Showing 4 changed files with 28 additions and 31 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -134,7 +134,7 @@ dmypy.json
cython_debug/

# folder containing temporary test files
-cache
+# cache

# litellm auto generated files
litellm_uuid.txt
Empty file added cache/.file
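Taken together, these two changes stop ignoring the cache folder (the old rule is commented out) and check in an empty placeholder, cache/.file, so the directory exists in a fresh clone; review_resume below uses it as the download target for PDFs. A common alternative pattern, shown here only as a sketch and not what this commit does, keeps the folder tracked while still ignoring its temporary contents:

cache/*
!cache/.file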
2 changes: 1 addition & 1 deletion src/discord_bot/bot.py
@@ -5,7 +5,7 @@


MODEL_CHOICES = ["gpt-3.5-turbo", "gpt-4", "phi"]
-DEFAULT_MODEL = MODEL_CHOICES[2]
+DEFAULT_MODEL = MODEL_CHOICES[0]
DISCORD_BOT_TOKEN = Settings().DISCORD_BOT_TOKEN
AI_SERVER_URL = Settings().AI_SERVER_URL

55 changes: 26 additions & 29 deletions src/discord_bot/llm.py
@@ -14,8 +14,27 @@
RESERVED_SPACE = 50 # for other additional strings. E.g. number `(1/4)`, `Q: `, `A: `, etc.


+async def _call_llm(model: str, question: str, server_url: str, is_add_question=True) -> list[str]:
+    try:
+        client = openai.AsyncOpenAI(base_url=server_url, api_key="FAKE")
+        response = await client.chat.completions.create(
+            model=model,
+            messages=[{"role": "user", "content": question}],
+        )
+        content = response.choices[0].message.content or "No response from the model. Please try again"
+        messages = split(content)
+        messages = add_number(messages)
+        if is_add_question:
+            messages = add_question(messages, question)
+
+        return messages
+
+    except Exception as e:
+        return split(f"Error: {e}")
+
+
async def review_resume(model: str, url: str, server_url: str) -> list[str]:
    try:
        # Download PDF
        output_path = f"cache/{time.time()}.pdf"
        os.system(f"poetry run gdown -O {output_path} --fuzzy {url}")
@@ -28,41 +28,19 @@ async def review_resume(model: str, url: str, server_url: str) -> list[str]:
        for page in downloaded_file:
            text.append(page.get_text())
        text = "\n\n".join(text)
-        os.remove(output_path) # Remove the downloaded file
+        os.remove(output_path)  # Remove the downloaded file

print(f"Parsed content: {text}")


# Send to LLM
#
# Your implementation here
#
#
#
#


return ["NOT IMPLEMENTED YET"]

question = f"You are a resume reviewer. Your tasks are:\n- Show sentences with incorrect grammars, and suggest a way to correct them.\n- Provide suggestions to improve the resume: \n\n{text}"

+        return await _call_llm(model, question, server_url, is_add_question=False)
    except Exception as e:
        return split(f"Error: {e}")



async def answer_question(model: str, question: str, server_url: str) -> list[str]:
-    try:
-        client = openai.AsyncOpenAI(base_url=server_url, api_key="FAKE")
-        response = await client.chat.completions.create(
-            model=model,
-            messages=[{"role": "user", "content": question}],
-        )
-        content = response.choices[0].message.content or "No response from the model. Please try again"
-        messages = split(content)
-        messages = add_number(messages)
-        messages = add_question(messages, question)
-        return messages
-
-    except Exception as e:
-        return split(f"Error: {e}")
+    return await _call_llm(model, question, server_url, is_add_question=True)


def split(answer: str) -> list[str]:
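The body of split is collapsed in this view. As a rough sketch only: given the RESERVED_SPACE constant above and Discord's 2,000-character message cap (the cap is an assumption here; it does not appear in the diff), a chunker with this signature might look like the following hypothetical implementation, not the repository's actual code:

RESERVED_SPACE = 50  # headroom for decorations like "(1/4)", "Q: ", "A: " (from the diff above)
DISCORD_MESSAGE_LIMIT = 2000  # Discord's per-message cap (assumed, not shown in the diff)

def split(answer: str) -> list[str]:
    """Chunk a long answer so each piece, plus its decorations, fits in one message."""
    limit = DISCORD_MESSAGE_LIMIT - RESERVED_SPACE
    if not answer:
        return [answer]
    return [answer[i : i + limit] for i in range(0, len(answer), limit)]

Whatever the real implementation does, the refactor's point stands: both answer_question and review_resume now flow through _call_llm, which splits, numbers, and optionally prefixes the question before returning Discord-sized messages.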
