Skip to content

Commit

Permalink
Refactor ask function in cli.py and completion_openai_gpt function in core.py

Browse files Browse the repository at this point in the history

- Refactor the ask function in cli.py to improve code readability and handle different models for completion.
- Refactor the completion_openai_gpt function in core.py to add support for streaming and handle different models for completion.

Update version in pyproject.toml

- Update the version in pyproject.toml from 0.3.2 to 0.3.3.
  • Loading branch information
Rishang committed Sep 17, 2024
1 parent dff2de7 commit 413b380
Show file tree
Hide file tree
Showing 4 changed files with 47 additions and 25 deletions.
26 changes: 19 additions & 7 deletions src/heygpt/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,14 +102,26 @@ def ask(
text = Prompt.ask("[blue]Enter text")

# log.debug(text)
stream = False
if model.startswith("o1"):
completion = completion_openai_gpt(
command=command,
text=text,
model=model,
_print=raw,
stream=stream,
)
else:
stream = True
completion = completion_openai_gpt(
command=command,
text=text,
model=model,
_print=raw,
temperature=temperature,
stream=stream,
)

completion = completion_openai_gpt(
command=command,
text=text,
model=model,
_print=raw,
temperature=temperature,
)
content = completion

if not raw:
Expand Down
43 changes: 27 additions & 16 deletions src/heygpt/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,8 @@ def completion_openai_gpt(
system: str = "",
model=model,
_print=False,
temperature=0.7,
temperature=None,
stream=True,
):
"""
ref: https://docs.openai.com/api-reference/completions/create
Expand All @@ -66,21 +67,31 @@ def completion_openai_gpt(
else:
messages = [{"role": "user", "content": text}]

chat_completion = completion(
model=model,
messages=messages,
temperature=temperature,
stream=True,
)

for chunk in chat_completion:
# Process each chunk as needed
c = chunk.choices[0].delta.content or ""
out += c
if _print:
console.print(c, end="", markup=True)

return out
if stream:
chat_completion = completion(
model=model,
messages=messages,
temperature=temperature,
stream=True,
)

for chunk in chat_completion:
# Process each chunk as needed
c = chunk.choices[0].delta.content or ""
out += c
if _print:
console.print(c, end="", markup=True)

return out
else:
chat_completion = completion(
model=model,
messages=messages,
temperature=temperature,
stream=False,
)

return chat_completion.choices[0].message.content


def wisper(audio_file):
Expand Down
1 change: 0 additions & 1 deletion src/heygpt/serve.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ def local_css(file_name="", style=""):

# React to user input
if chat_input := st.chat_input("What is up?"):

# set stream to True if model is OpenAI
stream = True
if user_model.startswith("o1-"):
Expand Down
2 changes: 1 addition & 1 deletion src/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "heygptcli"
version = "0.3.2"
version = "0.3.3"
description = ""
authors = ["Rishang <rishang@localhost.com>"]
readme = "README.md"
Expand Down

0 comments on commit 413b380

Please sign in to comment.