Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions docs/help.md
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@ Options:
--cid, --conversation TEXT Continue the conversation with the given ID.
--key TEXT API key to use
--save TEXT Save prompt with this template name
-r, --rich Format output as rich markdown text
--help Show this message and exit.
```

Expand All @@ -119,6 +120,7 @@ Options:
-o, --option <TEXT TEXT>... key/value options for the model
--no-stream Do not stream output
--key TEXT API key to use
-r, --rich Format output as rich markdown text
--help Show this message and exit.
```

Expand Down
51 changes: 40 additions & 11 deletions llm/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@
import base64
import pathlib
import pydantic
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
from runpy import run_module
import shutil
import sqlite_utils
Expand All @@ -45,6 +48,8 @@

DEFAULT_TEMPLATE = "prompt: "

console = Console()


def _validate_metadata_json(ctx, param, value):
if value is None:
Expand Down Expand Up @@ -121,6 +126,13 @@ def cli():
)
@click.option("--key", help="API key to use")
@click.option("--save", help="Save prompt with this template name")
@click.option(
"--rich",
"-r",
is_flag=True,
default=False,
help="Format output as rich markdown text",
)
def prompt(
prompt,
system,
Expand All @@ -135,6 +147,7 @@ def prompt(
conversation_id,
key,
save,
rich,
):
"""
Execute a prompt
Expand Down Expand Up @@ -272,13 +285,7 @@ def read_prompt():

try:
response = prompt_method(prompt, system, **validated_options)
if should_stream:
for chunk in response:
print(chunk, end="")
sys.stdout.flush()
print("")
else:
print(response.text())
print_response(response=response, stream=should_stream, rich=rich)
except Exception as ex:
raise click.ClickException(str(ex))

Expand Down Expand Up @@ -326,6 +333,13 @@ def read_prompt():
)
@click.option("--no-stream", is_flag=True, help="Do not stream output")
@click.option("--key", help="API key to use")
@click.option(
"--rich",
"-r",
is_flag=True,
default=False,
help="Format output as rich markdown text",
)
def chat(
system,
model_id,
Expand All @@ -336,6 +350,7 @@ def chat(
options,
no_stream,
key,
rich,
):
"""
Hold an ongoing chat with a model.
Expand Down Expand Up @@ -435,11 +450,9 @@ def chat(
response = conversation.prompt(prompt, system, **validated_options)
# System prompt only sent for the first message:
system = None
for chunk in response:
print(chunk, end="")
sys.stdout.flush()
print_response(response=response, stream=True, rich=rich)
response.log_to_db(db)
print("")
console.print("")


def load_conversation(conversation_id: Optional[str]) -> Optional[Conversation]:
Expand Down Expand Up @@ -1641,3 +1654,19 @@ def _human_readable_size(size_bytes):

def logs_on():
    """Return True unless logging has been disabled.

    Logging is considered off when a marker file named ``logs-off``
    exists in the user directory.
    """
    logs_off_marker = user_dir() / "logs-off"
    return not logs_off_marker.exists()


def print_response(response: Response, stream: bool = True, rich: bool = False):
    """Write a model response to the terminal.

    Args:
        response: the model Response; iterating it yields streamed text
            chunks, and ``response.text()`` returns the full text.
        stream: if True, emit output incrementally as chunks arrive.
        rich: if True, render the output as Markdown via the rich library.

    Non-rich output deliberately uses the builtin ``print`` rather than
    ``console.print``: rich's Console interprets ``[...]`` as markup (which
    can raise MarkupError or swallow bracketed text), applies automatic
    syntax highlighting, and soft-wraps at the terminal width — all of
    which would alter plain output the user did not ask to be formatted.
    """
    if stream and not rich:
        # Plain streaming: flush each chunk so output appears immediately.
        for chunk in response:
            print(chunk, end="", flush=True)
    elif stream and rich:
        # Accumulate markdown and re-render the live display per chunk.
        md = ""
        with Live(Markdown(""), console=console) as live:
            for chunk in response:
                md += chunk
                live.update(Markdown(md))
    elif rich:
        # Non-streaming rich: render the complete text as Markdown once.
        console.print(Markdown(response.text()))
    else:
        # Non-streaming plain: emit the text verbatim.
        print(response.text())
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ def get_long_description():
"python-ulid",
"setuptools",
"pip",
"rich",
],
extras_require={
"test": [
Expand Down