Skip to content

Commit 128676c

Browse files
feat: implement gptme-util CLI for utilities (#261)
* feat(gptme-util): implement tools subcommands - Added tools list and info commands - Added language tag support - Reused existing code from commands.py - Added initial tests for utility CLI - Moved util.py to util/__init__.py for better organization
* fix(gptme-util): improve error handling and tests - Added empty list handling in chats_list - Added model validation in tokens_count - Added comprehensive test cases - Skip context test until module is available
* refactor: remove duplicate ModelDictMeta definition - Import _ModelDictMeta from models.py instead of duplicating definition - Fixes code duplication identified by pylint
* wip
* fix: fixed recursive import
* docs: fixed docs
* docs: added gptme-util to CLI reference
* Apply suggestions from code review
* refactor: moved stuff around
* Apply suggestions from code review

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>

* build: added pylint command for code duplication checks to Makefile
* fix: more refactor, support summarizing conversations with `gptme-util chats list --summarize`
* test: fixed tests

---------

Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com>
1 parent e4ce6c8 commit 128676c

File tree

13 files changed

+532
-98
lines changed

13 files changed

+532
-98
lines changed

Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ lint:
5252
! grep -r 'ToolUse("python"' ${SRCDIRS}
5353
@# ruff
5454
poetry run ruff check ${RUFF_ARGS}
55-
55+
poetry run pylint --disable=all --enable=duplicate-code gptme/
5656

5757
format:
5858
poetry run ruff check --fix-only ${RUFF_ARGS}

docs/cli.rst

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,3 +21,7 @@ This is the full CLI reference. For a more concise version, run ``gptme --help``
2121
.. click:: gptme.eval:main
2222
:prog: gptme-eval
2323
:nested: full
24+
25+
.. click:: gptme.util.cli:main
26+
:prog: gptme-util
27+
:nested: full

docs/server.rst

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ It can be started by running the following command:
1313
1414
gptme-server
1515
16-
For more CLI usage, see :ref:`the CLI documentation <cli:gptme-server>`.
16+
For more CLI usage, see the :ref:`CLI reference <cli:gptme-server>`.
1717

1818
There are a few different interfaces available:
1919

gptme/llm_openai_models.py

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,10 @@
1-
from typing import TypedDict
2-
from typing_extensions import NotRequired
1+
from typing import TYPE_CHECKING
32

3+
if TYPE_CHECKING:
4+
from .models import _ModelDictMeta # fmt: skip
45

5-
class _ModelDictMeta(TypedDict):
6-
context: int
7-
max_output: NotRequired[int]
8-
price_input: NotRequired[float]
9-
price_output: NotRequired[float]
106

11-
12-
OPENAI_MODELS: dict[str, _ModelDictMeta] = {
7+
OPENAI_MODELS: dict[str, "_ModelDictMeta"] = {
138
# GPT-4o
149
"gpt-4o": {
1510
"context": 128_000,

gptme/logmanager.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -331,13 +331,26 @@ def _conversation_files() -> list[Path]:
331331

332332
@dataclass(frozen=True)
333333
class ConversationMeta:
334+
"""Metadata about a conversation."""
335+
334336
name: str
335337
path: str
336338
created: float
337339
modified: float
338340
messages: int
339341
branches: int
340342

343+
def format(self, metadata=False) -> str:
344+
"""Format conversation metadata for display."""
345+
output = f"{self.name}"
346+
if metadata:
347+
output += f"\nMessages: {self.messages}"
348+
output += f"\nCreated: {datetime.fromtimestamp(self.created)}"
349+
output += f"\nModified: {datetime.fromtimestamp(self.modified)}"
350+
if self.branches > 1:
351+
output += f"\n({self.branches} branches)"
352+
return output
353+
341354

342355
def get_conversations() -> Generator[ConversationMeta, None, None]:
343356
"""Returns all conversations, excluding ones used for testing, evals, etc."""
@@ -368,6 +381,23 @@ def get_user_conversations() -> Generator[ConversationMeta, None, None]:
368381
yield conv
369382

370383

384+
def list_conversations(
385+
limit: int = 20,
386+
include_test: bool = False,
387+
) -> list[ConversationMeta]:
388+
"""
389+
List conversations with a limit.
390+
391+
Args:
392+
limit: Maximum number of conversations to return
393+
include_test: Whether to include test conversations
394+
"""
395+
conversation_iter = (
396+
get_conversations() if include_test else get_user_conversations()
397+
)
398+
return list(islice(conversation_iter, limit))
399+
400+
371401
def _gen_read_jsonl(path: PathLike) -> Generator[Message, None, None]:
372402
with open(path) as file:
373403
for line in file.readlines():

gptme/message.py

Lines changed: 24 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,30 @@ def to_xml(self) -> str:
183183
attrs = f"role='{self.role}'"
184184
return f"<message {attrs}>\n{self.content}\n</message>"
185185

186-
def format(self, oneline: bool = False, highlight: bool = False) -> str:
186+
def format(
187+
self,
188+
oneline: bool = False,
189+
highlight: bool = False,
190+
max_length: int | None = None,
191+
) -> str:
192+
"""Format the message for display.
193+
194+
Args:
195+
oneline: Whether to format the message as a single line
196+
highlight: Whether to highlight code blocks
197+
max_length: Maximum length of the message. If None, no truncation is applied.
198+
If set, will truncate at first newline or max_length, whichever comes first.
199+
"""
200+
if max_length is not None:
201+
first_newline = self.content.find("\n")
202+
max_length = (
203+
min(max_length, first_newline) if first_newline != -1 else max_length
204+
)
205+
content = self.content[:max_length]
206+
if len(content) < len(self.content):
207+
content += "..."
208+
temp_msg = self.replace(content=content)
209+
return format_msgs([temp_msg], oneline=True, highlight=highlight)[0]
187210
return format_msgs([self], oneline=oneline, highlight=highlight)[0]
188211

189212
def print(self, oneline: bool = False, highlight: bool = True) -> None:

gptme/prompts.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,6 @@
1717
from .__version__ import __version__
1818
from .config import get_config, get_project_config
1919
from .message import Message
20-
from .tools import loaded_tools
2120
from .util import document_prompt_function
2221

2322
PromptType = Literal["full", "short"]
@@ -199,6 +198,8 @@ def prompt_project() -> Generator[Message, None, None]:
199198

200199
def prompt_tools(examples: bool = True) -> Generator[Message, None, None]:
201200
"""Generate the tools overview prompt."""
201+
from .tools import loaded_tools # fmt: skip
202+
202203
assert loaded_tools, "No tools loaded"
203204
prompt = "# Tools Overview"
204205
for tool in loaded_tools:

gptme/tools/chats.py

Lines changed: 30 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -2,32 +2,16 @@
22
List, search, and summarize past conversation logs.
33
"""
44

5-
import itertools
65
import logging
6+
import textwrap
77
from pathlib import Path
8-
from textwrap import indent
9-
from typing import TYPE_CHECKING
108

119
from ..message import Message
1210
from .base import ToolSpec, ToolUse
1311

14-
if TYPE_CHECKING:
15-
from ..logmanager import LogManager
16-
17-
1812
logger = logging.getLogger(__name__)
1913

2014

21-
def _format_message_snippet(msg: Message, max_length: int = 100) -> str:
22-
"""Format a message snippet for display."""
23-
first_newline = msg.content.find("\n")
24-
max_length = min(max_length, first_newline) if first_newline != -1 else max_length
25-
content = msg.content[:max_length]
26-
return f"{msg.role.capitalize()}: {content}" + (
27-
"..." if len(content) <= len(msg.content) else ""
28-
)
29-
30-
3115
def _get_matching_messages(log_manager, query: str, system=False) -> list[Message]:
3216
"""Get messages matching the query."""
3317
return [
@@ -38,36 +22,9 @@ def _get_matching_messages(log_manager, query: str, system=False) -> list[Messag
3822
]
3923

4024

41-
def _summarize_conversation(
42-
log_manager: "LogManager", include_summary: bool
43-
) -> list[str]:
44-
"""Summarize a conversation."""
45-
# noreorder
46-
from ..llm import summarize as llm_summarize # fmt: skip
47-
48-
summary_lines = []
49-
if include_summary:
50-
summary = llm_summarize(log_manager.log.messages)
51-
summary_lines.append(indent(f"Summary: {summary.content}", " "))
52-
else:
53-
non_system_messages = [msg for msg in log_manager.log if msg.role != "system"]
54-
if non_system_messages:
55-
first_msg = non_system_messages[0]
56-
last_msg = non_system_messages[-1]
57-
58-
summary_lines.append(
59-
f" First message: {_format_message_snippet(first_msg)}"
60-
)
61-
if last_msg != first_msg:
62-
summary_lines.append(
63-
f" Last message: {_format_message_snippet(last_msg)}"
64-
)
65-
66-
summary_lines.append(f" Total messages: {len(log_manager.log)}")
67-
return summary_lines
68-
69-
70-
def list_chats(max_results: int = 5, include_summary: bool = False) -> None:
25+
def list_chats(
26+
max_results: int = 5, metadata=False, include_summary: bool = False
27+
) -> None:
7128
"""
7229
List recent chat conversations and optionally summarize them using an LLM.
7330
@@ -77,24 +34,30 @@ def list_chats(max_results: int = 5, include_summary: bool = False) -> None:
7734
If True, uses an LLM to generate a comprehensive summary.
7835
If False, uses a simple strategy showing snippets of the first and last messages.
7936
"""
80-
# noreorder
81-
from ..logmanager import LogManager, get_user_conversations # fmt: skip
37+
from ..llm import summarize # fmt: skip
38+
from ..logmanager import LogManager, list_conversations # fmt: skip
8239

83-
conversations = list(itertools.islice(get_user_conversations(), max_results))
40+
conversations = list_conversations(max_results)
8441
if not conversations:
8542
print("No conversations found.")
8643
return
8744

8845
print(f"Recent conversations (showing up to {max_results}):")
8946
for i, conv in enumerate(conversations, 1):
90-
print(f"\n{i}. {conv.name}")
91-
print(f" Created: {conv.created}")
47+
if metadata:
48+
print() # Add a newline between conversations
49+
print(f"{i:2}. {textwrap.indent(conv.format(metadata=True), ' ')[4:]}")
9250

9351
log_path = Path(conv.path)
9452
log_manager = LogManager.load(log_path)
9553

96-
summary_lines = _summarize_conversation(log_manager, include_summary)
97-
print("\n".join(summary_lines))
54+
# Use the LLM to generate a summary if requested
55+
if include_summary:
56+
summary = summarize(log_manager.log.messages)
57+
print(
58+
f"\n Summary:\n{textwrap.indent(summary.content, ' > ', predicate=lambda _: True)}"
59+
)
60+
print()
9861

9962

10063
def search_chats(query: str, max_results: int = 5, system=False) -> None:
@@ -106,11 +69,10 @@ def search_chats(query: str, max_results: int = 5, system=False) -> None:
10669
max_results (int): Maximum number of conversations to display.
10770
system (bool): Whether to include system messages in the search.
10871
"""
109-
# noreorder
110-
from ..logmanager import LogManager, get_user_conversations # fmt: skip
72+
from ..logmanager import LogManager, list_conversations # fmt: skip
11173

11274
results: list[dict] = []
113-
for conv in get_user_conversations():
75+
for conv in list_conversations(max_results):
11476
log_path = Path(conv.path)
11577
log_manager = LogManager.load(log_path)
11678

@@ -119,37 +81,31 @@ def search_chats(query: str, max_results: int = 5, system=False) -> None:
11981
if matching_messages:
12082
results.append(
12183
{
122-
"conversation": conv.name,
84+
"conversation": conv,
12385
"log_manager": log_manager,
12486
"matching_messages": matching_messages,
12587
}
12688
)
12789

128-
if len(results) >= max_results:
129-
break
130-
131-
# Sort results by the number of matching messages, in descending order
132-
results.sort(key=lambda x: len(x["matching_messages"]), reverse=True)
133-
13490
if not results:
13591
print(f"No results found for query: '{query}'")
13692
return
13793

94+
# Sort results by the number of matching messages, in descending order
95+
results.sort(key=lambda x: len(x["matching_messages"]), reverse=True)
96+
13897
print(f"Search results for query: '{query}'")
13998
print(f"Found matches in {len(results)} conversation(s):")
14099

141100
for i, result in enumerate(results, 1):
142-
print(f"\n{i}. Conversation: {result['conversation']}")
101+
conversation = result["conversation"]
102+
print(f"\n{i}. {conversation.format()}")
143103
print(f" Number of matching messages: {len(result['matching_messages'])}")
144104

145-
summary_lines = _summarize_conversation(
146-
result["log_manager"], include_summary=False
147-
)
148-
print("\n".join(summary_lines))
149-
105+
# Show sample matches
150106
print(" Sample matches:")
151107
for j, msg in enumerate(result["matching_messages"][:3], 1):
152-
print(f" {j}. {_format_message_snippet(msg)}")
108+
print(f" {j}. {msg.format(max_length=100)}")
153109
if len(result["matching_messages"]) > 3:
154110
print(
155111
f" ... and {len(result['matching_messages']) - 3} more matching message(s)"
@@ -165,23 +121,18 @@ def read_chat(conversation: str, max_results: int = 5, incl_system=False) -> Non
165121
max_results (int): Maximum number of messages to display.
166122
incl_system (bool): Whether to include system messages.
167123
"""
168-
# noreorder
169-
from ..logmanager import LogManager, get_conversations # fmt: skip
170-
171-
conversations = list(get_conversations())
124+
from ..logmanager import LogManager, list_conversations # fmt: skip
172125

173-
for conv in conversations:
126+
for conv in list_conversations():
174127
if conv.name == conversation:
175128
log_path = Path(conv.path)
176129
logmanager = LogManager.load(log_path)
177130
print(f"Reading conversation: {conversation}")
178131
i = 0
179132
for msg in logmanager.log:
180133
if msg.role != "system" or incl_system:
181-
print(f"{i}. {_format_message_snippet(msg)}")
134+
print(f"{i}. {msg.format(max_length=100)}")
182135
i += 1
183-
else:
184-
print(f"{i}. (system message)")
185136
if i >= max_results:
186137
break
187138
break

gptme/util.py renamed to gptme/util/__init__.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
1+
"""
2+
Utility package for gptme.
3+
"""
4+
15
import functools
26
import io
37
import logging
@@ -17,7 +21,7 @@
1721
from rich.console import Console
1822
from rich.syntax import Syntax
1923

20-
from .clipboard import copy, set_copytext
24+
from ..clipboard import copy, set_copytext
2125

2226
EMOJI_WARN = "⚠️"
2327

@@ -319,8 +323,8 @@ def decorator(func): # pragma: no cover
319323
return func
320324

321325
# noreorder
322-
from .message import len_tokens # fmt: skip
323-
from .tools import init_tools # fmt: skip
326+
from ..message import len_tokens # fmt: skip
327+
from ..tools import init_tools # fmt: skip
324328

325329
init_tools()
326330

0 commit comments

Comments (0)