fix: fix typo (#1870)
Co-authored-by: Kevin Lin <kevinlin@Kevins-MacBook-Pro.local>
kl2806 and Kevin Lin authored Oct 11, 2024
1 parent 8fc8c55 · commit 30ff274
Showing 3 changed files with 28 additions and 28 deletions.
2 changes: 1 addition & 1 deletion letta/agent.py
@@ -481,7 +481,7 @@ def _get_ai_reply(
             first_message=first_message,
             # streaming
             stream=stream,
-            stream_inferface=self.interface,
+            stream_interface=self.interface,
             # putting inner thoughts in func args or not
             inner_thoughts_in_kwargs_option=inner_thoughts_in_kwargs_option,
         )
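
The fix is purely a rename, but it has to land everywhere at once: Python matches keyword arguments by exact name, so the misspelled stream_inferface only worked because the function definitions and every call site shared the same typo. A minimal sketch of the failure mode (a hypothetical one-parameter function, not the repo's actual signature):

    def create(stream_interface=None):  # parameter was previously spelled stream_inferface
        return stream_interface

    create(stream_interface="iface")   # OK: caller and definition agree on the spelling
    create(stream_inferface="iface")   # TypeError: unexpected keyword argument

Renaming the parameter in create() without also updating callers such as _get_ai_reply (or vice versa) would therefore fail at call time, which is why all three files change in the same commit.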
26 changes: 13 additions & 13 deletions letta/llm_api/llm_api_tools.py
@@ -115,7 +115,7 @@ def create(
     use_tool_naming: bool = True,
     # streaming?
     stream: bool = False,
-    stream_inferface: Optional[Union[AgentRefreshStreamingInterface, AgentChunkStreamingInterface]] = None,
+    stream_interface: Optional[Union[AgentRefreshStreamingInterface, AgentChunkStreamingInterface]] = None,
     # TODO move to llm_config?
     # if unspecified (None), default to something we've tested
     inner_thoughts_in_kwargs_option: OptionState = OptionState.DEFAULT,
@@ -149,28 +149,28 @@ def create(

     if stream:  # Client requested token streaming
         data.stream = True
-        assert isinstance(stream_inferface, AgentChunkStreamingInterface) or isinstance(
-            stream_inferface, AgentRefreshStreamingInterface
-        ), type(stream_inferface)
+        assert isinstance(stream_interface, AgentChunkStreamingInterface) or isinstance(
+            stream_interface, AgentRefreshStreamingInterface
+        ), type(stream_interface)
         response = openai_chat_completions_process_stream(
             url=llm_config.model_endpoint,  # https://api.openai.com/v1 -> https://api.openai.com/v1/chat/completions
             api_key=model_settings.openai_api_key,
             chat_completion_request=data,
-            stream_inferface=stream_inferface,
+            stream_interface=stream_interface,
         )
     else:  # Client did not request token streaming (expect a blocking backend response)
         data.stream = False
-        if isinstance(stream_inferface, AgentChunkStreamingInterface):
-            stream_inferface.stream_start()
+        if isinstance(stream_interface, AgentChunkStreamingInterface):
+            stream_interface.stream_start()
         try:
             response = openai_chat_completions_request(
                 url=llm_config.model_endpoint,  # https://api.openai.com/v1 -> https://api.openai.com/v1/chat/completions
                 api_key=model_settings.openai_api_key,
                 chat_completion_request=data,
             )
         finally:
-            if isinstance(stream_inferface, AgentChunkStreamingInterface):
-                stream_inferface.stream_end()
+            if isinstance(stream_interface, AgentChunkStreamingInterface):
+                stream_interface.stream_end()

     if inner_thoughts_in_kwargs:
         response = unpack_all_inner_thoughts_from_kwargs(response=response, inner_thoughts_key=INNER_THOUGHTS_KWARG)
@@ -317,8 +317,8 @@ def create(
         # They mention that none of the messages can have names, but it seems to not error out (for now)

         data.stream = False
-        if isinstance(stream_inferface, AgentChunkStreamingInterface):
-            stream_inferface.stream_start()
+        if isinstance(stream_interface, AgentChunkStreamingInterface):
+            stream_interface.stream_start()
         try:
             # groq uses the openai chat completions API, so this component should be reusable
             assert model_settings.groq_api_key is not None, "Groq key is missing"
@@ -328,8 +328,8 @@
                 chat_completion_request=data,
             )
         finally:
-            if isinstance(stream_inferface, AgentChunkStreamingInterface):
-                stream_inferface.stream_end()
+            if isinstance(stream_interface, AgentChunkStreamingInterface):
+                stream_interface.stream_end()

         if inner_thoughts_in_kwargs:
            response = unpack_all_inner_thoughts_from_kwargs(response=response, inner_thoughts_key=INNER_THOUGHTS_KWARG)
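
One detail worth noting in the hunks above: in the blocking (non-streaming) paths, stream_start() is called before the request and stream_end() sits in a finally block, so the interface is shut down even when the request raises. A rough, self-contained sketch of that pattern, using a stand-in class rather than the real AgentChunkStreamingInterface:

    class FakeChunkInterface:
        """Stand-in for AgentChunkStreamingInterface (illustrative only)."""

        def stream_start(self):
            print("stream started")

        def stream_end(self):
            print("stream ended")

    def blocking_request():
        raise RuntimeError("backend unavailable")  # simulate a failing backend call

    stream_interface = FakeChunkInterface()
    stream_interface.stream_start()
    try:
        response = blocking_request()
    except RuntimeError as e:
        print(f"request failed: {e}")
    finally:
        stream_interface.stream_end()  # runs whether or not the request raised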
28 changes: 14 additions & 14 deletions letta/llm_api/openai.py
@@ -157,7 +157,7 @@ def openai_chat_completions_process_stream(
     url: str,
     api_key: str,
     chat_completion_request: ChatCompletionRequest,
-    stream_inferface: Optional[Union[AgentChunkStreamingInterface, AgentRefreshStreamingInterface]] = None,
+    stream_interface: Optional[Union[AgentChunkStreamingInterface, AgentRefreshStreamingInterface]] = None,
     create_message_id: bool = True,
     create_message_datetime: bool = True,
 ) -> ChatCompletionResponse:
@@ -167,7 +167,7 @@ def openai_chat_completions_process_stream(
     on the chunks received from the OpenAI-compatible server POST SSE response.
     """
     assert chat_completion_request.stream == True
-    assert stream_inferface is not None, "Required"
+    assert stream_interface is not None, "Required"

     # Count the prompt tokens
     # TODO move to post-request?
@@ -220,8 +220,8 @@
         ),
     )

-    if stream_inferface:
-        stream_inferface.stream_start()
+    if stream_interface:
+        stream_interface.stream_start()

     n_chunks = 0  # approx == n_tokens
     try:
@@ -230,17 +230,17 @@
         ):
             assert isinstance(chat_completion_chunk, ChatCompletionChunkResponse), type(chat_completion_chunk)

-            if stream_inferface:
-                if isinstance(stream_inferface, AgentChunkStreamingInterface):
-                    stream_inferface.process_chunk(
+            if stream_interface:
+                if isinstance(stream_interface, AgentChunkStreamingInterface):
+                    stream_interface.process_chunk(
                         chat_completion_chunk,
                         message_id=chat_completion_response.id if create_message_id else chat_completion_chunk.id,
                         message_date=chat_completion_response.created if create_message_datetime else chat_completion_chunk.created,
                     )
-                elif isinstance(stream_inferface, AgentRefreshStreamingInterface):
-                    stream_inferface.process_refresh(chat_completion_response)
+                elif isinstance(stream_interface, AgentRefreshStreamingInterface):
+                    stream_interface.process_refresh(chat_completion_response)
                 else:
-                    raise TypeError(stream_inferface)
+                    raise TypeError(stream_interface)

             if chunk_idx == 0:
                 # initialize the choice objects which we will increment with the deltas
@@ -314,13 +314,13 @@ def openai_chat_completions_process_stream(
             n_chunks += 1

     except Exception as e:
-        if stream_inferface:
-            stream_inferface.stream_end()
+        if stream_interface:
+            stream_interface.stream_end()
         print(f"Parsing ChatCompletion stream failed with error:\n{str(e)}")
         raise e
     finally:
-        if stream_inferface:
-            stream_inferface.stream_end()
+        if stream_interface:
+            stream_interface.stream_end()

     # make sure we didn't leave temp stuff in
     assert all([c.finish_reason != TEMP_STREAM_FINISH_REASON for c in chat_completion_response.choices])
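
As the hunks above show, openai_chat_completions_process_stream accepts either kind of streaming interface and dispatches on type: an AgentChunkStreamingInterface receives each delta through process_chunk, an AgentRefreshStreamingInterface receives the accumulated response through process_refresh, and anything else raises TypeError. A hedged sketch of that dispatch with minimal stand-in classes (the real interfaces consume chunk and response objects plus message metadata, not plain strings):

    from typing import List, Union

    class ChunkInterface:
        """Stand-in for AgentChunkStreamingInterface: consumes each new delta."""

        def process_chunk(self, chunk: str) -> None:
            print(f"chunk: {chunk!r}")

    class RefreshInterface:
        """Stand-in for AgentRefreshStreamingInterface: re-renders the response so far."""

        def process_refresh(self, accumulated: str) -> None:
            print(f"so far: {accumulated!r}")

    def feed(iface: Union[ChunkInterface, RefreshInterface], chunks: List[str]) -> None:
        accumulated = ""
        for chunk in chunks:
            accumulated += chunk
            if isinstance(iface, ChunkInterface):
                iface.process_chunk(chunk)          # push only the new delta
            elif isinstance(iface, RefreshInterface):
                iface.process_refresh(accumulated)  # push the whole response so far
            else:
                raise TypeError(iface)

    feed(ChunkInterface(), ["Hel", "lo"])
    feed(RefreshInterface(), ["Hel", "lo"])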
