Rename additional context for llm response to program execution context
sabaimran committed Dec 5, 2024
1 parent 886fe4a commit 8953ac0
Showing 8 changed files with 21 additions and 24 deletions.
4 changes: 2 additions & 2 deletions src/khoj/processor/conversation/anthropic/anthropic_chat.py
@@ -160,7 +160,7 @@ def converse_anthropic(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: Optional[str] = None,
-    additional_context_for_llm_response: Optional[List[str]] = None,
+    program_execution_context: Optional[List[str]] = None,
     tracer: dict = {},
 ):
     """
@@ -224,7 +224,7 @@ def converse_anthropic(
         generated_excalidraw_diagram=generated_excalidraw_diagram,
         generated_files=generated_files,
         generated_images=generated_images,
-        additional_context_for_llm_response=additional_context_for_llm_response,
+        program_execution_context=program_execution_context,
     )

     messages, system_prompt = format_messages_for_anthropic(messages, system_prompt)
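Since this is a keyword-only rename, each call site needs the matching swap. Below is a minimal, self-contained sketch of the new call shape; the stub mirrors only the parameters visible in this hunk, with simplified types and an illustrative argument value:

from typing import List, Optional

def converse_anthropic(
    generated_images: Optional[List[str]] = None,
    generated_files: Optional[List[str]] = None,  # List[FileAttachment] in the real code
    generated_excalidraw_diagram: Optional[str] = None,
    program_execution_context: Optional[List[str]] = None,  # renamed from additional_context_for_llm_response
    tracer: dict = {},
):
    """Stub standing in for the real converse_anthropic."""

# Call sites that passed additional_context_for_llm_response=... now pass:
converse_anthropic(program_execution_context=["Failed to run code"])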
4 changes: 2 additions & 2 deletions src/khoj/processor/conversation/google/gemini_chat.py
@@ -170,7 +170,7 @@ def converse_gemini(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: Optional[str] = None,
-    additional_context_for_llm_response: List[str] = None,
+    program_execution_context: List[str] = None,
     tracer={},
 ):
     """
@@ -235,7 +235,7 @@ def converse_gemini(
         generated_excalidraw_diagram=generated_excalidraw_diagram,
         generated_files=generated_files,
         generated_images=generated_images,
-        additional_context_for_llm_response=additional_context_for_llm_response,
+        program_execution_context=program_execution_context,
     )

     messages, system_prompt = format_messages_for_gemini(messages, system_prompt)
2 changes: 1 addition & 1 deletion src/khoj/processor/conversation/offline/chat_model.py
@@ -234,7 +234,7 @@ def converse_offline(
         model_type=ChatModelOptions.ModelType.OFFLINE,
         query_files=query_files,
         generated_files=generated_files,
-        additional_context_for_llm_response=additional_context,
+        program_execution_context=additional_context,
     )

     logger.debug(f"Conversation Context for {model}: {messages_to_print(messages)}")
4 changes: 2 additions & 2 deletions src/khoj/processor/conversation/openai/gpt.py
@@ -160,7 +160,7 @@ def converse(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: Optional[str] = None,
-    additional_context_for_llm_response: List[str] = None,
+    program_execution_context: List[str] = None,
     tracer: dict = {},
 ):
     """
@@ -226,7 +226,7 @@ def converse(
         generated_excalidraw_diagram=generated_excalidraw_diagram,
         generated_files=generated_files,
         generated_images=generated_images,
-        additional_context_for_llm_response=additional_context_for_llm_response,
+        program_execution_context=program_execution_context,
     )
     logger.debug(f"Conversation Context for GPT: {messages_to_print(messages)}")

5 changes: 2 additions & 3 deletions src/khoj/processor/conversation/prompts.py
@@ -1043,12 +1043,11 @@

 additional_program_context = PromptTemplate.from_template(
     """
-Here's some additional context about what happened while I was executing this query:
+Here are some additional results from the query execution:
 {context}
-""".strip()
+""".strip()
 )

-
 personality_prompt_safety_expert_lax = PromptTemplate.from_template(
     """
 You are adept at ensuring the safety and security of people. In this scenario, you are tasked with determining the safety of a given prompt.
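For reference, this is how the updated template renders. A minimal sketch assuming langchain's PromptTemplate, which prompts.py already uses:

from langchain.prompts import PromptTemplate

additional_program_context = PromptTemplate.from_template(
    """
Here are some additional results from the query execution:
{context}
""".strip()
)

# utils.py joins the context list with newlines before formatting (see below).
print(additional_program_context.format(context="Failed to run code"))
# Here are some additional results from the query execution:
# Failed to run code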
8 changes: 3 additions & 5 deletions src/khoj/processor/conversation/utils.py
@@ -383,7 +383,7 @@ def generate_chatml_messages_with_context(
     generated_images: Optional[list[str]] = None,
     generated_files: List[FileAttachment] = None,
     generated_excalidraw_diagram: str = None,
-    additional_context_for_llm_response: List[str] = [],
+    program_execution_context: List[str] = [],
 ):
     """Generate chat messages with appropriate context from previous conversation to send to the chat model"""
     # Set max prompt size from user config or based on pre-configured for model and machine specs
@@ -485,12 +485,10 @@ def generate_chatml_messages_with_context(
     if generated_excalidraw_diagram:
         messages.append(ChatMessage(content=prompts.generated_diagram_attachment.format(), role="assistant"))

-    if additional_context_for_llm_response:
+    if program_execution_context:
         messages.append(
             ChatMessage(
-                content=prompts.additional_program_context.format(
-                    context="\n".join(additional_context_for_llm_response)
-                ),
+                content=prompts.additional_program_context.format(context="\n".join(program_execution_context)),
                 role="assistant",
             )
         )
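Sketched end to end, the net effect of this hunk: the collected notes are joined, rendered through the template above, and appended to the chat history as an assistant-role message. This assumes langchain's PromptTemplate and ChatMessage, which these modules already use:

from langchain.prompts import PromptTemplate
from langchain.schema import ChatMessage

additional_program_context = PromptTemplate.from_template(
    "Here are some additional results from the query execution:\n{context}"
)

# Failure notes collected while executing the query (see api_chat.py below).
program_execution_context = ["Failed to run code", "Failed to generate image with a sunset"]

message = ChatMessage(
    content=additional_program_context.format(context="\n".join(program_execution_context)),
    role="assistant",
)
print(message.content)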
10 changes: 5 additions & 5 deletions src/khoj/routers/api_chat.py
@@ -774,7 +774,7 @@ def collect_telemetry():
        generated_images: List[str] = []
        generated_files: List[FileAttachment] = []
        generated_excalidraw_diagram: str = None
-       additional_context_for_llm_response: List[str] = []
+       program_execution_context: List[str] = []

        if conversation_commands == [ConversationCommand.Default] or is_automated_task:
            chosen_io = await aget_data_sources_and_output_format(
@@ -1080,7 +1080,7 @@ def collect_telemetry():
                async for result in send_event(ChatEvent.STATUS, f"**Ran code snippets**: {len(code_results)}"):
                    yield result
            except ValueError as e:
-               additional_context_for_llm_response.append(f"Failed to run code")
+               program_execution_context.append(f"Failed to run code")
                logger.warning(
                    f"Failed to use code tool: {e}. Attempting to respond without code results",
                    exc_info=True,
@@ -1122,7 +1122,7 @@ def collect_telemetry():

            inferred_queries.append(improved_image_prompt)
            if generated_image is None or status_code != 200:
-               additional_context_for_llm_response.append(f"Failed to generate image with {improved_image_prompt}")
+               program_execution_context.append(f"Failed to generate image with {improved_image_prompt}")
                async for result in send_event(ChatEvent.STATUS, f"Failed to generate image"):
                    yield result
            else:
@@ -1175,7 +1175,7 @@ def collect_telemetry():
                    yield result
            else:
                error_message = "Failed to generate diagram. Please try again later."
-               additional_context_for_llm_response.append(
+               program_execution_context.append(
                    f"AI attempted to programmatically generate a diagram but failed due to a program issue. Generally, it is able to do so, but encountered a system issue this time. AI can suggest text description or rendering of the diagram or user can try again with a simpler prompt."
                )

@@ -1208,7 +1208,7 @@ def collect_telemetry():
            generated_images,
            generated_files,
            generated_excalidraw_diagram,
-           additional_context_for_llm_response,
+           program_execution_context,
            tracer,
        )

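The four hunks above share one pattern: when a tool fails, the handler logs and appends a human-readable note to program_execution_context instead of aborting, so the chat model can acknowledge the failure in its reply. A condensed, self-contained sketch; run_code_tool is a hypothetical stand-in:

program_execution_context: list[str] = []

def run_code_tool(query: str) -> str:
    # Hypothetical stand-in for the real code tool; simulates a failure.
    raise ValueError("sandbox unavailable")

try:
    code_results = run_code_tool("plot my sleep data")
except ValueError:
    program_execution_context.append("Failed to run code")

# The list is then handed to generate_chat_response() alongside the other results.
print(program_execution_context)  # ['Failed to run code']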
8 changes: 4 additions & 4 deletions src/khoj/routers/helpers.py
@@ -1188,7 +1188,7 @@ def generate_chat_response(
     generated_images: List[str] = None,
     raw_generated_files: List[FileAttachment] = [],
     generated_excalidraw_diagram: str = None,
-    additional_context_for_llm_response: List[str] = [],
+    program_execution_context: List[str] = [],
     tracer: dict = {},
 ) -> Tuple[Union[ThreadedGenerator, Iterator[str]], Dict[str, str]]:
     # Initialize Variables
@@ -1280,7 +1280,7 @@ def generate_chat_response(
                generated_files=raw_generated_files,
                generated_images=generated_images,
                generated_excalidraw_diagram=generated_excalidraw_diagram,
-               additional_context_for_llm_response=additional_context_for_llm_response,
+               program_execution_context=program_execution_context,
                tracer=tracer,
            )

@@ -1307,7 +1307,7 @@ def generate_chat_response(
                generated_files=raw_generated_files,
                generated_images=generated_images,
                generated_excalidraw_diagram=generated_excalidraw_diagram,
-               additional_context_for_llm_response=additional_context_for_llm_response,
+               program_execution_context=program_execution_context,
                tracer=tracer,
            )
        elif conversation_config.model_type == ChatModelOptions.ModelType.GOOGLE:
@@ -1333,7 +1333,7 @@ def generate_chat_response(
                generated_files=raw_generated_files,
                generated_images=generated_images,
                generated_excalidraw_diagram=generated_excalidraw_diagram,
-               additional_context_for_llm_response=additional_context_for_llm_response,
+               program_execution_context=program_execution_context,
                tracer=tracer,
            )

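Each model branch in generate_chat_response now forwards the same renamed keyword. A condensed sketch of that dispatch, with stubs standing in for the real converse, converse_anthropic, and converse_gemini:

from typing import Callable, Dict, List

def converse(**kwargs) -> str: return "openai response"
def converse_anthropic(**kwargs) -> str: return "anthropic response"
def converse_gemini(**kwargs) -> str: return "google response"

BACKENDS: Dict[str, Callable[..., str]] = {
    "openai": converse,
    "anthropic": converse_anthropic,
    "google": converse_gemini,
}

def generate_chat_response(model_type: str, program_execution_context: List[str]) -> str:
    # Every branch passes program_execution_context through to its backend unchanged.
    return BACKENDS[model_type](program_execution_context=program_execution_context)

print(generate_chat_response("anthropic", ["Failed to generate image"]))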
