Commit d4b7e75: use gpt-4o-mini (langchain-ai#357)

vbarda authored Jul 23, 2024
1 parent 0c093fb

Showing 3 changed files with 11 additions and 10 deletions.

MODIFY.md (5 changes: 3 additions & 2 deletions)

````diff
@@ -72,10 +72,11 @@ The LLM is used inside the `/chat` endpoint for generating the final answer, and
 Without any modification, we offer a few LLM providers out of the box:
 
 - `gpt-3.5-turbo-0125` by OpenAI
+- `gpt-4o-mini-2024-07-18` by OpenAI
 - `claude-3-haiku-20240307` by Anthropic
 - `mixtral-8x7b` by Fireworks
 - `gemini-pro` by Google
 - `llama3-70b-8192` by Groq
 - `command` by Cohere
 
 These are all located at the bottom of the [`./backend/chain.py`](./backend/chain.py) file. You have a few options for modifying this:
@@ -89,7 +90,7 @@ First, I'll demonstrate how to replace all options with a single provider, as it
 
 ```python
 llm = ChatOpenAI(
-    model="gpt-3.5-turbo-0125",
+    model="gpt-4o-mini-2024-07-18",
     streaming=True,
     temperature=0,
 ).configurable_alternatives(
````
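
For readers adapting this commit, here is a minimal, self-contained sketch (not part of the diff) of how the `configurable_alternatives` pattern shown above lets a caller swap models per request. It assumes the configurable field id `model_name` and the key `anthropic_claude_3_haiku` used in `backend/graph.py`, and that `OPENAI_API_KEY` and `ANTHROPIC_API_KEY` are set in the environment.

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.runnables import ConfigurableField
from langchain_openai import ChatOpenAI

# Primary model, matching the new default in the diff above.
llm = ChatOpenAI(
    model="gpt-4o-mini-2024-07-18",
    streaming=True,
    temperature=0,
).configurable_alternatives(
    # The id callers use to pick a model at runtime.
    ConfigurableField(id="model_name"),
    default_key="openai_gpt_4o_mini",
    # One registered alternative shown here; the repo registers several more.
    anthropic_claude_3_haiku=ChatAnthropic(
        model="claude-3-haiku-20240307", temperature=0
    ),
)

# Uses the default (gpt-4o-mini):
print(llm.invoke("Say hi.").content)

# Same runnable, routed to Claude 3 Haiku via config:
claude = llm.with_config(configurable={"model_name": "anthropic_claude_3_haiku"})
print(claude.invoke("Say hi.").content)
```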

backend/graph.py (8 changes: 4 additions & 4 deletions)

```diff
@@ -100,7 +100,7 @@
 Standalone Question:"""
 
 
-OPENAI_MODEL_KEY = "openai_gpt_3_5_turbo"
+OPENAI_MODEL_KEY = "openai_gpt_4o_mini"
 ANTHROPIC_MODEL_KEY = "anthropic_claude_3_haiku"
 FIREWORKS_MIXTRAL_MODEL_KEY = "fireworks_mixtral"
 GOOGLE_MODEL_KEY = "google_gemini_pro"
@@ -134,7 +134,7 @@ class AgentState(TypedDict):
     feedback_urls: dict[str, list[str]]
 
 
-gpt_3_5 = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0, streaming=True)
+gpt_4o_mini = ChatOpenAI(model="gpt-4o-mini-2024-07-18", temperature=0, streaming=True)
 claude_3_haiku = ChatAnthropic(
     model="claude-3-haiku-20240307",
     temperature=0,
@@ -164,7 +164,7 @@ class AgentState(TypedDict):
     temperature=0,
     groq_api_key=os.environ.get("GROQ_API_KEY", "not_provided"),
 )
-llm = gpt_3_5.configurable_alternatives(
+llm = gpt_4o_mini.configurable_alternatives(
     # This gives this field an id
     # When configuring the end runnable, we can then use this id to configure this field
     ConfigurableField(id="model_name"),
@@ -178,7 +178,7 @@ class AgentState(TypedDict):
     },
 ).with_fallbacks(
     [
-        gpt_3_5,
+        gpt_4o_mini,
         claude_3_haiku,
         fireworks_mixtral,
         gemini_pro,
```
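
The `.with_fallbacks(...)` list in the last hunk means that if the primary model errors at runtime (rate limit, outage, missing key), the same request is retried against each listed model in order. A minimal sketch of that behavior in isolation, assuming the same model identifiers as the diff and that the provider API keys are set:

```python
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI

gpt_4o_mini = ChatOpenAI(model="gpt-4o-mini-2024-07-18", temperature=0)
claude_3_haiku = ChatAnthropic(model="claude-3-haiku-20240307", temperature=0)

# If the OpenAI call raises, the input is re-run against Claude 3 Haiku.
llm = gpt_4o_mini.with_fallbacks([claude_3_haiku])

print(llm.invoke("Which model answered this?").content)
```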

frontend/app/components/ChatWindow.tsx (8 changes: 4 additions & 4 deletions)

```diff
@@ -32,7 +32,7 @@ import { useLangGraphClient } from "../hooks/useLangGraphClient";
 import { useStreamState } from "../hooks/useStreamState";
 import { useLocalStorage } from "../hooks/useLocalStorage";
 
-const MODEL_TYPES = ["openai_gpt_3_5_turbo", "anthropic_claude_3_haiku"];
+const MODEL_TYPES = ["openai_gpt_4o_mini", "anthropic_claude_3_haiku"];
 
 const defaultLlmValue =
   MODEL_TYPES[Math.floor(Math.random() * MODEL_TYPES.length)];
@@ -59,7 +59,7 @@ export function ChatWindow() {
   const [input, setInput] = useState("");
   const [isLoading, setIsLoading] = useState(false);
   const [llm, setLlm] = useState(
-    searchParams.get("llm") ?? "openai_gpt_3_5_turbo",
+    searchParams.get("llm") ?? "openai_gpt_4o_mini",
   );
   const [llmIsLoading, setLlmIsLoading] = useState(true);
   const [assistantId, setAssistantId] = useState<string>("");
@@ -316,8 +316,8 @@
               }}
               width={"240px"}
             >
-              <option value="openai_gpt_3_5_turbo">
-                GPT-3.5-Turbo
+              <option value="openai_gpt_4o_mini">
+                GPT-4o Mini
               </option>
               <option value="anthropic_claude_3_haiku">
                 Claude 3 Haiku
```
