-
Notifications
You must be signed in to change notification settings - Fork 5.5k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
0f27029
commit 6d1ab98
Showing
4 changed files
with
81 additions
and
2 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,27 @@ | ||
llm:
  api_type: "openai"  # or azure / ollama / groq etc.
  base_url: "YOUR_gpt-3.5-turbo_BASE_URL"
  api_key: "YOUR_gpt-3.5-turbo_API_KEY"
  model: "gpt-3.5-turbo"  # or gpt-4-turbo
  # proxy: "YOUR_gpt-3.5-turbo_PROXY"  # for LLM API requests
  # timeout: 600  # Optional. If set to 0, default value is 300.
  # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
  pricing_plan: ""  # Optional. Use for Azure LLM when its model name is not the same as OpenAI's

# Named per-model overrides; each entry mirrors the top-level `llm` section.
models:
  "YOUR_MODEL_NAME_1":  # model: "gpt-4-turbo" # or gpt-3.5-turbo
    api_type: "openai"  # or azure / ollama / groq etc.
    base_url: "YOUR_MODEL_1_BASE_URL"
    api_key: "YOUR_MODEL_1_API_KEY"
    # proxy: "YOUR_MODEL_1_PROXY"  # for LLM API requests
    # timeout: 600  # Optional. If set to 0, default value is 300.
    # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
    pricing_plan: ""  # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
  "YOUR_MODEL_NAME_2":  # model: "gpt-4-turbo" # or gpt-3.5-turbo
    api_type: "openai"  # or azure / ollama / groq etc.
    base_url: "YOUR_MODEL_2_BASE_URL"
    api_key: "YOUR_MODEL_2_API_KEY"
    proxy: "YOUR_MODEL_2_PROXY"  # for LLM API requests
    # timeout: 600  # Optional. If set to 0, default value is 300.
    # Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
    pricing_plan: ""  # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,53 @@ | ||
import pytest | ||
|
||
|
||
from metagpt.config2 import Config | ||
from metagpt.roles.role import Role, RoleReactMode | ||
from metagpt.actions.action import Action | ||
from metagpt.context import Context | ||
from metagpt.provider.llm_provider_registry import create_llm_instance | ||
from metagpt.const import TEST_DATA_PATH | ||
|
||
def test_set_llm():
    """An Action's LLM must track its config, survive being attached to a
    Role, and honor a named entry from a multi-LLM config file."""
    # Two default configs that differ only in the selected model.
    base_cfg = Config.default()
    alt_cfg = Config.default()
    alt_cfg.llm.model = "gpt-3.5-turbo"

    ctx = Context(config=base_cfg)
    action = Action(context=ctx)
    # A fresh action picks up the model from its context's config.
    assert action.config.llm.model == base_cfg.llm.model

    # Assigning an LLM instance directly takes effect immediately.
    alt_llm = create_llm_instance(alt_cfg.llm)
    action.llm = alt_llm
    assert action.llm.model == alt_llm.model

    # Attaching the action to a role must not clobber the explicit LLM.
    owner = Role(context=ctx)
    owner.set_actions([action])
    assert action.llm.model == alt_llm.model

    # Swapping the action's config before attachment re-derives its LLM.
    owner2 = Role(context=ctx)
    action2 = Action(context=ctx)
    assert action2.config.llm.model == base_cfg.llm.model
    action2.config = alt_cfg
    owner2.set_actions([action2])
    assert action2.llm.model == alt_llm.model

    # Multi-LLM config: selecting a named model entry yields an LLM that
    # differs from the top-level `llm` section of the same file.
    multi_cfg_path = TEST_DATA_PATH / "config/config2_multi_llm.yaml"
    multi_cfg = Config(**Config.read_yaml(multi_cfg_path))
    multi_ctx = Context(config=multi_cfg)
    owner3 = Role(context=multi_ctx)
    action3 = Action(context=multi_ctx, llm_name_or_type="YOUR_MODEL_NAME_1")
    assert action3.config.llm.model == "gpt-3.5-turbo"
    assert action3.llm.model == "gpt-4-turbo"
    owner3.set_actions([action3])
    # Attachment leaves both the config-level and instance-level model alone.
    assert action3.config.llm.model == "gpt-3.5-turbo"
    assert action3.llm.model == "gpt-4-turbo"
|
||
|
||
|
||
|
||
|
||
|