
Commit 25ba941

Merge pull request #1561 from KennyDizi/main
Support reasoning effort via configuration
2 parents c15ed62 + d097266

5 files changed: +33 -2 lines

docs/docs/usage-guide/changing_a_model.md (+9)

```diff
@@ -204,3 +204,12 @@ custom_model_max_tokens= ...
 
 4. Most reasoning models do not support chat-style inputs (`system` and `user` messages) or temperature settings.
 To bypass chat templates and temperature controls, set `config.custom_reasoning_model = true` in your configuration file.
+
+## Dedicated parameters
+
+### OpenAI models
+
+[config]
+reasoning_effort = "medium" # "low", "medium", "high"
+
+With OpenAI models that support reasoning effort (e.g. o3-mini), you can specify the reasoning effort via the `config` section. The default value is `medium`; you can change it to `high` or `low` based on your usage.
```
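To make the behaviour concrete, here is a minimal sketch of how such a setting ends up on the underlying litellm call (the inline `settings` dict and the `review_with_o3_mini` helper are illustrative stand-ins, not PR-Agent code):

```python
# Minimal sketch: forward a configured reasoning effort to a supported OpenAI
# reasoning model through litellm. Assumes litellm is installed and
# OPENAI_API_KEY is set; "settings" stands in for the [config] section.
from litellm import acompletion

settings = {"reasoning_effort": "medium"}  # "low", "medium", or "high"

async def review_with_o3_mini(prompt: str):
    return await acompletion(
        model="o3-mini",
        messages=[{"role": "user", "content": prompt}],
        # Only sent for models that accept it (e.g. o3-mini).
        reasoning_effort=settings["reasoning_effort"],
    )
```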

pr_agent/algo/__init__.py (+5)

```diff
@@ -104,3 +104,8 @@
     "o3-mini-2025-01-31",
     "o1-preview"
 ]
+
+SUPPORT_REASONING_EFFORT_MODELS = [
+    "o3-mini",
+    "o3-mini-2025-01-31"
+]
```

pr_agent/algo/ai_handlers/litellm_ai_handler.py (+13 -2)

```diff
@@ -6,9 +6,9 @@
 from litellm import acompletion
 from tenacity import retry, retry_if_exception_type, stop_after_attempt
 
-from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, USER_MESSAGE_ONLY_MODELS
+from pr_agent.algo import NO_SUPPORT_TEMPERATURE_MODELS, SUPPORT_REASONING_EFFORT_MODELS, USER_MESSAGE_ONLY_MODELS
 from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
-from pr_agent.algo.utils import get_version
+from pr_agent.algo.utils import ReasoningEffort, get_version
 from pr_agent.config_loader import get_settings
 from pr_agent.log import get_logger
 
@@ -101,6 +101,9 @@ def __init__(self):
         # Model that doesn't support temperature argument
         self.no_support_temperature_models = NO_SUPPORT_TEMPERATURE_MODELS
 
+        # Models that support reasoning effort
+        self.support_reasoning_models = SUPPORT_REASONING_EFFORT_MODELS
+
     def prepare_logs(self, response, system, user, resp, finish_reason):
         response_log = response.dict().copy()
         response_log['system'] = system
@@ -228,8 +231,16 @@ async def chat_completion(self, model: str, system: str, user: str, temperature:
 
         # Add temperature only if model supports it
         if model not in self.no_support_temperature_models and not get_settings().config.custom_reasoning_model:
+            get_logger().info(f"Adding temperature with value {temperature} to model {model}.")
             kwargs["temperature"] = temperature
 
+        # Add reasoning_effort if model supports it
+        if model in self.support_reasoning_models:
+            supported_reasoning_efforts = [ReasoningEffort.HIGH.value, ReasoningEffort.MEDIUM.value, ReasoningEffort.LOW.value]
+            reasoning_effort = get_settings().config.reasoning_effort if get_settings().config.reasoning_effort in supported_reasoning_efforts else ReasoningEffort.MEDIUM.value
+            get_logger().info(f"Adding reasoning_effort with value {reasoning_effort} to model {model}.")
+            kwargs["reasoning_effort"] = reasoning_effort
+
         if get_settings().litellm.get("enable_callbacks", False):
             kwargs = self.add_litellm_callbacks(kwargs)
 
```

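Read in isolation, the new block is a validate-or-default step: use the configured value if it is one of the allowed efforts, otherwise fall back to `medium`, and only for models on the support list. A standalone sketch of that logic (a paraphrase for illustration; `resolve_reasoning_effort` is a hypothetical helper, not repository code):

```python
from enum import Enum
from typing import Optional

class ReasoningEffort(str, Enum):
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"

SUPPORT_REASONING_EFFORT_MODELS = ["o3-mini", "o3-mini-2025-01-31"]

def resolve_reasoning_effort(model: str, configured: Optional[str]) -> Optional[str]:
    """Return the reasoning_effort to send, or None if the model does not support it."""
    if model not in SUPPORT_REASONING_EFFORT_MODELS:
        return None
    allowed = {effort.value for effort in ReasoningEffort}
    # Invalid or missing values fall back to "medium", mirroring the handler's default.
    return configured if configured in allowed else ReasoningEffort.MEDIUM.value

# resolve_reasoning_effort("o3-mini", "high")  -> "high"
# resolve_reasoning_effort("o3-mini", "ultra") -> "medium"  (invalid value falls back)
# resolve_reasoning_effort("gpt-4o", "high")   -> None      (model not on the support list)
```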
pr_agent/algo/utils.py (+5)

```diff
@@ -50,6 +50,11 @@ class PRReviewHeader(str, Enum):
     REGULAR = "## PR Reviewer Guide"
     INCREMENTAL = "## Incremental PR Reviewer Guide"
 
+class ReasoningEffort(str, Enum):
+    HIGH = "high"
+    MEDIUM = "medium"
+    LOW = "low"
+
 
 class PRDescriptionHeader(str, Enum):
     CHANGES_WALKTHROUGH = "### **Changes walkthrough** 📝"
```
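A small aside on the str-based enum (illustrative, not part of the diff): because `ReasoningEffort` subclasses `str`, its members compare equal to the plain strings that appear in configuration.toml, so no explicit conversion is needed.

```python
from pr_agent.algo.utils import ReasoningEffort

assert ReasoningEffort.MEDIUM == "medium"                # str mixin: equal to the raw config string
assert ReasoningEffort("high") is ReasoningEffort.HIGH   # lookup by value also works
```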

pr_agent/settings/configuration.toml (+1)

```diff
@@ -48,6 +48,7 @@ ignore_pr_authors = [] # authors to ignore from PR agent when an PR is created
 #
 is_auto_command = false # will be auto-set to true if the command is triggered by an automation
 enable_ai_metadata = false # will enable adding ai metadata
+reasoning_effort = "medium" # "low", "medium", "high"
 # auto approval 💎
 enable_auto_approval=false # Set to true to enable auto-approval of PRs under certain conditions
 auto_approve_for_low_review_effort=-1 # -1 to disable, [1-5] to set the threshold for auto-approval
```
