2 changes: 2 additions & 0 deletions codex-rs/Cargo.lock

Some generated files are not rendered by default.

28 changes: 28 additions & 0 deletions codex-rs/config.md
@@ -142,6 +142,34 @@ Users can specify config values at multiple levels. Order of precedence is as follows:
3. as an entry in `config.toml`, e.g., `model = "o3"`
4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `codex-mini-latest`)

## model_reasoning_effort

If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), `model_reasoning_effort` can be set to:

- `"low"`
- `"medium"` (default)
- `"high"`

To disable reasoning, set `model_reasoning_effort` to `"none"` in your config:

```toml
model_reasoning_effort = "none" # disable reasoning
```
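
For example, to request more thorough reasoning on a supported model, set it to one of the documented levels:

```toml
model_reasoning_effort = "high"
```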

## model_reasoning_summary

If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning summaries are enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries), `model_reasoning_summary` can be set to:

- `"auto"` (default)
- `"concise"`
- `"detailed"`

To disable reasoning summaries, set `model_reasoning_summary` to `"none"` in your config:

```toml
model_reasoning_summary = "none" # disable reasoning summaries
```
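
Likewise, to request fuller reasoning summaries:

```toml
model_reasoning_summary = "detailed"
```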

## sandbox_permissions

List of permissions to grant to the sandbox that Codex uses to execute untrusted commands:
2 changes: 2 additions & 0 deletions codex-rs/core/Cargo.toml
Expand Up @@ -31,6 +31,8 @@ rand = "0.9"
reqwest = { version = "0.12", features = ["json", "stream"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
strum = "0.27.1"
strum_macros = "0.27.1"
thiserror = "2.0.12"
time = { version = "0.3", features = ["formatting", "local-offset", "macros"] }
tokio = { version = "1", features = [
26 changes: 17 additions & 9 deletions codex-rs/core/src/client.rs
Expand Up @@ -18,12 +18,13 @@ use tracing::warn;

use crate::chat_completions::AggregateStreamExt;
use crate::chat_completions::stream_chat_completions;
use crate::client_common::Payload;
use crate::client_common::Prompt;
use crate::client_common::Reasoning;
use crate::client_common::ResponseEvent;
use crate::client_common::ResponseStream;
use crate::client_common::Summary;
use crate::client_common::ResponsesApiRequest;
use crate::client_common::create_reasoning_param_for_request;
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::CodexErr;
use crate::error::EnvVarError;
use crate::error::Result;
@@ -41,14 +42,23 @@ pub struct ModelClient {
model: String,
client: reqwest::Client,
provider: ModelProviderInfo,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
}

impl ModelClient {
pub fn new(model: impl ToString, provider: ModelProviderInfo) -> Self {
pub fn new(
model: impl ToString,
provider: ModelProviderInfo,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
) -> Self {
Self {
model: model.to_string(),
client: reqwest::Client::new(),
provider,
effort,
summary,
}
}

@@ -98,17 +108,15 @@ impl ModelClient {

let full_instructions = prompt.get_full_instructions();
let tools_json = create_tools_json_for_responses_api(prompt, &self.model)?;
let payload = Payload {
let reasoning = create_reasoning_param_for_request(&self.model, self.effort, self.summary);
let payload = ResponsesApiRequest {
model: &self.model,
instructions: &full_instructions,
input: &prompt.input,
tools: &tools_json,
tool_choice: "auto",
parallel_tool_calls: false,
reasoning: Some(Reasoning {
effort: "high",
summary: Some(Summary::Auto),
}),
reasoning,
previous_response_id: prompt.prev_id.clone(),
store: prompt.store,
stream: true,
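To illustrate the new plumbing, here is a minimal sketch of what `create_reasoning_param_for_request` hands back at this call site (the types are crate-internal, so this is illustrative rather than a standalone program; `ReasoningEffortConfig` and `ReasoningSummaryConfig` are the aliases imported above):

```rust
// Sketch: mirrors the call inside ModelClient above.
let reasoning = create_reasoning_param_for_request(
    "o3",
    ReasoningEffortConfig::High,
    ReasoningSummaryConfig::Auto,
);
// For an OpenAI reasoning model this yields Some(Reasoning { .. }),
// which serializes as `"reasoning": {"effort": "high", "summary": "auto"}`.
// For a model such as "gpt-4.1" it yields None, so no reasoning
// parameters are sent with the request.
assert!(reasoning.is_some());
```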
84 changes: 77 additions & 7 deletions codex-rs/core/src/client_common.rs
@@ -1,3 +1,5 @@
use crate::config_types::ReasoningEffort as ReasoningEffortConfig;
use crate::config_types::ReasoningSummary as ReasoningSummaryConfig;
use crate::error::Result;
use crate::models::ResponseItem;
use futures::Stream;
@@ -52,25 +54,59 @@ pub enum ResponseEvent {

#[derive(Debug, Serialize)]
pub(crate) struct Reasoning {
pub(crate) effort: &'static str,
pub(crate) effort: OpenAiReasoningEffort,
#[serde(skip_serializing_if = "Option::is_none")]
pub(crate) summary: Option<Summary>,
pub(crate) summary: Option<OpenAiReasoningSummary>,
}

/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum OpenAiReasoningEffort {
Low,
#[default]
Medium,
High,
}

impl From<ReasoningEffortConfig> for Option<OpenAiReasoningEffort> {
fn from(effort: ReasoningEffortConfig) -> Self {
match effort {
ReasoningEffortConfig::Low => Some(OpenAiReasoningEffort::Low),
ReasoningEffortConfig::Medium => Some(OpenAiReasoningEffort::Medium),
ReasoningEffortConfig::High => Some(OpenAiReasoningEffort::High),
ReasoningEffortConfig::None => None,
}
}
}

/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
#[derive(Debug, Serialize)]
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Default, Clone, Copy)]
#[serde(rename_all = "lowercase")]
pub(crate) enum Summary {
pub(crate) enum OpenAiReasoningSummary {
#[default]
Auto,
#[allow(dead_code)] // Will go away once this is configurable.
Concise,
#[allow(dead_code)] // Will go away once this is configurable.
Detailed,
}

impl From<ReasoningSummaryConfig> for Option<OpenAiReasoningSummary> {
fn from(summary: ReasoningSummaryConfig) -> Self {
match summary {
ReasoningSummaryConfig::Auto => Some(OpenAiReasoningSummary::Auto),
ReasoningSummaryConfig::Concise => Some(OpenAiReasoningSummary::Concise),
ReasoningSummaryConfig::Detailed => Some(OpenAiReasoningSummary::Detailed),
ReasoningSummaryConfig::None => None,
}
}
}
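
A quick sketch of how these conversions behave (illustrative, using the crate-internal types defined above):

```rust
// "none" in config.toml disables reasoning by mapping to None; every
// other level maps to its OpenAI request counterpart.
let effort: Option<OpenAiReasoningEffort> = ReasoningEffortConfig::None.into();
assert!(effort.is_none());

let summary: Option<OpenAiReasoningSummary> = ReasoningSummaryConfig::Concise.into();
assert!(matches!(summary, Some(OpenAiReasoningSummary::Concise)));
```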

/// Request object that is serialized as JSON and POST'ed when using the
/// Responses API.
#[derive(Debug, Serialize)]
pub(crate) struct Payload<'a> {
pub(crate) struct ResponsesApiRequest<'a> {
pub(crate) model: &'a str,
pub(crate) instructions: &'a str,
// TODO(mbolin): ResponseItem::Other should not be serialized. Currently,
@@ -88,6 +124,40 @@ pub(crate) struct Payload<'a> {
pub(crate) stream: bool,
}

pub(crate) fn create_reasoning_param_for_request(
model: &str,
effort: ReasoningEffortConfig,
summary: ReasoningSummaryConfig,
) -> Option<Reasoning> {
let effort: Option<OpenAiReasoningEffort> = effort.into();
let effort = effort?;

if model_supports_reasoning_summaries(model) {
Some(Reasoning {
effort,
summary: summary.into(),
})
} else {
None
}
}

pub fn model_supports_reasoning_summaries(model: &str) -> bool {
// Currently, we hardcode this rule to decide whether to enable reasoning.
// We expect reasoning to apply only to OpenAI models, but we do not want
// users to have to mess with their config to disable reasoning for models
// that do not support it, such as `gpt-4.1`.
//
// Though if a user is using Codex with non-OpenAI models that, say, happen
// to start with "o", then they can set `model_reasoning_effort = "none"` in
// config.toml to disable reasoning.
//
// Ultimately, this should also be configurable in config.toml, but we
// need to have defaults that "just work." Perhaps we could have a
// "reasoning models pattern" as part of ModelProviderInfo?
model.starts_with("o") || model.starts_with("codex")
}
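
The prefix rule is easiest to see with a few examples:

```rust
assert!(model_supports_reasoning_summaries("o3"));
assert!(model_supports_reasoning_summaries("o4-mini"));
assert!(model_supports_reasoning_summaries("codex-mini-latest"));
// Does not start with "o" or "codex", so no reasoning params are sent.
assert!(!model_supports_reasoning_summaries("gpt-4.1"));
```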

pub(crate) struct ResponseStream {
pub(crate) rx_event: mpsc::Receiver<Result<ResponseEvent>>,
}
11 changes: 10 additions & 1 deletion codex-rs/core/src/codex.rs
@@ -108,6 +108,8 @@ impl Codex {
let configure_session = Op::ConfigureSession {
provider: config.model_provider.clone(),
model: config.model.clone(),
model_reasoning_effort: config.model_reasoning_effort,
model_reasoning_summary: config.model_reasoning_summary,
instructions,
approval_policy: config.approval_policy,
sandbox_policy: config.sandbox_policy.clone(),
@@ -554,6 +556,8 @@ async fn submission_loop(
Op::ConfigureSession {
provider,
model,
model_reasoning_effort,
model_reasoning_summary,
instructions,
approval_policy,
sandbox_policy,
@@ -575,7 +579,12 @@
return;
}

let client = ModelClient::new(model.clone(), provider.clone());
let client = ModelClient::new(
model.clone(),
provider.clone(),
model_reasoning_effort,
model_reasoning_summary,
);

// abort any current running session and clone its state
let retain_zdr_transcript =
21 changes: 21 additions & 0 deletions codex-rs/core/src/config.rs
@@ -1,6 +1,8 @@
use crate::config_profile::ConfigProfile;
use crate::config_types::History;
use crate::config_types::McpServerConfig;
use crate::config_types::ReasoningEffort;
use crate::config_types::ReasoningSummary;
use crate::config_types::ShellEnvironmentPolicy;
use crate::config_types::ShellEnvironmentPolicyToml;
use crate::config_types::Tui;
@@ -112,6 +114,14 @@ pub struct Config {
///
/// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
pub codex_linux_sandbox_exe: Option<PathBuf>,

/// If not "none", the value to use for `reasoning.effort` when making a
/// request using the Responses API.
pub model_reasoning_effort: ReasoningEffort,

/// If not "none", the value to use for `reasoning.summary` when making a
/// request using the Responses API.
pub model_reasoning_summary: ReasoningSummary,
}

impl Config {
@@ -281,6 +291,9 @@ pub struct ConfigToml {
/// When set to `true`, `AgentReasoning` events will be hidden from the
/// UI/output. Defaults to `false`.
pub hide_agent_reasoning: Option<bool>,

pub model_reasoning_effort: Option<ReasoningEffort>,
pub model_reasoning_summary: Option<ReasoningSummary>,
}

fn deserialize_sandbox_permissions<'de, D>(
@@ -444,6 +457,8 @@ impl Config {
codex_linux_sandbox_exe,

hide_agent_reasoning: cfg.hide_agent_reasoning.unwrap_or(false),
model_reasoning_effort: cfg.model_reasoning_effort.unwrap_or_default(),
model_reasoning_summary: cfg.model_reasoning_summary.unwrap_or_default(),
};
Ok(config)
}
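
With neither key present in `config.toml`, the `unwrap_or_default()` calls above resolve to the documented defaults, as the tests below pin down:

```rust
// Defaults declared via #[default] in config_types.rs.
assert_eq!(ReasoningEffort::default(), ReasoningEffort::Medium);
assert_eq!(ReasoningSummary::default(), ReasoningSummary::Auto);
```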
@@ -786,6 +801,8 @@ disable_response_storage = true
tui: Tui::default(),
codex_linux_sandbox_exe: None,
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
},
o3_profile_config
);
@@ -826,6 +843,8 @@
tui: Tui::default(),
codex_linux_sandbox_exe: None,
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
};

assert_eq!(expected_gpt3_profile_config, gpt3_profile_config);
@@ -881,6 +900,8 @@
tui: Tui::default(),
codex_linux_sandbox_exe: None,
hide_agent_reasoning: false,
model_reasoning_effort: ReasoningEffort::default(),
model_reasoning_summary: ReasoningSummary::default(),
};

assert_eq!(expected_zdr_profile_config, zdr_profile_config);
30 changes: 30 additions & 0 deletions codex-rs/core/src/config_types.rs
@@ -4,9 +4,11 @@
// definitions that do not contain business logic.

use std::collections::HashMap;
use strum_macros::Display;
use wildmatch::WildMatchPattern;

use serde::Deserialize;
use serde::Serialize;

#[derive(Deserialize, Debug, Clone, PartialEq)]
pub struct McpServerConfig {
@@ -175,3 +177,31 @@ impl From<ShellEnvironmentPolicyToml> for ShellEnvironmentPolicy {
}
}
}

/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
Low,
#[default]
Medium,
High,
/// Option to disable reasoning.
None,
}

/// A summary of the reasoning performed by the model. This can be useful for
/// debugging and understanding the model's reasoning process.
/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningSummary {
#[default]
Auto,
Concise,
Detailed,
/// Option to disable reasoning summaries.
None,
}
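
Because serde and strum both render variants in lowercase, the strings accepted in `config.toml`, the values serialized into requests, and the `Display` output all agree; a small illustrative check:

```rust
assert_eq!(ReasoningEffort::High.to_string(), "high");
assert_eq!(ReasoningSummary::None.to_string(), "none");
```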
2 changes: 2 additions & 0 deletions codex-rs/core/src/lib.rs
@@ -34,3 +34,5 @@ mod rollout;
mod safety;
mod user_notification;
pub mod util;

pub use client_common::model_supports_reasoning_summaries;