diff --git a/codex-rs/core/src/models_manager/manager.rs b/codex-rs/core/src/models_manager/manager.rs
index a751847055f..e35b942c082 100644
--- a/codex-rs/core/src/models_manager/manager.rs
+++ b/codex-rs/core/src/models_manager/manager.rs
@@ -29,7 +29,7 @@ use crate::models_manager::model_presets::builtin_model_presets;
 const MODEL_CACHE_FILE: &str = "models_cache.json";
 const DEFAULT_MODEL_CACHE_TTL: Duration = Duration::from_secs(300);
 const MODELS_REFRESH_TIMEOUT: Duration = Duration::from_secs(5);
-const OPENAI_DEFAULT_API_MODEL: &str = "gpt-5.1-codex-max";
+const OPENAI_DEFAULT_API_MODEL: &str = "gpt-5.2-codex";
 const OPENAI_DEFAULT_CHATGPT_MODEL: &str = "gpt-5.2-codex";
 const CODEX_AUTO_BALANCED_MODEL: &str = "codex-auto-balanced";

diff --git a/codex-rs/core/src/models_manager/model_presets.rs b/codex-rs/core/src/models_manager/model_presets.rs
index 003995c8959..e60b229ec7b 100644
--- a/codex-rs/core/src/models_manager/model_presets.rs
+++ b/codex-rs/core/src/models_manager/model_presets.rs
@@ -39,7 +39,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
             is_default: true,
             upgrade: None,
             show_in_picker: true,
-            supported_in_api: false,
+            supported_in_api: true,
         },
         ModelPreset {
             id: "gpt-5.1-codex-max".to_string(),
diff --git a/codex-rs/core/tests/suite/list_models.rs b/codex-rs/core/tests/suite/list_models.rs
index dc9d25ddbf7..5f21e94537b 100644
--- a/codex-rs/core/tests/suite/list_models.rs
+++ b/codex-rs/core/tests/suite/list_models.rs
@@ -49,6 +49,7 @@ async fn list_models_returns_chatgpt_models() -> Result<()> {

 fn expected_models_for_api_key() -> Vec<ModelPreset> {
     vec![
+        gpt_52_codex(),
         gpt_5_1_codex_max(),
         gpt_5_1_codex_mini(),
         gpt_5_2(),
@@ -108,7 +109,7 @@ fn gpt_52_codex() -> ModelPreset {
         is_default: true,
         upgrade: None,
         show_in_picker: true,
-        supported_in_api: false,
+        supported_in_api: true,
     }
 }

@@ -137,7 +138,7 @@ fn gpt_5_1_codex_max() -> ModelPreset {
                 "Extra high reasoning depth for complex problems",
             ),
         ],
-        is_default: true,
+        is_default: false,
         upgrade: Some(gpt52_codex_upgrade()),
         show_in_picker: true,
         supported_in_api: true,
diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap
index 3bcf7746c1a..905925709e3 100644
--- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap
+++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__model_selection_popup.snap
@@ -5,11 +5,12 @@ expression: popup
 Select Model and Effort
 Access legacy models by running codex -m or in your config.toml

-› 1. gpt-5.1-codex-max (default)    Codex-optimized flagship for deep and fast
-                                    reasoning.
-  2. gpt-5.1-codex-mini             Optimized for codex. Cheaper, faster, but
-                                    less capable.
-  3. gpt-5.2                        Latest frontier model with improvements
-                                    across knowledge, reasoning and coding
+› 1. gpt-5.2-codex (default)   Latest frontier agentic coding model.
+  2. gpt-5.1-codex-max         Codex-optimized flagship for deep and fast
+                               reasoning.
+  3. gpt-5.1-codex-mini        Optimized for codex. Cheaper, faster, but less
+                               capable.
+  4. gpt-5.2                   Latest frontier model with improvements across
+                               knowledge, reasoning and coding

 Press enter to select reasoning effort, or esc to dismiss.
diff --git a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap
index dbe1f2e7cc4..190f9c29332 100644
--- a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap
+++ b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__model_selection_popup.snap
@@ -5,11 +5,12 @@ expression: popup
 Select Model and Effort
 Access legacy models by running codex -m or in your config.toml

-› 1. gpt-5.1-codex-max (default)    Codex-optimized flagship for deep and fast
-                                    reasoning.
-  2. gpt-5.1-codex-mini             Optimized for codex. Cheaper, faster, but
-                                    less capable.
-  3. gpt-5.2                        Latest frontier model with improvements
-                                    across knowledge, reasoning and coding
+› 1. gpt-5.2-codex (default)   Latest frontier agentic coding model.
+  2. gpt-5.1-codex-max         Codex-optimized flagship for deep and fast
+                               reasoning.
+  3. gpt-5.1-codex-mini        Optimized for codex. Cheaper, faster, but less
+                               capable.
+  4. gpt-5.2                   Latest frontier model with improvements across
+                               knowledge, reasoning and coding

 Press enter to select reasoning effort, or esc to dismiss.
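
Note: the sketch below is a hypothetical, self-contained illustration of the intent of this patch, not the real codex-rs code. It models only the ModelPreset fields that appear in the hunks above (id, is_default, show_in_picker, supported_in_api) and assumes the API-key listing filters on supported_in_api, which is what the change to expected_models_for_api_key() suggests.

    // Hypothetical, simplified sketch -- not the actual codex-rs definitions.
    #[derive(Debug)]
    struct ModelPreset {
        id: String,
        is_default: bool,
        show_in_picker: bool,
        supported_in_api: bool,
    }

    fn builtin_model_presets() -> Vec<ModelPreset> {
        vec![
            ModelPreset {
                id: "gpt-5.2-codex".to_string(),
                is_default: true,       // new default for both auth modes
                show_in_picker: true,
                supported_in_api: true, // flipped from false in this patch
            },
            ModelPreset {
                id: "gpt-5.1-codex-max".to_string(),
                is_default: false,      // no longer the default
                show_in_picker: true,
                supported_in_api: true,
            },
            ModelPreset {
                id: "gpt-5.1-codex-mini".to_string(),
                is_default: false,
                show_in_picker: true,
                supported_in_api: true,
            },
            ModelPreset {
                id: "gpt-5.2".to_string(),
                is_default: false,
                show_in_picker: true,
                supported_in_api: true,
            },
        ]
    }

    fn main() {
        // Assumed filtering rule: API-key users see only presets with
        // supported_in_api, which is why expected_models_for_api_key()
        // now starts with gpt_52_codex().
        let picker: Vec<ModelPreset> = builtin_model_presets()
            .into_iter()
            .filter(|p| p.supported_in_api && p.show_in_picker)
            .collect();

        for (i, preset) in picker.iter().enumerate() {
            let marker = if preset.is_default { " (default)" } else { "" };
            println!("{}. {}{}", i + 1, preset.id, marker);
        }
    }

Running the sketch prints the same ordering the updated TUI snapshots assert: gpt-5.2-codex listed first and marked as the default, followed by gpt-5.1-codex-max, gpt-5.1-codex-mini, and gpt-5.2.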