diff --git a/codex-rs/app-server-protocol/schema/json/ClientRequest.json b/codex-rs/app-server-protocol/schema/json/ClientRequest.json
index 9b676f8e520..4a40cc9ce9b 100644
--- a/codex-rs/app-server-protocol/schema/json/ClientRequest.json
+++ b/codex-rs/app-server-protocol/schema/json/ClientRequest.json
@@ -1171,6 +1171,7 @@
     },
     "Personality": {
       "enum": [
+        "none",
         "friendly",
         "pragmatic"
       ],
diff --git a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json
index f0b16e23586..87220712eb2 100644
--- a/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json
+++ b/codex-rs/app-server-protocol/schema/json/codex_app_server_protocol.schemas.json
@@ -12120,6 +12120,7 @@
     },
     "Personality": {
       "enum": [
+        "none",
         "friendly",
         "pragmatic"
       ],
diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeParams.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeParams.json
index cc05de490a3..8167ea779e8 100644
--- a/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeParams.json
+++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadResumeParams.json
@@ -251,6 +251,7 @@
     },
     "Personality": {
       "enum": [
+        "none",
         "friendly",
         "pragmatic"
       ],
diff --git a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartParams.json b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartParams.json
index 03bed7f93c6..f2b0052e6db 100644
--- a/codex-rs/app-server-protocol/schema/json/v2/ThreadStartParams.json
+++ b/codex-rs/app-server-protocol/schema/json/v2/ThreadStartParams.json
@@ -29,6 +29,7 @@
     },
     "Personality": {
      "enum": [
+        "none",
         "friendly",
         "pragmatic"
       ],
diff --git a/codex-rs/app-server-protocol/schema/json/v2/TurnStartParams.json b/codex-rs/app-server-protocol/schema/json/v2/TurnStartParams.json
index 6a739b31ac2..be351a48ead 100644
--- a/codex-rs/app-server-protocol/schema/json/v2/TurnStartParams.json
+++ b/codex-rs/app-server-protocol/schema/json/v2/TurnStartParams.json
@@ -66,6 +66,7 @@
     },
     "Personality": {
       "enum": [
+        "none",
         "friendly",
         "pragmatic"
       ],
diff --git a/codex-rs/app-server-protocol/schema/typescript/Personality.ts b/codex-rs/app-server-protocol/schema/typescript/Personality.ts
index b9ccad4dc2c..45165f4e33d 100644
--- a/codex-rs/app-server-protocol/schema/typescript/Personality.ts
+++ b/codex-rs/app-server-protocol/schema/typescript/Personality.ts
@@ -2,4 +2,4 @@
 // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
 
-export type Personality = "friendly" | "pragmatic";
+export type Personality = "none" | "friendly" | "pragmatic";
 
diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md
index 055ca6f32a1..69a75247173 100644
--- a/codex-rs/app-server/README.md
+++ b/codex-rs/app-server/README.md
@@ -147,6 +147,8 @@ Start a fresh thread when you need a new Codex conversation.
 { "method": "thread/started", "params": { "thread": { … } } }
 ```
 
+Valid `personality` values are `"friendly"`, `"pragmatic"`, and `"none"`. When `"none"` is selected, the personality placeholder is replaced with an empty string.
+
 To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted.
 You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:
 ```json
diff --git a/codex-rs/core/config.schema.json b/codex-rs/core/config.schema.json
index 138b71e2dca..8e9550c092f 100644
--- a/codex-rs/core/config.schema.json
+++ b/codex-rs/core/config.schema.json
@@ -708,6 +708,7 @@
     },
     "Personality": {
       "enum": [
+        "none",
         "friendly",
         "pragmatic"
       ],
diff --git a/codex-rs/core/tests/common/responses.rs b/codex-rs/core/tests/common/responses.rs
index 86a332874ef..7d3b0770859 100644
--- a/codex-rs/core/tests/common/responses.rs
+++ b/codex-rs/core/tests/common/responses.rs
@@ -375,6 +375,10 @@ pub fn sse(events: Vec<Value>) -> String {
     out
 }
 
+pub fn sse_completed(id: &str) -> String {
+    sse(vec![ev_response_created(id), ev_completed(id)])
+}
+
 /// Convenience: SSE event for a completed response with a specific id.
 pub fn ev_completed(id: &str) -> Value {
     serde_json::json!({
diff --git a/codex-rs/core/tests/suite/personality.rs b/codex-rs/core/tests/suite/personality.rs
index 919ce852edf..ff2c0cfec02 100644
--- a/codex-rs/core/tests/suite/personality.rs
+++ b/codex-rs/core/tests/suite/personality.rs
@@ -19,12 +19,10 @@ use codex_protocol::openai_models::TruncationPolicyConfig;
 use codex_protocol::openai_models::default_input_modalities;
 use codex_protocol::user_input::UserInput;
 use core_test_support::load_default_config_for_test;
-use core_test_support::responses::ev_completed;
-use core_test_support::responses::ev_response_created;
 use core_test_support::responses::mount_models_once;
 use core_test_support::responses::mount_sse_once;
 use core_test_support::responses::mount_sse_sequence;
-use core_test_support::responses::sse;
+use core_test_support::responses::sse_completed;
 use core_test_support::responses::start_mock_server;
 use core_test_support::skip_if_no_network;
 use core_test_support::test_codex::test_codex;
@@ -78,11 +76,7 @@ async fn user_turn_personality_none_does_not_add_update_message() -> anyhow::Res
     skip_if_no_network!(Ok(()));
 
     let server = start_mock_server().await;
-    let resp_mock = mount_sse_once(
-        &server,
-        sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-    )
-    .await;
+    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
     let mut builder = test_codex()
         .with_model("gpt-5.2-codex")
         .with_config(|config| {
@@ -128,11 +122,7 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
     skip_if_no_network!(Ok(()));
 
     let server = start_mock_server().await;
-    let resp_mock = mount_sse_once(
-        &server,
-        sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-    )
-    .await;
+    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
     let mut builder = test_codex()
         .with_model("gpt-5.2-codex")
         .with_config(|config| {
@@ -181,6 +171,111 @@
     Ok(())
 }
 
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn config_personality_none_sends_no_personality() -> anyhow::Result<()> {
+    skip_if_no_network!(Ok(()));
+
+    let server = start_mock_server().await;
+    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
+    let mut builder = test_codex()
+        .with_model("gpt-5.2-codex")
+        .with_config(|config| {
+            config.features.disable(Feature::RemoteModels);
+            config.features.enable(Feature::Personality);
+            config.personality = Some(Personality::None);
+        });
+    let test = builder.build(&server).await?;
+
+    test.codex
+        .submit(Op::UserTurn {
+            items: vec![UserInput::Text {
+                text: "hello".into(),
+                text_elements: Vec::new(),
+            }],
+            final_output_json_schema: None,
+            cwd: test.cwd_path().to_path_buf(),
+            approval_policy: test.config.approval_policy.value(),
+            sandbox_policy: SandboxPolicy::ReadOnly,
+            model: test.session_configured.model.clone(),
+            effort: test.config.model_reasoning_effort,
+            summary: ReasoningSummary::Auto,
+            collaboration_mode: None,
+            personality: None,
+        })
+        .await?;
+
+    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
+
+    let request = resp_mock.single_request();
+    let instructions_text = request.instructions_text();
+    assert!(
+        !instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE),
+        "expected no friendly personality template, got: {instructions_text:?}"
+    );
+    assert!(
+        !instructions_text.contains(LOCAL_PRAGMATIC_TEMPLATE),
+        "expected no pragmatic personality template, got: {instructions_text:?}"
+    );
+    assert!(
+        !instructions_text.contains("{{ personality }}"),
+        "expected personality placeholder to be removed, got: {instructions_text:?}"
+    );
+
+    let developer_texts = request.message_input_texts("developer");
+    assert!(
+        !developer_texts
+            .iter()
+            .any(|text| text.contains("")),
+        "did not expect a personality update message when personality is None"
+    );
+
+    Ok(())
+}
+
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+async fn default_personality_is_friendly_without_config_toml() -> anyhow::Result<()> {
+    skip_if_no_network!(Ok(()));
+
+    let server = start_mock_server().await;
+    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
+    let mut builder = test_codex()
+        .with_model("gpt-5.2-codex")
+        .with_config(|config| {
+            config.features.disable(Feature::RemoteModels);
+            config.features.enable(Feature::Personality);
+        });
+    let test = builder.build(&server).await?;
+
+    test.codex
+        .submit(Op::UserTurn {
+            items: vec![UserInput::Text {
+                text: "hello".into(),
+                text_elements: Vec::new(),
+            }],
+            final_output_json_schema: None,
+            cwd: test.cwd_path().to_path_buf(),
+            approval_policy: test.config.approval_policy.value(),
+            sandbox_policy: SandboxPolicy::ReadOnly,
+            model: test.session_configured.model.clone(),
+            effort: test.config.model_reasoning_effort,
+            summary: ReasoningSummary::Auto,
+            collaboration_mode: None,
+            personality: None,
+        })
+        .await?;
+
+    wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;
+
+    let request = resp_mock.single_request();
+    let instructions_text = request.instructions_text();
+    assert!(
+        instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE),
+        "expected default friendly template, got: {instructions_text:?}"
+    );
+
+    Ok(())
+}
+
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
 async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()> {
     skip_if_no_network!(Ok(()));
@@ -188,10 +283,7 @@ async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()>
     let server = start_mock_server().await;
     let resp_mock = mount_sse_sequence(
         &server,
-        vec![
-            sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-            sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
-        ],
+        vec![sse_completed("resp-1"), sse_completed("resp-2")],
     )
     .await;
     let mut builder = test_codex()
@@ -287,10 +379,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
     let server = start_mock_server().await;
     let resp_mock = mount_sse_sequence(
         &server,
-        vec![
-            sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-            sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
-        ],
+        vec![sse_completed("resp-1"), sse_completed("resp-2")],
     )
     .await;
     let mut builder = test_codex()
@@ -397,10 +486,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
     let server = start_mock_server().await;
     let resp_mock = mount_sse_sequence(
         &server,
-        vec![
-            sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-            sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
-        ],
+        vec![sse_completed("resp-1"), sse_completed("resp-2")],
     )
     .await;
     let mut builder = test_codex()
@@ -537,11 +623,7 @@ async fn ignores_remote_personality_if_remote_models_disabled() -> anyhow::Resul
     )
     .await;
 
-    let resp_mock = mount_sse_once(
-        &server,
-        sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-    )
-    .await;
+    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
 
     let mut builder = test_codex()
         .with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
@@ -657,11 +739,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
     )
     .await;
 
-    let resp_mock = mount_sse_once(
-        &server,
-        sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-    )
-    .await;
+    let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
 
     let mut builder = test_codex()
         .with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
@@ -774,10 +852,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -
 
     let resp_mock = mount_sse_sequence(
         &server,
-        vec![
-            sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
-            sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
-        ],
+        vec![sse_completed("resp-1"), sse_completed("resp-2")],
     )
     .await;
 
diff --git a/codex-rs/docs/codex_mcp_interface.md b/codex-rs/docs/codex_mcp_interface.md
index 8847b6b4ee9..8b1b5f70457 100644
--- a/codex-rs/docs/codex_mcp_interface.md
+++ b/codex-rs/docs/codex_mcp_interface.md
@@ -73,6 +73,8 @@ Send input to the active turn:
 - `sendUserMessage` → enqueue items to the conversation
 - `sendUserTurn` → structured turn with explicit `cwd`, `approvalPolicy`, `sandboxPolicy`, `model`, optional `effort`, `summary`, optional `personality`, and optional `outputSchema` (JSON Schema for the final assistant message)
 
+Valid `personality` values are `friendly`, `pragmatic`, and `none`. When `none` is selected, the personality placeholder is replaced with an empty string.
+
 For v2 threads, `turn/start` also accepts `outputSchema` to constrain the final assistant message for that turn.
 
 Interrupt a running turn: `interruptConversation`.
diff --git a/codex-rs/docs/protocol_v1.md b/codex-rs/docs/protocol_v1.md
index 8f68ecc9a60..4d4e5c147c7 100644
--- a/codex-rs/docs/protocol_v1.md
+++ b/codex-rs/docs/protocol_v1.md
@@ -72,6 +72,9 @@ For complete documentation of the `Op` and `EventMsg` variants, refer to [protoc
   - `Op::UserInputAnswer` – Provide answers for a `request_user_input` tool call
   - `Op::ListSkills` – Request skills for one or more cwd values (optionally `force_reload`)
   - `Op::UserTurn` and `Op::OverrideTurnContext` accept an optional `personality` override that updates the model’s communication style
+
+Valid `personality` values are `friendly`, `pragmatic`, and `none`. When `none` is selected, the personality placeholder is replaced with an empty string.
+
 - `EventMsg`
   - `EventMsg::AgentMessage` – Messages from the `Model`
   - `EventMsg::AgentMessageContentDelta` – Streaming assistant text
diff --git a/codex-rs/protocol/src/config_types.rs b/codex-rs/protocol/src/config_types.rs
index aa28e1a6fac..f7d7b4f4317 100644
--- a/codex-rs/protocol/src/config_types.rs
+++ b/codex-rs/protocol/src/config_types.rs
@@ -96,6 +96,7 @@ pub enum WindowsSandboxLevel {
 #[serde(rename_all = "lowercase")]
 #[strum(serialize_all = "lowercase")]
 pub enum Personality {
+    None,
     Friendly,
     Pragmatic,
 }
diff --git a/codex-rs/protocol/src/openai_models.rs b/codex-rs/protocol/src/openai_models.rs
index 90cf34f3935..36298e9ca50 100644
--- a/codex-rs/protocol/src/openai_models.rs
+++ b/codex-rs/protocol/src/openai_models.rs
@@ -335,6 +335,7 @@ impl ModelInstructionsVariables {
     pub fn get_personality_message(&self, personality: Option<Personality>) -> Option<String> {
         if let Some(personality) = personality {
             match personality {
+                Personality::None => Some(String::new()),
                 Personality::Friendly => self.personality_friendly.clone(),
                 Personality::Pragmatic => self.personality_pragmatic.clone(),
             }
@@ -546,6 +547,10 @@ mod tests {
             model.get_model_instructions(Some(Personality::Pragmatic)),
             "Hello\n"
         );
+        assert_eq!(
+            model.get_model_instructions(Some(Personality::None)),
+            "Hello\n"
+        );
         assert_eq!(model.get_model_instructions(None), "Hello\n");
 
         let model_no_personality = test_model(Some(ModelMessages {
@@ -564,6 +569,10 @@
             model_no_personality.get_model_instructions(Some(Personality::Pragmatic)),
             "Hello\n"
        );
+        assert_eq!(
+            model_no_personality.get_model_instructions(Some(Personality::None)),
+            "Hello\n"
+        );
         assert_eq!(model_no_personality.get_model_instructions(None), "Hello\n");
     }
 
@@ -603,6 +612,10 @@ mod tests {
             personality_variables.get_personality_message(Some(Personality::Pragmatic)),
             Some("pragmatic".to_string())
         );
+        assert_eq!(
+            personality_variables.get_personality_message(Some(Personality::None)),
+            Some(String::new())
+        );
         assert_eq!(
             personality_variables.get_personality_message(None),
             Some("default".to_string())
         );
@@ -621,6 +634,10 @@
             personality_variables.get_personality_message(Some(Personality::Pragmatic)),
             None
         );
+        assert_eq!(
+            personality_variables.get_personality_message(Some(Personality::None)),
+            Some(String::new())
+        );
         assert_eq!(
             personality_variables.get_personality_message(None),
             Some("default".to_string())
         );
@@ -639,6 +656,10 @@
             personality_variables.get_personality_message(Some(Personality::Pragmatic)),
             Some("pragmatic".to_string())
         );
+        assert_eq!(
+            personality_variables.get_personality_message(Some(Personality::None)),
+            Some(String::new())
+        );
         assert_eq!(personality_variables.get_personality_message(None), None);
     }
 }
diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs
index 878d4d5da2d..99164efa61a 100644
--- a/codex-rs/tui/src/app.rs
+++ b/codex-rs/tui/src/app.rs
@@ -2326,6 +2326,7 @@ impl App {
 
     fn personality_label(personality: Personality) -> &'static str {
         match personality {
+            Personality::None => "None",
             Personality::Friendly => "Friendly",
             Personality::Pragmatic => "Pragmatic",
         }
diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs
index 191873ca5f8..f29403f68c4 100644
--- a/codex-rs/tui/src/chatwidget.rs
+++ b/codex-rs/tui/src/chatwidget.rs
@@ -5505,6 +5505,7 @@ impl ChatWidget {
 
     fn personality_label(personality: Personality) -> &'static str {
         match personality {
+            Personality::None => "None",
             Personality::Friendly => "Friendly",
             Personality::Pragmatic => "Pragmatic",
         }
@@ -5512,6 +5513,7 @@ impl ChatWidget {
 
     fn personality_description(personality: Personality) -> &'static str {
         match personality {
+            Personality::None => "No personality instructions.",
             Personality::Friendly => "Warm, collaborative, and helpful.",
             Personality::Pragmatic => "Concise, task-focused, and direct.",
         }
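
For reviewers who want to see the intended behavior outside the test harness, here is a minimal, self-contained sketch. It is not the codex-rs API; the names (`personality_message`, `render_instructions`, the sample message strings) are hypothetical. It only illustrates the rule this diff encodes: `Personality::None` resolves to an empty personality message, so the `{{ personality }}` placeholder renders as an empty string, while passing no override leaves the default resolution in place.

```rust
// Hypothetical sketch of the "none" personality behavior added in this diff.
#[derive(Clone, Copy, Debug)]
enum Personality {
    None,
    Friendly,
    Pragmatic,
}

// Mirrors `Personality::None => Some(String::new())` from openai_models.rs;
// the friendly/pragmatic/default strings here are placeholders.
fn personality_message(personality: Option<Personality>) -> Option<String> {
    match personality {
        Some(Personality::None) => Some(String::new()),
        Some(Personality::Friendly) => Some("friendly instructions".to_string()),
        Some(Personality::Pragmatic) => Some("pragmatic instructions".to_string()),
        None => Some("default instructions".to_string()),
    }
}

// Substitute the resolved message into the instructions template, or leave
// the template untouched when no message is available.
fn render_instructions(template: &str, personality: Option<Personality>) -> String {
    match personality_message(personality) {
        Some(message) => template.replace("{{ personality }}", &message),
        None => template.to_string(),
    }
}

fn main() {
    let template = "Hello\n{{ personality }}";
    // "none" collapses the placeholder to nothing, matching the test
    // expectation that the instructions reduce to "Hello\n".
    assert_eq!(
        render_instructions(template, Some(Personality::None)),
        "Hello\n"
    );
    // A concrete personality substitutes its message instead.
    assert_eq!(
        render_instructions(template, Some(Personality::Pragmatic)),
        "Hello\npragmatic instructions"
    );
    println!("ok");
}
```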