@@ -1171,6 +1171,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],
@@ -12120,6 +12120,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],
@@ -251,6 +251,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],
@@ -29,6 +29,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],
@@ -66,6 +66,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],
@@ -2,4 +2,4 @@

// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.

export type Personality = "friendly" | "pragmatic";
export type Personality = "none" | "friendly" | "pragmatic";
2 changes: 2 additions & 0 deletions codex-rs/app-server/README.md
@@ -147,6 +147,8 @@ Start a fresh thread when you need a new Codex conversation.
{ "method": "thread/started", "params": { "thread": { … } } }
```

Valid `personality` values are `"friendly"`, `"pragmatic"`, and `"none"`. When `"none"` is selected, the personality placeholder is replaced with an empty string.
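
For illustration, a minimal `thread/start` request that overrides the personality might look like the sketch below; this assumes the remaining `thread/start` parameters are optional and omits the JSON-RPC `id` envelope:

```json
{ "method": "thread/start", "params": { "personality": "none" } }
```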

To continue a stored session, call `thread/resume` with the `thread.id` you previously recorded. The response shape matches `thread/start`, and no additional notifications are emitted. You can also pass the same configuration overrides supported by `thread/start`, such as `personality`:

```json
1 change: 1 addition & 0 deletions codex-rs/core/config.schema.json
@@ -708,6 +708,7 @@
},
"Personality": {
"enum": [
"none",
"friendly",
"pragmatic"
],
4 changes: 4 additions & 0 deletions codex-rs/core/tests/common/responses.rs
@@ -375,6 +375,10 @@ pub fn sse(events: Vec<Value>) -> String {
out
}

pub fn sse_completed(id: &str) -> String {
sse(vec![ev_response_created(id), ev_completed(id)])
}

/// Convenience: SSE event for a completed response with a specific id.
pub fn ev_completed(id: &str) -> Value {
serde_json::json!({
153 changes: 114 additions & 39 deletions codex-rs/core/tests/suite/personality.rs
@@ -19,12 +19,10 @@ use codex_protocol::openai_models::TruncationPolicyConfig;
use codex_protocol::openai_models::default_input_modalities;
use codex_protocol::user_input::UserInput;
use core_test_support::load_default_config_for_test;
use core_test_support::responses::ev_completed;
use core_test_support::responses::ev_response_created;
use core_test_support::responses::mount_models_once;
use core_test_support::responses::mount_sse_once;
use core_test_support::responses::mount_sse_sequence;
use core_test_support::responses::sse;
use core_test_support::responses::sse_completed;
use core_test_support::responses::start_mock_server;
use core_test_support::skip_if_no_network;
use core_test_support::test_codex::test_codex;
@@ -78,11 +76,7 @@ async fn user_turn_personality_none_does_not_add_update_message() -> anyhow::Res
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_model("gpt-5.2-codex")
.with_config(|config| {
@@ -128,11 +122,7 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;
let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_model("gpt-5.2-codex")
.with_config(|config| {
@@ -181,17 +171,119 @@ async fn config_personality_some_sets_instructions_template() -> anyhow::Result<
Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_personality_none_sends_no_personality() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_model("gpt-5.2-codex")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
config.personality = Some(Personality::None);
});
let test = builder.build(&server).await?;

test.codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "hello".into(),
text_elements: Vec::new(),
}],
final_output_json_schema: None,
cwd: test.cwd_path().to_path_buf(),
approval_policy: test.config.approval_policy.value(),
sandbox_policy: SandboxPolicy::ReadOnly,
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

let request = resp_mock.single_request();
let instructions_text = request.instructions_text();
assert!(
!instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE),
"expected no friendly personality template, got: {instructions_text:?}"
);
assert!(
!instructions_text.contains(LOCAL_PRAGMATIC_TEMPLATE),
"expected no pragmatic personality template, got: {instructions_text:?}"
);
assert!(
!instructions_text.contains("{{ personality }}"),
"expected personality placeholder to be removed, got: {instructions_text:?}"
);

let developer_texts = request.message_input_texts("developer");
assert!(
!developer_texts
.iter()
.any(|text| text.contains("<personality_spec>")),
"did not expect a personality update message when personality is None"
);

Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn default_personality_is_friendly_without_config_toml() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;
let mut builder = test_codex()
.with_model("gpt-5.2-codex")
.with_config(|config| {
config.features.disable(Feature::RemoteModels);
config.features.enable(Feature::Personality);
});
let test = builder.build(&server).await?;

test.codex
.submit(Op::UserTurn {
items: vec![UserInput::Text {
text: "hello".into(),
text_elements: Vec::new(),
}],
final_output_json_schema: None,
cwd: test.cwd_path().to_path_buf(),
approval_policy: test.config.approval_policy.value(),
sandbox_policy: SandboxPolicy::ReadOnly,
model: test.session_configured.model.clone(),
effort: test.config.model_reasoning_effort,
summary: ReasoningSummary::Auto,
collaboration_mode: None,
personality: None,
})
.await?;

wait_for_event(&test.codex, |ev| matches!(ev, EventMsg::TurnComplete(_))).await;

let request = resp_mock.single_request();
let instructions_text = request.instructions_text();
assert!(
instructions_text.contains(LOCAL_FRIENDLY_TEMPLATE),
"expected default friendly template, got: {instructions_text:?}"
);

Ok(())
}

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn user_turn_personality_some_adds_update_message() -> anyhow::Result<()> {
skip_if_no_network!(Ok(()));

let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let mut builder = test_codex()
@@ -287,10 +379,7 @@ async fn user_turn_personality_same_value_does_not_add_update_message() -> anyho
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let mut builder = test_codex()
@@ -397,10 +486,7 @@ async fn user_turn_personality_skips_if_feature_disabled() -> anyhow::Result<()>
let server = start_mock_server().await;
let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;
let mut builder = test_codex()
@@ -537,11 +623,7 @@ async fn ignores_remote_personality_if_remote_models_disabled() -> anyhow::Resul
)
.await;

let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;

let mut builder = test_codex()
.with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
@@ -657,11 +739,7 @@ async fn remote_model_friendly_personality_instructions_with_feature() -> anyhow
)
.await;

let resp_mock = mount_sse_once(
&server,
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
)
.await;
let resp_mock = mount_sse_once(&server, sse_completed("resp-1")).await;

let mut builder = test_codex()
.with_auth(codex_core::CodexAuth::create_dummy_chatgpt_auth_for_testing())
@@ -774,10 +852,7 @@ async fn user_turn_personality_remote_model_template_includes_update_message() -

let resp_mock = mount_sse_sequence(
&server,
vec![
sse(vec![ev_response_created("resp-1"), ev_completed("resp-1")]),
sse(vec![ev_response_created("resp-2"), ev_completed("resp-2")]),
],
vec![sse_completed("resp-1"), sse_completed("resp-2")],
)
.await;

2 changes: 2 additions & 0 deletions codex-rs/docs/codex_mcp_interface.md
@@ -73,6 +73,8 @@ Send input to the active turn:
- `sendUserMessage` → enqueue items to the conversation
- `sendUserTurn` → structured turn with explicit `cwd`, `approvalPolicy`, `sandboxPolicy`, `model`, optional `effort`, `summary`, optional `personality`, and optional `outputSchema` (JSON Schema for the final assistant message)

Valid `personality` values are `friendly`, `pragmatic`, and `none`. When `none` is selected, the personality placeholder is replaced with an empty string.
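
As a rough sketch, a `sendUserTurn` request that overrides the personality might look like the following; the field names follow the camelCase keys listed above, `"…"` abbreviates values whose exact shape is not shown here, the JSON-RPC envelope is omitted, and any remaining parameters are left out:

```json
{
  "method": "sendUserTurn",
  "params": {
    "cwd": "…",
    "approvalPolicy": "…",
    "sandboxPolicy": "…",
    "model": "…",
    "personality": "pragmatic"
  }
}
```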

For v2 threads, `turn/start` also accepts `outputSchema` to constrain the final assistant message for that turn.

Interrupt a running turn: `interruptConversation`.
3 changes: 3 additions & 0 deletions codex-rs/docs/protocol_v1.md
@@ -72,6 +72,9 @@ For complete documentation of the `Op` and `EventMsg` variants, refer to [protoc
- `Op::UserInputAnswer` – Provide answers for a `request_user_input` tool call
- `Op::ListSkills` – Request skills for one or more cwd values (optionally `force_reload`)
- `Op::UserTurn` and `Op::OverrideTurnContext` accept an optional `personality` override that updates the model’s communication style

Valid `personality` values are `friendly`, `pragmatic`, and `none`. When `none` is selected, the personality placeholder is replaced with an empty string.

- `EventMsg`
- `EventMsg::AgentMessage` – Messages from the `Model`
- `EventMsg::AgentMessageContentDelta` – Streaming assistant text
1 change: 1 addition & 0 deletions codex-rs/protocol/src/config_types.rs
@@ -96,6 +96,7 @@ pub enum WindowsSandboxLevel {
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum Personality {
None,
Friendly,
Pragmatic,
}
21 changes: 21 additions & 0 deletions codex-rs/protocol/src/openai_models.rs
@@ -335,6 +335,7 @@ impl ModelInstructionsVariables {
pub fn get_personality_message(&self, personality: Option<Personality>) -> Option<String> {
if let Some(personality) = personality {
match personality {
Personality::None => Some(String::new()),
Personality::Friendly => self.personality_friendly.clone(),
Personality::Pragmatic => self.personality_pragmatic.clone(),
}
@@ -546,6 +547,10 @@ mod tests {
model.get_model_instructions(Some(Personality::Pragmatic)),
"Hello\n"
);
assert_eq!(
model.get_model_instructions(Some(Personality::None)),
"Hello\n"
);
assert_eq!(model.get_model_instructions(None), "Hello\n");

let model_no_personality = test_model(Some(ModelMessages {
@@ -564,6 +569,10 @@ mod tests {
model_no_personality.get_model_instructions(Some(Personality::Pragmatic)),
"Hello\n"
);
assert_eq!(
model_no_personality.get_model_instructions(Some(Personality::None)),
"Hello\n"
);
assert_eq!(model_no_personality.get_model_instructions(None), "Hello\n");
}

@@ -603,6 +612,10 @@ mod tests {
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
Some("pragmatic".to_string())
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
Some(String::new())
);
assert_eq!(
personality_variables.get_personality_message(None),
Some("default".to_string())
@@ -621,6 +634,10 @@ mod tests {
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
None
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
Some(String::new())
);
assert_eq!(
personality_variables.get_personality_message(None),
Some("default".to_string())
@@ -639,6 +656,10 @@ mod tests {
personality_variables.get_personality_message(Some(Personality::Pragmatic)),
Some("pragmatic".to_string())
);
assert_eq!(
personality_variables.get_personality_message(Some(Personality::None)),
Some(String::new())
);
assert_eq!(personality_variables.get_personality_message(None), None);
}
}
1 change: 1 addition & 0 deletions codex-rs/tui/src/app.rs
@@ -2326,6 +2326,7 @@

fn personality_label(personality: Personality) -> &'static str {
match personality {
Personality::None => "None",
Personality::Friendly => "Friendly",
Personality::Pragmatic => "Pragmatic",
}