58 changes: 54 additions & 4 deletions codex-rs/codex-api/src/sse/chat.rs
@@ -6,6 +6,7 @@ use codex_client::StreamResponse;
 use codex_protocol::models::ContentItem;
 use codex_protocol::models::ReasoningItemContent;
 use codex_protocol::models::ResponseItem;
+use codex_protocol::protocol::TokenUsage;
 use eventsource_stream::Eventsource;
 use futures::Stream;
 use futures::StreamExt;
@@ -50,6 +51,8 @@ pub async fn process_chat_sse<S>(
     let mut assistant_item: Option<ResponseItem> = None;
     let mut reasoning_item: Option<ResponseItem> = None;
     let mut completed_sent = false;
+    let mut token_usage: Option<TokenUsage> = None;
+    let mut response_id = String::new();
 
     loop {
         let start = Instant::now();
@@ -78,8 +81,8 @@
                 if !completed_sent {
                     let _ = tx_event
                         .send(Ok(ResponseEvent::Completed {
-                            response_id: String::new(),
-                            token_usage: None,
+                            response_id: response_id.clone(),
+                            token_usage: token_usage.clone(),
                         }))
                         .await;
                 }
@@ -110,6 +113,16 @@
             }
         };
 
+        if response_id.is_empty()
+            && let Some(id) = value.get("id").and_then(serde_json::Value::as_str)
+        {
+            response_id = id.to_string();
+        }
+
+        if let Some(usage) = value.get("usage") {
+            token_usage = parse_openai_usage(usage);
+        }
+
         let Some(choices) = value.get("choices").and_then(|c| c.as_array()) else {
             continue;
         };
@@ -201,8 +214,8 @@
                 if !completed_sent {
                     let _ = tx_event
                         .send(Ok(ResponseEvent::Completed {
-                            response_id: String::new(),
-                            token_usage: None,
+                            response_id: response_id.clone(),
+                            token_usage: token_usage.clone(),
                         }))
                         .await;
                     completed_sent = true;
@@ -297,6 +310,43 @@ async fn append_reasoning_text(
     }
 }
 
+fn parse_openai_usage(usage: &serde_json::Value) -> Option<TokenUsage> {
+    let prompt_tokens = usage
+        .get("prompt_tokens")
+        .and_then(serde_json::Value::as_i64)
+        .unwrap_or(0);
+
+    let completion_tokens = usage
+        .get("completion_tokens")
+        .and_then(serde_json::Value::as_i64)
+        .unwrap_or(0);
+
+    let total_tokens = usage
+        .get("total_tokens")
+        .and_then(serde_json::Value::as_i64)
+        .unwrap_or(0);
+
+    let cached_input_tokens = usage
+        .get("prompt_tokens_details")
+        .and_then(|d| d.get("cached_tokens"))
+        .and_then(serde_json::Value::as_i64)
+        .unwrap_or(0);
+
+    let reasoning_output_tokens = usage
+        .get("completion_tokens_details")
+        .and_then(|d| d.get("reasoning_tokens"))
+        .and_then(serde_json::Value::as_i64)
+        .unwrap_or(0);
+
+    Some(TokenUsage {
+        input_tokens: prompt_tokens,
+        cached_input_tokens,
+        output_tokens: completion_tokens,
+        reasoning_output_tokens,
+        total_tokens,
+    })
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
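For context: with OpenAI-style chat completions, the usage object typically arrives on the final streamed chunk (when stream_options: {"include_usage": true} is set and the chunk's choices array is empty), which is why the parsed value is held in a local and only attached to the Completed event at stream end. Below is a minimal sketch of a unit test that could live in the mod tests block above to exercise the new parser; the test name and token counts are illustrative, and the TokenUsage field names are taken from the diff:

    #[test]
    fn parses_openai_chat_usage_payload() {
        // Usage object shaped like OpenAI's chat-completions response
        // (values are illustrative).
        let usage = serde_json::json!({
            "prompt_tokens": 120,
            "completion_tokens": 45,
            "total_tokens": 165,
            "prompt_tokens_details": { "cached_tokens": 64 },
            "completion_tokens_details": { "reasoning_tokens": 12 }
        });

        // parse_openai_usage defaults every missing field to 0, so it
        // always yields Some(TokenUsage).
        let parsed = parse_openai_usage(&usage).expect("usage should parse");
        assert_eq!(parsed.input_tokens, 120);
        assert_eq!(parsed.cached_input_tokens, 64);
        assert_eq!(parsed.output_tokens, 45);
        assert_eq!(parsed.reasoning_output_tokens, 12);
        assert_eq!(parsed.total_tokens, 165);
    }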