Skip to content

Commit 8f6926a

Browse files
committed
feat: parse and return token usage in Chat Completions stream
- Capture `token_usage` and `response_id` from SSE chunks in `process_chat_sse`.
- Add `parse_openai_usage` helper to map OpenAI usage fields to the internal struct.
1 parent 9630097 commit 8f6926a

File tree

1 file changed

+54
-4
lines changed

1 file changed

+54
-4
lines changed

codex-rs/codex-api/src/sse/chat.rs

Lines changed: 54 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ use codex_client::StreamResponse;
66
use codex_protocol::models::ContentItem;
77
use codex_protocol::models::ReasoningItemContent;
88
use codex_protocol::models::ResponseItem;
9+
use codex_protocol::protocol::TokenUsage;
910
use eventsource_stream::Eventsource;
1011
use futures::Stream;
1112
use futures::StreamExt;
@@ -50,6 +51,8 @@ pub async fn process_chat_sse<S>(
5051
let mut assistant_item: Option<ResponseItem> = None;
5152
let mut reasoning_item: Option<ResponseItem> = None;
5253
let mut completed_sent = false;
54+
let mut token_usage: Option<TokenUsage> = None;
55+
let mut response_id = String::new();
5356

5457
loop {
5558
let start = Instant::now();
@@ -78,8 +81,8 @@ pub async fn process_chat_sse<S>(
7881
if !completed_sent {
7982
let _ = tx_event
8083
.send(Ok(ResponseEvent::Completed {
81-
response_id: String::new(),
82-
token_usage: None,
84+
response_id: response_id.clone(),
85+
token_usage: token_usage.clone(),
8386
}))
8487
.await;
8588
}
@@ -110,6 +113,16 @@ pub async fn process_chat_sse<S>(
110113
}
111114
};
112115

116+
if response_id.is_empty() {
117+
if let Some(id) = value.get("id").and_then(|s| s.as_str()) {
118+
response_id = id.to_string();
119+
}
120+
}
121+
122+
if let Some(usage) = value.get("usage") {
123+
token_usage = parse_openai_usage(usage);
124+
}
125+
113126
let Some(choices) = value.get("choices").and_then(|c| c.as_array()) else {
114127
continue;
115128
};
@@ -201,8 +214,8 @@ pub async fn process_chat_sse<S>(
201214
if !completed_sent {
202215
let _ = tx_event
203216
.send(Ok(ResponseEvent::Completed {
204-
response_id: String::new(),
205-
token_usage: None,
217+
response_id: response_id.clone(),
218+
token_usage: token_usage.clone(),
206219
}))
207220
.await;
208221
completed_sent = true;
@@ -297,6 +310,43 @@ async fn append_reasoning_text(
297310
}
298311
}
299312

313+
fn parse_openai_usage(usage: &serde_json::Value) -> Option<TokenUsage> {
314+
let prompt_tokens = usage
315+
.get("prompt_tokens")
316+
.and_then(|v| v.as_i64())
317+
.unwrap_or(0);
318+
319+
let completion_tokens = usage
320+
.get("completion_tokens")
321+
.and_then(|v| v.as_i64())
322+
.unwrap_or(0);
323+
324+
let total_tokens = usage
325+
.get("total_tokens")
326+
.and_then(|v| v.as_i64())
327+
.unwrap_or(0);
328+
329+
let cached_input_tokens = usage
330+
.get("prompt_tokens_details")
331+
.and_then(|d| d.get("cached_tokens"))
332+
.and_then(|v| v.as_i64())
333+
.unwrap_or(0);
334+
335+
let reasoning_output_tokens = usage
336+
.get("completion_tokens_details")
337+
.and_then(|d| d.get("reasoning_tokens"))
338+
.and_then(|v| v.as_i64())
339+
.unwrap_or(0);
340+
341+
Some(TokenUsage {
342+
input_tokens: prompt_tokens,
343+
cached_input_tokens,
344+
output_tokens: completion_tokens,
345+
reasoning_output_tokens,
346+
total_tokens,
347+
})
348+
}
349+
300350
#[cfg(test)]
301351
mod tests {
302352
use super::*;

0 commit comments

Comments
 (0)