server: maintain chat completion id for streaming responses #5880

12 changes: 6 additions & 6 deletions examples/server/oai.hpp
@@ -69,7 +69,7 @@ inline static json oaicompat_completion_params_parse(
     return llama_params;
 }
 
-inline static json format_final_response_oaicompat(const json &request, const task_result &response, bool streaming = false)
+inline static json format_final_response_oaicompat(const json &request, const task_result &response, std::string id, bool streaming = false)
 {
     json result = response.result_json;
 
@@ -105,7 +105,7 @@ inline static json format_final_response_oaicompat(const json &request, const task_result &response, bool streaming = false)
              json{{"completion_tokens", num_tokens_predicted},
                   {"prompt_tokens", num_prompt_tokens},
                   {"total_tokens", num_tokens_predicted + num_prompt_tokens}}},
-            {"id", gen_chatcmplid()}};
+            {"id", id}};
 
     if (server_verbose) {
         res["__verbose"] = result;
@@ -119,7 +119,7 @@ inline static json format_final_response_oaicompat(const json &request, const task_result &response, bool streaming = false)
 }
 
 // return value is vector as there is one case where we might need to generate two responses
-inline static std::vector<json> format_partial_response_oaicompat(const task_result &response) {
+inline static std::vector<json> format_partial_response_oaicompat(const task_result &response, std::string id) {
     json result = response.result_json;
 
     if (!result.contains("model") || !result.contains("oaicompat_token_ctr")) {
@@ -165,7 +165,7 @@ inline static std::vector<json> format_partial_response_oaicompat(const task_result &response) {
                                 {"role", "assistant"}
                             }}}})},
                 {"created", t},
-                {"id", gen_chatcmplid()},
+                {"id", id},
                 {"model", modelname},
                 {"object", "chat.completion.chunk"}};
 
@@ -176,7 +176,7 @@ inline static std::vector<json> format_partial_response_oaicompat(const task_result &response) {
                             {"content", content}}}
                     }})},
                 {"created", t},
-                {"id", gen_chatcmplid()},
+                {"id", id},
                 {"model", modelname},
                 {"object", "chat.completion.chunk"}};
 
@@ -202,7 +202,7 @@ inline static std::vector<json> format_partial_response_oaicompat(const task_result &response) {
 
     json ret = json{{"choices", choices},
                     {"created", t},
-                    {"id", gen_chatcmplid()},
+                    {"id", id},
                     {"model", modelname},
                     {"object", "chat.completion.chunk"}};
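
The oai.hpp change is purely an interface change: both formatters now take the completion id from the caller instead of each calling gen_chatcmplid() themselves, which is what previously let the chunks of a single streamed answer carry different ids. Below is a minimal, self-contained sketch of that pattern using nlohmann::json (which the server example already uses); make_chunk and its fields are illustrative stand-ins, not the real helpers:

#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

// Toy stand-in for format_partial_response_oaicompat(): the id is a parameter,
// so every chunk of one request carries whatever id the caller hands in.
static json make_chunk(const std::string &id, const std::string &content) {
    return json{
        {"id", id},  // caller-supplied, never regenerated per chunk
        {"object", "chat.completion.chunk"},
        {"choices", json::array({json{{"index", 0},
                                      {"delta", json{{"content", content}}}}})}};
}

int main() {
    const std::string completion_id = "chatcmpl-example";  // one id per request
    for (const char *piece : {"Hel", "lo", "!"}) {
        std::cout << make_chunk(completion_id, piece).dump() << "\n";  // same "id" in every chunk
    }
}
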
7 changes: 4 additions & 3 deletions examples/server/server.cpp
@@ -3221,13 +3221,14 @@ int main(int argc, char **argv)
             const int task_id = llama.queue_tasks.get_new_id();
             llama.queue_results.add_waiting_task_id(task_id);
             llama.request_completion(task_id, data, false, false, -1);
+            const std::string completion_id = gen_chatcmplid();
 
             if (!json_value(data, "stream", false)) {
                 std::string completion_text;
                 task_result result = llama.queue_results.recv(task_id);
 
                 if (!result.error && result.stop) {
-                    json oaicompat_result = format_final_response_oaicompat(data, result);
+                    json oaicompat_result = format_final_response_oaicompat(data, result, completion_id);
 
                     res.set_content(oaicompat_result.dump(-1, ' ', false,
                                         json::error_handler_t::replace),
@@ -3238,11 +3239,11 @@ int main(int argc, char **argv)
                 }
                 llama.queue_results.remove_waiting_task_id(task_id);
             } else {
-                const auto chunked_content_provider = [task_id, &llama](size_t, httplib::DataSink &sink) {
+                const auto chunked_content_provider = [task_id, &llama, completion_id](size_t, httplib::DataSink &sink) {
                     while (true) {
                         task_result llama_result = llama.queue_results.recv(task_id);
                         if (!llama_result.error) {
-                            std::vector<json> result_array = format_partial_response_oaicompat( llama_result);
+                            std::vector<json> result_array = format_partial_response_oaicompat(llama_result, completion_id);
 
                             for (auto it = result_array.begin(); it != result_array.end(); ++it)
                             {
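
On the server side the id is created once, before the streaming/non-streaming branch, and the chunked content provider captures it by value, so every SSE chunk written later reuses it. A self-contained sketch of that capture pattern follows; make_completion_id() only approximates gen_chatcmplid() ("chatcmpl-" plus random characters), and the chunk text is illustrative rather than the real payload:

#include <iostream>
#include <random>
#include <string>

// Rough stand-in for gen_chatcmplid() from the server example's utils:
// "chatcmpl-" followed by random alphanumeric characters (exact format may differ).
static std::string make_completion_id() {
    static const std::string chars = "abcdefghijklmnopqrstuvwxyz0123456789";
    std::mt19937 rng{std::random_device{}()};
    std::uniform_int_distribution<size_t> pick(0, chars.size() - 1);
    std::string id = "chatcmpl-";
    for (int i = 0; i < 30; ++i) {
        id += chars[pick(rng)];
    }
    return id;
}

int main() {
    // Generated once per request, before choosing the streaming path...
    const std::string completion_id = make_completion_id();

    // ...and captured by value, mirroring the new
    // [task_id, &llama, completion_id] capture list in server.cpp.
    auto chunked_content_provider = [completion_id](const std::string &content) {
        std::cout << "data: {\"id\":\"" << completion_id
                  << "\",\"object\":\"chat.completion.chunk\",\"content\":\""
                  << content << "\"}\n";
    };

    for (const char *piece : {"Hel", "lo", "!"}) {
        chunked_content_provider(piece);  // every chunk prints the same id
    }
}

Capturing completion_id by value matters because the chunked content provider is invoked after the request handler has returned, so a reference to a handler-local string would dangle.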