From ea5a7fbc9532f4ae3f4f55bda0078ab9ce64729e Mon Sep 17 00:00:00 2001
From: Elsa
Date: Tue, 25 Jul 2023 18:07:27 +0800
Subject: [PATCH 1/5] Use conversation template from fastchat for api proxy

Fix EventSource message format

---
 examples/server/api_like_OAI.py | 67 ++++++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 21 deletions(-)

diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index aa325a03ee444..3f166609c3036 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -1,14 +1,18 @@
 import argparse
 from flask import Flask, jsonify, request, Response
+from flask_cors import CORS
 import urllib.parse
 import requests
 import time
 import json
+from fastchat import conversation
 
 app = Flask(__name__)
+CORS(app)
 
 parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.")
+parser.add_argument("--chat-prompt-model", type=str, help="Set the model", default="")
 parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')
 parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ")
 parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ")
@@ -29,25 +33,46 @@ def is_present(json, key):
     return True
 
 
+use_conversation_template = args.chat_prompt_model != ""
 
-#convert chat to prompt
-def convert_chat(messages):
-    prompt = "" + args.chat_prompt.replace("\\n", "\n")
-
-    system_n = args.system_name.replace("\\n", "\n")
-    user_n = args.user_name.replace("\\n", "\n")
-    ai_n = args.ai_name.replace("\\n", "\n")
-    stop = args.stop.replace("\\n", "\n")
+if use_conversation_template:
+    conv = conversation.get_conv_template(args.chat_prompt_model)
+    stop_token = conv.stop_str
+else:
+    stop_token = args.stop
 
-    for line in messages:
-        if (line["role"] == "system"):
-            prompt += f"{system_n}{line['content']}"
-        if (line["role"] == "user"):
-            prompt += f"{user_n}{line['content']}"
-        if (line["role"] == "assistant"):
-            prompt += f"{ai_n}{line['content']}{stop}"
-    prompt += ai_n.rstrip()
+
+#convert chat to prompt
+def convert_chat(messages):
+    if use_conversation_template:
+        conv = conversation.get_conv_template(args.chat_prompt_model)
+        for line in messages:
+            if (line["role"] == "system"):
+                try:
+                    conv.set_system_msg(line["content"])
+                except Exception:
+                    pass
+            elif (line["role"] == "user"):
+                conv.append_message(conv.roles[0], line["content"])
+            elif (line["role"] == "assistant"):
+                conv.append_message(conv.roles[1], line["content"])
+        conv.append_message(conv.roles[1], None)
+        prompt = conv.get_prompt()
+    else:
+        prompt = "" + args.chat_prompt.replace("\\n", "\n")
+        system_n = args.system_name.replace("\\n", "\n")
+        user_n = args.user_name.replace("\\n", "\n")
+        ai_n = args.ai_name.replace("\\n", "\n")
+        stop = stop_token.replace("\\n", "\n")
+
+        for line in messages:
+            if (line["role"] == "system"):
+                prompt += f"{system_n}{line['content']}"
+            if (line["role"] == "user"):
+                prompt += f"{user_n}{line['content']}"
+            if (line["role"] == "assistant"):
+                prompt += f"{ai_n}{line['content']}{stop}"
+        prompt += ai_n.rstrip()
     return prompt
 
@@ -69,8 +94,8 @@ def make_postData(body, chat=False, stream=False):
     if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"]
     if(is_present(body, "seed")): postData["seed"] = body["seed"]
     if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()]
-    if (args.stop != ""):
-        postData["stop"] = [args.stop]
+    if stop_token:  # stop_token may be "" or None
+        postData["stop"] = [stop_token]
     else:
         postData["stop"] = []
     if(is_present(body, "stop")): postData["stop"] += body["stop"]
@@ -173,12 +198,12 @@ def generate():
         data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True)
         time_now = int(time.time())
         resData = make_resData_stream({}, chat=True, time_now=time_now, start=True)
-        yield 'data: {}\n'.format(json.dumps(resData))
+        yield 'data: {}\n\n'.format(json.dumps(resData))
         for line in data.iter_lines():
             if line:
                 decoded_line = line.decode('utf-8')
                 resData = make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now)
-                yield 'data: {}\n'.format(json.dumps(resData))
+                yield 'data: {}\n\n'.format(json.dumps(resData))
     return Response(generate(), mimetype='text/event-stream')
 
@@ -212,7 +237,7 @@ def generate():
             if line:
                 decoded_line = line.decode('utf-8')
                 resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now)
-                yield 'data: {}\n'.format(json.dumps(resData))
+                yield 'data: {}\n\n'.format(json.dumps(resData))
     return Response(generate(), mimetype='text/event-stream')
 
 if __name__ == '__main__':

From bee2a3d981c9e9fdfa5d5f47c2d38920e96d1607 Mon Sep 17 00:00:00 2001
From: Elsa
Date: Tue, 25 Jul 2023 18:16:31 +0800
Subject: [PATCH 2/5] Add docs

Make fschat and flask-cors optional

---
 examples/server/README.md       | 10 ++++++++++
 examples/server/api_like_OAI.py | 17 +++++++++++------
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/examples/server/README.md b/examples/server/README.md
index e5ca8269b9d56..051a7509008d1 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -195,6 +195,14 @@ bash chat.sh
 
 API example using Python Flask: [api_like_OAI.py](api_like_OAI.py)
 This example must be used with server.cpp
 
+Requirements:
+
+```shell
+pip install flask flask-cors fschat  # flask-cors and fschat are optional: flask-cors enables cross-origin requests, fschat provides the chat (conversation) templates
+```
+
+Run the server:
+
 ```sh
 python api_like_OAI.py
 ```
@@ -204,6 +212,8 @@ After running the API server, you can use it in Python by setting the API base U
 openai.api_base = "http://<Your api-server IP>:port"
 ```
 
+For better integration with the model, it is recommended to start the proxy with the `--chat-prompt-model` parameter instead of relying only on parameters such as `--user-name`. This parameter accepts a model name registered in [FastChat/conversation.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py), for example `llama-2`.
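+
+To check what prompt a given template produces, you can build one the same way the proxy does (a minimal sketch; it assumes the optional `fschat` package is installed):
+
+```python
+from fastchat import conversation
+
+# Load the registered "llama-2" template and add a single user turn
+conv = conversation.get_conv_template("llama-2")
+conv.append_message(conv.roles[0], "Hello!")
+# Leave the assistant turn empty so the prompt ends where the model should reply
+conv.append_message(conv.roles[1], None)
+print(conv.get_prompt())
+```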
+
 Then you can utilize llama.cpp as an OpenAI's **chat.completion** or **text_completion** API
 
 ### Extending or building alternative Web Front End
 
diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index 3f166609c3036..50f2750c1d266 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -1,18 +1,23 @@
 import argparse
 from flask import Flask, jsonify, request, Response
-from flask_cors import CORS
 import urllib.parse
 import requests
 import time
 import json
-from fastchat import conversation
-
+try:
+    from fastchat import conversation
+except ImportError:
+    conversation = None
 
 app = Flask(__name__)
-CORS(app)
+try:
+    from flask_cors import CORS
+    CORS(app)
+except ImportError:
+    pass
 
 parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.")
-parser.add_argument("--chat-prompt-model", type=str, help="Set the model", default="")
+parser.add_argument("--chat-prompt-model", type=str, help="Set the model name of the conversation template", default="")
 parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')
 parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ")
 parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ")
@@ -33,7 +38,7 @@ def is_present(json, key):
     return True
 
 
-use_conversation_template = args.chat_prompt_model != ""
+use_conversation_template = args.chat_prompt_model != "" and conversation is not None
 
 if use_conversation_template:
     conv = conversation.get_conv_template(args.chat_prompt_model)

From 712c2e90b1f2919c9a1cbe6455ffce8cde795262 Mon Sep 17 00:00:00 2001
From: Elsa
Date: Wed, 2 Aug 2023 09:33:23 +0800
Subject: [PATCH 3/5] Use conv.set_system_message from upstream

---
 examples/server/api_like_OAI.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index 50f2750c1d266..179a574164ad3 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -54,7 +54,7 @@ def convert_chat(messages):
         for line in messages:
             if (line["role"] == "system"):
                 try:
-                    conv.set_system_msg(line["content"])
+                    conv.set_system_message(line["content"])
                 except Exception:
                     pass
             elif (line["role"] == "user"):

From ea73dace986f05b6b35c799880c7eaea7ee578f4 Mon Sep 17 00:00:00 2001
From: Elsa
Date: Mon, 7 Aug 2023 18:09:15 +0800
Subject: [PATCH 4/5] Fix when `stop` in request is null

---
 examples/server/api_like_OAI.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index 179a574164ad3..4049e1f4a92d2 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -103,7 +103,7 @@ def make_postData(body, chat=False, stream=False):
         postData["stop"] = [stop_token]
     else:
         postData["stop"] = []
-    if(is_present(body, "stop")): postData["stop"] += body["stop"]
+    if(is_present(body, "stop")): postData["stop"] += body["stop"] or []
     postData["n_keep"] = -1
     postData["stream"] = stream
From 0876952924289d5865eb41a297bb6d7336c3e962 Mon Sep 17 00:00:00 2001
From: Laura
Date: Sun, 17 Dec 2023 21:11:46 +0100
Subject: [PATCH 5/5] Implement credentialed CORS according to MDN

---
 examples/server/server.cpp | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 04038530f94da..5b9499c004cd4 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2710,9 +2710,15 @@ int main(int argc, char **argv)
         return false;
     };
 
-    svr.set_default_headers({{"Server", "llama.cpp"},
-                             {"Access-Control-Allow-Origin", "*"},
-                             {"Access-Control-Allow-Headers", "content-type"}});
+    svr.set_default_headers({{"Server", "llama.cpp"}});
+
+    // CORS preflight
+    svr.Options(R"(.*)", [](const httplib::Request &req, httplib::Response &res) {
+        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
+        res.set_header("Access-Control-Allow-Credentials", "true");
+        res.set_header("Access-Control-Allow-Methods", "POST");
+        res.set_header("Access-Control-Allow-Headers", "*");
+    });
 
     // this is only called if no index.html is found in the public --path
     svr.Get("/", [](const httplib::Request &, httplib::Response &res)
@@ -2744,7 +2750,7 @@
-    svr.Get("/props", [&llama](const httplib::Request & /*req*/, httplib::Response &res)
+    svr.Get("/props", [&llama](const httplib::Request &req, httplib::Response &res)
     {
-        res.set_header("Access-Control-Allow-Origin", "*");
+        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
         json data = {
             { "user_name",      llama.name_user.c_str() },
             { "assistant_name", llama.name_assistant.c_str() }
@@ -2754,6 +2760,7 @@
     svr.Post("/completion", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res)
     {
+        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
         if (!validate_api_key(req, res)) {
             return;
         }
@@ -2821,10 +2828,9 @@
         }
     });
 
-
-    svr.Get("/v1/models", [&params](const httplib::Request&, httplib::Response& res)
+    svr.Get("/v1/models", [&params](const httplib::Request& req, httplib::Response& res)
     {
+        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
         std::time_t t = std::time(0);
         json models = {
@@ -2842,9 +2848,11 @@
         res.set_content(models.dump(), "application/json; charset=utf-8");
     });
 
+    // TODO: add mount point without "/v1" prefix -- how?
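+    // Per MDN, a credentialed response (Access-Control-Allow-Credentials: true)
+    // must name a concrete origin: browsers reject the "*" wildcard in
+    // Access-Control-Allow-Origin for credentialed requests, which is why the
+    // handlers here echo the request's Origin header back instead.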
svr.Post("/v1/chat/completions", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res) { + res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin")); if (!validate_api_key(req, res)) { return; } @@ -2918,6 +2926,7 @@ int main(int argc, char **argv) svr.Post("/infill", [&llama, &validate_api_key](const httplib::Request &req, httplib::Response &res) { + res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin")); if (!validate_api_key(req, res)) { return; } @@ -2990,6 +2999,7 @@ int main(int argc, char **argv) svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res) { + res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin")); const json body = json::parse(req.body); std::vector tokens; if (body.count("content") != 0) @@ -3002,6 +3012,7 @@ int main(int argc, char **argv) svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res) { + res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin")); const json body = json::parse(req.body); std::string content; if (body.count("tokens") != 0) @@ -3016,6 +3027,7 @@ int main(int argc, char **argv) svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res) { + res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin")); const json body = json::parse(req.body); json prompt; if (body.count("content") != 0)