
Commit c80634e
Undo the addition of chat with a system parameter.

There is already a way to do this.
JG-Adams authored Aug 12, 2024
1 parent fd7cb64 commit c80634e
Showing 1 changed file with 0 additions and 74 deletions.

singleheader/ollama.hpp
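The "way it already has" is presumably the existing pattern of supplying the system prompt as a message with the "system" role inside the messages list, which makes a dedicated system parameter redundant. A minimal sketch of that pattern, assuming the non-system chat wrapper that remains in the header, that ollama::messages behaves as a standard container of ollama::message, and a placeholder model name "llama3":

#include <iostream>
#include "ollama.hpp"

int main()
{
    // Supply the system prompt as a "system"-role message rather than
    // through the dedicated parameter this commit removes.
    ollama::messages messages = {
        ollama::message("system", "You are a concise assistant."),
        ollama::message("user", "Why is the sky blue?")
    };

    // The remaining chat overload takes no system parameter.
    // "llama3" is a placeholder model name.
    ollama::response response = ollama::chat("llama3", messages);
    std::cout << response << std::endl;

    return 0;
}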
@@ -35059,20 +35059,6 @@ namespace ollama
type = message_type::chat;
}

// Create a request for a chat completion with a system prompt.
request(const std::string& model, const ollama::messages& messages, const std::string& system, const json& options=nullptr, bool stream=false, const std::string& format="json", const std::string& keep_alive_duration="5m"): request()
{
(*this)["model"] = model;
(*this)["messages"] = messages.to_json();
(*this)["system"] = system;
(*this)["stream"] = stream;

if (options!=nullptr) (*this)["options"] = options["options"];
//(*this)["format"] = format;
(*this)["keep_alive"] = keep_alive_duration;
type = message_type::chat;
}

// Request for a chat completion with a single message
request(const std::string& model, const ollama::message& message, const json& options=nullptr, bool stream=false, const std::string& format="json", const std::string& keep_alive_duration="5m") :request(model, messages(message), options, stream, format, keep_alive_duration ){}

@@ -35349,56 +35335,6 @@ class Ollama
return false;
}

// Generate a non-streaming chat reply with a system prompt.
ollama::response chat(const std::string& model, const ollama::messages& messages, const std::string& system, json options=nullptr, const std::string& format="json", const std::string& keep_alive_duration="5m")
{
ollama::response response;
ollama::request request(model, messages, system, options, false, format, keep_alive_duration);

std::string request_string = request.dump();
if (ollama::log_requests) std::cout << request_string << std::endl;

if (auto res = this->cli->Post("/api/chat",request_string, "application/json"))
{
if (ollama::log_replies) std::cout << res->body << std::endl;

response = ollama::response(res->body, ollama::message_type::chat);
if ( response.has_error() ) { if (ollama::use_exceptions) throw ollama::exception("Ollama response returned error: "+response.get_error() ); }

}
else
{
if (ollama::use_exceptions) throw ollama::exception("No response returned from server "+this->server_url+". Error was: "+httplib::to_string( res.error() ));
}

return response;
}

// Generate a streaming chat reply with a system prompt, passing each received token to the callback.
bool chat(const std::string& model, const ollama::messages& messages, const std::string& system, std::function<void(const ollama::response&)> on_receive_token, const json& options=nullptr, const std::string& format="json", const std::string& keep_alive_duration="5m")
{
ollama::response response;
ollama::request request(model, messages, system, options, true, format, keep_alive_duration);

std::string request_string = request.dump();
if (ollama::log_requests) std::cout << request_string << std::endl;

auto stream_callback = [on_receive_token](const char *data, size_t data_length)->bool{

std::string message(data, data_length);
if (ollama::log_replies) std::cout << message << std::endl;
ollama::response response(message, ollama::message_type::chat);
if ( response.has_error() ) { if (ollama::use_exceptions) throw ollama::exception("Ollama response returned error: "+response.get_error() ); }
on_receive_token(response);

return true;
};

if (auto res = this->cli->Post("/api/chat", request_string, "application/json", stream_callback)) { return true; }
else { if (ollama::use_exceptions) throw ollama::exception( "No response from server returned at URL "+this->server_url+". Error: "+httplib::to_string( res.error() ) ); }

return false;
}

bool create_model(const std::string& modelName, const std::string& modelFile, bool loadFromFile=true)
{

@@ -35750,16 +35686,6 @@ namespace ollama
return ollama.generate(model, prompt, system, on_receive_response, options, images);
}

inline ollama::response chat(const std::string& model, const ollama::messages& messages, const std::string& system, const json& options=nullptr, const std::string& format="json", const std::string& keep_alive_duration="5m")
{
return ollama.chat(model, messages, system, options, format, keep_alive_duration);
}

inline bool chat(const std::string& model, const ollama::messages& messages, const std::string& system, std::function<void(const ollama::response&)> on_receive_response, const json& options=nullptr, const std::string& format="json", const std::string& keep_alive_duration="5m")
{
return ollama.chat(model, messages, system, on_receive_response, options, format, keep_alive_duration);
}

inline bool create(const std::string& modelName, const std::string& modelFile, bool loadFromFile=true)
{
return ollama.create_model(modelName, modelFile, loadFromFile);
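The removed streaming overload has the same workaround: fold the system prompt into the messages list and use the remaining token-callback chat wrapper. A hedged sketch under the same assumptions as above:

#include <iostream>
#include "ollama.hpp"

int main()
{
    ollama::messages messages = {
        ollama::message("system", "Answer in one sentence."),
        ollama::message("user", "What is Ollama?")
    };

    // Print each partial response as it arrives; the callback overload
    // streams by default. "llama3" is again a placeholder model name.
    auto on_token = [](const ollama::response& token)
    {
        std::cout << token << std::flush;
    };

    ollama::chat("llama3", messages, on_token);

    return 0;
}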
