From 94b940d49c6c0a2703deb4e934ff5d469d370563 Mon Sep 17 00:00:00 2001
From: Mikhail Khludnev
Date: Sat, 2 Mar 2024 23:24:32 +0300
Subject: [PATCH] #2280 demonstrate using self-hosted Ollama as an eval model.

---
 tests/experimental/evals/models/test_ollama.py | 44 ++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 tests/experimental/evals/models/test_ollama.py

diff --git a/tests/experimental/evals/models/test_ollama.py b/tests/experimental/evals/models/test_ollama.py
new file mode 100644
index 00000000000..a475516d6b5
--- /dev/null
+++ b/tests/experimental/evals/models/test_ollama.py
@@ -0,0 +1,44 @@
+import os
+from unittest import mock
+
+from phoenix.experimental.evals import LiteLLMModel
+
+
+@mock.patch.dict(os.environ, {"OLLAMA_API_BASE": "just to make validate_env happy"}, clear=True)
+@mock.patch("litellm.llms.ollama.get_ollama_response")
+def test_selfhosted_ollama_via_model_kwargs(get_ollama_response):
+    # Stub the response so response.choices[0].message.content yields the
+    # expected completion (attribute-style access, as read downstream).
+    ollama_response = mock.MagicMock()
+    ollama_response.choices[0].message.content = "42 per tail"
+    get_ollama_response.return_value = ollama_response
+
+    model = LiteLLMModel(
+        model="ollama/monstral",
+        model_kwargs=dict(base_url="http://hosted.olla.ma:11434"),
+    )
+    result = model("How much is the fish?")
+
+    assert result == "42 per tail"
+    # get_ollama_response is called positionally as (api_base, model, prompt, ...).
+    call_args = get_ollama_response.call_args[0]
+    assert call_args[0] == "http://hosted.olla.ma:11434"
+    assert call_args[1] == "monstral"
+    assert "How much is the fish?" in call_args[2]
+
+
+@mock.patch.dict(os.environ, {"OLLAMA_API_BASE": "http://hosted.olla.ma:11434"}, clear=True)
+@mock.patch("litellm.llms.ollama.get_ollama_response")
+def test_selfhosted_ollama_via_env(get_ollama_response):
+    ollama_response = mock.MagicMock()
+    ollama_response.choices[0].message.content = "42 per tail"
+    get_ollama_response.return_value = ollama_response
+
+    model = LiteLLMModel(model="ollama/monstral")
+    result = model("How much is the fish?")
+
+    assert result == "42 per tail"
+    call_args = get_ollama_response.call_args[0]
+    assert call_args[0] == "http://hosted.olla.ma:11434"
+    assert call_args[1] == "monstral"
+    assert "How much is the fish?" in call_args[2]
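
For context, a minimal sketch of the non-mocked usage these tests model. It assumes a reachable self-hosted Ollama server at http://hosted.olla.ma:11434 serving a model tagged "monstral"; both are placeholders carried over from the tests above, and any reachable base URL and Ollama model tag should work the same way:

    from phoenix.experimental.evals import LiteLLMModel

    # Point LiteLLM at the self-hosted server explicitly via model_kwargs...
    model = LiteLLMModel(
        model="ollama/monstral",
        model_kwargs=dict(base_url="http://hosted.olla.ma:11434"),
    )
    print(model("How much is the fish?"))

    # ...or configure the same endpoint through the environment instead,
    # as the second test does:
    #   export OLLAMA_API_BASE=http://hosted.olla.ma:11434
    model = LiteLLMModel(model="ollama/monstral")

Either path ends up passing the base URL and the bare model name ("monstral") into litellm's get_ollama_response, which is exactly what both tests assert on.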