diff --git a/pyproject.toml b/pyproject.toml
index 7528a2ecc9..65efbcd680 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,7 +11,7 @@ dependencies = [
     # Core Dependencies
     "pydantic>=2.4.2",
     "openai>=1.13.3",
-    "litellm==1.60.2",
+    "litellm==1.66.3",
     "instructor>=1.3.3",
     # Text Processing
     "pdfplumber>=0.11.4",
diff --git a/tests/litellm_update_test.py b/tests/litellm_update_test.py
new file mode 100644
index 0000000000..c242cad52d
--- /dev/null
+++ b/tests/litellm_update_test.py
@@ -0,0 +1,48 @@
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from crewai.llm import LLM
+
+
+def test_llm_call_with_litellm_1_66_3():
+    """Test that the LLM class works with litellm v1.66.3+"""
+    llm = LLM(
+        model="gpt-3.5-turbo",
+        temperature=0.7,
+        max_tokens=50,
+        stop=["STOP"],
+        presence_penalty=0.1,
+        frequency_penalty=0.1,
+    )
+    messages = [{"role": "user", "content": "Say 'Hello, World!' and then say STOP"}]
+
+    with patch("litellm.completion") as mocked_completion:
+        mock_message = MagicMock()
+        mock_message.content = "Hello, World! I won't say the stop word."
+        mock_choice = MagicMock()
+        mock_choice.message = mock_message
+        mock_response = MagicMock()
+        mock_response.choices = [mock_choice]
+        mock_response.usage = {
+            "prompt_tokens": 10,
+            "completion_tokens": 10,
+            "total_tokens": 20,
+        }
+
+        mocked_completion.return_value = mock_response
+
+        response = llm.call(messages)
+
+        mocked_completion.assert_called_once()
+
+        assert "Hello, World!" in response
+        assert "STOP" not in response
+
+        _, kwargs = mocked_completion.call_args
+        assert kwargs["model"] == "gpt-3.5-turbo"
+        assert kwargs["temperature"] == 0.7
+        assert kwargs["max_tokens"] == 50
+        assert kwargs["stop"] == ["STOP"]
+        assert kwargs["presence_penalty"] == 0.1
+        assert kwargs["frequency_penalty"] == 0.1
diff --git a/uv.lock b/uv.lock
index d9c9d5c66f..83164c68d9 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,4 @@
 version = 1
-revision = 1
 requires-python = ">=3.10, <3.13"
 resolution-markers = [
     "python_full_version < '3.11' and platform_python_implementation == 'PyPy' and sys_platform == 'darwin'",
@@ -715,7 +714,7 @@ requires-dist = [
     { name = "json-repair", specifier = ">=0.25.2" },
     { name = "json5", specifier = ">=0.10.0" },
     { name = "jsonref", specifier = ">=1.1.0" },
-    { name = "litellm", specifier = "==1.60.2" },
+    { name = "litellm", specifier = "==1.66.3" },
     { name = "mem0ai", marker = "extra == 'mem0'", specifier = ">=0.1.29" },
     { name = "openai", specifier = ">=1.13.3" },
     { name = "openpyxl", specifier = ">=3.1.5" },
@@ -735,7 +734,6 @@ requires-dist = [
     { name = "tomli-w", specifier = ">=1.1.0" },
     { name = "uv", specifier = ">=0.4.25" },
 ]
-provides-extras = ["tools", "embeddings", "agentops", "fastembed", "pdfplumber", "pandas", "openpyxl", "mem0", "docling", "aisuite"]
 
 [package.metadata.requires-dev]
 dev = [
@@ -2266,7 +2264,7 @@ wheels = [
 
 [[package]]
 name = "litellm"
-version = "1.60.2"
+version = "1.66.3"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "aiohttp" },
     { name = "click" },
     { name = "httpx" },
     { name = "importlib-metadata" },
     { name = "jinja2" },
     { name = "jsonschema" },
     { name = "openai" },
     { name = "pydantic" },
     { name = "python-dotenv" },
     { name = "tiktoken" },
     { name = "tokenizers" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/94/8f/704cdb0fdbdd49dc5062a39ae5f1a8f308ae0ffd746df6e0137fc1776b8a/litellm-1.60.2.tar.gz", hash = "sha256:a8170584fcfd6f5175201d869e61ccd8a40ffe3264fc5e53c5b805ddf8a6e05a", size = 6447447 }
+sdist = { url = "https://files.pythonhosted.org/packages/0a/10/e5f4824ce69d83c2208397a6522df50e0132ca626779101580121b9d342b/litellm-1.66.3.tar.gz", hash = "sha256:909564f5dc33d7dac236de6cc8066512834467bcebe3494a664d72ae6506a5ca", size = 7223923 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/8a/ba/0eaec9aee9f99fdf46ef1c0bddcfe7f5720b182f84f6ed27f13145d5ded2/litellm-1.60.2-py3-none-any.whl", hash = "sha256:1cb08cda04bf8c5ef3e690171a779979e4b16a5e3a24cd8dc1f198e7f198d5c4", size = 6746809 },
+    { url = "https://files.pythonhosted.org/packages/20/a1/5e44417a06f3fecdfb164d0774992301293ad73a67763e49c6b97ed61db2/litellm-1.66.3-py3-none-any.whl", hash = "sha256:f1c662afec14225cee3bae7c93961857edf13fcece42fe46d921d9df50f70dd2", size = 7582219 },
 ]
 
 [[package]]
@@ -2988,6 +2986,7 @@ name = "nvidia-nccl-cu12"
 version = "2.20.5"
 source = { registry = "https://pypi.org/simple" }
 wheels = [
+    { url = "https://files.pythonhosted.org/packages/c1/bb/d09dda47c881f9ff504afd6f9ca4f502ded6d8fc2f572cacc5e39da91c28/nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1fc150d5c3250b170b29410ba682384b14581db722b2531b0d8d33c595f33d01", size = 176238458 },
     { url = "https://files.pythonhosted.org/packages/4b/2a/0a131f572aa09f741c30ccd45a8e56316e8be8dfc7bc19bf0ab7cfef7b19/nvidia_nccl_cu12-2.20.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:057f6bf9685f75215d0c53bf3ac4a10b3e6578351de307abad9e18a99182af56", size = 176249402 },
 ]
 
@@ -2997,6 +2996,7 @@ version = "12.6.85"
 source = { registry = "https://pypi.org/simple" }
 wheels = [
     { url = "https://files.pythonhosted.org/packages/9d/d7/c5383e47c7e9bf1c99d5bd2a8c935af2b6d705ad831a7ec5c97db4d82f4f/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a", size = 19744971 },
+    { url = "https://files.pythonhosted.org/packages/31/db/dc71113d441f208cdfe7ae10d4983884e13f464a6252450693365e166dcf/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41", size = 19270338 },
 ]
 
 [[package]]