diff --git a/examples/rate_limiting_demo.py b/examples/rate_limiting_demo.py new file mode 100644 index 0000000..cdac1dc --- /dev/null +++ b/examples/rate_limiting_demo.py @@ -0,0 +1,292 @@ +""" +Demonstration of rate limiting functionality. + +This example shows how rate limiting prevents API throttling errors +when making many rapid requests. +""" + +import os +import time + +from dotenv import load_dotenv +from langchain_core.messages import HumanMessage + +from langchain_gradient import ChatGradient + +load_dotenv() + + +def demo_without_rate_limiting(): + """ + Demo: Making requests WITHOUT rate limiting. + + This will likely fail with 429 errors if you exceed API limits. + """ + print("=" * 60) + print("DEMO 1: Without Rate Limiting (May Fail)") + print("=" * 60) + + llm = ChatGradient( + model="llama3.3-70b-instruct", + api_key=os.getenv("DIGITALOCEAN_INFERENCE_KEY"), + enable_rate_limiting=False, # Disabled + ) + + print("Making 10 rapid requests...") + start_time = time.time() + + success_count = 0 + error_count = 0 + + for i in range(10): + try: + response = llm.invoke([HumanMessage(content=f"Say 'Request {i}'")]) + success_count += 1 + print(f"✅ Request {i}: Success") + except Exception as e: + error_count += 1 + print(f"❌ Request {i}: Failed - {str(e)[:50]}") + + elapsed = time.time() - start_time + + print("\nResults:") + print(f" Time taken: {elapsed:.2f}s") + print(f" Successful: {success_count}") + print(f" Failed: {error_count}") + print() + + +def demo_with_rate_limiting(): + """ + Demo: Making requests WITH rate limiting. + + This will succeed for all requests, automatically waiting when needed. + """ + print("=" * 60) + print("DEMO 2: With Rate Limiting (All Succeed)") + print("=" * 60) + + llm = ChatGradient( + model="llama3.3-70b-instruct", + api_key=os.getenv("DIGITALOCEAN_INFERENCE_KEY"), + enable_rate_limiting=True, + max_requests_per_minute=5, # Low limit for demo purposes + ) + + print("Making 10 requests with rate limiting...") + print(f"Rate limit: {llm.max_requests_per_minute} requests/minute\n") + + start_time = time.time() + + for i in range(10): + # Check current usage before request + usage = llm._rate_limiter.get_current_usage() + print(f"Request {i}: Current usage = {usage['usage_percentage']:.1f}%") + + response = llm.invoke([HumanMessage(content=f"Say 'Request {i}'")]) + print(f"✅ Request {i}: Success - {response.content[:30]}...") + + elapsed = time.time() - start_time + + print("\nResults:") + print(f" Time taken: {elapsed:.2f}s") + print(" All requests succeeded!") + print(" Rate limiting prevented any failures") + print() + + +def demo_usage_monitoring(): + """ + Demo: Monitor rate limit usage in real-time. 
+ """ + print("=" * 60) + print("DEMO 3: Real-Time Usage Monitoring") + print("=" * 60) + + llm = ChatGradient( + model="llama3.3-70b-instruct", + api_key=os.getenv("DIGITALOCEAN_INFERENCE_KEY"), + enable_rate_limiting=True, + max_requests_per_minute=10, + ) + + print("Making 5 requests and monitoring usage...\n") + + for i in range(5): + # Get usage BEFORE request + usage_before = llm._rate_limiter.get_current_usage() + + response = llm.invoke([HumanMessage(content=f"Request {i}")]) + + # Get usage AFTER request + usage_after = llm._rate_limiter.get_current_usage() + + print(f"Request {i}:") + print( + f" Before: {usage_before['current_requests']}/{usage_before['max_requests']} " + f"({usage_before['usage_percentage']:.1f}%)" + ) + print( + f" After: {usage_after['current_requests']}/{usage_after['max_requests']} " + f"({usage_after['usage_percentage']:.1f}%)" + ) + print() + + print("Final usage stats:") + final_usage = llm._rate_limiter.get_current_usage() + print(f" Current requests in window: {final_usage['current_requests']}") + print(f" Maximum allowed: {final_usage['max_requests']}") + print(f" Usage percentage: {final_usage['usage_percentage']:.1f}%") + print() + + +def demo_custom_limits(): + """ + Demo: Using custom rate limits for different scenarios. + """ + print("=" * 60) + print("DEMO 4: Custom Rate Limits") + print("=" * 60) + + scenarios = [ + ("Conservative (30/min)", 30), + ("Standard (60/min)", 60), + ("Aggressive (120/min)", 120), + ] + + for name, limit in scenarios: + print(f"\nScenario: {name}") + + llm = ChatGradient( + model="llama3.3-70b-instruct", + api_key=os.getenv("DIGITALOCEAN_INFERENCE_KEY"), + enable_rate_limiting=True, + max_requests_per_minute=limit, + ) + + print(f" Rate limit: {limit} requests/minute") + print(f" That's 1 request every {60 / limit:.2f} seconds") + + # Make 3 quick requests + start = time.time() + for i in range(3): + llm.invoke([HumanMessage(content="Quick test")]) + elapsed = time.time() - start + + print(f" Time for 3 requests: {elapsed:.2f}s") + + print() + + +def demo_rate_limiter_reset(): + """ + Demo: Resetting the rate limiter. + """ + print("=" * 60) + print("DEMO 5: Rate Limiter Reset") + print("=" * 60) + + llm = ChatGradient( + model="llama3.3-70b-instruct", + api_key=os.getenv("DIGITALOCEAN_INFERENCE_KEY"), + enable_rate_limiting=True, + max_requests_per_minute=5, + ) + + print("Making 5 requests to hit the limit...") + for i in range(5): + llm.invoke([HumanMessage(content=f"Request {i}")]) + + usage = llm._rate_limiter.get_current_usage() + print( + f"Usage after 5 requests: {usage['current_requests']}/{usage['max_requests']}" + ) + + print("\nResetting rate limiter...") + llm._rate_limiter.reset() + + usage_after = llm._rate_limiter.get_current_usage() + print( + f"Usage after reset: {usage_after['current_requests']}/{usage_after['max_requests']}" + ) + + print("\nRate limiter is now cleared and ready for new requests!") + print() + + +def demo_streaming_with_rate_limiting(): + """ + Demo: Rate limiting with streaming responses. 
+ """ + print("=" * 60) + print("DEMO 6: Streaming with Rate Limiting") + print("=" * 60) + + llm = ChatGradient( + model="llama3.3-70b-instruct", + api_key=os.getenv("DIGITALOCEAN_INFERENCE_KEY"), + enable_rate_limiting=True, + max_requests_per_minute=5, + streaming=True, + ) + + print("Making 3 streaming requests...\n") + + for i in range(3): + print(f"Stream {i}: ", end="", flush=True) + + for chunk in llm.stream([HumanMessage(content=f"Count to 3 for request {i}")]): + print(chunk.content, end="", flush=True) + + print() # New line after stream + + usage = llm._rate_limiter.get_current_usage() + print(f" Usage: {usage['usage_percentage']:.1f}%\n") + + print("All streaming requests completed successfully!") + print() + + +if __name__ == "__main__": + # Check for API key + if not os.getenv("DIGITALOCEAN_INFERENCE_KEY"): + print("❌ Error: DIGITALOCEAN_INFERENCE_KEY not set!") + print("Please set your API key in .env file") + exit(1) + + print("\n") + print("╔" + "═" * 58 + "╗") + print("║" + " " * 10 + "RATE LIMITING DEMONSTRATIONS" + " " * 20 + "║") + print("╚" + "═" * 58 + "╝") + print("\n") + + # Run demos + try: + # Demo 1: Without rate limiting (may fail) + # Uncomment at your own risk - may cause 429 errors! + # demo_without_rate_limiting() + + # Demo 2: With rate limiting (always succeeds) + demo_with_rate_limiting() + + # Demo 3: Usage monitoring + demo_usage_monitoring() + + # Demo 4: Custom limits + demo_custom_limits() + + # Demo 5: Reset functionality + demo_rate_limiter_reset() + + # Demo 6: Streaming + demo_streaming_with_rate_limiting() + + print("=" * 60) + print("All demos completed successfully! ✅") + print("=" * 60) + + except KeyboardInterrupt: + print("\n\nDemo interrupted by user.") + except Exception as e: + print(f"\n\n❌ Error during demo: {str(e)}") + raise diff --git a/langchain_gradient/chat_models.py b/langchain_gradient/chat_models.py index 0844ff7..db40c4b 100644 --- a/langchain_gradient/chat_models.py +++ b/langchain_gradient/chat_models.py @@ -18,6 +18,7 @@ from typing_extensions import TypedDict from .constants import ALLOWED_MODEL_FIELDS +from .rate_limiter import RateLimiter # NEW IMPORT class StreamOptions(TypedDict, total=False): @@ -73,24 +74,6 @@ class ChatGradient(BaseChatModel): max_retries : int Max number of retries. Defaults to 2. - Example - ------- - ```python - from langchain_core.messages import HumanMessage - from langchain_gradient import ChatGradient - - chat = ChatGradient(model_name="llama3.3-70b-instruct") - response = chat.invoke([ - HumanMessage(content="What is the capital of France?") - ]) - print(response) - ``` - - Output: - ```python - AIMessage(content="The capital of France is Paris.") - ``` - Methods ------- _generate(messages, ...) @@ -98,6 +81,7 @@ class ChatGradient(BaseChatModel): _stream(messages, ...) Stream chat completions for the given messages. 
""" + api_key: Optional[str] = Field( default=os.environ.get("DIGITALOCEAN_INFERENCE_KEY"), exclude=True, @@ -142,6 +126,15 @@ class ChatGradient(BaseChatModel): max_retries: int = 2 """Max number of retries.""" + # NEW RATE LIMITING FIELDS + enable_rate_limiting: bool = Field(default=False) + """Enable automatic rate limiting to prevent 429 errors.""" + max_requests_per_minute: int = Field(default=60) + """Maximum number of requests per minute when rate limiting is enabled.""" + + # Private field to store rate limiter instance + _rate_limiter: Optional[RateLimiter] = None + @model_validator(mode="before") @classmethod def validate_temperature(cls, values: dict[str, Any]) -> Any: @@ -151,14 +144,30 @@ def validate_temperature(cls, values: dict[str, Any]) -> Any: values["temperature"] = 1 return values + # NEW VALIDATOR FOR RATE LIMITING + @model_validator(mode="after") + def initialize_rate_limiter(self) -> "ChatGradient": + """Initialize rate limiter if enabled.""" + if self.enable_rate_limiting: + if self.max_requests_per_minute <= 0: + raise ValueError( + f"max_requests_per_minute must be positive, " + f"got {self.max_requests_per_minute}" + ) + self._rate_limiter = RateLimiter( + max_requests=self.max_requests_per_minute, + time_window=60.0, # 1 minute + ) + return self + @property def user_agent_package(self) -> str: - return f"LangChain" + return "LangChain" @property def user_agent_version(self) -> str: return "0.1.22" - + @property def _llm_type(self) -> str: """Return type of chat model.""" @@ -166,24 +175,12 @@ def _llm_type(self) -> str: @property def _identifying_params(self) -> Dict[str, Any]: - """Return a dictionary of identifying parameters. - - This information is used by the LangChain callback system, which - is used for tracing purposes make it possible to monitor LLMs. - """ - return { - # The model name allows users to specify custom token counting - # rules in LLM monitoring applications (e.g., in LangSmith users - # can provide per token pricing for their model and monitor - # costs for the given LLM.) - "model_name": self.model_name, - } + """Return identifying parameters for tracing.""" + return {"model_name": self.model_name} def _update_parameters_with_model_fields(self, parameters: dict) -> None: - # Only add explicitly supported model fields for field in ALLOWED_MODEL_FIELDS: value = getattr(self, field, None) - # Use the alias if defined (e.g., model_name -> model) model_field = self.__class__.model_fields.get(field) key = ( model_field.alias @@ -193,6 +190,7 @@ def _update_parameters_with_model_fields(self, parameters: dict) -> None: if key not in parameters and value is not None: parameters[key] = value + def _generate( self, messages: List[BaseMessage], @@ -205,6 +203,10 @@ def _generate( "Gradient model access key not provided. Set DIGITALOCEAN_INFERENCE_KEY env var or pass api_key param." ) + # NEW: Apply rate limiting if enabled + if self._rate_limiter: + self._rate_limiter.wait_if_needed() + inference_client = Gradient( model_access_key=self.api_key, base_url="https://inference.do-ai.run/v1", @@ -213,56 +215,41 @@ def _generate( user_agent_version=self.user_agent_version, ) + # ... rest of the method stays exactly the same ... 
+ def convert_message(msg: BaseMessage) -> Dict[str, Any]: - if hasattr(msg, "type"): - role = {"human": "user", "ai": "assistant", "system": "system"}.get( - msg.type, msg.type - ) - else: - role = getattr(msg, "role", "user") + role = {"human": "user", "ai": "assistant", "system": "system"}.get( + getattr(msg, "type", "user"), getattr(msg, "type", "user") + ) return {"role": role, "content": msg.content} - parameters: Dict[str, Any] = { + parameters = { "messages": [convert_message(m) for m in messages], "model": self.model_name, } - self._update_parameters_with_model_fields(parameters) - if "stop_sequences" in parameters: parameters["stop"] = parameters.pop("stop_sequences") - # Only pass expected keyword arguments to create() completion = inference_client.chat.completions.create(**parameters) choice = completion.choices[0] - content = ( - choice.message.content - if hasattr(choice.message, "content") - else choice.message - ) + content = getattr(choice.message, "content", choice.message) usage = getattr(completion, "usage", {}) - response_metadata = { - "finish_reason": getattr(choice, "finish_reason", None), - "token_usage": { - "completion_tokens": getattr(usage, "completion_tokens", None), - "prompt_tokens": getattr(usage, "prompt_tokens", None), - "total_tokens": getattr(usage, "total_tokens", None), + + message = AIMessage( + content=content, + additional_kwargs={"refusal": getattr(choice.message, "refusal", None)}, + response_metadata={ + "finish_reason": getattr(choice, "finish_reason", None), + "token_usage": { + "completion_tokens": getattr(usage, "completion_tokens", None), + "prompt_tokens": getattr(usage, "prompt_tokens", None), + "total_tokens": getattr(usage, "total_tokens", None), + }, + "model_name": getattr(completion, "model", None), + "id": getattr(completion, "id", None), + }, - "model_name": getattr(completion, "model", None), - "id": getattr(completion, "id", None), - } - message_kwargs = { - "content": content, - "additional_kwargs": {"refusal": getattr(choice.message, "refusal", None)}, - "response_metadata": response_metadata, - } - if self.stream_options and self.stream_options.get("include_usage"): - message_kwargs["usage_metadata"] = { - "input_tokens": getattr(usage, "prompt_tokens", None), - "output_tokens": getattr(usage, "completion_tokens", None), - "total_tokens": getattr(usage, "total_tokens", None), - } - message = AIMessage(**message_kwargs) + ) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @@ -273,6 +260,9 @@ def _stream( self, messages: List[BaseMessage], run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: + if self.enable_rate_limiting and self._rate_limiter: + self._rate_limiter.wait_if_needed() + if not self.api_key: raise ValueError( "Gradient model access key not provided. Set DIGITALOCEAN_INFERENCE_KEY env var or pass api_key param."
@@ -282,67 +272,43 @@ def _stream( model_access_key=self.api_key, base_url="https://inference.do-ai.run/v1", user_agent_package=self.user_agent_package, - user_agent_version=self.user_agent_version, + user_agent_version=self.user_agent_version, ) def convert_message(msg: BaseMessage) -> Dict[str, Any]: - if hasattr(msg, "type"): - role = {"human": "user", "ai": "assistant", "system": "system"}.get( - msg.type, msg.type - ) - else: - role = getattr(msg, "role", "user") + role = {"human": "user", "ai": "assistant", "system": "system"}.get( + getattr(msg, "type", "user"), getattr(msg, "type", "user") + ) return {"role": role, "content": msg.content} - parameters: Dict[str, Any] = { + parameters = { "messages": [convert_message(m) for m in messages], - "stream": True, # Enable streaming + "stream": True, "model": self.model_name, } - self._update_parameters_with_model_fields(parameters) try: stream = inference_client.chat.completions.create(**parameters) for completion in stream: - # Extract the streamed content content = completion.choices[0].delta.content if not content: - continue # skip empty chunks + continue chunk = ChatGenerationChunk(message=AIMessageChunk(content=content)) if run_manager: run_manager.on_llm_new_token(content, chunk=chunk) yield chunk - # Optionally yield usage metadata at the end if available - if self.stream_options and self.stream_options.get("include_usage"): - usage = getattr(completion, "usage", {}) - usage_metadata = { - "input_tokens": getattr(usage, "prompt_tokens", None), - "output_tokens": getattr(usage, "completion_tokens", None), - "total_tokens": getattr(usage, "total_tokens", None), - } - if any(v is not None for v in usage_metadata.values()): - yield ChatGenerationChunk( - message=AIMessageChunk( - content="", - usage_metadata=usage_metadata, # type: ignore - ) - ) except Exception as e: - # Yield an error chunk if possible - error_chunk = ChatGenerationChunk( + yield ChatGenerationChunk( message=AIMessageChunk( content=f"[ERROR] {str(e)}", response_metadata={"error": str(e)} ) ) - yield error_chunk @property def init_from_env_params(self) -> tuple[dict, dict, dict]: - # env_vars, model_params, expected_attrs - # Map DIGITALOCEAN_INFERENCE_KEY -> api_key, and require model param return ( {"DIGITALOCEAN_INFERENCE_KEY": "test-env-key"}, {"model": "bird-brain-001", "buffer_length": 50}, @@ -355,7 +321,6 @@ def is_lc_serializable(cls) -> bool: def __getstate__(self) -> dict: state = self.__dict__.copy() - # Exclude sensitive credentials from serialization state.pop("api_key", None) return state diff --git a/langchain_gradient/constants.py b/langchain_gradient/constants.py index 6116fd6..a53fee3 100644 --- a/langchain_gradient/constants.py +++ b/langchain_gradient/constants.py @@ -17,5 +17,7 @@ "user", "timeout", "stream_options", + "enable_rate_limiting", # NEW + "max_requests_per_minute", # NEW # Add new fields here as needed } diff --git a/langchain_gradient/rate_limiter.py b/langchain_gradient/rate_limiter.py new file mode 100644 index 0000000..1150c42 --- /dev/null +++ b/langchain_gradient/rate_limiter.py @@ -0,0 +1,131 @@ +"""Rate limiting implementation for API requests.""" + +import time +from collections import deque +from threading import Lock + + +class RateLimiter: + """ + Thread-safe rate limiter using a sliding-window request log. + + Timestamps of recent requests are kept in a deque; a new request is + allowed only when fewer than max_requests fall within the trailing + time_window, otherwise the caller waits until the oldest timestamp + expires. Short bursts are allowed as long as the per-window cap is + respected.
+ + Attributes: + max_requests: Maximum number of requests allowed in the time window + time_window: Time window in seconds for the rate limit + requests: Deque storing timestamps of recent requests + lock: Threading lock for thread-safe operations + + Example: + >>> limiter = RateLimiter(max_requests=60, time_window=60) + >>> limiter.wait_if_needed() # Waits if rate limit would be exceeded + """ + + def __init__(self, max_requests: int = 60, time_window: float = 60.0): + """ + Initialize rate limiter. + + Args: + max_requests: Maximum number of requests allowed in time window. + Default is 60 requests. + time_window: Time window in seconds. Default is 60 seconds. + + Raises: + ValueError: If max_requests or time_window are invalid. + """ + if max_requests <= 0: + raise ValueError(f"max_requests must be positive, got {max_requests}") + if time_window <= 0: + raise ValueError(f"time_window must be positive, got {time_window}") + + self.max_requests = max_requests + self.time_window = time_window + self.requests: deque = deque() + self.lock = Lock() + + def wait_if_needed(self) -> None: + """ + Wait if making a request now would exceed the rate limit. + + This method is thread-safe and can be called from multiple threads. + It automatically removes expired request timestamps and calculates + the required wait time if the rate limit would be exceeded. + + The method uses a sliding window approach: it only considers requests + made within the current time window. + """ + with self.lock: + current_time = time.time() + + # Remove requests outside the current time window + while self.requests and self.requests[0] <= current_time - self.time_window: + self.requests.popleft() + + # Check if we need to wait + if len(self.requests) >= self.max_requests: + # Calculate wait time: time until oldest request expires + oldest_request = self.requests[0] + wait_time = self.time_window - (current_time - oldest_request) + + if wait_time > 0: + print( + f"⏳ Rate limit reached ({self.max_requests} requests " + f"per {self.time_window}s). Waiting {wait_time:.2f}s..." + ) + time.sleep(wait_time) + + # Remove the oldest request after waiting + self.requests.popleft() + + # Record this request + self.requests.append(time.time()) + + def get_current_usage(self) -> dict: + """ + Get current rate limit usage statistics. + + Returns: + Dictionary containing: + - current_requests: Number of requests in current window + - max_requests: Maximum allowed requests + - time_window: Time window in seconds + - usage_percentage: Percentage of rate limit used + """ + with self.lock: + current_time = time.time() + + # Remove expired requests + while self.requests and self.requests[0] <= current_time - self.time_window: + self.requests.popleft() + + current_requests = len(self.requests) + usage_percentage = (current_requests / self.max_requests) * 100 + + return { + "current_requests": current_requests, + "max_requests": self.max_requests, + "time_window": self.time_window, + "usage_percentage": round(usage_percentage, 2), + } + + def reset(self) -> None: + """ + Reset the rate limiter by clearing all request history. + + This is useful for testing or when you want to start fresh. 
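        Example (illustrative):
            >>> limiter = RateLimiter(max_requests=5, time_window=60)
            >>> limiter.wait_if_needed()
            >>> limiter.reset()
            >>> limiter.get_current_usage()["current_requests"]
            0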
+ """ + with self.lock: + self.requests.clear() + + def __repr__(self) -> str: + """String representation of the rate limiter.""" + usage = self.get_current_usage() + return ( + f"RateLimiter(" + f"max_requests={self.max_requests}, " + f"time_window={self.time_window}s, " + f"current_usage={usage['current_requests']}/{self.max_requests})" + ) diff --git a/poetry.lock b/poetry.lock index d982981..865d425 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "annotated-types" @@ -6,7 +6,6 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -18,7 +17,6 @@ version = "4.9.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, @@ -32,7 +30,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] trio = ["trio (>=0.26.1)"] [[package]] @@ -41,7 +39,6 @@ version = "25.1.0" description = "The uncompromising code formatter." optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "black-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759e7ec1e050a15f89b770cefbf91ebee8917aac5c20483bc2d80a6c3a04df32"}, {file = "black-25.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e519ecf93120f34243e6b0054db49c00a35f84f195d5bce7e9f5cfc578fc2da"}, @@ -88,7 +85,6 @@ version = "0.1.0a17" description = "The official Python library for GradientAI" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a17-py3-none-any.whl", hash = "sha256:8aafe466f8f2df7bd508fe96079a85589ef927609988e6b64f125bbec2839818"}, {file = "c63a5cfe_b235_4fbe_8bbb_82a9e02a482a_python-0.1.0a17.tar.gz", hash = "sha256:dc7bd8b29a0bafb83166a5ef93873a8b8a601a4746b92c3c918909370f922276"}, @@ -111,7 +107,6 @@ version = "2025.7.14" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.7" -groups = ["main", "test"] files = [ {file = "certifi-2025.7.14-py3-none-any.whl", hash = "sha256:6b31f564a415d79ee77df69d757bb49a5bb53bd9f756cbbe24394ffd6fc1f4b2"}, {file = "certifi-2025.7.14.tar.gz", hash = "sha256:8ea99dbdfaaf2ba2f9bac77b9249ef62ec5218e7c2b2e903378ed5fccf765995"}, @@ -123,7 +118,6 @@ version = "1.17.1" description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -203,7 +197,6 @@ version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" -groups = ["main", "test"] files = [ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, @@ -305,8 +298,6 @@ version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\"" files = [ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, @@ -315,29 +306,12 @@ files = [ [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} -[[package]] -name = "click" -version = "8.2.1" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.10" -groups = ["dev"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" or python_version >= \"3.13\"" -files = [ - {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, - {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - [[package]] name = "codespell" version = "2.4.1" description = "Fix common misspellings in text files" optional = false python-versions = ">=3.8" -groups = ["codespell"] files = [ {file = "codespell-2.4.1-py3-none-any.whl", hash = "sha256:3dadafa67df7e4a3dbf51e0d7315061b80d265f9552ebd699b3dd6834b47e425"}, {file = "codespell-2.4.1.tar.gz", hash = "sha256:299fcdcb09d23e81e35a671bbe746d5ad7e8385972e65dbb833a2eaac33c01e5"}, @@ -346,7 +320,7 @@ files = [ [package.extras] dev = ["Pygments", "build", "chardet", "pre-commit", "pytest", "pytest-cov", "pytest-dependency", "ruff", "tomli", "twine"] hard-encoding-detection = ["chardet"] -toml = ["tomli ; python_version < \"3.11\""] +toml = ["tomli"] types = ["chardet (>=5.1.0)", "mypy", "pytest", "pytest-cov", "pytest-dependency"] [[package]] @@ -355,12 +329,10 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev", "test"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "sys_platform == \"win32\"", dev = "platform_system == \"Windows\"", test = "sys_platform == \"win32\""} [[package]] name = "distro" @@ -368,43 +340,17 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, ] -[[package]] -name = "do-gradientai" -version = "0.1.0a19" -description = "The official Python library for GradientAI" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "do_gradientai-0.1.0a19-py3-none-any.whl", hash = "sha256:fad7825382b6989121ed5fceee33b9823ab7e3c35c69dfda0b761c1b306c4204"}, - {file = "do_gradientai-0.1.0a19.tar.gz", hash = "sha256:6cd5e67568c56d8384b0563d499ab09844425dd6518d3770f9009b07a7d93a2f"}, -] - -[package.dependencies] -anyio = ">=3.5.0,<5" -distro = ">=1.7.0,<2" -httpx = ">=0.23.0,<1" -pydantic = ">=1.9.0,<3" -sniffio = "*" -typing-extensions = ">=4.10,<5" - -[package.extras] -aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"] - [[package]] name = "exceptiongroup" version = "1.3.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" -groups = ["main", "test"] -markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, @@ -422,7 +368,6 @@ version = "3.0.0" description = "The official Python library for the Gradient API" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "gradient-3.0.0-py3-none-any.whl", hash = "sha256:f5b84e52d3e5f535b29869ba7f1ade18e953f2ea7ef6ca4ac3bd5eee64dc274f"}, {file = "gradient-3.0.0.tar.gz", hash = "sha256:e26b5b889e008229ccb94b8351f96e5e0dc36c2a91e0f4b354b60201321ca32e"}, @@ -445,7 +390,6 @@ version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, @@ -457,7 +401,6 @@ version = "1.0.9" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, @@ -479,7 +422,6 @@ version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -492,7 +434,7 @@ httpcore = "==1.*" idna = "*" [package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] +brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -504,7 +446,6 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main", "test"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -519,8 +460,6 @@ version = "8.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" -groups = ["main", "test"] -markers = "python_version == \"3.9\"" files = [ {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, @@ -530,12 +469,12 @@ files = [ zipp = ">=3.20" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [[package]] @@ -544,7 +483,6 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -556,7 +494,6 @@ version = "1.33" description = "Apply JSON-Patches (RFC 6902)" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" -groups = ["main", "test"] files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, @@ -571,7 +508,6 @@ version = "4.1.1" description = "jsonpickle encodes/decodes any Python object to/from JSON" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "jsonpickle-4.1.1-py3-none-any.whl", hash = "sha256:bb141da6057898aa2438ff268362b126826c812a1721e31cf08a6e142910dc91"}, {file = 
"jsonpickle-4.1.1.tar.gz", hash = "sha256:f86e18f13e2b96c1c1eede0b7b90095bbb61d99fedc14813c44dc2f361dbbae1"}, @@ -582,7 +518,7 @@ cov = ["pytest-cov"] dev = ["black", "pyupgrade"] docs = ["furo", "rst.linker (>=1.9)", "sphinx (>=3.5)"] packaging = ["build", "setuptools (>=61.2)", "setuptools_scm[toml] (>=6.0)", "twine"] -testing = ["PyYAML", "atheris (>=2.3.0,<2.4.0) ; python_version < \"3.12\"", "bson", "ecdsa", "feedparser", "gmpy2", "numpy", "pandas", "pymongo", "pytest (>=6.0,!=8.1.*)", "pytest-benchmark", "pytest-benchmark[histogram]", "pytest-checkdocs (>=1.2.3)", "pytest-enabler (>=1.0.1)", "pytest-ruff (>=0.2.1)", "scikit-learn", "scipy (>=1.9.3) ; python_version > \"3.10\"", "scipy ; python_version <= \"3.10\"", "simplejson", "sqlalchemy", "ujson"] +testing = ["PyYAML", "atheris (>=2.3.0,<2.4.0)", "bson", "ecdsa", "feedparser", "gmpy2", "numpy", "pandas", "pymongo", "pytest (>=6.0,!=8.1.*)", "pytest-benchmark", "pytest-benchmark[histogram]", "pytest-checkdocs (>=1.2.3)", "pytest-enabler (>=1.0.1)", "pytest-ruff (>=0.2.1)", "scikit-learn", "scipy", "scipy (>=1.9.3)", "simplejson", "sqlalchemy", "ujson"] [[package]] name = "jsonpointer" @@ -590,7 +526,6 @@ version = "3.0.0" description = "Identify specific nodes in a JSON document (RFC 6901)" optional = false python-versions = ">=3.7" -groups = ["main", "test"] files = [ {file = "jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942"}, {file = "jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef"}, @@ -602,7 +537,6 @@ version = "0.3.69" description = "Building applications with LLMs through composability" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "langchain_core-0.3.69-py3-none-any.whl", hash = "sha256:383e9cb4919f7ef4b24bf8552ef42e4323c064924fea88b28dd5d7ddb740d3b8"}, {file = "langchain_core-0.3.69.tar.gz", hash = "sha256:c132961117cc7f0227a4c58dd3e209674a6dd5b7e74abc61a0df93b0d736e283"}, @@ -623,7 +557,6 @@ version = "0.3.20" description = "Standard tests for LangChain implementations" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "langchain_tests-0.3.20-py3-none-any.whl", hash = "sha256:6cc7ae64eb8dea65360a968840abe8d947c5382b95e065431c9dd061ee1dacd8"}, {file = "langchain_tests-0.3.20.tar.gz", hash = "sha256:b94c05e37d191d4768a1a5064f2ca4053bacd48ff41e10af245ffa6a065ead4d"}, @@ -651,7 +584,6 @@ version = "0.4.8" description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "langsmith-0.4.8-py3-none-any.whl", hash = "sha256:ca2f6024ab9d2cd4d091b2e5b58a5d2cb0c354a0c84fe214145a89ad450abae0"}, {file = "langsmith-0.4.8.tar.gz", hash = "sha256:50eccb744473dd6bd3e0fe024786e2196b1f8598f8defffce7ac31113d6c140f"}, @@ -678,7 +610,6 @@ version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, @@ -703,7 +634,6 @@ version = "0.1.2" description = "Markdown URL utilities" optional = false python-versions = ">=3.7" -groups = ["main", "test"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -715,7 +645,6 @@ version = "6.6.3" description = "multidict implementation" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "multidict-6.6.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a2be5b7b35271f7fff1397204ba6708365e3d773579fe2a30625e16c4b4ce817"}, {file = "multidict-6.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:12f4581d2930840295c461764b9a65732ec01250b46c6b2c510d7ee68872b140"}, @@ -838,7 +767,6 @@ version = "1.17.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.9" -groups = ["typing"] files = [ {file = "mypy-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8e08de6138043108b3b18f09d3f817a4783912e48828ab397ecf183135d84d6"}, {file = "mypy-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce4a17920ec144647d448fc43725b5873548b1aae6c603225626747ededf582d"}, @@ -893,7 +821,6 @@ version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.8" -groups = ["dev", "typing"] files = [ {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, @@ -905,8 +832,6 @@ version = "2.0.2" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" -groups = ["main", "test"] -markers = "python_version == \"3.9\" or platform_python_implementation == \"PyPy\" and python_version < \"3.13\"" files = [ {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, @@ -955,80 +880,12 @@ files = [ {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"}, ] -[[package]] -name = "numpy" -version = "2.2.6" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.10" -groups = ["main", "test"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\" and python_version < \"3.13\"" -files = [ - {file = "numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb"}, - {file = "numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90"}, - {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = 
"sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163"}, - {file = "numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf"}, - {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83"}, - {file = "numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915"}, - {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680"}, - {file = "numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289"}, - {file = "numpy-2.2.6-cp310-cp310-win32.whl", hash = "sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d"}, - {file = "numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42"}, - {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491"}, - {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a"}, - {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf"}, - {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1"}, - {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab"}, - {file = "numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47"}, - {file = "numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3"}, - {file = "numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282"}, - {file = "numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87"}, - {file = "numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249"}, - {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49"}, - {file = "numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de"}, - {file = "numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4"}, - {file = "numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d"}, - {file = "numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566"}, - {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f"}, - {file = "numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f"}, - {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868"}, - {file = "numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d"}, - {file = "numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd"}, - {file = "numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40"}, - {file = "numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8"}, - {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f"}, - {file = "numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa"}, - {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571"}, - {file = "numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1"}, - {file = "numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff"}, - {file = "numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543"}, - {file = "numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00"}, - {file = "numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd"}, -] - [[package]] name = "numpy" version = "2.3.1" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.11" -groups = ["main", "test"] -markers = "python_version >= \"3.13\"" files = [ {file = "numpy-2.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6ea9e48336a402551f52cd8f593343699003d2353daa4b72ce8d34f66b722070"}, {file = "numpy-2.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ccb7336eaf0e77c1635b232c141846493a588ec9ea777a7c24d7166bb8533ae"}, @@ -1089,8 +946,6 @@ version = "3.11.0" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.9" -groups = ["main", "test"] -markers = "platform_python_implementation != \"PyPy\"" files = [ {file = "orjson-3.11.0-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b8913baba9751f7400f8fa4ec18a8b618ff01177490842e39e47b66c1b04bc79"}, {file = "orjson-3.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d4d86910554de5c9c87bc560b3bdd315cc3988adbdc2acf5dda3797079407ed"}, @@ -1172,7 +1027,6 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["main", "dev", "test"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -1184,7 +1038,6 @@ version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." optional = false python-versions = ">=3.8" -groups = ["dev", "typing"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, @@ -1196,7 +1049,6 @@ version = "4.3.8" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.9" -groups = ["dev"] files = [ {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, @@ -1213,7 +1065,6 @@ version = "1.6.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -1229,7 +1080,6 @@ version = "0.3.2" description = "Accelerated property cache" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:22d9962a358aedbb7a2e36187ff273adeaab9743373a272976d2e348d08c7770"}, {file = "propcache-0.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0d0fda578d1dc3f77b6b5a5dce3b9ad69a8250a891760a548df850a5e8da87f3"}, @@ -1337,7 +1187,6 @@ version = "9.0.0" description = "Get CPU info with pure Python" optional = false python-versions = "*" -groups = ["main", "test"] files = [ {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, @@ -1349,7 +1198,6 @@ version = "2.22" description = "C parser in Python" optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, @@ -1361,7 +1209,6 @@ version = "2.11.7" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, @@ -1375,7 +1222,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] +timezone = ["tzdata"] [[package]] name = "pydantic-core" @@ -1383,7 +1230,6 @@ version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, @@ -1495,7 +1341,6 @@ version = "2.19.2" description = "Pygments is a syntax highlighting package written in Python." 
optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, @@ -1510,7 +1355,6 @@ version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" -groups = ["main", "test"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -1533,7 +1377,6 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -1552,7 +1395,6 @@ version = "5.0.1" description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "pytest-benchmark-5.0.1.tar.gz", hash = "sha256:8138178618c85586ce056c70cc5e92f4283c2e6198e8422c2c825aeb3ace6afd"}, {file = "pytest_benchmark-5.0.1-py3-none-any.whl", hash = "sha256:d75fec4cbf0d4fd91e020f425ce2d845e9c127c21bae35e77c84db8ed84bfaa6"}, @@ -1573,7 +1415,6 @@ version = "4.0.0" description = "Pytest plugin to create CodSpeed benchmarks" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "pytest_codspeed-4.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2517731b20a6aa9fe61d04822b802e1637ee67fd865189485b384a9d5897117f"}, {file = "pytest_codspeed-4.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1e5076bb5119d4f8248822b5cd6b768f70a18c7e1a7fbcd96a99cd4a6430096e"}, @@ -1606,7 +1447,6 @@ version = "0.13.4" description = "A pytest plugin powered by VCR.py to record and replay HTTP traffic" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "pytest_recording-0.13.4-py3-none-any.whl", hash = "sha256:ad49a434b51b1c4f78e85b1e6b74fdcc2a0a581ca16e52c798c6ace971f7f439"}, {file = "pytest_recording-0.13.4.tar.gz", hash = "sha256:568d64b2a85992eec4ae0a419c855d5fd96782c5fb016784d86f18053792768c"}, @@ -1626,7 +1466,6 @@ version = "0.7.0" description = "Pytest Plugin to disable socket calls during tests" optional = false python-versions = ">=3.8,<4.0" -groups = ["main", "test"] files = [ {file = "pytest_socket-0.7.0-py3-none-any.whl", hash = "sha256:7e0f4642177d55d317bbd58fc68c6bd9048d6eadb2d46a89307fa9221336ce45"}, {file = "pytest_socket-0.7.0.tar.gz", hash = "sha256:71ab048cbbcb085c15a4423b73b619a8b35d6a307f46f78ea46be51b1b7e11b3"}, @@ -1641,7 +1480,6 @@ version = "0.3.5" description = "Automatically rerun your tests on file modifications" optional = false python-versions = ">=3.7.0,<4.0.0" -groups = ["test"] files = [ {file = "pytest_watcher-0.3.5-py3-none-any.whl", hash = "sha256:af00ca52c7be22dc34c0fd3d7ffef99057207a73b05dc5161fe3b2fe91f58130"}, {file = "pytest_watcher-0.3.5.tar.gz", hash = 
"sha256:8896152460ba2b1a8200c12117c6611008ec96c8b2d811f0a05ab8a82b043ff8"}, @@ -1657,7 +1495,6 @@ version = "1.17.0" description = "digitalocean.com API to manage Droplets and Images" optional = false python-versions = "*" -groups = ["main"] files = [ {file = "python-digitalocean-1.17.0.tar.gz", hash = "sha256:107854fde1aafa21774e8053cf253b04173613c94531f75d5a039ad770562b24"}, {file = "python_digitalocean-1.17.0-py3-none-any.whl", hash = "sha256:0032168e022e85fca314eb3f8dfaabf82087f2ed40839eb28f1eeeeca5afb1fa"}, @@ -1673,7 +1510,6 @@ version = "1.1.1" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.9" -groups = ["main"] files = [ {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"}, {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"}, @@ -1688,7 +1524,6 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1751,7 +1586,6 @@ version = "2.32.4" description = "Python HTTP for Humans." optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, @@ -1773,7 +1607,6 @@ version = "1.0.0" description = "A utility belt for advanced users of python-requests" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["main", "test"] files = [ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"}, {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"}, @@ -1788,7 +1621,6 @@ version = "14.0.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false python-versions = ">=3.8.0" -groups = ["main", "test"] files = [ {file = "rich-14.0.0-py3-none-any.whl", hash = "sha256:1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0"}, {file = "rich-14.0.0.tar.gz", hash = "sha256:82f1bc23a6a21ebca4ae0c45af9bdbc492ed20231dcb63f297d6d1021a9d5725"}, @@ -1808,7 +1640,6 @@ version = "0.12.4" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" -groups = ["dev"] files = [ {file = "ruff-0.12.4-py3-none-linux_armv6l.whl", hash = "sha256:cb0d261dac457ab939aeb247e804125a5d521b21adf27e721895b0d3f83a0d0a"}, {file = "ruff-0.12.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:55c0f4ca9769408d9b9bac530c30d3e66490bd2beb2d3dae3e4128a1f05c7442"}, @@ -1836,7 +1667,6 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" -groups = ["main", "test"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -1848,7 +1678,6 @@ version = "4.9.1" description = "Pytest Snapshot Test Utility" optional = false python-versions = ">=3.8.1" -groups = ["main", "test"] files = [ {file = "syrupy-4.9.1-py3-none-any.whl", hash = "sha256:b94cc12ed0e5e75b448255430af642516842a2374a46936dd2650cfb6dd20eda"}, {file = "syrupy-4.9.1.tar.gz", hash = "sha256:b7d0fcadad80a7d2f6c4c71917918e8ebe2483e8c703dfc8d49cdbb01081f9a4"}, @@ -1863,7 +1692,6 @@ version = "9.1.2" description = "Retry code until it succeeds" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138"}, {file = "tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb"}, @@ -1879,8 +1707,6 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" -groups = ["main", "dev", "test", "typing"] -markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -1922,12 +1748,10 @@ version = "4.14.1" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" -groups = ["main", "dev", "test", "typing"] files = [ {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, ] -markers = {dev = "python_version < \"3.13\""} [[package]] name = "typing-inspection" @@ -1935,7 +1759,6 @@ version = "0.4.1" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, @@ -1950,16 +1773,14 @@ version = "1.26.20" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" -groups = ["main", "test"] -markers = "platform_python_implementation == \"PyPy\" or python_version == \"3.9\"" files = [ {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, ] [package.extras] -brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""] -secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] @@ -1968,15 +1789,13 @@ version = "2.5.0" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.9" -groups = ["main", "test"] -markers = "platform_python_implementation != \"PyPy\" and python_version >= \"3.10\"" files = [ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, ] [package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -1987,7 +1806,6 @@ version = "7.0.0" description = "Automatically mock your HTTP interactions to simplify and speed up testing" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "vcrpy-7.0.0-py2.py3-none-any.whl", hash = "sha256:55791e26c18daa363435054d8b35bd41a4ac441b6676167635d1b37a71dbe124"}, {file = "vcrpy-7.0.0.tar.gz", hash = "sha256:176391ad0425edde1680c5b20738ea3dc7fb942520a48d2993448050986b3a50"}, @@ -2011,7 +1829,6 @@ version = "6.0.0" description = "Filesystem events monitoring" optional = false python-versions = ">=3.9" -groups = ["test"] files = [ {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, @@ -2054,7 +1871,6 @@ version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." 
optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, @@ -2143,7 +1959,6 @@ version = "1.20.1" description = "Yet another URL library" optional = false python-versions = ">=3.9" -groups = ["main", "test"] files = [ {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6032e6da6abd41e4acda34d75a816012717000fa6839f37124a47fcefc49bec4"}, {file = "yarl-1.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c7b34d804b8cf9b214f05015c4fee2ebe7ed05cf581e7192c06555c71f4446a"}, @@ -2262,15 +2077,13 @@ version = "3.23.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" -groups = ["main", "test"] -markers = "python_version == \"3.9\"" files = [ {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] @@ -2283,7 +2096,6 @@ version = "0.23.0" description = "Zstandard bindings for Python" optional = false python-versions = ">=3.8" -groups = ["main", "test"] files = [ {file = "zstandard-0.23.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bf0a05b6059c0528477fba9054d09179beb63744355cab9f38059548fedd46a9"}, {file = "zstandard-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fc9ca1c9718cb3b06634c7c8dec57d24e9438b2aa9a0f02b8bb36bf478538880"}, @@ -2391,6 +2203,6 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [metadata] -lock-version = "2.1" +lock-version = "2.0" python-versions = ">=3.9,<4.0" -content-hash = "c53016c9b1df0b3874ea7f62ae3db9767acce4446e17704d13c42977a8e6a0c4" +content-hash = "19e4029f8f94787d9c0d1605d7eadd3f2b27ca327dac7ba96e7c085de2058af5" diff --git a/pyproject.toml b/pyproject.toml index 9520743..b9bcfdd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,6 +55,7 @@ optional = true [tool.poetry.group.dev.dependencies] black = "^25.1.0" ruff = "^0.12.4" +pytest = ">=7.4.3,<8.0.0" [tool.poetry.group.test.dependencies] pytest = "^7.4.3" diff --git a/tests/integration_tests/test_chat_model_aliases.py b/tests/integration_tests/test_chat_model_aliases.py index 1d70bed..1233889 100644 --- a/tests/integration_tests/test_chat_model_aliases.py +++ b/tests/integration_tests/test_chat_model_aliases.py @@ -1,8 +1,10 @@ import os + import pytest +from dotenv import load_dotenv from langchain_core.messages import HumanMessage + from langchain_gradient.chat_models import ChatGradient -from dotenv import load_dotenv load_dotenv() @@ -22,9 +24,10 @@ def test_model_alias(): assert result.content assert isinstance(result.content, str) + # def test_stop_sequences_alias(): # llm = ChatGradient(model=MODEL, api_key=API_KEY, stop_sequences=["cat", "cat,", "cat.", "Cat", "Cat,", "Cat."]) # prompt = 
[HumanMessage(content="Say: dog, cat, mouse.")] # result = llm.invoke(prompt) # assert result.content -# assert isinstance(result.content, str) \ No newline at end of file +# assert isinstance(result.content, str) diff --git a/tests/integration_tests/test_chat_model_param_support.py b/tests/integration_tests/test_chat_model_param_support.py index dbed6f9..7467c85 100644 --- a/tests/integration_tests/test_chat_model_param_support.py +++ b/tests/integration_tests/test_chat_model_param_support.py @@ -1,8 +1,10 @@ import os + import pytest +from dotenv import load_dotenv from langchain_core.messages import HumanMessage + from langchain_gradient.chat_models import ChatGradient -from dotenv import load_dotenv load_dotenv() @@ -13,6 +15,7 @@ reason="No Gradient API key set", ) + def test_openai_o3_mini_max_completion_tokens(): llm = ChatGradient(model="openai-o3-mini", api_key=API_KEY, max_tokens=256) prompt = [HumanMessage(content="Say hello to the world!")] @@ -20,16 +23,22 @@ def test_openai_o3_mini_max_completion_tokens(): assert result.content assert isinstance(result.content, str) + def test_openai_gpt_4o_max_tokens(): - llm = ChatGradient(model="openai-gpt-4o", api_key=API_KEY, max_completion_tokens=256) + llm = ChatGradient( + model="openai-gpt-4o", api_key=API_KEY, max_completion_tokens=256 + ) prompt = [HumanMessage(content="Say hello to the world!")] result = llm.invoke(prompt) assert result.content assert isinstance(result.content, str) + def test_openai_gpt_4o_mini_max_tokens(): - llm = ChatGradient(model="openai-gpt-4o-mini", api_key=API_KEY, max_completion_tokens=256) + llm = ChatGradient( + model="openai-gpt-4o-mini", api_key=API_KEY, max_completion_tokens=256 + ) prompt = [HumanMessage(content="Say hello to the world!")] result = llm.invoke(prompt) assert result.content - assert isinstance(result.content, str) \ No newline at end of file + assert isinstance(result.content, str) diff --git a/tests/integration_tests/test_chat_model_parameters.py b/tests/integration_tests/test_chat_model_parameters.py index 364b13a..868ece4 100644 --- a/tests/integration_tests/test_chat_model_parameters.py +++ b/tests/integration_tests/test_chat_model_parameters.py @@ -1,8 +1,10 @@ import os + import pytest +from dotenv import load_dotenv from langchain_core.messages import HumanMessage + from langchain_gradient.chat_models import ChatGradient -from dotenv import load_dotenv load_dotenv() @@ -18,9 +20,11 @@ def _basic_llm(**kwargs): return ChatGradient(model=MODEL, api_key=API_KEY, **kwargs) + def _basic_prompt(): return [HumanMessage(content="Say hello!")] + def test_temperature_param(): llm_cold = _basic_llm(temperature=0) llm_hot = _basic_llm(temperature=1) @@ -33,6 +37,7 @@ def test_temperature_param(): outputs = set(llm_hot.invoke(prompt).content for _ in range(3)) assert len(outputs) > 1 or result_hot.content != result_cold.content + def test_max_tokens_param(): llm = _basic_llm(max_tokens=3) prompt = [HumanMessage(content="Repeat the word 'hello' 10 times.")] @@ -41,6 +46,7 @@ def test_max_tokens_param(): # Should be very short due to max_tokens assert len(result.content.split()) <= 5 + # def test_stop_param(): # llm = _basic_llm(stop=["cat"]) # prompt = [HumanMessage(content="Say: dog, cat, mouse.")] @@ -64,6 +70,7 @@ def test_max_tokens_param(): # # With high frequency penalty, expect less repetition # assert result.content.lower().count("hi") <= 2 + def test_top_p_param(): llm = _basic_llm(top_p=0.1) prompt = [HumanMessage(content="Tell me a joke.")] @@ -72,6 +79,7 @@ def 
test_top_p_param(): # Not easy to assert, but should return a valid string assert isinstance(result.content, str) + # TODO: Should be tested with once its fixed in Gradient SDK # def test_n_param(): # llm = _basic_llm(n=2) @@ -80,6 +88,7 @@ def test_top_p_param(): # # Should return a list of generations or a message with n completions # assert hasattr(result, "message") or hasattr(result, "generations") + def test_timeout_param(): llm = _basic_llm(timeout=0.1) prompt = _basic_prompt() @@ -88,8 +97,9 @@ def test_timeout_param(): except Exception as e: assert "timeout" in str(e).lower() or isinstance(e, Exception) + def test_stream_options_include_usage(): llm = _basic_llm(stream_options={"include_usage": True}) prompt = _basic_prompt() result = llm.invoke(prompt) - assert hasattr(result, "usage_metadata") \ No newline at end of file + assert hasattr(result, "usage_metadata") diff --git a/tests/integration_tests/test_chat_model_streaming.py b/tests/integration_tests/test_chat_model_streaming.py index e9faab7..8341e3e 100644 --- a/tests/integration_tests/test_chat_model_streaming.py +++ b/tests/integration_tests/test_chat_model_streaming.py @@ -1,8 +1,10 @@ import os + import pytest +from dotenv import load_dotenv from langchain_core.messages import HumanMessage + from langchain_gradient.chat_models import ChatGradient -from dotenv import load_dotenv load_dotenv() @@ -14,6 +16,7 @@ reason="No Gradient API key set", ) + def test_stream_first_and_last_chunk(): llm = ChatGradient(model=MODEL, api_key=API_KEY, streaming=True) prompt = [HumanMessage(content="Display three cities in the world")] @@ -24,4 +27,3 @@ def test_stream_first_and_last_chunk(): assert chunk.content is not None found = True assert found, "No streamed completions were received." - \ No newline at end of file diff --git a/tests/integration_tests/test_chat_models.py b/tests/integration_tests/test_chat_models.py index e36fccd..0b5f4ff 100644 --- a/tests/integration_tests/test_chat_models.py +++ b/tests/integration_tests/test_chat_models.py @@ -4,8 +4,7 @@ import pytest from dotenv import load_dotenv -from langchain_core.messages import AIMessageChunk, HumanMessage -from langchain_core.outputs import ChatGenerationChunk +from langchain_core.messages import HumanMessage from langchain_gradient.chat_models import ChatGradient @@ -167,4 +166,3 @@ def test_unicode_prompt() -> None: llm = ChatGradient(api_key=os.environ.get("DIGITALOCEAN_INFERENCE_KEY")) result = llm.invoke([HumanMessage(content="你好,世界! 
🌍")]) assert result.content is not None - diff --git a/tests/integration_tests/test_chat_models_all_models.py b/tests/integration_tests/test_chat_models_all_models.py index c812d1e..cb0d565 100644 --- a/tests/integration_tests/test_chat_models_all_models.py +++ b/tests/integration_tests/test_chat_models_all_models.py @@ -1,8 +1,10 @@ import os + import pytest +from dotenv import load_dotenv from langchain_core.messages import HumanMessage + from langchain_gradient.chat_models import ChatGradient -from dotenv import load_dotenv load_dotenv() @@ -13,7 +15,7 @@ "openai-gpt-4o", "llama3-8b-instruct", "deepseek-r1-distill-llama-70b", - "llama3.3-70b-instruct" + "llama3.3-70b-instruct", ] # "llama3-70b-instruct", @@ -22,6 +24,7 @@ # "anthropic-claude-3.5-haiku", # "anthropic-claude-3-opus", + @pytest.mark.skipif( not os.environ.get("DIGITALOCEAN_INFERENCE_KEY"), reason="No Gradient API key set", @@ -37,4 +40,4 @@ def test_chatgradient_all_models(model_name): result = llm.invoke(messages) assert result.content assert isinstance(result.content, str) - assert hasattr(result, "usage_metadata") or hasattr(result, "response_metadata") \ No newline at end of file + assert hasattr(result, "usage_metadata") or hasattr(result, "response_metadata") diff --git a/tests/integration_tests/test_rate_limiting.py b/tests/integration_tests/test_rate_limiting.py new file mode 100644 index 0000000..f160754 --- /dev/null +++ b/tests/integration_tests/test_rate_limiting.py @@ -0,0 +1,269 @@ +"""Integration tests for RateLimiter class.""" + +import time +from threading import Thread + +import pytest + +from langchain_gradient.rate_limiter import RateLimiter + + +class TestRateLimiterInitialization: + """Test rate limiter initialization.""" + + def test_default_initialization(self): + """Test rate limiter with default parameters.""" + limiter = RateLimiter() + assert limiter.max_requests == 60 + assert limiter.time_window == 60.0 + assert len(limiter.requests) == 0 + + def test_custom_initialization(self): + """Test rate limiter with custom parameters.""" + limiter = RateLimiter(max_requests=10, time_window=5.0) + assert limiter.max_requests == 10 + assert limiter.time_window == 5.0 + + def test_invalid_max_requests(self): + """Test that invalid max_requests raises ValueError.""" + with pytest.raises(ValueError, match="max_requests must be positive"): + RateLimiter(max_requests=0) + + with pytest.raises(ValueError, match="max_requests must be positive"): + RateLimiter(max_requests=-1) + + def test_invalid_time_window(self): + """Test that invalid time_window raises ValueError.""" + with pytest.raises(ValueError, match="time_window must be positive"): + RateLimiter(time_window=0) + + with pytest.raises(ValueError, match="time_window must be positive"): + RateLimiter(time_window=-1) + + +class TestRateLimiterBasicFunctionality: + """Test basic rate limiter functionality.""" + + def test_single_request(self): + """Test that a single request doesn't wait.""" + limiter = RateLimiter(max_requests=5, time_window=1.0) + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should not wait + assert elapsed < 0.1 + assert len(limiter.requests) == 1 + + def test_requests_under_limit(self): + """Test that requests under the limit don't wait.""" + limiter = RateLimiter(max_requests=5, time_window=1.0) + + start_time = time.time() + for _ in range(4): + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should not wait significantly + assert elapsed < 0.2 + assert 
len(limiter.requests) == 4 + + def test_requests_at_limit_wait(self): + """Test that exceeding the limit causes waiting.""" + limiter = RateLimiter(max_requests=3, time_window=1.0) + + # Make 3 requests (at limit) + for _ in range(3): + limiter.wait_if_needed() + + # 4th request should wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should wait approximately 1 second + assert elapsed >= 0.9 # Allow small margin + assert len(limiter.requests) == 3 # One removed after wait + + +class TestRateLimiterSlidingWindow: + """Test sliding window behavior.""" + + def test_old_requests_removed(self): + """Test that old requests are removed from tracking.""" + limiter = RateLimiter(max_requests=5, time_window=0.5) + + # Make 3 requests + for _ in range(3): + limiter.wait_if_needed() + + # Wait for window to expire + time.sleep(0.6) + + # Make another request - should not wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + assert elapsed < 0.1 # Should not wait + assert len(limiter.requests) == 1 # Old requests removed + + def test_partial_window_expiry(self): + """Test behavior when some requests have expired.""" + limiter = RateLimiter(max_requests=3, time_window=1.0) + + # Make 2 requests + for _ in range(2): + limiter.wait_if_needed() + + # Wait for half the window + time.sleep(0.5) + + # Make 2 more requests - total 4, but 2 are older + for _ in range(2): + limiter.wait_if_needed() + + # Should have 4 recent requests + assert len(limiter.requests) == 4 + + +class TestRateLimiterThreadSafety: + """Test thread safety of rate limiter.""" + + def test_concurrent_requests(self): + """Test that concurrent requests are handled safely.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + results = [] + + def make_request(): + limiter.wait_if_needed() + results.append(1) + + # Create 15 threads + threads = [Thread(target=make_request) for _ in range(15)] + + # Start all threads + start_time = time.time() + for thread in threads: + thread.start() + + # Wait for all threads to complete + for thread in threads: + thread.join() + + elapsed = time.time() - start_time + + # All 15 requests should complete + assert len(results) == 15 + + # Should have waited at least once (15 > 10 limit) + assert elapsed >= 0.9 + + +class TestRateLimiterUtilityMethods: + """Test utility methods.""" + + def test_get_current_usage_empty(self): + """Test usage stats with no requests.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + usage = limiter.get_current_usage() + + assert usage["current_requests"] == 0 + assert usage["max_requests"] == 10 + assert usage["time_window"] == 1.0 + assert usage["usage_percentage"] == 0.0 + + def test_get_current_usage_partial(self): + """Test usage stats with some requests.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + + for _ in range(5): + limiter.wait_if_needed() + + usage = limiter.get_current_usage() + + assert usage["current_requests"] == 5 + assert usage["usage_percentage"] == 50.0 + + def test_get_current_usage_removes_old(self): + """Test that usage stats remove old requests.""" + limiter = RateLimiter(max_requests=10, time_window=0.5) + + for _ in range(5): + limiter.wait_if_needed() + + time.sleep(0.6) + + usage = limiter.get_current_usage() + assert usage["current_requests"] == 0 + + def test_reset(self): + """Test reset functionality.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + + for _ in range(5): + limiter.wait_if_needed() + 
+ assert len(limiter.requests) == 5 + + limiter.reset() + + assert len(limiter.requests) == 0 + + def test_repr(self): + """Test string representation.""" + limiter = RateLimiter(max_requests=10, time_window=60.0) + + for _ in range(3): + limiter.wait_if_needed() + + repr_str = repr(limiter) + + assert "RateLimiter" in repr_str + assert "max_requests=10" in repr_str + assert "time_window=60.0s" in repr_str + assert "current_usage=3/10" in repr_str + + +class TestRateLimiterEdgeCases: + """Test edge cases and boundary conditions.""" + + def test_very_high_limit(self): + """Test with very high request limit.""" + limiter = RateLimiter(max_requests=10000, time_window=1.0) + + start_time = time.time() + for _ in range(100): + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should not wait + assert elapsed < 0.5 + + def test_very_short_window(self): + """Test with very short time window.""" + limiter = RateLimiter(max_requests=2, time_window=0.1) + + # Make 2 requests + for _ in range(2): + limiter.wait_if_needed() + + # 3rd should wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + assert elapsed >= 0.05 # Should wait some time + + def test_single_request_limit(self): + """Test with limit of 1 request.""" + limiter = RateLimiter(max_requests=1, time_window=0.5) + + limiter.wait_if_needed() + + # 2nd request should wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + assert elapsed >= 0.4 diff --git a/tests/unit_tests/test_chat_models_with_rate_limiting.py b/tests/unit_tests/test_chat_models_with_rate_limiting.py new file mode 100644 index 0000000..aaeff3e --- /dev/null +++ b/tests/unit_tests/test_chat_models_with_rate_limiting.py @@ -0,0 +1,314 @@ +"""Unit tests for ChatGradient with rate limiting (no API calls needed).""" + +from unittest.mock import Mock, patch + +import pytest +from langchain_core.messages import HumanMessage + +from langchain_gradient.chat_models import ChatGradient + + +class TestChatGradientRateLimiting: + """Test rate limiting integration in ChatGradient.""" + + def test_rate_limiting_disabled_by_default(self): + """Test that rate limiting is disabled by default.""" + llm = ChatGradient(api_key="fake-key") + + assert llm.enable_rate_limiting is False + assert llm._rate_limiter is None + + def test_rate_limiting_can_be_enabled(self): + """Test that rate limiting can be enabled.""" + llm = ChatGradient( + api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=60 + ) + + assert llm.enable_rate_limiting is True + assert llm._rate_limiter is not None + assert llm._rate_limiter.max_requests == 60 + assert llm._rate_limiter.time_window == 60.0 + + def test_custom_rate_limit(self): + """Test that custom rate limits are applied.""" + llm = ChatGradient( + api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=30 + ) + + assert llm._rate_limiter.max_requests == 30 + + def test_invalid_rate_limit_raises_error(self): + """Test that invalid rate limits raise ValueError.""" + with pytest.raises( + ValueError, match="max_requests_per_minute must be positive" + ): + ChatGradient( + api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=0 + ) + + with pytest.raises( + ValueError, match="max_requests_per_minute must be positive" + ): + ChatGradient( + api_key="fake-key", + enable_rate_limiting=True, + max_requests_per_minute=-10, + ) + + @patch("langchain_gradient.chat_models.Gradient") + def 
test_rate_limiter_called_during_generate(self, mock_gradient): + """Test that rate limiter is called during _generate.""" + # Setup mock + mock_client = Mock() + mock_gradient.return_value = mock_client + + mock_completion = Mock() + mock_completion.choices = [Mock()] + mock_completion.choices[0].message.content = "Test response" + mock_completion.choices[0].finish_reason = "stop" + mock_completion.usage = Mock( + prompt_tokens=10, completion_tokens=20, total_tokens=30 + ) + mock_completion.model = "llama3.3-70b-instruct" + mock_completion.id = "test-id" + + mock_client.chat.completions.create.return_value = mock_completion + + # Create LLM with rate limiting + llm = ChatGradient( + api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=5 + ) + + # Spy on rate limiter + original_wait = llm._rate_limiter.wait_if_needed + wait_called = [] + + def mock_wait(): + wait_called.append(True) + original_wait() + + llm._rate_limiter.wait_if_needed = mock_wait + + # Make request + messages = [HumanMessage(content="Test")] + result = llm._generate(messages) + + # Verify rate limiter was called + assert len(wait_called) == 1 + assert result is not None + + @patch("langchain_gradient.chat_models.Gradient") + def test_rate_limiter_called_during_stream(self, mock_gradient): + """Test that rate limiter is called during _stream.""" + # Setup mock + mock_client = Mock() + mock_gradient.return_value = mock_client + + # Mock streaming response + mock_chunk1 = Mock() + mock_chunk1.choices = [Mock()] + mock_chunk1.choices[0].delta.content = "Hello" + + mock_chunk2 = Mock() + mock_chunk2.choices = [Mock()] + mock_chunk2.choices[0].delta.content = " World" + + mock_client.chat.completions.create.return_value = iter( + [mock_chunk1, mock_chunk2] + ) + + # Create LLM with rate limiting + llm = ChatGradient( + api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=5 + ) + + # Spy on rate limiter + wait_called = [] + original_wait = llm._rate_limiter.wait_if_needed + + def mock_wait(): + wait_called.append(True) + original_wait() + + llm._rate_limiter.wait_if_needed = mock_wait + + # Make streaming request + messages = [HumanMessage(content="Test")] + chunks = list(llm._stream(messages)) + + # Verify rate limiter was called + assert len(wait_called) == 1 + assert len(chunks) == 2 + + @patch("langchain_gradient.chat_models.Gradient") + def test_multiple_requests_increment_usage(self, mock_gradient): + """Test that multiple requests properly track usage.""" + # Setup mock + mock_client = Mock() + mock_gradient.return_value = mock_client + + mock_completion = Mock() + mock_completion.choices = [Mock()] + mock_completion.choices[0].message.content = "Response" + mock_completion.choices[0].finish_reason = "stop" + mock_completion.usage = Mock( + prompt_tokens=10, completion_tokens=20, total_tokens=30 + ) + mock_completion.model = "test-model" + mock_completion.id = "test-id" + + mock_client.chat.completions.create.return_value = mock_completion + + # Create LLM + llm = ChatGradient( + api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=10 + ) + + # Make 3 requests + messages = [HumanMessage(content="Test")] + for _ in range(3): + llm._generate(messages) + + # Check usage + usage = llm._rate_limiter.get_current_usage() + assert usage["current_requests"] == 3 + assert usage["max_requests"] == 10 + assert usage["usage_percentage"] == 30.0 + + def test_rate_limiter_attributes_accessible(self): + """Test that rate limiter attributes are accessible.""" + llm = ChatGradient( + 
api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=50 + ) + + # Should be able to access rate limiter + assert hasattr(llm, "_rate_limiter") + assert llm._rate_limiter is not None + + # Should be able to get usage + usage = llm._rate_limiter.get_current_usage() + assert isinstance(usage, dict) + assert "current_requests" in usage + assert "max_requests" in usage + assert "usage_percentage" in usage + + # Should be able to reset + llm._rate_limiter.reset() + usage_after = llm._rate_limiter.get_current_usage() + assert usage_after["current_requests"] == 0 + + def test_rate_limiting_params_in_model_fields(self): + """Test that rate limiting params are in ALLOWED_MODEL_FIELDS.""" + from langchain_gradient.constants import ALLOWED_MODEL_FIELDS + + assert "enable_rate_limiting" in ALLOWED_MODEL_FIELDS + assert "max_requests_per_minute" in ALLOWED_MODEL_FIELDS + + @patch("langchain_gradient.chat_models.Gradient") + def test_rate_limiting_with_no_api_key_raises_error(self, mock_gradient): + """Test that missing API key raises error even with rate limiting.""" + llm = ChatGradient( + api_key=None, # No API key + enable_rate_limiting=True, + ) + + with pytest.raises(ValueError, match="Gradient model access key not provided"): + llm._generate([HumanMessage(content="Test")]) + + def test_repr_of_rate_limiter(self): + """Test string representation of rate limiter.""" + llm = ChatGradient( + api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=60 + ) + + repr_str = repr(llm._rate_limiter) + + assert "RateLimiter" in repr_str + assert "max_requests=60" in repr_str + assert "time_window=60.0s" in repr_str + + +class TestRateLimiterIntegrationScenarios: + """Test realistic usage scenarios without API calls.""" + + @patch("langchain_gradient.chat_models.Gradient") + def test_burst_requests_scenario(self, mock_gradient): + """Simulate burst of requests scenario.""" + mock_client = Mock() + mock_gradient.return_value = mock_client + + mock_completion = Mock() + mock_completion.choices = [Mock()] + mock_completion.choices[0].message.content = "Response" + mock_completion.choices[0].finish_reason = "stop" + mock_completion.usage = Mock( + prompt_tokens=10, completion_tokens=20, total_tokens=30 + ) + mock_completion.model = "test-model" + mock_completion.id = "test-id" + mock_client.chat.completions.create.return_value = mock_completion + + llm = ChatGradient( + api_key="fake-key", + enable_rate_limiting=True, + max_requests_per_minute=5, # Low limit + ) + + # Simulate 10 requests (exceeds limit) + messages = [HumanMessage(content="Test")] + + # First 5 should be immediate + for i in range(5): + result = llm._generate(messages) + assert result is not None + + # Check we're at limit + usage = llm._rate_limiter.get_current_usage() + assert usage["usage_percentage"] == 100.0 + + @patch("langchain_gradient.chat_models.Gradient") + def test_concurrent_requests_scenario(self, mock_gradient): + """Simulate concurrent requests (thread safety).""" + from threading import Thread + + mock_client = Mock() + mock_gradient.return_value = mock_client + + mock_completion = Mock() + mock_completion.choices = [Mock()] + mock_completion.choices[0].message.content = "Response" + mock_completion.choices[0].finish_reason = "stop" + mock_completion.usage = Mock( + prompt_tokens=10, completion_tokens=20, total_tokens=30 + ) + mock_completion.model = "test-model" + mock_completion.id = "test-id" + mock_client.chat.completions.create.return_value = mock_completion + + llm = ChatGradient( + 
api_key="fake-key", enable_rate_limiting=True, max_requests_per_minute=20 + ) + + results = [] + + def make_request(): + try: + result = llm._generate([HumanMessage(content="Test")]) + results.append("success") + except Exception as e: + results.append(f"error: {e}") + + # Create 10 threads + threads = [Thread(target=make_request) for _ in range(10)] + + # Start all + for thread in threads: + thread.start() + + # Wait for all + for thread in threads: + thread.join() + + # All should succeed + assert len(results) == 10 + assert all(r == "success" for r in results) diff --git a/tests/unit_tests/test_rate_limiter.py b/tests/unit_tests/test_rate_limiter.py new file mode 100644 index 0000000..1cb1451 --- /dev/null +++ b/tests/unit_tests/test_rate_limiter.py @@ -0,0 +1,269 @@ +"""Unit tests for RateLimiter class.""" + +import time +from threading import Thread + +import pytest + +from langchain_gradient.rate_limiter import RateLimiter + + +class TestRateLimiterInitialization: + """Test rate limiter initialization.""" + + def test_default_initialization(self): + """Test rate limiter with default parameters.""" + limiter = RateLimiter() + assert limiter.max_requests == 60 + assert limiter.time_window == 60.0 + assert len(limiter.requests) == 0 + + def test_custom_initialization(self): + """Test rate limiter with custom parameters.""" + limiter = RateLimiter(max_requests=10, time_window=5.0) + assert limiter.max_requests == 10 + assert limiter.time_window == 5.0 + + def test_invalid_max_requests(self): + """Test that invalid max_requests raises ValueError.""" + with pytest.raises(ValueError, match="max_requests must be positive"): + RateLimiter(max_requests=0) + + with pytest.raises(ValueError, match="max_requests must be positive"): + RateLimiter(max_requests=-1) + + def test_invalid_time_window(self): + """Test that invalid time_window raises ValueError.""" + with pytest.raises(ValueError, match="time_window must be positive"): + RateLimiter(time_window=0) + + with pytest.raises(ValueError, match="time_window must be positive"): + RateLimiter(time_window=-1) + + +class TestRateLimiterBasicFunctionality: + """Test basic rate limiter functionality.""" + + def test_single_request(self): + """Test that a single request doesn't wait.""" + limiter = RateLimiter(max_requests=5, time_window=1.0) + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should not wait + assert elapsed < 0.1 + assert len(limiter.requests) == 1 + + def test_requests_under_limit(self): + """Test that requests under the limit don't wait.""" + limiter = RateLimiter(max_requests=5, time_window=1.0) + + start_time = time.time() + for _ in range(4): + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should not wait significantly + assert elapsed < 0.2 + assert len(limiter.requests) == 4 + + def test_requests_at_limit_wait(self): + """Test that exceeding the limit causes waiting.""" + limiter = RateLimiter(max_requests=3, time_window=1.0) + + # Make 3 requests (at limit) + for _ in range(3): + limiter.wait_if_needed() + + # 4th request should wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should wait approximately 1 second + assert elapsed >= 0.9 # Allow small margin + assert len(limiter.requests) == 3 # One removed after wait + + +class TestRateLimiterSlidingWindow: + """Test sliding window behavior.""" + + def test_old_requests_removed(self): + """Test that old requests are removed from tracking.""" + limiter = 
RateLimiter(max_requests=5, time_window=0.5) + + # Make 3 requests + for _ in range(3): + limiter.wait_if_needed() + + # Wait for window to expire + time.sleep(0.6) + + # Make another request - should not wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + assert elapsed < 0.1 # Should not wait + assert len(limiter.requests) == 1 # Old requests removed + + def test_partial_window_expiry(self): + """Test behavior when some requests have expired.""" + limiter = RateLimiter(max_requests=3, time_window=1.0) + + # Make 2 requests + for _ in range(2): + limiter.wait_if_needed() + + # Wait for half the window + time.sleep(0.5) + + # Make 2 more requests - total 4, but 2 are older + for _ in range(2): + limiter.wait_if_needed() + + assert len(limiter.requests) <= limiter.max_requests + assert len(limiter.requests) >= 2 # At least the 2 recent ones + + +class TestRateLimiterThreadSafety: + """Test thread safety of rate limiter.""" + + def test_concurrent_requests(self): + """Test that concurrent requests are handled safely.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + results = [] + + def make_request(): + limiter.wait_if_needed() + results.append(1) + + # Create 15 threads + threads = [Thread(target=make_request) for _ in range(15)] + + # Start all threads + start_time = time.time() + for thread in threads: + thread.start() + + # Wait for all threads to complete + for thread in threads: + thread.join() + + elapsed = time.time() - start_time + + # All 15 requests should complete + assert len(results) == 15 + + # Should have waited at least once (15 > 10 limit) + assert elapsed >= 0.9 + + +class TestRateLimiterUtilityMethods: + """Test utility methods.""" + + def test_get_current_usage_empty(self): + """Test usage stats with no requests.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + usage = limiter.get_current_usage() + + assert usage["current_requests"] == 0 + assert usage["max_requests"] == 10 + assert usage["time_window"] == 1.0 + assert usage["usage_percentage"] == 0.0 + + def test_get_current_usage_partial(self): + """Test usage stats with some requests.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + + for _ in range(5): + limiter.wait_if_needed() + + usage = limiter.get_current_usage() + + assert usage["current_requests"] == 5 + assert usage["usage_percentage"] == 50.0 + + def test_get_current_usage_removes_old(self): + """Test that usage stats remove old requests.""" + limiter = RateLimiter(max_requests=10, time_window=0.5) + + for _ in range(5): + limiter.wait_if_needed() + + time.sleep(0.6) + + usage = limiter.get_current_usage() + assert usage["current_requests"] == 0 + + def test_reset(self): + """Test reset functionality.""" + limiter = RateLimiter(max_requests=10, time_window=1.0) + + for _ in range(5): + limiter.wait_if_needed() + + assert len(limiter.requests) == 5 + + limiter.reset() + + assert len(limiter.requests) == 0 + + def test_repr(self): + """Test string representation.""" + limiter = RateLimiter(max_requests=10, time_window=60.0) + + for _ in range(3): + limiter.wait_if_needed() + + repr_str = repr(limiter) + + assert "RateLimiter" in repr_str + assert "max_requests=10" in repr_str + assert "time_window=60.0s" in repr_str + assert "current_usage=3/10" in repr_str + + +class TestRateLimiterEdgeCases: + """Test edge cases and boundary conditions.""" + + def test_very_high_limit(self): + """Test with very high request limit.""" + limiter = RateLimiter(max_requests=10000, 
time_window=1.0) + + start_time = time.time() + for _ in range(100): + limiter.wait_if_needed() + elapsed = time.time() - start_time + + # Should not wait + assert elapsed < 0.5 + + def test_very_short_window(self): + """Test with very short time window.""" + limiter = RateLimiter(max_requests=2, time_window=0.1) + + # Make 2 requests + for _ in range(2): + limiter.wait_if_needed() + + # 3rd should wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + assert elapsed >= 0.05 # Should wait some time + + def test_single_request_limit(self): + """Test with limit of 1 request.""" + limiter = RateLimiter(max_requests=1, time_window=0.5) + + limiter.wait_if_needed() + + # 2nd request should wait + start_time = time.time() + limiter.wait_if_needed() + elapsed = time.time() - start_time + + assert elapsed >= 0.4
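
Note: the unit and integration tests above exercise a sliding-window RateLimiter whose implementation (langchain_gradient/rate_limiter.py, added elsewhere in this PR) is not shown in this hunk. The following is only a minimal sketch of behaviour consistent with those assertions; the class name, attribute names, and internals are inferred from the tests, not copied from the actual module, and the real implementation may differ.

# Sketch only: a sliding-window limiter matching the test expectations above.
# Names and details are assumptions; see langchain_gradient/rate_limiter.py for the real code.
import threading
import time
from collections import deque


class RateLimiter:
    def __init__(self, max_requests: int = 60, time_window: float = 60.0) -> None:
        if max_requests <= 0:
            raise ValueError("max_requests must be positive")
        if time_window <= 0:
            raise ValueError("time_window must be positive")
        self.max_requests = max_requests
        self.time_window = time_window
        self.requests: deque[float] = deque()  # timestamps of recent requests
        self._lock = threading.Lock()

    def _prune(self, now: float) -> None:
        # Drop timestamps that have fallen out of the sliding window.
        while self.requests and now - self.requests[0] >= self.time_window:
            self.requests.popleft()

    def wait_if_needed(self) -> None:
        # Block until a new request fits inside the window, then record it.
        with self._lock:
            self._prune(time.monotonic())
            if len(self.requests) >= self.max_requests:
                sleep_for = self.time_window - (time.monotonic() - self.requests[0])
                if sleep_for > 0:
                    time.sleep(sleep_for)
                self._prune(time.monotonic())
            self.requests.append(time.monotonic())

    def get_current_usage(self) -> dict:
        with self._lock:
            self._prune(time.monotonic())
            current = len(self.requests)
            return {
                "current_requests": current,
                "max_requests": self.max_requests,
                "time_window": self.time_window,
                "usage_percentage": (current / self.max_requests) * 100,
            }

    def reset(self) -> None:
        with self._lock:
            self.requests.clear()

    def __repr__(self) -> str:
        return (
            f"RateLimiter(max_requests={self.max_requests}, "
            f"time_window={self.time_window}s, "
            f"current_usage={len(self.requests)}/{self.max_requests})"
        )

In this sketch the sleep happens while the lock is held, which serialises waiting threads; that is sufficient for the thread-safety tests above (15 threads against a limit of 10 all complete, with at least one wait), though a production implementation might release the lock while sleeping.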