diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index b337599fe..58eddc72d 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -17,11 +17,11 @@ jobs: - name: Install dependencies run: | - python -m pip install . - pip install -r dev-requirements.txt + pipx install poetry + poetry install - name: Run and upload benchmarks - run: ./scripts/run_and_upload_benchmarks.sh + run: poetry run ./scripts/run_and_upload_benchmarks.sh env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 39acde263..d4c8dfb8f 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -13,12 +13,12 @@ jobs: - uses: chartboost/ruff-action@v1 - name: Install dependencies run: | - python -m pip install . - pip install -r dev-requirements.txt + pipx install poetry + poetry install - name: black check - run: black --check --preview . + run: poetry run black --check --preview . - name: isort check - run: isort --profile black --check . + run: poetry run isort --profile black --check . build: runs-on: ${{ matrix.os }} @@ -46,21 +46,21 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - python -m pip install . - python -m pip install -r dev-requirements.txt + pipx install poetry + poetry install - name: Check types with pyright run: | - pyright + poetry run pyright - name: Test with pytest run: | - pytest + poetry run pytest - name: Can run Mentat # Unfortunately Github Actions Runners have trouble with prompt toolkit, so we can't do this on Windows. if: runner.os != 'Windows' # Ensure that python doesn't import local mentat folder and that 'mentat' command calls mentat instead of switching folders. working-directory: ./testbed run: | - mentat + poetry run mentat . license-check: runs-on: ubuntu-latest @@ -73,8 +73,8 @@ jobs: python-version: 3.11 - name: Install dependencies run: | - python -m pip install . - pip install -r dev-requirements.txt + pipx install poetry + poetry install - name: Run license checking script run: | - python tests/license_check.py + poetry run python tests/license_check.py diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 71054ffd1..9529849aa 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,8 +24,8 @@ jobs: python-version: '3.10' - name: Install dependencies run: | - python -m pip install --upgrade pip - python -m pip install setuptools wheel twine + pipx install poetry + poetry install - name: Set Prod conf.ini run: | mv mentat/resources/conf/conf-prod.ini mentat/resources/conf/conf.ini @@ -45,7 +45,7 @@ jobs: TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} run: | - python setup.py sdist bdist_wheel + poetry build twine upload dist/* - name: Brew Release # Since Homebrew automatically updates from PyPI, no need for us to run this @@ -55,4 +55,4 @@ jobs: with: token: ${{ secrets.TOKEN_FOR_BREW }} formula: mentat - tag: v${{ steps.version-number.outputs.group1 }} \ No newline at end of file + tag: v${{ steps.version-number.outputs.group1 }} diff --git a/README.md b/README.md index c8d74b0e2..74db7a711 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ git clone https://github.com/AbanteAI/mentat.git cd mentat # install with pip in editable mode: -pip install -e . 
+poetry install ``` ## Add your OpenAI API Key diff --git a/dev-requirements.txt b/dev-requirements.txt deleted file mode 100644 index 36dc0ab37..000000000 --- a/dev-requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -aiomultiprocess==0.9.0 -black==23.9.1 -gitpython==3.1.37 -isort==5.12.0 -pip-licenses==4.3.3 -pyright==1.1.339 -pytest-xdist==3.3.1 -ruff==0.0.292 -sphinx==7.2.6 -sphinx-rtd-theme==2.0.0 - diff --git a/mentat/__init__.py b/mentat/__init__.py index 1679792cb..025f957d4 100644 --- a/mentat/__init__.py +++ b/mentat/__init__.py @@ -1,2 +1,19 @@ +from mentat.user_session import user_session + +__all__ = [ + "user_session", +] + + +def __dir__(): + return __all__ + + # Make sure to bump this on Release x.y.z PR's! __version__ = "1.0.7" + + +# the very first thing we need to do is load_config so we don't have an empty object while booting. +from mentat.config import load_config # noqa: E402 + +load_config() diff --git a/mentat/__main__.py b/mentat/__main__.py index 08682fc6f..c82399aa2 100644 --- a/mentat/__main__.py +++ b/mentat/__main__.py @@ -5,8 +5,8 @@ exit("Error: Python version 3.10 or higher is required.") -from mentat.terminal.client import run_cli - -# This is only here so that mentat can be run with python -m mentat -# This file will NOT run when run from pip installation -run_cli() +# from mentat.terminal.client import run_cli +# +# # This is only here so that mentat can be run with python -m mentat +# # This file will NOT run when run from pip installation +# run_cli() diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index 0105998e5..255b100be 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -9,23 +9,33 @@ ChatCompletionSystemMessageParam, ) +import mentat from mentat.llm_api_handler import prompt_tokens from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT from mentat.session_input import ask_yes_no, collect_user_input from mentat.transcripts import ModelMessage -agent_file_selection_prompt_path = Path("agent_file_selection_prompt.txt") -agent_command_prompt_path = Path("agent_command_selection_prompt.txt") - class AgentHandler: + + config = mentat.user_session.get("config") + agent_file_selection_prompt_path = config.ai.prompts.get( + "agent_file_selection_prompt", Path("text/agent_file_selection_prompt.txt") + ) + agent_command_prompt_path = config.ai.prompts.get( + "agent_command_selection_prompt", + Path("text/agent_command_selection_prompt.txt"), + ) + def __init__(self): self._agent_enabled = False self.agent_file_message = "" - self.agent_file_selection_prompt = read_prompt(agent_file_selection_prompt_path) - self.agent_command_prompt = read_prompt(agent_command_prompt_path) + self.agent_file_selection_prompt = read_prompt( + self.agent_file_selection_prompt_path + ) + self.agent_command_prompt = read_prompt(self.agent_command_prompt_path) # Make this property readonly because we have to set things when we enable agent mode @property @@ -53,7 +63,8 @@ async def enable_agent_mode(self): ), ), ] - model = ctx.config.model + config = mentat.user_session.get("config") + model = config.ai.model response = await ctx.llm_api_handler.call_llm_api(messages, model, False) content = response.choices[0].message.content or "" @@ -84,7 +95,8 @@ async def enable_agent_mode(self): async def _determine_commands(self) -> List[str]: ctx = SESSION_CONTEXT.get() - model = ctx.config.model + config = mentat.user_session.get("config") + model = config.ai.model messages = [ ChatCompletionSystemMessageParam( 
role="system", content=self.agent_command_prompt diff --git a/mentat/code_context.py b/mentat/code_context.py index ec4d20798..345793dac 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -4,6 +4,7 @@ from pathlib import Path from typing import Dict, Iterable, List, Optional, Set, Union +import mentat from mentat.code_feature import ( CodeFeature, get_code_message_from_features, @@ -60,7 +61,7 @@ def display_context(self): """Display the baseline context: included files and auto-context settings""" session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config + config = mentat.user_session.get("config") stream.send("Code Context:", color="blue") prefix = " " @@ -69,9 +70,11 @@ def display_context(self): stream.send(f"{prefix}Diff:", end=" ") stream.send(self.diff_context.get_display_context(), color="green") - if config.auto_context_tokens > 0: + if config.run.auto_context_tokens > 0: stream.send(f"{prefix}Auto-Context: Enabled") - stream.send(f"{prefix}Auto-Context Tokens: {config.auto_context_tokens}") + stream.send( + f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}" + ) else: stream.send(f"{prefix}Auto-Context: Disabled") @@ -120,9 +123,8 @@ async def get_code_message( 'prompt_tokens' argument is the total number of tokens used by the prompt before the code message, used to ensure that the code message won't overflow the model's context size """ - session_context = SESSION_CONTEXT.get() - config = session_context.config - model = config.model + config = mentat.user_session.get("config") + model = config.ai.model # Setup code message metadata code_message = list[str]() @@ -151,14 +153,17 @@ async def get_code_message( ) tokens_used = ( - prompt_tokens + meta_tokens + include_files_tokens + config.token_buffer + prompt_tokens + meta_tokens + include_files_tokens + config.ai.token_buffer ) + if not is_context_sufficient(tokens_used): raise ContextSizeInsufficient() - auto_tokens = min(get_max_tokens() - tokens_used, config.auto_context_tokens) + auto_tokens = min( + get_max_tokens() - tokens_used, config.run.auto_context_tokens + ) # Get auto included features - if config.auto_context_tokens > 0 and prompt: + if config.run.auto_context_tokens > 0 and prompt: features = self.get_all_features() feature_filter = DefaultFilter( auto_tokens, @@ -187,11 +192,10 @@ def get_all_features( Retrieves every CodeFeature under the cwd. 
If files_only is True the features won't be split into intervals """ session_context = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") abs_exclude_patterns: Set[Path] = set() - for pattern in self.ignore_patterns.union( - session_context.config.file_exclude_glob_list - ): + for pattern in self.ignore_patterns.union(config.run.file_exclude_glob_list): if not Path(pattern).is_absolute(): abs_exclude_patterns.add(session_context.cwd / pattern) else: @@ -270,17 +274,17 @@ def include( A set of paths that have been successfully included in the context """ session_context = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") path = Path(path) abs_exclude_patterns: Set[Path] = set() - all_exclude_patterns: Set[Union[str, Path]] = set( - [ - *exclude_patterns, - *self.ignore_patterns, - *session_context.config.file_exclude_glob_list, - ] - ) + all_exclude_patterns: Set[Union[str, Path]] = set([ + *exclude_patterns, + *self.ignore_patterns, + *config.run.file_exclude_glob_list, + ]) + for pattern in all_exclude_patterns: if not Path(pattern).is_absolute(): abs_exclude_patterns.add(session_context.cwd / pattern) @@ -293,6 +297,7 @@ def include( cwd=session_context.cwd, exclude_patterns=abs_exclude_patterns, ) + except PathValidationError as e: session_context.stream.send(str(e), color="light_red") return set() diff --git a/mentat/code_feature.py b/mentat/code_feature.py index 93f544197..350246b53 100644 --- a/mentat/code_feature.py +++ b/mentat/code_feature.py @@ -8,6 +8,7 @@ import attr +import mentat from mentat.ctags import get_ctag_lines_and_names from mentat.diff_context import annotate_file_message, parse_diff from mentat.errors import MentatError @@ -130,7 +131,9 @@ def get_code_message(self, standalone: bool = True) -> list[str]: """ session_context = SESSION_CONTEXT.get() code_file_manager = session_context.code_file_manager - parser = session_context.config.parser + + config = mentat.user_session.get("config") + parser = config.parser.parser code_context = session_context.code_context code_message: list[str] = [] diff --git a/mentat/command/commands/config.py b/mentat/command/commands/config.py index b3871bb52..fd6b5ec40 100644 --- a/mentat/command/commands/config.py +++ b/mentat/command/commands/config.py @@ -1,6 +1,5 @@ from typing import List -import attr from typing_extensions import override from mentat.command.command import Command, CommandArgument @@ -10,44 +9,26 @@ class ConfigCommand(Command, command_name="config"): @override async def apply(self, *args: str) -> None: + from mentat.config import get_config, mid_session_config, update_config + session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config + if len(args) == 0: stream.send("No config option specified", color="yellow") - else: + elif len(args) == 1 or len(args) == 2: + setting = args[0] - if hasattr(config, setting): - if len(args) == 1: - value = getattr(config, setting) - description = attr.fields_dict(type(config))[setting].metadata.get( - "description" - ) - stream.send(f"{setting}: {value}") - if description: - stream.send(f"Description: {description}") - elif len(args) == 2: + if setting in mid_session_config: + if len(args) == 2: value = args[1] - if attr.fields_dict(type(config))[setting].metadata.get( - "no_midsession_change" - ): - stream.send( - f"Cannot change {setting} mid-session. 
Please restart" - " Mentat to change this setting.", - color="yellow", - ) - return - try: - setattr(config, setting, value) - stream.send(f"{setting} set to {value}", color="green") - except (TypeError, ValueError): - stream.send( - f"Illegal value for {setting}: {value}", color="red" - ) + update_config(setting=setting, value=value) else: - stream.send("Too many arguments", color="yellow") + get_config(setting=setting) else: stream.send(f"Unrecognized config option: {setting}", color="red") + else: + stream.send("Too many arguments", color="yellow") @override @classmethod @@ -62,18 +43,19 @@ def arguments(cls) -> List[CommandArgument]: def argument_autocompletions( cls, arguments: list[str], argument_position: int ) -> list[str]: - # Dodge circular imports - from mentat.config import Config if argument_position == 0: - return Config.get_fields() + return [ + "model", + "temperature", + "prompt_type", + "format", + "maximum_context", + "auto_context_tokens", + ] elif argument_position == 1: - setting = arguments[0] - fields = attr.fields_dict(Config) - if setting in fields: - return fields[setting].metadata.get("auto_completions", []) - else: - return [] + # TODO: Figure out a better way of doing this. + return [] else: return [] diff --git a/mentat/command/commands/screenshot.py b/mentat/command/commands/screenshot.py index d05508dda..f937dbed7 100644 --- a/mentat/command/commands/screenshot.py +++ b/mentat/command/commands/screenshot.py @@ -2,6 +2,7 @@ from typing_extensions import override +import mentat from mentat.auto_completer import get_command_filename_completions from mentat.command.command import Command, CommandArgument from mentat.session_context import SESSION_CONTEXT @@ -11,12 +12,13 @@ class ScreenshotCommand(Command, command_name="screenshot"): @override async def apply(self, *args: str) -> None: + config = mentat.user_session.get("config") + session_context = SESSION_CONTEXT.get() vision_manager = session_context.vision_manager stream = session_context.stream - config = session_context.config conversation = session_context.conversation - model = config.model + model = config.ai.model if "gpt" in model: if "vision" not in model: @@ -25,7 +27,8 @@ async def apply(self, *args: str) -> None: " gpt-4-vision-preview", color="yellow", ) - config.model = "gpt-4-vision-preview" + config.ai.model = "gpt-4-vision-preview" + mentat.user_session.set("config", config) else: stream.send( "Can't determine if this model supports vision. 
Attempting anyway.", diff --git a/mentat/command/commands/search.py b/mentat/command/commands/search.py index 3d771306b..5efc3c1f5 100644 --- a/mentat/command/commands/search.py +++ b/mentat/command/commands/search.py @@ -3,6 +3,7 @@ from termcolor import colored from typing_extensions import override +import mentat from mentat.command.command import Command, CommandArgument from mentat.errors import UserError from mentat.session_context import SESSION_CONTEXT @@ -34,10 +35,11 @@ def _parse_include_input(user_input: str, max_num: int) -> Set[int] | None: class SearchCommand(Command, command_name="search"): @override async def apply(self, *args: str) -> None: + config = mentat.user_session.get("config") + session_context = SESSION_CONTEXT.get() stream = session_context.stream code_context = session_context.code_context - config = session_context.config if len(args) == 0: stream.send("No search query specified", color="yellow") @@ -57,7 +59,7 @@ async def apply(self, *args: str) -> None: file_name = colored(file_name, "blue", attrs=["bold"]) file_name += colored(feature.interval_string(), "light_cyan") - tokens = feature.count_tokens(config.model) + tokens = feature.count_tokens(config.ai.model) cumulative_tokens += tokens tokens_str = colored(f" ({tokens} tokens)", "yellow") file_name += tokens_str diff --git a/mentat/config.py b/mentat/config.py index 8d52d7818..7e84c0f06 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -1,247 +1,404 @@ from __future__ import annotations -import json -from argparse import ArgumentParser, Namespace -from json import JSONDecodeError +import os +import shutil +from dataclasses import dataclass, field, fields from pathlib import Path +from typing import Any, Dict, List, Optional -import attr -from attr import converters, validators +import yaml +from dataclasses_json import DataClassJsonMixin +import mentat +from mentat import user_session from mentat.git_handler import get_git_root_for_path from mentat.llm_api_handler import known_models -from mentat.parsers.parser import Parser -from mentat.parsers.parser_map import parser_map +from mentat.parsers.block_parser import BlockParser +from mentat.parsers.replacement_parser import ReplacementParser +from mentat.parsers.unified_diff_parser import UnifiedDiffParser from mentat.session_context import SESSION_CONTEXT from mentat.utils import mentat_dir_path -config_file_name = Path(".mentat_config.json") +config_file_name = Path(".mentat_config.yaml") user_config_path = mentat_dir_path / config_file_name +APP_ROOT = Path.cwd() +MENTAT_ROOT = Path(__file__).parent +USER_MENTAT_ROOT = Path.home() / ".mentat" +GIT_ROOT = get_git_root_for_path(APP_ROOT, raise_error=False) -def int_or_none(s: str | None) -> int | None: - if s is not None: - return int(s) - return None +bool_autocomplete = ["True", "False"] -bool_autocomplete = ["True", "False"] +@dataclass +class RunSettings(DataClassJsonMixin): + file_exclude_glob_list: List[Path] = field(default_factory=list) + auto_context: bool = False + auto_tokens: int = 8000 + auto_context_tokens: int = 0 + active_plugins: List[str] = field(default_factory=list) + def __init__( + self, + file_exclude_glob_list: Optional[List[Path]] = None, + active_plugins: Optional[List[str]] = None, + auto_context: Optional[bool] = None, + auto_tokens: Optional[int] = None, + auto_context_tokens: Optional[int] = None, + ) -> None: + if file_exclude_glob_list is not None: + self.file_exclude_glob_list = file_exclude_glob_list + if active_plugins is not None: + self.active_plugins = 
active_plugins + if auto_context is not None: + self.auto_context = auto_context + if auto_tokens is not None: + self.auto_tokens = auto_tokens + if auto_context_tokens is not None: + self.auto_context_tokens = auto_context_tokens -@attr.define -class Config: - _errors: list[str] = attr.field(factory=list) - # Model specific settings - model: str = attr.field( - default="gpt-4-1106-preview", - metadata={"auto_completions": list(known_models.keys())}, - ) - feature_selection_model: str = attr.field( - default="gpt-4-1106-preview", - metadata={"auto_completions": list(known_models.keys())}, - ) - embedding_model: str = attr.field( - default="text-embedding-ada-002", - metadata={ - "auto_completions": [ - model.name for model in known_models.values() if model.embedding_model - ] - }, - ) - temperature: float = attr.field( - default=0.2, converter=float, validator=[validators.le(1), validators.ge(0)] - ) +@dataclass +class AIModelSettings(DataClassJsonMixin): + model: str + feature_selection_model: str + embedding_model: str + prompts: Dict[str, Path] + temperature: float + maximum_context: Optional[int] + token_buffer: int + no_parser_prompt: bool - maximum_context: int | None = attr.field( - default=None, - metadata={ - "description": ( - "The maximum number of lines of context to include in the prompt. It is" - " inferred automatically for openai models but you can still set it to" - " save costs. It must be set for other models." - ), - }, - converter=int_or_none, - validator=validators.optional(validators.ge(0)), - ) - token_buffer: int = attr.field( - default=1000, - metadata={ - "description": ( - "The amount of tokens to always be reserved as a buffer for user and" - " model messages." - ), - }, - ) - parser: Parser = attr.field( # pyright: ignore - default="block", - metadata={ - "description": ( - "The format for the LLM to write code in. You probably don't want to" - " mess with this setting." - ), - "auto_completions": list(parser_map.keys()), - }, - converter=parser_map.get, # pyright: ignore - validator=validators.instance_of(Parser), # pyright: ignore - ) - no_parser_prompt: bool = attr.field( - default=False, - metadata={ - "description": ( - "Whether to include the parser prompt in the system message. This" - " should only be set to true for fine tuned models." 
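
A minimal sketch of the `AIModelSettings` dataclass being introduced in this region of the diff: its `__init__` calls `load_model`, which stores the model name and, when the model appears in `known_models` with a `context_size` attribute, infers `maximum_context` from it. Whether the default model actually carries a `context_size` entry is an assumption about `known_models`, not something stated in the diff:

```python
# Sketch, not a definitive usage example; assumes "gpt-4-1106-preview" appears
# in mentat.llm_api_handler.known_models with a context_size attribute.
from mentat.config import AIModelSettings

ai = AIModelSettings(model="gpt-4-1106-preview")  # __init__ calls load_model()
print(ai.model)            # "gpt-4-1106-preview"
print(ai.maximum_context)  # inferred from known_models[...].context_size
```
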
- ), - "auto_completions": bool_autocomplete, - }, - converter=converters.optional(converters.to_bool), + def __init__( + self, + model: Optional[str] = "gpt-4-1106-preview", + feature_selection_model: Optional[str] = "gpt-4-1106-preview", + embedding_model: Optional[str] = "text-embedding-ada-002", + prompts: Optional[str] = "text", + temperature: Optional[float] = 0.2, + maximum_context: Optional[int] = None, + token_buffer: Optional[int] = 1000, + no_parser_prompt: Optional[bool] = False, + ): + if model is not None: + self.load_model(model) + if feature_selection_model is not None: + self.feature_selection_model = feature_selection_model + if embedding_model is not None: + self.embedding_model = embedding_model + if prompts is not None: + self.load_prompts(prompts) + if temperature is not None: + self.temperature = temperature + if maximum_context is not None: + self.maximum_context = maximum_context + if token_buffer is not None: + self.token_buffer = token_buffer + if no_parser_prompt is not None: + self.no_parser_prompt = no_parser_prompt + + def load_model(self, model: str) -> None: + self.model = model + known_model = known_models.get(model) + if known_model is not None: + if hasattr(known_model, "context_size"): + self.maximum_context = int(known_model.context_size) + + def load_prompts(self, prompt_type: str) -> None: + prompts_type = { + "markdown": { + "agent_file_selection_prompt": Path( + "markdown/agent_file_selection_prompt.md" + ), + "agent_command_selection_prompt": Path( + "markdown/agent_command_selection_prompt.md" + ), + "block_parser_prompt": Path("markdown/block_parser_prompt.md"), + "feature_selection_prompt": Path( + "markdown/feature_selection_prompt.md" + ), + "replacement_parser_prompt": Path( + "markdown/replacement_parser_prompt.md" + ), + "unified_diff_parser_prompt": Path( + "markdown/unified_diff_parser_prompt.md" + ), + "json_parser_prompt": Path("markdown/json_parser_prompt.md"), + }, + "text": { + "agent_file_selection_prompt": Path( + "text/agent_file_selection_prompt.txt" + ), + "agent_command_selection_prompt": Path( + "text/agent_command_selection_prompt.txt" + ), + "block_parser_prompt": Path("text/block_parser_prompt.txt"), + "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), + "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), + "unified_diff_parser_prompt": Path( + "text/unified_diff_parser_prompt.txt" + ), + "json_parser_prompt": Path("text/json_parser_prompt.txt"), + }, + } + + self.prompts = prompts_type.get(prompt_type, {}) + + +@dataclass +class UISettings(DataClassJsonMixin): + input_style: Dict[str, str] = field( + default_factory=lambda: { + "": "#9835bd", + "prompt": "#ffffff bold", + "continuation": "#ffffff bold", + } ) - # Context specific settings - file_exclude_glob_list: list[str] = attr.field( - factory=list, - metadata={"description": "List of glob patterns to exclude from context"}, + def __init__(self, input_style: Optional[Dict[str, str]] = None) -> None: + if input_style is not None: + self.input_style = input_style + + +@dataclass +class ParserSettings(DataClassJsonMixin): + parser: Any = BlockParser() + parser_type: str = "block" + + def __init__(self, parser_type: Optional[str] = "block"): + if parser_type is not None: + self.load_parser(parser_type) + else: + self.load_parser("block") + + def load_parser(self, parser_type: str) -> None: + parsers = { + "block": BlockParser, + "replacement": ReplacementParser, + "unified-diff": UnifiedDiffParser, + } + + if parser := 
parsers.get(parser_type): + self.parser_type = parser_type + self.parser = parser() + else: + self.parser_type = "block" + self.parser = parsers["block"]() + + +@dataclass +class RunningSessionConfig(DataClassJsonMixin): + model: Optional[str] = "gpt-4-1106-preview" + temperature: Optional[float] = 0.2 + prompt_type: Optional[str] = "text" + file_exclude_glob_list: Optional[List[str]] = field( + default_factory=list + ) # Use default factory for list + format: Optional[str] = "block" + input_style: Optional[Dict[str, str]] = field( + default_factory=lambda: { # Use default factory for dict + "": "#9835bd", + "prompt": "#ffffff bold", + "continuation": "#ffffff bold", + } ) - auto_context_tokens: int = attr.field( # pyright: ignore - default=0, - metadata={ - "description": ( - "Automatically selects code files for every request to include in" - " context. Adds this many tokens to context each request." - ), - "abbreviation": "a", - "const": 5000, - }, - converter=int, - validator=validators.ge(0), # pyright: ignore + maximum_context: Optional[int] = None + auto_context_tokens: Optional[int] = 0 + active_plugins: Optional[List[str]] = None + + @classmethod + def get_fields(cls) -> List[str]: + return [f.name for f in fields(cls)] + + +@dataclass +class MentatConfig: + # Directory where the mentat is running + root: Path = ( + field(default_factory=lambda: APP_ROOT), + ) # pyright: ignore[reportGeneralTypeIssues] + user_config_path: Path = field(default_factory=lambda: user_config_path) + + run: RunSettings = field(default_factory=RunSettings) + ai: AIModelSettings = field(default_factory=AIModelSettings) + ui: UISettings = field(default_factory=UISettings) + parser: ParserSettings = field(default_factory=ParserSettings) + + +def load_yaml(path: str) -> dict[str, Any | None]: + """Load the data from the YAML file.""" + with open(path, "r") as file: + return yaml.safe_load(file) + + +def init_config() -> None: + """Initialize the configuration file if it doesn't exist.""" + git_root = get_git_root_for_path(APP_ROOT, raise_error=False) + if git_root is not None: + default_conf_path = os.path.join( + MENTAT_ROOT, "resources", "conf", ".mentatconf.yaml" + ) + current_conf_path = os.path.join(git_root, ".mentatconf.yaml") + + if not os.path.exists(current_conf_path): + shutil.copy(default_conf_path, current_conf_path) + + +def load_settings(config_session: Optional[RunningSessionConfig] = None): + """Load the configuration from the `.mentatconf.yaml` file.""" + + user_conf_path = USER_MENTAT_ROOT / ".mentatconf.yaml" + git_root = get_git_root_for_path(APP_ROOT, raise_error=False) + + yaml_config = RunningSessionConfig() + + if user_conf_path.exists(): + data = load_yaml(str(user_conf_path)) + # fmt: off + yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] + kvs=data, infer_missing=True + ) + # fmt: on + + if git_root is not None: + git_conf_path = Path(git_root) / ".mentatconf.yaml" + if git_conf_path.exists(): + data = load_yaml(str(git_conf_path)) + # fmt: off + yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] + kvs=data, infer_missing=True + ) + # fmt: on + + # safety checks for missing values + if yaml_config.file_exclude_glob_list is None: + yaml_config.file_exclude_glob_list = [] + + if yaml_config.active_plugins is None: + yaml_config.active_plugins = [] + + if yaml_config.temperature is None: + yaml_config.temperature = 0.2 + + if config_session is not None: + if config_session.file_exclude_glob_list is not None: + 
yaml_config.file_exclude_glob_list.extend( + config_session.file_exclude_glob_list + ) + + if config_session.model is not None: + yaml_config.model = str(config_session.model) + + if config_session.temperature is not None: + yaml_config.temperature = float(config_session.temperature) + + if config_session.maximum_context is not None: + yaml_config.maximum_context = int(config_session.maximum_context) + + file_exclude_glob_list: List[str] = yaml_config.file_exclude_glob_list or [] + + # always ignore .mentatconf + file_exclude_glob_list.append(".mentatconf.yaml") + + run_settings = RunSettings( + file_exclude_glob_list=[ + Path(p) for p in file_exclude_glob_list + ], # pyright: ignore[reportUnknownVariableType] + active_plugins=yaml_config.active_plugins, + auto_context_tokens=yaml_config.auto_context_tokens, ) - # Sample specific settings - sample_repo: str | None = attr.field( - default=None, - metadata={ - "description": "A public url for a cloneable git repository to sample from." - }, + ui_settings = UISettings( + input_style=yaml_config.input_style # pyright: ignore[reportGeneralTypeIssues] ) - sample_merge_base_target: str | None = attr.field( - default=None, - metadata={ - "description": "The branch or commit to use as the merge base for samples." - }, + + ai_model_settings = AIModelSettings( + model=yaml_config.model, + temperature=yaml_config.temperature, + feature_selection_model=yaml_config.model, + maximum_context=yaml_config.maximum_context, ) - # Only settable by config file - input_style: list[tuple[str, str]] = attr.field( - factory=lambda: [ - ["", "#9835bd"], - ["prompt", "#ffffff bold"], - ["continuation", "#ffffff bold"], - ], - metadata={ - "description": "Styling information for the terminal.", - "no_flag": True, - "no_midsession_change": True, - }, + parser_type = yaml_config.format + parser_settings = ParserSettings(parser_type=parser_type) + + user_session.set( + "config", + MentatConfig( + run=run_settings, + ai=ai_model_settings, + ui=ui_settings, + parser=parser_settings, + ), ) - @classmethod - def get_fields(cls) -> list[str]: - return [ - field.name for field in attr.fields(cls) if not field.name.startswith("_") - ] - @classmethod - def add_fields_to_argparse(cls, parser: ArgumentParser) -> None: - for field in attr.fields(cls): - if "no_flag" in field.metadata: - continue - name = [f"--{field.name.replace('_', '-')}"] - if "abbreviation" in field.metadata: - name.append(f"-{field.metadata['abbreviation'].replace('_', '-')}") - - arguments = { - "help": field.metadata.get("description", ""), - } - if "const" in field.metadata: - arguments["nargs"] = "?" 
- arguments["const"] = field.metadata["const"] - - if field.type == "bool": - if arguments.get("default", False): - arguments["action"] = "store_false" - else: - arguments["action"] = "store_true" - elif field.type == "int": - arguments["type"] = int - elif field.type == "float": - arguments["type"] = float - elif field.type == "list[str]": - arguments["nargs"] = "*" - - parser.add_argument(*name, **arguments) +mid_session_config = [ + "model", + "temperature", + "format", + "maximum_context", + "auto_context_tokens", +] - @classmethod - def create(cls, cwd: Path, args: Namespace | None = None) -> Config: - config = Config() - - # Each method overwrites the previous so they are in order of precedence - config.load_file(user_config_path) - git_root = get_git_root_for_path(cwd, raise_error=False) - if git_root is not None: - config.load_file(git_root / config_file_name) - config.load_file(cwd / config_file_name) - - if args is not None: - config.load_namespace(args) - - return config - - def load_namespace(self, args: Namespace) -> None: - for field in attr.fields(Config): - if field.name in args and field.name != "_errors": - value = getattr(args, field.name) - if value is not None and value != field.default: - try: - setattr(self, field.name, value) - except (ValueError, TypeError) as e: - self.error(f"Warning: Illegal value for {field}: {e}") - - def load_file(self, path: Path) -> None: - if path.exists(): - with open(path) as config_file: - try: - config = json.load(config_file) - except JSONDecodeError: - self.error( - f"Warning: Config {path} contains invalid json; ignoring user" - " configuration file" - ) - return - for field in config: - if hasattr(self, field): - try: - setattr(self, field, config[field]) - except (ValueError, TypeError) as e: - self.error( - f"Warning: Config {path} contains invalid value for" - f" setting: {field}\n{e}" - ) - else: - self.error( - f"Warning: Config {path} contains unrecognized setting: {field}" - ) - - def error(self, message: str) -> None: - self._errors.append(message) - try: - self.send_errors_to_stream() - except LookupError: - pass - - def send_errors_to_stream(self): - session_context = SESSION_CONTEXT.get() - stream = session_context.stream - for error in self._errors: - stream.send(error, color="light_yellow") - self._errors = [] + +def update_config(setting: str, value: str | float | int) -> None: + """Reload the configuration using the provided keyword arguments.""" + config = mentat.user_session.get("config") + session_context = SESSION_CONTEXT.get() + stream = session_context.stream + + try: + if setting == "model": + config.ai.load_model(value) + elif setting == "temperature": + config.ai.temperature = float(value) + elif setting == "format": + config.parser.load_parser(value) + elif setting == "maximum_context": + config.ai.maximum_context = int(value) + elif setting == "auto_context_tokens": + config.run.auto_context_tokens = value + + stream.send(f"{setting} set to {value}", color="green") + except (TypeError, ValueError) as e: + stream.send( + f"Illegal value for {setting}: {value}. 
Error: {str(e)}", color="red" + ) + + +def get_config(setting: str) -> None: + """Reload the configuration using the provided keyword arguments.""" + config = mentat.user_session.get("config") + session_context = SESSION_CONTEXT.get() + stream = session_context.stream + + if setting == "model": + stream.send(f"{setting}: {config.ai.model}", color="green") + elif setting == "temperature": + stream.send(f"{setting}: {config.ai.temperature}", color="green") + elif setting == "format": + stream.send(f"{setting}:{config.parser.parser_type}", color="green") + elif setting == "maximum_context": + stream.send(f"{setting}: {config.ai.maximum_context}", color="green") + elif setting == "auto_context_tokens": + stream.send(f"{setting}: {config.run.auto_context_tokens}", color="green") + + +def load_config() -> None: + init_config() + load_settings() + + +def is_active_plugin(plugin: str | None = None) -> bool: + config = mentat.user_session.get("config") + if ( + plugin is not None + and config is not None + and config.run is not None + and config.run.active_plugins is not None + and plugin in config.run.active_plugins + ): + return True + + return False diff --git a/mentat/conversation.py b/mentat/conversation.py index 413731c0e..04447bf59 100644 --- a/mentat/conversation.py +++ b/mentat/conversation.py @@ -15,6 +15,7 @@ ChatCompletionUserMessageParam, ) +import mentat from mentat.errors import MentatError from mentat.llm_api_handler import ( TOKEN_COUNT_WARNING, @@ -39,23 +40,24 @@ def __init__(self): async def display_token_count(self): session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config + config = mentat.user_session.get("config") + code_context = session_context.code_context llm_api_handler = session_context.llm_api_handler - if not await llm_api_handler.is_model_available(config.model): + if not await llm_api_handler.is_model_available(config.ai.model): raise MentatError( - f"Model {config.model} is not available. Please try again with a" + f"Model {config.ai.model} is not available. Please try again with a" " different model." ) - if "gpt-4" not in config.model: + if "gpt-4" not in config.ai.model: stream.send( "Warning: Mentat has only been tested on GPT-4. You may experience" " issues with quality. This model may not be able to respond in" " mentat's edit format.", color="yellow", ) - if "gpt-3.5" not in config.model: + if "gpt-3.5" not in config.ai.model: stream.send( "Warning: Mentat does not know how to calculate costs or context" " size for this model.", @@ -66,7 +68,7 @@ async def display_token_count(self): code_message = await code_context.get_code_message( prompt_tokens( messages, - config.model, + config.ai.model, ) ) messages.append( @@ -75,16 +77,16 @@ async def display_token_count(self): content=code_message, ) ) - tokens = prompt_tokens(messages, config.model) + tokens = prompt_tokens(messages, config.ai.model) context_size = get_max_tokens() if not context_size: stream.send( - f"Context size for {config.model} is not known. Please set" + f"Context size for {config.ai.model} is not known. Please set" " the maximum context with `/config maximum_context value`.", color="light_red", ) - elif tokens + config.token_buffer > context_size: + elif tokens + config.ai.token_buffer > context_size: _plural = len(code_context.include_files) > 1 _exceed = tokens > context_size message: dict[tuple[bool, bool], str] = { @@ -151,12 +153,12 @@ def get_messages( """Returns the messages in the conversation. 
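
The `/config` command rework earlier in the diff routes through these new module-level helpers instead of attrs introspection. A rough sketch of that routing, assuming a session is already running so `SESSION_CONTEXT` and the `user_session` config are populated:

```python
# Rough sketch of what ConfigCommand.apply now does for one or two arguments;
# assumes an active session (SESSION_CONTEXT set, config stored in user_session).
from mentat.config import get_config, mid_session_config, update_config

args = ("temperature", "0.5")  # e.g. "/config temperature 0.5"
setting = args[0]
if setting in mid_session_config:
    if len(args) == 2:
        update_config(setting=setting, value=args[1])  # sets config.ai.temperature
    else:
        get_config(setting=setting)                    # prints the current value
```
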
The system message may change throughout the conversation so it is important to access the messages through this method. """ - session_context = SESSION_CONTEXT.get() - config = session_context.config - if config.no_parser_prompt or not include_system_prompt: + config = mentat.user_session.get("config") + + if config.ai.no_parser_prompt or not include_system_prompt: return self._messages.copy() else: - parser = config.parser + parser = config.parser.parser prompt = parser.get_system_prompt() prompt_message: ChatCompletionMessageParam = ( ChatCompletionSystemMessageParam( @@ -177,8 +179,9 @@ async def _stream_model_response( ) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config - parser = config.parser + config = mentat.user_session.get("config") + + parser = config.parser.parser llm_api_handler = session_context.llm_api_handler cost_tracker = session_context.cost_tracker @@ -190,7 +193,7 @@ async def _stream_model_response( ) response = await llm_api_handler.call_llm_api( messages, - config.model, + config.ai.model, stream=True, response_format=parser.response_format(), ) @@ -202,7 +205,7 @@ async def _stream_model_response( terminate=True, ) - num_prompt_tokens = prompt_tokens(messages, config.model) + num_prompt_tokens = prompt_tokens(messages, config.ai.model) stream.send(f"Total token count: {num_prompt_tokens}", color="cyan") if num_prompt_tokens > TOKEN_COUNT_WARNING: stream.send( @@ -224,9 +227,11 @@ async def _stream_model_response( cost_tracker.log_api_call_stats( num_prompt_tokens, count_tokens( - parsed_llm_response.full_response, config.model, full_message=False + parsed_llm_response.full_response, + config.ai.model, + full_message=False, ), - config.model, + config.ai.model, display=True, ) @@ -242,21 +247,24 @@ async def _stream_model_response( async def get_model_response(self) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config + config = mentat.user_session.get("config") + code_context = session_context.code_context messages_snapshot = self.get_messages() # Get current code message - loading_multiplier = 1.0 if config.auto_context_tokens > 0 else 0.0 - prompt = messages_snapshot[-1]["content"] + loading_multiplier = 1.0 if config.run.auto_context_tokens > 0 else 0.0 + prompt = messages_snapshot[-1][ + "content" + ] # pyright: ignore[reportTypedDictNotRequiredAccess] if isinstance(prompt, list): text_prompts = [ p.get("text", "") for p in prompt if p.get("type") == "text" ] prompt = " ".join(text_prompts) code_message = await code_context.get_code_message( - prompt_tokens(messages_snapshot, config.model), + prompt_tokens(messages_snapshot, config.ai.model), prompt=( prompt # Prompt can be image as well as text if isinstance(prompt, str) @@ -276,7 +284,7 @@ async def get_model_response(self) -> ParsedLLMResponse: except RateLimitError: stream.send( "Rate limit error received from OpenAI's servers using model" - f' {config.model}.\nUse "/config model " to switch to a' + f' {config.ai.model}.\nUse "/config model " to switch to a' " different model.", color="light_red", ) @@ -287,22 +295,22 @@ async def get_model_response(self) -> ParsedLLMResponse: return response def remaining_context(self) -> int | None: - ctx = SESSION_CONTEXT.get() - return get_max_tokens() - prompt_tokens(self.get_messages(), ctx.config.model) + config = mentat.user_session.get("config") + return get_max_tokens() - prompt_tokens(self.get_messages(), 
config.ai.model) def can_add_to_context(self, message: str) -> bool: """ Whether or not the model has enough context remaining to add this message. Will take token buffer into account and uses full_message=True. """ - ctx = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") remaining_context = self.remaining_context() return ( remaining_context is not None and remaining_context - - count_tokens(message, ctx.config.model, full_message=True) - - ctx.config.token_buffer + - count_tokens(message, config.ai.model, full_message=True) + - config.ai.token_buffer > 0 ) diff --git a/mentat/embeddings.py b/mentat/embeddings.py index 5c2b46d88..2933405d6 100644 --- a/mentat/embeddings.py +++ b/mentat/embeddings.py @@ -6,6 +6,7 @@ import numpy as np +import mentat from mentat.code_feature import CodeFeature, count_feature_tokens from mentat.errors import MentatError from mentat.llm_api_handler import ( @@ -84,7 +85,8 @@ async def get_feature_similarity_scores( session_context = SESSION_CONTEXT.get() stream = session_context.stream cost_tracker = session_context.cost_tracker - embedding_model = session_context.config.embedding_model + config = mentat.user_session.get("config") + embedding_model = config.ai.embedding_model llm_api_handler = session_context.llm_api_handler max_model_tokens = model_context_size(embedding_model) diff --git a/mentat/feature_filters/default_filter.py b/mentat/feature_filters/default_filter.py index e2d4b74a2..438059157 100644 --- a/mentat/feature_filters/default_filter.py +++ b/mentat/feature_filters/default_filter.py @@ -1,5 +1,6 @@ from typing import Optional +import mentat from mentat.code_feature import CodeFeature from mentat.errors import ContextSizeInsufficient, ModelError from mentat.feature_filters.embedding_similarity_filter import EmbeddingSimilarityFilter @@ -27,7 +28,8 @@ def __init__( async def filter(self, features: list[CodeFeature]) -> list[CodeFeature]: ctx = SESSION_CONTEXT.get() - if ctx.config.auto_context_tokens > 0 and self.user_prompt != "": + config = mentat.user_session.get("config") + if config.run.auto_context_tokens > 0 and self.user_prompt != "": features = await EmbeddingSimilarityFilter( self.user_prompt, (0.5 if self.use_llm else 1) * self.loading_multiplier ).filter(features) @@ -46,10 +48,10 @@ async def filter(self, features: list[CodeFeature]) -> list[CodeFeature]: " instead." 
) features = await TruncateFilter( - self.max_tokens, ctx.config.model + self.max_tokens, config.ai.model ).filter(features) else: - features = await TruncateFilter(self.max_tokens, ctx.config.model).filter( + features = await TruncateFilter(self.max_tokens, config.ai.model).filter( features ) diff --git a/mentat/feature_filters/llm_feature_filter.py b/mentat/feature_filters/llm_feature_filter.py index 3a80c7662..ba66a2ab1 100644 --- a/mentat/feature_filters/llm_feature_filter.py +++ b/mentat/feature_filters/llm_feature_filter.py @@ -9,6 +9,7 @@ ChatCompletionSystemMessageParam, ) +import mentat from mentat.code_feature import CodeFeature, get_code_message_from_features from mentat.errors import ModelError, UserError from mentat.feature_filters.feature_filter import FeatureFilter @@ -20,7 +21,10 @@ class LLMFeatureFilter(FeatureFilter): - feature_selection_prompt_path = Path("feature_selection_prompt.txt") + config = mentat.user_session.get("config") + feature_selection_prompt_path = config.ai.prompts.get( + "feature_selection_prompt", Path("text/feature_selection_prompt.txt") + ) def __init__( self, @@ -40,21 +44,21 @@ async def filter( ) -> list[CodeFeature]: session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config + cost_tracker = session_context.cost_tracker llm_api_handler = session_context.llm_api_handler # Preselect as many features as fit in the context window - model = config.feature_selection_model + model = self.config.ai.feature_selection_model context_size = model_context_size(model) if context_size is None: raise UserError( "Unknown context size for feature selection model: " - f"{config.feature_selection_model}" + f"{self.config.ai.feature_selection_model}" ) system_prompt = read_prompt(self.feature_selection_prompt_path) system_prompt_tokens = count_tokens( - system_prompt, config.feature_selection_model, full_message=True + system_prompt, self.config.ai.feature_selection_model, full_message=True ) user_prompt_tokens = count_tokens(self.user_prompt, model, full_message=True) expected_edits_tokens = ( @@ -67,7 +71,7 @@ async def filter( - system_prompt_tokens - user_prompt_tokens - expected_edits_tokens - - config.token_buffer + - self.config.ai.token_buffer ) truncate_filter = TruncateFilter(preselect_max_tokens, model) preselected_features = await truncate_filter.filter(features) @@ -167,5 +171,5 @@ async def filter( named_features.add(parsed_feature) # Greedy again to enforce max_tokens - truncate_filter = TruncateFilter(self.max_tokens, config.model) + truncate_filter = TruncateFilter(self.max_tokens, self.config.ai.model) return await truncate_filter.filter(named_features) diff --git a/mentat/git_handler.py b/mentat/git_handler.py index 2d48a5f62..5409db784 100644 --- a/mentat/git_handler.py +++ b/mentat/git_handler.py @@ -71,6 +71,7 @@ def get_paths_with_git_diffs(git_root: Path) -> set[Path]: def get_git_root_for_path(path: Path, raise_error: bool = True) -> Optional[Path]: + if os.path.isdir(path): dir_path = path else: diff --git a/mentat/llm_api_handler.py b/mentat/llm_api_handler.py index 081fb0d26..ad15ea391 100644 --- a/mentat/llm_api_handler.py +++ b/mentat/llm_api_handler.py @@ -38,6 +38,7 @@ from openai.types.chat.completion_create_params import ResponseFormat from PIL import Image +import mentat from mentat.errors import ContextSizeInsufficient, MentatError, UserError from mentat.session_context import SESSION_CONTEXT from mentat.utils import mentat_dir_path @@ -189,22 +190,22 @@ def 
model_price_per_1000_tokens(model: str) -> Optional[tuple[float, float]]: def get_max_tokens() -> int: + config = mentat.user_session.get("config") session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config - context_size = model_context_size(config.model) - maximum_context = config.maximum_context + context_size = model_context_size(config.ai.model) + maximum_context = config.ai.maximum_context if context_size is not None and maximum_context is not None: - return min(context_size, maximum_context) + return min(int(context_size), int(maximum_context)) elif context_size is not None: return context_size elif maximum_context is not None: return maximum_context else: stream.send( - f"Context size for {config.model} is not known. Please set" + f"Context size for {config.ai.model} is not known. Please set" " maximum-context with `/config maximum_context `.", color="light_red", ) @@ -212,10 +213,12 @@ def get_max_tokens() -> int: def is_context_sufficient(tokens: int) -> bool: + config = mentat.user_session.get("config") ctx = SESSION_CONTEXT.get() max_tokens = get_max_tokens() - if max_tokens - tokens < ctx.config.token_buffer: + + if max_tokens - tokens < config.ai.token_buffer: ctx.stream.send( f"The context size is limited to {max_tokens} tokens and your current" f" request uses {tokens} tokens. Please use `/exclude` to remove" @@ -284,8 +287,8 @@ async def call_llm_api( stream: bool, response_format: ResponseFormat = ResponseFormat(type="text"), ) -> ChatCompletion | AsyncIterator[ChatCompletionChunk]: + config = mentat.user_session.get("config") session_context = SESSION_CONTEXT.get() - config = session_context.config cost_tracker = session_context.cost_tracker # Confirm that model has enough tokens remaining. 
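
The hunks in this file repeat the pattern applied throughout the PR: instead of reading settings from `SESSION_CONTEXT.get().config`, call sites fetch the shared `MentatConfig` from `mentat.user_session` and read the nested `ai`/`run`/`parser` groups. A minimal sketch of that access pattern, assuming `load_config()` has already populated the session (the new `mentat/__init__.py` does this at import time):

```python
# Sketch of the new config access pattern used across these hunks.
import mentat  # importing the package runs load_config(), filling user_session["config"]

config = mentat.user_session.get("config")
model = config.ai.model                       # was session_context.config.model
temperature = config.ai.temperature           # was config.temperature
buffer = config.ai.token_buffer               # was config.token_buffer
auto_tokens = config.run.auto_context_tokens  # was config.auto_context_tokens
parser = config.parser.parser                 # was config.parser
```
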
@@ -303,7 +306,7 @@ async def call_llm_api( response = await self.async_client.chat.completions.create( model=model, messages=messages, - temperature=config.temperature, + temperature=config.ai.temperature, stream=stream, max_tokens=4096, ) @@ -311,7 +314,7 @@ async def call_llm_api( response = await self.async_client.chat.completions.create( model=model, messages=messages, - temperature=config.temperature, + temperature=config.ai.temperature, stream=stream, response_format=response_format, ) diff --git a/mentat/parsers/block_parser.py b/mentat/parsers/block_parser.py index 2bc5c62a3..a35450312 100644 --- a/mentat/parsers/block_parser.py +++ b/mentat/parsers/block_parser.py @@ -6,6 +6,7 @@ from typing_extensions import override +import mentat from mentat.code_file_manager import CodeFileManager from mentat.errors import ModelError from mentat.parsers.change_display_helper import DisplayInformation, FileActionType @@ -14,8 +15,6 @@ from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT -block_parser_prompt_filename = Path("block_parser_prompt.txt") - class _BlockParserAction(Enum): Insert = "insert" @@ -71,6 +70,10 @@ def __init__(self, json_data: dict[str, Any]): class BlockParser(Parser): @override def get_system_prompt(self) -> str: + config = mentat.user_session.get("config") + block_parser_prompt_filename = config.ai.prompts.get( + "block_parser_prompt", Path("text/block_parser_prompt.txt") + ) return read_prompt(block_parser_prompt_filename) @override diff --git a/mentat/parsers/change_display_helper.py b/mentat/parsers/change_display_helper.py index 10db7f44d..403aaee20 100644 --- a/mentat/parsers/change_display_helper.py +++ b/mentat/parsers/change_display_helper.py @@ -95,12 +95,10 @@ def _remove_extra_empty_lines(lines: list[str]) -> list[str]: def _prefixed_lines(line_number_buffer: int, lines: list[str], prefix: str): - return "\n".join( - [ - prefix + " " * (line_number_buffer - len(prefix)) + line.strip("\n") - for line in lines - ] - ) + return "\n".join([ + prefix + " " * (line_number_buffer - len(prefix)) + line.strip("\n") + for line in lines + ]) def _get_code_block( @@ -202,18 +200,16 @@ def get_previous_lines( ) -> str: if display_information.first_changed_line < 0: return "" - lines = _remove_extra_empty_lines( - [ - display_information.file_lines[i] - for i in range( - max(0, display_information.first_changed_line - num), - min( - display_information.first_changed_line, - len(display_information.file_lines), - ), - ) - ] - ) + lines = _remove_extra_empty_lines([ + display_information.file_lines[i] + for i in range( + max(0, display_information.first_changed_line - num), + min( + display_information.first_changed_line, + len(display_information.file_lines), + ), + ) + ]) numbered = [ (str(display_information.first_changed_line - len(lines) + i + 1) + ":").ljust( display_information.line_number_buffer @@ -232,18 +228,16 @@ def get_later_lines( ) -> str: if display_information.last_changed_line < 0: return "" - lines = _remove_extra_empty_lines( - [ - display_information.file_lines[i] - for i in range( - max(0, display_information.last_changed_line), - min( - display_information.last_changed_line + num, - len(display_information.file_lines), - ), - ) - ] - ) + lines = _remove_extra_empty_lines([ + display_information.file_lines[i] + for i in range( + max(0, display_information.last_changed_line), + min( + display_information.last_changed_line + num, + len(display_information.file_lines), + ), + ) + ]) numbered = [ 
(str(display_information.last_changed_line + 1 + i) + ":").ljust( display_information.line_number_buffer diff --git a/mentat/parsers/json_parser.py b/mentat/parsers/json_parser.py index 4bc11b0a5..559aeb38f 100644 --- a/mentat/parsers/json_parser.py +++ b/mentat/parsers/json_parser.py @@ -11,6 +11,7 @@ from termcolor import colored from typing_extensions import override +import mentat from mentat.errors import ModelError from mentat.llm_api_handler import chunk_to_lines from mentat.parsers.file_edit import FileEdit, Replacement @@ -19,8 +20,6 @@ from mentat.session_context import SESSION_CONTEXT from mentat.streaming_printer import StreamingPrinter -json_parser_prompt_filename = Path("json_parser_prompt.txt") - comment_schema = { "type": "object", "properties": {"type": {"enum": ["comment"]}, "content": {"type": "string"}}, @@ -84,6 +83,10 @@ class JsonParser(Parser): @override def get_system_prompt(self) -> str: + config = mentat.user_session.get("config") + json_parser_prompt_filename = config.ai.prompts.get( + "json_parser_prompt", Path("text/json_parser_prompt.txt") + ) return read_prompt(json_parser_prompt_filename) @override diff --git a/mentat/parsers/replacement_parser.py b/mentat/parsers/replacement_parser.py index 7ba2a0bf1..56650da32 100644 --- a/mentat/parsers/replacement_parser.py +++ b/mentat/parsers/replacement_parser.py @@ -2,6 +2,7 @@ from typing_extensions import override +import mentat from mentat.code_file_manager import CodeFileManager from mentat.errors import ModelError from mentat.parsers.change_display_helper import DisplayInformation, FileActionType @@ -10,12 +11,14 @@ from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT -replacement_parser_prompt_filename = Path("replacement_parser_prompt.txt") - class ReplacementParser(Parser): @override def get_system_prompt(self) -> str: + config = mentat.user_session.get("config") + replacement_parser_prompt_filename = config.ai.prompts.get( + "replacement_parser_prompt", Path("text/replacement_parser_prompt.txt") + ) return read_prompt(replacement_parser_prompt_filename) @override diff --git a/mentat/parsers/unified_diff_parser.py b/mentat/parsers/unified_diff_parser.py index 21df280a4..aa3a8561e 100644 --- a/mentat/parsers/unified_diff_parser.py +++ b/mentat/parsers/unified_diff_parser.py @@ -4,6 +4,7 @@ from termcolor import colored from typing_extensions import override +import mentat from mentat.code_file_manager import CodeFileManager from mentat.parsers.change_display_helper import ( DisplayInformation, @@ -16,8 +17,6 @@ from mentat.parsers.parser import Parser from mentat.prompts.prompts import read_prompt -unified_diff_parser_prompt_filename = Path("unified_diff_parser_prompt.txt") - class UnifiedDiffDelimiter(Enum): SpecialStart = "---" @@ -29,6 +28,10 @@ class UnifiedDiffDelimiter(Enum): class UnifiedDiffParser(Parser): @override def get_system_prompt(self) -> str: + config = mentat.user_session.get("config") + unified_diff_parser_prompt_filename = config.ai.prompts.get( + "unified_diff_parser_prompt", Path("text/unified_diff_parser_prompt.txt") + ) return read_prompt(unified_diff_parser_prompt_filename) @override diff --git a/mentat/python_client/client.py b/mentat/python_client/client.py index 15e061e5f..34c024a32 100644 --- a/mentat/python_client/client.py +++ b/mentat/python_client/client.py @@ -4,7 +4,6 @@ from pathlib import Path from typing import List -from mentat.config import Config from mentat.errors import MentatError from mentat.session import Session from 
mentat.session_stream import StreamMessageSource @@ -19,7 +18,6 @@ def __init__( ignore_paths: List[Path] = [], diff: str | None = None, pr_diff: str | None = None, - config: Config = Config(), ): self.cwd = cwd.expanduser().resolve() self.paths = paths @@ -27,7 +25,6 @@ def __init__( self.ignore_paths = ignore_paths self.diff = diff self.pr_diff = pr_diff - self.config = config self._accumulated_message = "" self.stopped = Event() @@ -81,7 +78,6 @@ async def startup(self): self.ignore_paths, self.diff, self.pr_diff, - self.config, ) self.session.start() self.acc_task = asyncio.create_task(self._accumulate_messages()) diff --git a/mentat/resources/conf/.mentatconf.yaml b/mentat/resources/conf/.mentatconf.yaml new file mode 100644 index 000000000..5c532c080 --- /dev/null +++ b/mentat/resources/conf/.mentatconf.yaml @@ -0,0 +1,43 @@ +#settings related to the AI are below + +# This field is for specifying the model name. You can find the list of valid options at https://platform.openai.com/docs/models/overview +model: gpt-4-1106-preview +temperature: 0.2 + +# For models other than gpt-3.5 and gpt-4, the model's context size can't be inferred. +# In such cases, you need to specify the maximum context manually. +maximum_context: + +#the type of prompts that the agent should be using options are text and markdown +prompt_type: text + +#settings related to each "run" + +# This list contains glob patterns. Mentat uses these patterns to exclude certain files when provided with a directory argument. +# Mentat considers all files that do not match your .gitignore file and these patterns. +# Glob patterns are interpreted from the git root location, so if you want to exclude all .py files, use "**/*.py" instead of "*.py". +# This example excludes all hidden files and directories: +file_exclude_glob_list: +# - "**/.*" +# - "**/.*/**" +auto_context_tokens: + +# a list of plugins that should be active. Current options include sampler +active_plugins: + - sampler + +#settings related to the "parser" + +# Mentat parses files following a specific format, which you can set here. +# Multiple formats are available, though the default one is expected to be the best fit for most cases. +# You can experiment with different formats as per your need. +# Available formats include: block, replacement, unified-diff. +format: block + +#settings related to each "UI" + +# This section contains key-value pairs for defining a custom Pygment Style for the Mentat prompt. +input_style: + "" : "#9835bd" + "prompt" : "#ffffff bold" + "continuation" : "#ffffff bold" diff --git a/mentat/resources/prompts/markdown/agent_command_selection_prompt.md b/mentat/resources/prompts/markdown/agent_command_selection_prompt.md new file mode 100644 index 000000000..081582651 --- /dev/null +++ b/mentat/resources/prompts/markdown/agent_command_selection_prompt.md @@ -0,0 +1,27 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +- You are running autonomously to test your recent code changes. + +## Instructions +1. **Run Commands**: + - Use the following format for commands: `command_1 arg_1`, `command_2`, `command_3 arg_1 arg_2`. + - Commands should be listed as a new-line separated list. + +2. **View Output**: + - After running commands, review the output to adjust your changes accordingly. + +3. **File Selection**: + - Use only pre-selected files for testing. + - Avoid commands that test, lint, or run the entire project. + - Do not use files that may not exist. 
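
The markdown prompt files being added around here are the targets of the new `prompt_type` setting: `AIModelSettings.load_prompts` maps `"text"` or `"markdown"` to the corresponding files under `mentat/resources/prompts/`. A small sketch of that lookup, using the paths defined in `load_prompts`:

```python
# Sketch of how prompt_type selects between the text/ and markdown/ prompt resources.
from mentat.config import AIModelSettings

ai = AIModelSettings(prompts="markdown")
print(ai.prompts["agent_command_selection_prompt"])  # markdown/agent_command_selection_prompt.md
print(ai.prompts["block_parser_prompt"])             # markdown/block_parser_prompt.md
```
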
+ - Prefer running files you edited or those that use the files you edited. + +4. **Linter Usage**: + - Run a linter to automatically lint the files you changed. + - Do not run a linter check; run the command that actively lints the file. + +5. **Restrictions**: + - Do not provide additional context to ensure correct parsing of your response. \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/agent_file_selection_prompt.md b/mentat/resources/prompts/markdown/agent_file_selection_prompt.md new file mode 100644 index 000000000..a6272fb1d --- /dev/null +++ b/mentat/resources/prompts/markdown/agent_file_selection_prompt.md @@ -0,0 +1,23 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +You are responsible for conducting smoke testing on a codebase. This involves identifying and using specific commands to lint, test, and run the code, with the aim of detecting any errors. + +### Identifying Commands +1. **Objective**: Find commands to lint, test, and run the code, detecting any errors. +2. **Examples for Python**: + - `pytest ` + - `pyright ` + - `python ` + +### Requesting Codebase Files +1. **Procedure**: Based on a provided map of the codebase, identify and request necessary files. +2. **File Request Format**: Use the specific format to request files. Examples: + - `path/to/file.json` + - `path/to/another/file.txt` + +### Important Notes +- **Avoid Additional Context**: Do not provide extra information beyond what is requested. +- **Single Opportunity**: You have only one chance to request the necessary files. \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/block_parser_prompt.md b/mentat/resources/prompts/markdown/block_parser_prompt.md new file mode 100644 index 000000000..d8cd824a3 --- /dev/null +++ b/mentat/resources/prompts/markdown/block_parser_prompt.md @@ -0,0 +1,270 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +- **Role**: Agent in an automated coding system. +- **Responsibilities**: Responding to user requests involving code modifications. + +### User Requests Include +- Adding new features. +- Updating existing code. +- Fixing bugs. +- Adding comments or docstrings. + +### Response Structure +1. **Summary of Planned Changes**: + - Begin with a brief summary of the changes you plan to implement. + +2. **Detailed List of Changes**: + - Include a structured list of all planned changes. + - Plan for necessary additions like imports. + +3. **Code Edit Format** + - Utilize edit types: insert, deletes, replacements, creating new files, deleting existing files, renaming existing files. + - Allow multi-line edits. + - Start each edit description with `@@start` and end with `@@end`. + - For delete or delete-file actions, use a JSON formatted section only. + - For insert, replace, create-file actions, include `@@code` followed by the code lines. + - Exclude edit description blocks for non-code changes (e.g., design ideas). + +### Edit Types Examples + +#### 1. Insert Object +Used to insert code into a file. +```text +@@start +{ + "file": "core/script.py", + "action": "insert", + "insert-after-line": 3, + "insert-before-line": 4 +} +@@code + if name == "Bob": + print("Nice to see you again!") +@@end +``` + +#### 2. Create File Object +Used to create a new file. +```text +@@start +{ + "file": "core/utils.py", + "action": "create-file" +} +@@code +def get_name(): + return input("Enter your name: ") +@@end +``` + +#### 3. Replace Object +Used to replace code in a file. 
+```text +@@start +{ + "file": "core/script.py", + "action": "replace", + "start-line": 10, + "end-line": 10 +} +@@code +def main(): + name = get_name() +@@end +``` + +#### 4. Delete Object +Used to delete code in a file. +```text +@@start +{ + "file": "core/script.py", + "action": "delete", + "start-line": 13, + "end-line": 13 +} +@@end +``` + +#### 5. Rename File Object +Used to rename files. +```text +@@start +{ + "file": "core/hello_world.py", + "action": "rename-file", + "name": "core/hello_again.py" +} +@@end +``` + +#### 6. Delete File Object +Used to delete files. +```text +@@start +{ + "file": "core/goodbye_world.py", + "action": "delete-file" +} +@@end +``` + +### Examples +Below are examples of how user-provided requests and agents final answers: + +#### Example 1: Specific Code Modification Request +##### User Request: +```text +Code Files: + +core/script.py +1: +2:def say_hello(name): +3: print(f"Hello {name}!") +4: +5: +6:def say_goodbye(): +7: print("Goodbye!") +8: +9: +10:def main(name): +11: say_hello(name) +12: say_goodbye() +13: print("Done!") +14: + +core/hello_world.py +1: +2:def hello_world(): +3: print("Hello, World!") +4: + +User Request: +After saying hello, if the user's name is "Bob", say "Nice to see you again!" on another line. +Add a function to get the user's name and use it in main instead of taking name as an argument. +The new function should be in a separate file called utils.py. Stop saying "Done!". Then, +rename hello_world.py to hello_again.py. Finally, delete the goodbye_world.py file. +``` +##### Final Response: +```text +I will make the modifications to script.py and create the new file, importing from it in script.py. + +Steps: +1. Modify `say_hello`, adding the case for Bob. +2. Create `utils.py` with a function to get the user's name. +3. Import the new function in `script.py`. +4. Modify main to use the new function instead of taking name as an argument. +5. Remove the line printing "Done!". +6. Rename the file `hello_world.py` to `hello_again.py` +7. Delete file `goodbye_world.py` + +@@start +{ + "file": "core/script.py", + "action": "insert", + "insert-after-line": 3, + "insert-before-line": 4 +} +@@code + if name == "Bob": + print("Nice to see you again!") +@@end +@@start +{ + "file": "core/utils.py", + "action": "create-file" +} +@@code +def get_name(): + return input("Enter your name: ") +@@end +@@start +{ + "file": "core/script.py", + "action": "insert", + "insert-after-line": 0, + "insert-before-line": 1 +} +@@code +from core.utils import get_name +@@end +@@start +{ + "file": "core/script.py", + "action": "replace", + "start-line": 10, + "end-line": 10 +} +@@code +def main(): + name = get_name() +@@end +@@start +{ + "file": "core/script.py", + "action": "delete", + "start-line": 13, + "end-line": 13 +} +@@end +@@start +{ + "file": "core/hello_world.py", + "action": "rename-file", + "name": "core/hello_again.py" +} +@@end +@@start +{ + "file": "core/goodbye_world.py", + "action": "delete-file" +} +@@end +``` + +#### Example 2: Function Import and Modification +##### User Request: +```text +Code Files: + +core/hello_world.py +1: +2:def hello_world(): +3: print("Hello, World!") +4: + +User Request: +Call say_goodbye after printing hello world +``` +##### Final Response: +```text +I will make the modifications to hello_world.py + +Steps: +1. Import the `say_goodbye` function in `hello_world.py` +2. 
Modify `hello_world.py`, adding a function call for `say_goodbye` + +@@start +{ + "file": "core/hello_world.py", + "action": "insert", + "insert-after-line": 0, + "insert-before-line": 1 +} +@@code +from core.script import say_goodbye +@@end +@@start +{ + "file": "core/hello_world.py", + "action": "insert", + "insert-after-line": 4, + "insert-before-line": 5 +} +@@code + say_goodbye() +@@end +``` \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/feature_selection_prompt.md b/mentat/resources/prompts/markdown/feature_selection_prompt.md new file mode 100644 index 000000000..559afda6d --- /dev/null +++ b/mentat/resources/prompts/markdown/feature_selection_prompt.md @@ -0,0 +1,20 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +Your role is to act as part of an automated coding system. Your task is to read a User Query, then identify and return relevant sections from the Code Files that address the query. The returned sections should be in a JSON-parsable format. + +## Compliance Guidelines + +1. **Understanding the User Query**: Fully comprehend the user's query to accurately select the necessary code sections. + +2. **Selection Criteria**: + - Choose files and specific lines that would be modified (edited, added, or deleted) in response to the query. + - If an 'Expected Edits' list is provided, include all lines affected by these edits. + +3. **Identification of Interacting Elements**: Identify variables and functions that interact with the chosen code. Include them in your selection if their behavior is critical for implementing the expected edits. + +4. **Merging Sections**: Combine nearby selected sections (less than 5 lines apart) into larger sections or entire files for better context. + +5. **JSON-Parsable Response**: Ensure the response format is JSON-parsable, following the schema "path:startline-endline". Example format: `"[mydir/file_a, mydir/file_b:10-34]"`. \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/json_parser_prompt.md b/mentat/resources/prompts/markdown/json_parser_prompt.md new file mode 100644 index 000000000..cac842923 --- /dev/null +++ b/mentat/resources/prompts/markdown/json_parser_prompt.md @@ -0,0 +1,89 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +Agents are part of an automated coding system and must respond with valid JSON, adhering to a specific format. + +## Compliance Guidelines + +### General Instructions +- **Input**: User request, code file contents, and related information. +- **Output**: A JSON object with a field "content" containing a list of valid JSON objects. + +### Types of JSON Objects + +#### 1. Comment Object +Used to inform the user of planned changes. +```json +{ + "type": "comment", + "content": "Summary of planned changes." +} +``` + +#### 2. Edit Object +Replaces lines between specified start and end lines in a file. +```json +{ + "type": "edit", + "filename": "file_to_edit.py", + "starting-line": 2, + "ending-line": 4, + "content": "Replacement content" +} +``` + +#### 3. File Creation Object +Creates a new file. +```json +{ + "type": "creation", + "filename": "new_file.py" +} +``` + +#### 4. File Deletion Object +Deletes a specified file. +```json +{ + "type": "deletion", + "filename": "to_be_deleted.py" +} +``` + +#### 5. File Rename Object +Renames a specified file. 
+```json +{ + "type": "rename", + "filename": "original_name.py", + "new-filename": "new_name.py" +} +``` + +### Important Notes +- **Line Numbering**: The starting line is inclusive, and the ending line is exclusive. +- **Order of Fields**: Maintain the given order of fields in the response. + +### Example Response +Here's an example of how to format a response to a user request: + +```json +{ + "content": [ + { + "type": "comment", + "content": "Planned modification steps..." + }, + { + "type": "edit", + "filename": "file1.py", + "starting-line": x, + "ending-line": y, + "content": "Edit content" + }, + ... + ] +} +``` \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/replacement_parser_prompt.md b/mentat/resources/prompts/markdown/replacement_parser_prompt.md new file mode 100644 index 000000000..51eef6c21 --- /dev/null +++ b/mentat/resources/prompts/markdown/replacement_parser_prompt.md @@ -0,0 +1,54 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +You are to act as part of an automated coding system, processing user requests for code modifications. Your response must be formatted precisely for programmatic parsing. + +## Compliance Guidelines +1. **Response Structure**: Organize your response in two distinct parts: a summary of planned changes and the changes in the required edit format. +2. **Summary of Changes**: Begin with a brief summary listing the changes you intend to make. +3. **Code Edit Format**: Follow the specific format for code edits, as detailed below. +4. **Code Edit Markers**: Use `@` to mark the beginning of a code edit. Include the file name and relevant line numbers or indicators for new, deleted, or renamed files. +5. **Inserting and Deleting Lines**: For inserting or deleting lines, adhere to the specified formats. +6. **Avoiding Duplication**: Ensure no duplication of existing lines. If inserting identical code, replace any duplicated lines. +7. **Import Statements**: Before writing an import statement, verify that it isn't already imported. +8. **Indentation**: Maintain correct indentation in your code changes. + +## Edit Format Instructions: +- **Creating a New File**: `@ +` +- **Deleting a File**: `@ -` +- **Renaming a File**: `@ ` +- **Replacing Code Section**: `@ starting_line= ending_line=` (followed by the new code lines, ending with `@`). +- **Deleting Lines Without Adding New Ones**: Leave no lines between the starting `@` and the ending `@`. +- **Inserting Lines Without Deleting**: `@ insert_line=` (followed by the lines to insert, ending with `@`). + +## Example Task and Response +### User Request: +Replace the `hello_world` function with a `goodbye_world` function in `core/hello_world.py`. Insert a new line "Goodbye, name" after "Hello, name". Rename the file to `goodbye_world.py`. Create a new file `test.py` with the line "testing...". + +### Example Response: +**Summary of Changes**: +1. Replace `hello_world` function with `goodbye_world`. +2. Insert new "Goodbye, name" line. +3. Rename `hello_world.py` to `goodbye_world.py`. +4. Create `test.py` and add "testing..." line. 
+ +**Code Edits**: +``` +@ core/hello_world.py starting_line=2 ending_line=4 +def goodbye_world(): + print("Goodbye, World!") +@ +@ core/hello_world.py starting_line=6 ending_line=7 + goodbye_world() +@ +@ core/hello_world.py insert_line=8 + print(f"Goodbye, {name}!") +@ +@ core/hello_world.py core/goodbye_world.py +@ core/test.py + +@ core/test.py insert_line=1 +print("testing...") +@ +``` \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/unified_diff_parser_prompt.md b/mentat/resources/prompts/markdown/unified_diff_parser_prompt.md new file mode 100644 index 000000000..36df7a51f --- /dev/null +++ b/mentat/resources/prompts/markdown/unified_diff_parser_prompt.md @@ -0,0 +1,92 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +You are to act as an automated coding system, processing user requests for code modifications and file management. Your output must follow a specific format for programmatic parsing. + +## Compliance Guidelines + +### Instruction Style +- Directly address the LLM Agent. +- Be precise and unambiguous. + +### Initial Prompt Structure +- Start with a concise statement of the task. + +### User Instructions Identification +- Clearly identify and interpret user instructions. + +### Markdown Formatting +- Use Markdown for clear organization. + +### Permitted Modifications +- Enhance clarity without altering intent. + +### Clear and Structured Output +- Ensure outputs are organized and easy to follow. + +### Conciseness and Relevance +- Maintain focus on the task. + +## Reformatted Instructions + +1. **Summarize Planned Changes**: Begin your response with a brief summary of the changes you will implement. + +2. **List of Changes**: + - Itemize the steps involved in the code modification and file management process. + +3. **Edit Format**: + - Use a git diff-like format for edits. + - Start edits with `--- ` and `+++ ` lines to indicate the file being edited. + - Use `--- /dev/null` for file creation and `+++ /dev/null` for file deletion. + - In the git diff section, prefix context lines with a space, deleted lines with a `-`, and added lines with a `+`. + - Separate different sections of code with `@@ @@` markers. + - Conclude the diff with a `@@ end @@` marker. + +4. **Context Requirement**: + - Always provide context for additions unless the file is empty. + - Context lines must match the lines in the file for acceptance. + +5. **Demonstration**: + - Provide an example user request and a corresponding example response following the above format. + +### Example User Request +User requests to modify `core/hello_world.py` by replacing `hello_world` function with `goodbye_world`, adding a new line, renaming the file, and creating a new file `test.py`. + +### Example Response +- **Summary of Changes**: + - Rename `hello_world.py` to `goodbye_world.py`. + - Replace `hello_world` with `goodbye_world`. + - Add a new `Goodbye, name` line. + - Create `test.py` and add content. + - Delete `test.py`. + +- **Git Diff Format**: + - Edit `core/hello_world.py` and rename to `core/goodbye_world.py`. + - Edit `test.py` creation and deletion. 
+ +```diff +--- core/hello_world.py ++++ core/goodbye_world.py +@@ @@ +-def hello_world(): +- print("Hello, World!") ++def goodbye_world(): ++ print("Goodbye, World!") +@@ @@ + def main(name): +- hello_world() ++ goodbye_world() + print(f"Hello, {name}!") ++ print(f"Goodbye, {name}!") +@@ end @@ +--- /dev/null ++++ test.py +@@ @@ ++print("testing...") +@@ end @@ +--- test.py ++++ /dev/null +@@ end @@ +``` \ No newline at end of file diff --git a/mentat/resources/prompts/agent_command_selection_prompt.txt b/mentat/resources/prompts/text/agent_command_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/agent_command_selection_prompt.txt rename to mentat/resources/prompts/text/agent_command_selection_prompt.txt diff --git a/mentat/resources/prompts/agent_file_selection_prompt.txt b/mentat/resources/prompts/text/agent_file_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/agent_file_selection_prompt.txt rename to mentat/resources/prompts/text/agent_file_selection_prompt.txt diff --git a/mentat/resources/prompts/block_parser_prompt.txt b/mentat/resources/prompts/text/block_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/block_parser_prompt.txt rename to mentat/resources/prompts/text/block_parser_prompt.txt diff --git a/mentat/resources/prompts/feature_selection_prompt.txt b/mentat/resources/prompts/text/feature_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/feature_selection_prompt.txt rename to mentat/resources/prompts/text/feature_selection_prompt.txt diff --git a/mentat/resources/prompts/json_parser_prompt.txt b/mentat/resources/prompts/text/json_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/json_parser_prompt.txt rename to mentat/resources/prompts/text/json_parser_prompt.txt diff --git a/mentat/resources/prompts/replacement_parser_prompt.txt b/mentat/resources/prompts/text/replacement_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/replacement_parser_prompt.txt rename to mentat/resources/prompts/text/replacement_parser_prompt.txt diff --git a/mentat/resources/prompts/unified_diff_parser_prompt.txt b/mentat/resources/prompts/text/unified_diff_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/unified_diff_parser_prompt.txt rename to mentat/resources/prompts/text/unified_diff_parser_prompt.txt diff --git a/mentat/sampler/sampler.py b/mentat/sampler/sampler.py index eed7a8c2e..6c3b844a4 100644 --- a/mentat/sampler/sampler.py +++ b/mentat/sampler/sampler.py @@ -5,7 +5,9 @@ from git import GitCommandError, Repo # type: ignore from openai.types.chat import ChatCompletionMessageParam +import mentat from mentat.code_feature import get_consolidated_feature_refs +from mentat.config import is_active_plugin from mentat.errors import SampleError from mentat.git_handler import get_git_diff, get_git_root_for_path, get_hexsha_active from mentat.parsers.git_parser import GitParser @@ -17,22 +19,22 @@ def parse_message(message: ChatCompletionMessageParam) -> dict[str, str]: - ctx = SESSION_CONTEXT.get() content = message.get("content") text, code = "", "" + config = mentat.user_session.get("config") if isinstance(content, str): if message.get("role") != "assistant": text = content output = list[str]() in_special = False for line in content.splitlines(): - if ctx.config.parser._starts_special(line): # type: ignore + if config.parser.parser._starts_special(line): # type: ignore in_special = True if not 
in_special: output.append(line) else: pass # TODO: Convert to git diff format, replace 'code' above - if ctx.config.parser._ends_code(line): # type: ignore + if config.parser.parser._ends_code(line): # type: ignore in_special = False while output[-1] == "": output.pop() @@ -45,15 +47,29 @@ def parse_message(message: ChatCompletionMessageParam) -> dict[str, str]: class Sampler: + is_active: bool = False diff_active: str | None = None commit_active: str | None = None last_sample_id: str | None = None last_sample_hexsha: str | None = None + # set up the base config settings that sampler will use. + def __init__(self): + self.is_active = is_active_plugin("sampler") + if not mentat.user_session.get("sampler_settings"): + mentat.user_session.set( + "sampler_settings", + { + "repo": None, + "merge_base_target": None, + }, + ) + def set_active_diff(self): # Create a temporary commit with the active changes ctx = SESSION_CONTEXT.get() git_root = get_git_root_for_path(ctx.cwd, raise_error=False) + if not git_root: return repo = Repo(git_root) @@ -70,9 +86,10 @@ async def create_sample(self) -> Sample: session_context = SESSION_CONTEXT.get() stream = session_context.stream code_context = session_context.code_context - config = session_context.config conversation = session_context.conversation + sampler_config = mentat.user_session.get("sampler_settings") + git_root = get_git_root_for_path(session_context.cwd, raise_error=False) if not git_root: raise SampleError("No git repo found") @@ -80,8 +97,8 @@ async def create_sample(self) -> Sample: stream.send("Input sample data", color="light_blue") git_repo = Repo(git_root) merge_base = None - if config.sample_merge_base_target: - target = config.sample_merge_base_target + if sampler_config.get("merge_base_target"): + target = sampler_config.get("merge_base_target") stream.send(f"Use merge base target from config ({target})? (y/N)") response = (await collect_user_input()).data.strip() if response == "y": @@ -108,7 +125,7 @@ async def create_sample(self) -> Sample: except (AssertionError, GitCommandError) as e: raise SampleError(f"Error getting diff for merge base: {e}") - repo = config.sample_repo + repo = sampler_config.get("repo") if not repo: remote_url = "" try: @@ -127,7 +144,14 @@ async def create_sample(self) -> Sample: repo = remote_url else: repo = response - config.sample_repo = repo + + mentat.user_session.set( + "sampler_settings", + { + "repo": repo, + "merge_base_target": sampler_config.get("merge_base_target"), + }, + ) stream.send("Sample Title:") title = (await collect_user_input()).data.strip() or "" diff --git a/mentat/session.py b/mentat/session.py index 149343a75..bb7a3e247 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -7,16 +7,15 @@ from typing import Any, Coroutine, List, Optional, Set from uuid import uuid4 -import attr import sentry_sdk from openai import APITimeoutError, BadRequestError, RateLimitError +import mentat from mentat.agent_handler import AgentHandler from mentat.auto_completer import AutoCompleter from mentat.code_context import CodeContext from mentat.code_edit_feedback import get_user_feedback_on_edits from mentat.code_file_manager import CodeFileManager -from mentat.config import Config from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.ctags import ensure_ctags_installed @@ -40,6 +39,8 @@ class Session: A message will be sent on the client_exit channel when ready for client to quit. 
""" + _errors: List[str] = [] # pyright: ignore[reportGeneralTypeIssues] + def __init__( self, cwd: Path, @@ -48,7 +49,6 @@ def __init__( ignore_paths: List[Path] = [], diff: Optional[str] = None, pr_diff: Optional[str] = None, - config: Config = Config(), ): # All errors thrown here need to be caught here self.stopped = False @@ -91,7 +91,6 @@ def __init__( stream, llm_api_handler, cost_tracker, - config, code_context, code_file_manager, conversation, @@ -106,9 +105,10 @@ def __init__( # Functions that require session_context check_version() - config.send_errors_to_stream() + self.send_errors_to_stream() for path in paths: code_context.include(path, exclude_patterns=exclude_paths) + if ( code_context.diff_context is not None and len(code_context.include_files) == 0 @@ -136,9 +136,10 @@ async def _main(self): conversation = session_context.conversation code_file_manager = session_context.code_file_manager agent_handler = session_context.agent_handler + config = mentat.user_session.get("config") # check early for ctags so we can fail fast - if session_context.config.auto_context_tokens > 0: + if config.run.auto_context_tokens > 0: ensure_ctags_installed() session_context.llm_api_handler.initialize_client() @@ -166,11 +167,13 @@ async def _main(self): conversation.add_user_message(message.data) parsed_llm_response = await conversation.get_model_response() + file_edits = [ file_edit for file_edit in parsed_llm_response.file_edits if file_edit.is_valid() ] + if file_edits: if not agent_handler.agent_enabled: file_edits, need_user_request = ( @@ -179,7 +182,7 @@ async def _main(self): for file_edit in file_edits: file_edit.resolve_conflicts() - if session_context.sampler: + if session_context.sampler and session_context.sampler.is_active: session_context.sampler.set_active_diff() applied_edits = await code_file_manager.write_changes_to_files( @@ -228,13 +231,27 @@ def start(self): the main loop which runs until an Exception or session_exit signal is encountered. 
""" + self.stream.send( + """ +███╗ ███╗███████╗███╗ ██╗████████╗ █████╗ ████████╗ +████╗ ████║██╔════╝████╗ ██║╚══██╔══╝██╔══██╗╚══██╔══╝ +██╔████╔██║█████╗ ██╔██╗ ██║ ██║ ███████║ ██║ +██║╚██╔╝██║██╔══╝ ██║╚██╗██║ ██║ ██╔══██║ ██║ +██║ ╚═╝ ██║███████╗██║ ╚████║ ██║ ██║ ██║ ██║ +╚═╝ ╚═╝╚══════╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ +------------------------------------------------------- + It is by will alone I set my mind in motion + """, + color="purple", + ) + async def run_main(): - ctx = SESSION_CONTEXT.get() try: with sentry_sdk.start_transaction( op="mentat_started", name="Mentat Started" ) as transaction: - transaction.set_tag("config", attr.asdict(ctx.config)) + # transaction.set_tag("config", attr.asdict(ctx.config)) + transaction.set_tag("config", "config") await self._main() except (SessionExit, CancelledError): pass @@ -283,3 +300,10 @@ async def _stop(self): self.stream.send(None, channel="client_exit") await self.stream.join() self.stream.stop() + + def send_errors_to_stream(self): + session_context = SESSION_CONTEXT.get() + stream = session_context.stream + for error in self._errors: + stream.send(str(error), color="yellow") + self._errors = [] diff --git a/mentat/session_context.py b/mentat/session_context.py index 6c88969cb..cae5b070f 100644 --- a/mentat/session_context.py +++ b/mentat/session_context.py @@ -11,7 +11,6 @@ from mentat.auto_completer import AutoCompleter from mentat.code_context import CodeContext from mentat.code_file_manager import CodeFileManager - from mentat.config import Config from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.llm_api_handler import LlmApiHandler @@ -28,7 +27,6 @@ class SessionContext: stream: SessionStream = attr.field() llm_api_handler: LlmApiHandler = attr.field() cost_tracker: CostTracker = attr.field() - config: Config = attr.field() code_context: CodeContext = attr.field() code_file_manager: CodeFileManager = attr.field() conversation: Conversation = attr.field() diff --git a/mentat/session_stream.py b/mentat/session_stream.py index 09751d3f9..7bf47a211 100644 --- a/mentat/session_stream.py +++ b/mentat/session_stream.py @@ -82,7 +82,6 @@ def send( created_at=datetime.utcnow(), extra=kwargs, ) - self.messages.append(message) self._broadcast.publish(channel=channel, message=message) diff --git a/mentat/terminal/__init__.py b/mentat/terminal/__init__.py index e69de29bb..5a58fafc0 100644 --- a/mentat/terminal/__init__.py +++ b/mentat/terminal/__init__.py @@ -0,0 +1,4 @@ +from mentat.config import load_config + +# first thing we do is we init a default config +load_config() diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 2d23c30fb..6cae1b9fa 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -1,17 +1,18 @@ -import argparse import asyncio import logging import signal from asyncio import Event from pathlib import Path from types import FrameType -from typing import Any, Coroutine, List, Set +from typing import Any, Coroutine, List, Optional, Set +import click from prompt_toolkit import PromptSession from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent from prompt_toolkit.styles import Style -from mentat.config import Config +import mentat +from mentat.config import update_config from mentat.session import Session from mentat.session_stream import StreamMessageSource from mentat.terminal.loading import LoadingHandler @@ -29,7 +30,6 @@ def __init__( ignore_paths: List[str] = [], diff: str | None = None, pr_diff: str | None = None, - 
config: Config = Config(), ): self.cwd = cwd self.paths = [Path(path) for path in paths] @@ -37,7 +37,6 @@ def __init__( self.ignore_paths = [Path(path) for path in ignore_paths] self.diff = diff self.pr_diff = pr_diff - self.config = config self._tasks: Set[asyncio.Task[None]] = set() self._should_exit = Event() @@ -136,6 +135,7 @@ def _init_signal_handlers(self): async def _run(self): self._init_signal_handlers() + self.session = Session( self.cwd, self.paths, @@ -143,14 +143,16 @@ async def _run(self): self.ignore_paths, self.diff, self.pr_diff, - self.config, ) self.session.start() + config = mentat.user_session.get("config") + style = Style.from_dict(config.ui.input_style) + mentat_completer = MentatCompleter(self.session.stream) self._prompt_session = MentatPromptSession( completer=mentat_completer, - style=Style(self.config.input_style), + style=style, enable_suspend=True, ) @@ -166,7 +168,7 @@ def _(event: KeyPressEvent): self._plain_session = PromptSession[str]( message=[("class:prompt", ">>> ")], - style=Style(self.config.input_style), + style=style, completer=None, key_bindings=plain_bindings, enable_suspend=True, @@ -194,72 +196,74 @@ def run(self): asyncio.run(self._run()) -def run_cli(): - parser = argparse.ArgumentParser( - description="Run conversation with command line args" - ) - parser.add_argument( - "paths", - nargs="*", - default=[], - help="List of file paths, directory paths, or glob patterns", - ) - parser.add_argument( - "--exclude", - "-e", - nargs="*", - default=[], - help="List of file paths, directory paths, or glob patterns to exclude", - ) - parser.add_argument( - "--ignore", - "-g", - nargs="*", - default=[], - help=( - "List of file paths, directory paths, or glob patterns to ignore in" - " auto-context" - ), - ) - parser.add_argument( - "--diff", - "-d", - nargs="?", - type=str, - default=None, - const="HEAD", - help="A git tree-ish (e.g. commit, branch, tag) to diff against", - ) - parser.add_argument( - "--pr-diff", - "-p", - type=str, - default=None, - help="A git tree-ish to diff against the latest common ancestor of", - ) - parser.add_argument( - "--cwd", default=Path.cwd(), help="The current working directory" - ) - - Config.add_fields_to_argparse(parser) - args = parser.parse_args() - - cwd = Path(args.cwd).expanduser().resolve() - paths = args.paths - exclude_paths = args.exclude - ignore_paths = args.ignore - diff = args.diff - pr_diff = args.pr_diff - - config = Config.create(cwd, args) +# Event handlers for all the buttons. + + +@click.command() +@click.option( + "-e", + "--exclude-paths", + multiple=True, + default=[], + help="List of file paths, directory paths, or glob patterns to exclude.", +) +@click.option( + "-g", + "--ignore-paths", + multiple=True, + default=[], + help=( + "List of file paths, directory paths, or glob patterns to ignore in" + " auto-context." + ), +) +@click.option( + "-d", + "--diff", + default=None, + show_default="HEAD", + help="A git tree-ish (e.g. 
commit, branch, tag) to diff against.", +) +@click.option( + "-p", + "--pr-diff", + default=None, + help="A git tree-ish to diff against the latest common ancestor of.", +) +@click.option("--cwd", default=str(Path.cwd()), help="The current working directory.") +@click.option("--model", default=None, help="The Model to use.") +@click.option("--temperature", default=None, help="The Model Temperature to use.") +@click.option("--maximum-context", default=None, help="The Maximum Context") +@click.argument("paths", nargs=-1, required=True) +def start( + paths: list[str], + exclude_paths: list[str], + ignore_paths: list[str], + diff: Optional[str], + pr_diff: Optional[str], + cwd: Optional[str], + model: Optional[str], + temperature: Optional[float], + maximum_context: Optional[int], +) -> None: + + if model is not None: + update_config("model", model) + if temperature is not None: + update_config("temperature", temperature) + if maximum_context is not None: + update_config("maximum_context", maximum_context) + + current_working_directory = Path.cwd() + if cwd: + current_working_directory = Path(cwd).expanduser().resolve() terminal_client = TerminalClient( - cwd, - paths, - exclude_paths, - ignore_paths, - diff, - pr_diff, - config, + current_working_directory, paths, exclude_paths, ignore_paths, diff, pr_diff ) + terminal_client.run() + + +if __name__ == "__main__": + start() diff --git a/mentat/terminal/output.py b/mentat/terminal/output.py index 3d770a963..ee8d5ae76 100644 --- a/mentat/terminal/output.py +++ b/mentat/terminal/output.py @@ -36,7 +36,6 @@ def print_stream_message(message: StreamMessage): color = message.extra["color"] if isinstance(message.extra.get("flush"), bool): flush = message.extra["flush"] - _print_stream_message_string( content=message.data, end=end, diff --git a/mentat/user_session.py b/mentat/user_session.py new file mode 100644 index 000000000..97388ced8 --- /dev/null +++ b/mentat/user_session.py @@ -0,0 +1,19 @@ +from typing import Any, Dict + +user_session_store: Dict[str, Any] = {} + + +class UserSession: + """ + Developer facing user session class. + Useful for the developer to store user specific data between calls. + """ + + def get(self, key: str, default: Any = None) -> Any: + return user_session_store.get(key, default) + + def set(self, key: str, value: Any) -> None: + user_session_store[key] = value + + +user_session = UserSession() diff --git a/mentat/utils.py b/mentat/utils.py index 1cae3f70f..dd3413730 100644 --- a/mentat/utils.py +++ b/mentat/utils.py @@ -3,11 +3,13 @@ import asyncio import hashlib import os +import pprint +import sys import time from importlib import resources from importlib.abc import Traversable from pathlib import Path -from typing import TYPE_CHECKING, AsyncIterator, List, Literal, Optional, Union +from typing import TYPE_CHECKING, Any, AsyncIterator, List, Literal, Optional, Union import packaging.version import requests @@ -178,6 +180,55 @@ def get_relative_path(path: Path, target: Path) -> Path: return relative_path +def dd(args: Any): + """ + This method dd takes an argument args and performs the following operations: + + 1. Checks if any arguments are provided. If not, raises a ValueError with the message "No args provided". + + 2. Prints the argument args in a pretty format using pprint.pprint(). + + 3. Handles any exception that might occur, and prints the exception message. + + 4. Finally, exits the program using sys.exit(). + + Note: This method does not return any value. 
+ + Example usage: + args = [1, 2, 3] + dd(args) + """ + try: + # Throw an exception if needed + if not args: + raise ValueError("No args provided") + + # Pretty print the argument + pprint.pprint(args) + + except Exception as e: + print(f"Exception occurred: {e}") + + finally: + # Exit the program + sys.exit() + + +def dump(args: Any): + """ + This method dd takes an argument args and performs the following operations: + + 1. Checks if any arguments are provided. If not, raises a ValueError with the message "No args provided". + + Note: This method does not return any value. + + Example usage: + args = [1, 2, 3] + dump(args) + """ + pprint.pprint(args) + + CLONE_TO_DIR = Path(__file__).parent.parent / "benchmark_repos" diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 000000000..b40f0faae --- /dev/null +++ b/poetry.lock @@ -0,0 +1,1943 @@ +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. + +[[package]] +name = "aiomultiprocess" +version = "0.9.0" +description = "AsyncIO version of the standard multiprocessing module" +optional = false +python-versions = ">=3.6" +files = [ + {file = "aiomultiprocess-0.9.0-py3-none-any.whl", hash = "sha256:3036c4c881cfbc63674686e036097f22309017c6bf96b04722a542ac9cac7423"}, + {file = "aiomultiprocess-0.9.0.tar.gz", hash = "sha256:07e7d5657697678d9d2825d4732dfd7655139762dee665167380797c02c68848"}, +] + +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[[package]] +name = "anyio" +version = "4.2.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"}, + {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] 
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "black" +version = "23.12.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.8" +files = [ + {file = "black-23.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67f19562d367468ab59bd6c36a72b2c84bc2f16b59788690e02bbcb140a77175"}, + {file = "black-23.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbd75d9f28a7283b7426160ca21c5bd640ca7cd8ef6630b4754b6df9e2da8462"}, + {file = "black-23.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:593596f699ca2dcbbbdfa59fcda7d8ad6604370c10228223cd6cf6ce1ce7ed7e"}, + {file = "black-23.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:12d5f10cce8dc27202e9a252acd1c9a426c83f95496c959406c96b785a92bb7d"}, + {file = "black-23.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e73c5e3d37e5a3513d16b33305713237a234396ae56769b839d7c40759b8a41c"}, + {file = "black-23.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba09cae1657c4f8a8c9ff6cfd4a6baaf915bb4ef7d03acffe6a2f6585fa1bd01"}, + {file = "black-23.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace64c1a349c162d6da3cef91e3b0e78c4fc596ffde9413efa0525456148873d"}, + {file = "black-23.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:72db37a2266b16d256b3ea88b9affcdd5c41a74db551ec3dd4609a59c17d25bf"}, + {file = "black-23.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fdf6f23c83078a6c8da2442f4d4eeb19c28ac2a6416da7671b72f0295c4a697b"}, + {file = "black-23.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39dda060b9b395a6b7bf9c5db28ac87b3c3f48d4fdff470fa8a94ab8271da47e"}, + {file = "black-23.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7231670266ca5191a76cb838185d9be59cfa4f5dd401b7c1c70b993c58f6b1b5"}, + {file = "black-23.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:193946e634e80bfb3aec41830f5d7431f8dd5b20d11d89be14b84a97c6b8bc75"}, + {file = "black-23.12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcf91b01ddd91a2fed9a8006d7baa94ccefe7e518556470cf40213bd3d44bbbc"}, + {file = "black-23.12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:996650a89fe5892714ea4ea87bc45e41a59a1e01675c42c433a35b490e5aa3f0"}, + {file = "black-23.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdbff34c487239a63d86db0c9385b27cdd68b1bfa4e706aa74bb94a435403672"}, + {file = "black-23.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:97af22278043a6a1272daca10a6f4d36c04dfa77e61cbaaf4482e08f3640e9f0"}, + {file = "black-23.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ead25c273adfad1095a8ad32afdb8304933efba56e3c1d31b0fee4143a1e424a"}, + {file = "black-23.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c71048345bdbced456cddf1622832276d98a710196b842407840ae8055ade6ee"}, + {file = "black-23.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a832b6e00eef2c13b3239d514ea3b7d5cc3eaa03d0474eedcbbda59441ba5d"}, + {file = 
"black-23.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:6a82a711d13e61840fb11a6dfecc7287f2424f1ca34765e70c909a35ffa7fb95"}, + {file = "black-23.12.0-py3-none-any.whl", hash = "sha256:a7c07db8200b5315dc07e331dda4d889a56f6bf4db6a9c2a526fa3166a81614f"}, + {file = "black-23.12.0.tar.gz", hash = "sha256:330a327b422aca0634ecd115985c1c7fd7bdb5b5a2ef8aa9888a82e2ebe9437a"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "certifi" +version = "2023.11.17" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, + {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = 
"cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = 
"cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dataclasses-json" +version = "0.6.3" +description = "Easily serialize dataclasses to and from JSON." 
+optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "dataclasses_json-0.6.3-py3-none-any.whl", hash = "sha256:4aeb343357997396f6bca1acae64e486c3a723d8f5c76301888abeccf0c45176"}, + {file = "dataclasses_json-0.6.3.tar.gz", hash = "sha256:35cb40aae824736fdf959801356641836365219cfe14caeb115c39136f775d2a"}, +] + +[package.dependencies] +marshmallow = ">=3.18.0,<4.0.0" +typing-inspect = ">=0.4.0,<1" + +[[package]] +name = "distro" +version = "1.8.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, + {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "execnet" +version = "2.0.2" +description = "execnet: rapid multi-Python deployment" +optional = false +python-versions = ">=3.7" +files = [ + {file = "execnet-2.0.2-py3-none-any.whl", hash = "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41"}, + {file = "execnet-2.0.2.tar.gz", hash = "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + +[[package]] +name = "fire" +version = "0.5.0" +description = "A library for automatically generating command line interfaces." 
+optional = false +python-versions = "*" +files = [ + {file = "fire-0.5.0.tar.gz", hash = "sha256:a6b0d49e98c8963910021f92bba66f65ab440da2982b78eb1bbf95a0a34aacc6"}, +] + +[package.dependencies] +six = "*" +termcolor = "*" + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.40" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"}, + {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.2" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"}, + {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.23.0)"] + +[[package]] +name = "httpx" +version = "0.25.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"}, + {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "idna" +version = "3.6" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jsonschema" +version = "4.20.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.20.0-py3-none-any.whl", hash = "sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3"}, + {file = "jsonschema-4.20.0.tar.gz", hash = "sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.11.2" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.11.2-py3-none-any.whl", hash = "sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93"}, + {file = "jsonschema_specifications-2023.11.2.tar.gz", hash = "sha256:9472fc4fea474cd74bea4a2b190daeccb5a9e4db2ea80efcf7a1b582fc9a81b8"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = 
"MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = 
"MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "marshmallow" +version = "3.20.1" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.20.1-py3-none-any.whl", hash = "sha256:684939db93e80ad3561392f47be0230743131560a41c5110684c16e21ade0a5c"}, + {file = "marshmallow-3.20.1.tar.gz", hash = "sha256:5d2371bbe42000f2b3fb5eaa065224df7d8f8597bc19a1bbfa5bfe7fba8da889"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"] +docs = ["alabaster (==0.7.13)", "autodocsumm (==0.2.11)", "sphinx (==7.0.1)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] +lint = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)"] +tests = ["pytest", "pytz", "simplejson"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.8.0" +description = "Node.js virtual environment builder" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.26.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, + {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, + {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, + {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, + {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, + {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, + {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, + {file = 
"numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, + {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, + {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, + {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, + {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, + {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, +] + +[[package]] +name = "openai" +version = "1.5.0" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.7.1" +files = [ + {file = "openai-1.5.0-py3-none-any.whl", hash = "sha256:42d8c84b0714c990e18afe81d37f8a64423e8196bf7157b8ea665b8d8f393253"}, + {file = "openai-1.5.0.tar.gz", hash = "sha256:4cd91e97988ccd6c44f815107def9495cbc718aeb8b28be33a87b6fa2c432508"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.5,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] + +[[package]] +name = "outcome" +version = "1.3.0.post0" +description = "Capture 
the outcome of Python function calls." +optional = false +python-versions = ">=3.7" +files = [ + {file = "outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"}, + {file = "outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8"}, +] + +[package.dependencies] +attrs = ">=19.2.0" + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pillow" +version = "10.1.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, + {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, + {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, + 
{file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, + {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, + {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, + {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, + {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, + {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "pip-licenses" +version = "4.3.3" +description = "Dump the software license list of Python packages installed with pip." +optional = false +python-versions = "~=3.8" +files = [ + {file = "pip-licenses-4.3.3.tar.gz", hash = "sha256:d14447094135eb5e43e4d9e1e3bcdb17a05751a9199df2d07f043a542c241c7a"}, + {file = "pip_licenses-4.3.3-py3-none-any.whl", hash = "sha256:1b697cace3149d7d380307bb1f1e0505f0db98f25fada64d32b7e6240f37f72c"}, +] + +[package.dependencies] +prettytable = ">=2.3.0" + +[package.extras] +test = ["docutils", "mypy", "pytest-cov", "pytest-pycodestyle", "pytest-runner"] + +[[package]] +name = "platformdirs" +version = "4.1.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. 
a \"user data dir\"." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, + {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "prettytable" +version = "3.9.0" +description = "A simple Python library for easily displaying tabular data in a visually appealing ASCII table format" +optional = false +python-versions = ">=3.8" +files = [ + {file = "prettytable-3.9.0-py3-none-any.whl", hash = "sha256:a71292ab7769a5de274b146b276ce938786f56c31cf7cea88b6f3775d82fe8c8"}, + {file = "prettytable-3.9.0.tar.gz", hash = "sha256:f4ed94803c23073a90620b201965e5dc0bccf1760b7a7eaf3158cab8aaffdf34"}, +] + +[package.dependencies] +wcwidth = "*" + +[package.extras] +tests = ["pytest", "pytest-cov", "pytest-lazy-fixture"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.43" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, + {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "2.5.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-2.5.2-py3-none-any.whl", hash = "sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0"}, + {file = "pydantic-2.5.2.tar.gz", hash = "sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.14.5" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.14.5" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.14.5-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd"}, + {file = 
"pydantic_core-2.14.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66"}, + {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997"}, + {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093"}, + {file = "pydantic_core-2.14.5-cp310-none-win32.whl", hash = "sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720"}, + {file = "pydantic_core-2.14.5-cp310-none-win_amd64.whl", hash = "sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b"}, + {file = "pydantic_core-2.14.5-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459"}, + {file = "pydantic_core-2.14.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4"}, + {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada"}, + {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda"}, + {file = "pydantic_core-2.14.5-cp311-none-win32.whl", hash = 
"sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651"}, + {file = "pydantic_core-2.14.5-cp311-none-win_amd64.whl", hash = "sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077"}, + {file = "pydantic_core-2.14.5-cp311-none-win_arm64.whl", hash = "sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf"}, + {file = "pydantic_core-2.14.5-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093"}, + {file = "pydantic_core-2.14.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e"}, + {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69"}, + {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d"}, + {file = "pydantic_core-2.14.5-cp312-none-win32.whl", hash = "sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260"}, + {file = "pydantic_core-2.14.5-cp312-none-win_amd64.whl", hash = "sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36"}, + {file = "pydantic_core-2.14.5-cp312-none-win_arm64.whl", hash = "sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325"}, + {file = "pydantic_core-2.14.5-cp37-none-win32.whl", hash = "sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405"}, + {file = "pydantic_core-2.14.5-cp37-none-win_amd64.whl", hash = "sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588"}, + {file = "pydantic_core-2.14.5-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf"}, + {file = "pydantic_core-2.14.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b"}, + {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec"}, + {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124"}, + {file = "pydantic_core-2.14.5-cp38-none-win32.whl", hash = "sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867"}, + {file = "pydantic_core-2.14.5-cp38-none-win_amd64.whl", hash = "sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d"}, + {file = "pydantic_core-2.14.5-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7"}, + {file = "pydantic_core-2.14.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955"}, + {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5"}, + {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209"}, + {file = "pydantic_core-2.14.5-cp39-none-win32.whl", hash = "sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6"}, + {file = "pydantic_core-2.14.5-cp39-none-win_amd64.whl", hash = "sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88"}, + {file = 
"pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3"}, + {file = "pydantic_core-2.14.5.tar.gz", hash = "sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pyright" +version = "1.1.341" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.341-py3-none-any.whl", hash = "sha256:f5800daf9d5780ebf6c6e04064a6d20da99c0ef16efd77526f83cc8d8551ff9f"}, + {file = "pyright-1.1.341.tar.gz", hash = "sha256:b891721f3abd10635cc4fd3076bcff5b7676567dc3a629997ed59a0d30034a87"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" + +[package.extras] +all = ["twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] + +[[package]] +name = "pysocks" +version = "1.7.1" +description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"}, + {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"}, + {file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"}, +] + +[[package]] +name = "pytest" +version = "7.4.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, + {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.21.1" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"}, + {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] + +[[package]] +name = "pytest-mock" +version = "3.12.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, 
+] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-reportlog" +version = "0.4.0" +description = "Replacement for the --resultlog option, focused in simplicity and extensibility" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-reportlog-0.4.0.tar.gz", hash = "sha256:c9f2079504ee51f776d3118dcf5e4730f163d3dcf26ebc8f600c1fa307bf638c"}, + {file = "pytest_reportlog-0.4.0-py3-none-any.whl", hash = "sha256:5db4d00586546d8c6b95c66466629f1e913440c36d97795a673d2e19c5cedd5c"}, +] + +[package.dependencies] +pytest = "*" + +[package.extras] +dev = ["pre-commit", "tox"] + +[[package]] +name = "pytest-xdist" +version = "3.5.0" +description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-xdist-3.5.0.tar.gz", hash = "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a"}, + {file = "pytest_xdist-3.5.0-py3-none-any.whl", hash = "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"}, +] + +[package.dependencies] +execnet = ">=1.1" +pytest = ">=6.2.0" + +[package.extras] +psutil = ["psutil (>=3.0)"] +setproctitle = ["setproctitle"] +testing = ["filelock"] + +[[package]] +name = "python-dotenv" +version = "1.0.0" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, + {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = 
"PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "referencing" +version = "0.32.0" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.32.0-py3-none-any.whl", hash = "sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99"}, + {file = "referencing-0.32.0.tar.gz", hash = "sha256:689e64fe121843dcfd57b71933318ef1f91188ffb45367332700a86ac8fd6161"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "regex" +version = "2023.10.3" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "regex-2023.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4c34d4f73ea738223a094d8e0ffd6d2c1a1b4c175da34d6b0de3d8d69bee6bcc"}, + {file = "regex-2023.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8f4e49fc3ce020f65411432183e6775f24e02dff617281094ba6ab079ef0915"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cd1bccf99d3ef1ab6ba835308ad85be040e6a11b0977ef7ea8c8005f01a3c29"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81dce2ddc9f6e8f543d94b05d56e70d03a0774d32f6cca53e978dc01e4fc75b8"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c6b4d23c04831e3ab61717a707a5d763b300213db49ca680edf8bf13ab5d91b"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c15ad0aee158a15e17e0495e1e18741573d04eb6da06d8b84af726cfc1ed02ee"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6239d4e2e0b52c8bd38c51b760cd870069f0bdf99700a62cd509d7a031749a55"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4a8bf76e3182797c6b1afa5b822d1d5802ff30284abe4599e1247be4fd6b03be"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9c727bbcf0065cbb20f39d2b4f932f8fa1631c3e01fcedc979bd4f51fe051c5"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3ccf2716add72f80714b9a63899b67fa711b654be3fcdd34fa391d2d274ce767"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:107ac60d1bfdc3edb53be75e2a52aff7481b92817cfdddd9b4519ccf0e54a6ff"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:00ba3c9818e33f1fa974693fb55d24cdc8ebafcb2e4207680669d8f8d7cca79a"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0a47efb1dbef13af9c9a54a94a0b814902e547b7f21acb29434504d18f36e3a"}, + {file = "regex-2023.10.3-cp310-cp310-win32.whl", hash = "sha256:36362386b813fa6c9146da6149a001b7bd063dabc4d49522a1f7aa65b725c7ec"}, + {file = "regex-2023.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:c65a3b5330b54103e7d21cac3f6bf3900d46f6d50138d73343d9e5b2900b2353"}, + {file = "regex-2023.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90a79bce019c442604662d17bf69df99090e24cdc6ad95b18b6725c2988a490e"}, + {file = "regex-2023.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c7964c2183c3e6cce3f497e3a9f49d182e969f2dc3aeeadfa18945ff7bdd7051"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef80829117a8061f974b2fda8ec799717242353bff55f8a29411794d635d964"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5addc9d0209a9afca5fc070f93b726bf7003bd63a427f65ef797a931782e7edc"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c148bec483cc4b421562b4bcedb8e28a3b84fcc8f0aa4418e10898f3c2c0eb9b"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d1f21af4c1539051049796a0f50aa342f9a27cde57318f2fc41ed50b0dbc4ac"}, + {file = 
"regex-2023.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b9ac09853b2a3e0d0082104036579809679e7715671cfbf89d83c1cb2a30f58"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ebedc192abbc7fd13c5ee800e83a6df252bec691eb2c4bedc9f8b2e2903f5e2a"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d8a993c0a0ffd5f2d3bda23d0cd75e7086736f8f8268de8a82fbc4bd0ac6791e"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:be6b7b8d42d3090b6c80793524fa66c57ad7ee3fe9722b258aec6d0672543fd0"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4023e2efc35a30e66e938de5aef42b520c20e7eda7bb5fb12c35e5d09a4c43f6"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0d47840dc05e0ba04fe2e26f15126de7c755496d5a8aae4a08bda4dd8d646c54"}, + {file = "regex-2023.10.3-cp311-cp311-win32.whl", hash = "sha256:9145f092b5d1977ec8c0ab46e7b3381b2fd069957b9862a43bd383e5c01d18c2"}, + {file = "regex-2023.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:b6104f9a46bd8743e4f738afef69b153c4b8b592d35ae46db07fc28ae3d5fb7c"}, + {file = "regex-2023.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff507ae210371d4b1fe316d03433ac099f184d570a1a611e541923f78f05037"}, + {file = "regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be5e22bbb67924dea15039c3282fa4cc6cdfbe0cbbd1c0515f9223186fc2ec5f"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a992f702c9be9c72fa46f01ca6e18d131906a7180950958f766c2aa294d4b41"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7434a61b158be563c1362d9071358f8ab91b8d928728cd2882af060481244c9e"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2169b2dcabf4e608416f7f9468737583ce5f0a6e8677c4efbf795ce81109d7c"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9e908ef5889cda4de038892b9accc36d33d72fb3e12c747e2799a0e806ec841"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12bd4bc2c632742c7ce20db48e0d99afdc05e03f0b4c1af90542e05b809a03d9"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bc72c231f5449d86d6c7d9cc7cd819b6eb30134bb770b8cfdc0765e48ef9c420"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bce8814b076f0ce5766dc87d5a056b0e9437b8e0cd351b9a6c4e1134a7dfbda9"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ba7cd6dc4d585ea544c1412019921570ebd8a597fabf475acc4528210d7c4a6f"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b0c7d2f698e83f15228ba41c135501cfe7d5740181d5903e250e47f617eb4292"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5a8f91c64f390ecee09ff793319f30a0f32492e99f5dc1c72bc361f23ccd0a9a"}, + {file = "regex-2023.10.3-cp312-cp312-win32.whl", hash = "sha256:ad08a69728ff3c79866d729b095872afe1e0557251da4abb2c5faff15a91d19a"}, + {file = "regex-2023.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:39cdf8d141d6d44e8d5a12a8569d5a227f645c87df4f92179bd06e2e2705e76b"}, + {file = "regex-2023.10.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4a3ee019a9befe84fa3e917a2dd378807e423d013377a884c1970a3c2792d293"}, + 
{file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76066d7ff61ba6bf3cb5efe2428fc82aac91802844c022d849a1f0f53820502d"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe50b61bab1b1ec260fa7cd91106fa9fece57e6beba05630afe27c71259c59b"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fd88f373cb71e6b59b7fa597e47e518282455c2734fd4306a05ca219a1991b0"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ab05a182c7937fb374f7e946f04fb23a0c0699c0450e9fb02ef567412d2fa3"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dac37cf08fcf2094159922edc7a2784cfcc5c70f8354469f79ed085f0328ebdf"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e54ddd0bb8fb626aa1f9ba7b36629564544954fff9669b15da3610c22b9a0991"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3367007ad1951fde612bf65b0dffc8fd681a4ab98ac86957d16491400d661302"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:16f8740eb6dbacc7113e3097b0a36065a02e37b47c936b551805d40340fb9971"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:f4f2ca6df64cbdd27f27b34f35adb640b5d2d77264228554e68deda54456eb11"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:39807cbcbe406efca2a233884e169d056c35aa7e9f343d4e78665246a332f597"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7eece6fbd3eae4a92d7c748ae825cbc1ee41a89bb1c3db05b5578ed3cfcfd7cb"}, + {file = "regex-2023.10.3-cp37-cp37m-win32.whl", hash = "sha256:ce615c92d90df8373d9e13acddd154152645c0dc060871abf6bd43809673d20a"}, + {file = "regex-2023.10.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0f649fa32fe734c4abdfd4edbb8381c74abf5f34bc0b3271ce687b23729299ed"}, + {file = "regex-2023.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b98b7681a9437262947f41c7fac567c7e1f6eddd94b0483596d320092004533"}, + {file = "regex-2023.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:91dc1d531f80c862441d7b66c4505cd6ea9d312f01fb2f4654f40c6fdf5cc37a"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82fcc1f1cc3ff1ab8a57ba619b149b907072e750815c5ba63e7aa2e1163384a4"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7979b834ec7a33aafae34a90aad9f914c41fd6eaa8474e66953f3f6f7cbd4368"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef71561f82a89af6cfcbee47f0fabfdb6e63788a9258e913955d89fdd96902ab"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd829712de97753367153ed84f2de752b86cd1f7a88b55a3a775eb52eafe8a94"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00e871d83a45eee2f8688d7e6849609c2ca2a04a6d48fba3dff4deef35d14f07"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:706e7b739fdd17cb89e1fbf712d9dc21311fc2333f6d435eac2d4ee81985098c"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:cc3f1c053b73f20c7ad88b0d1d23be7e7b3901229ce89f5000a8399746a6e039"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f85739e80d13644b981a88f529d79c5bdf646b460ba190bffcaf6d57b2a9863"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:741ba2f511cc9626b7561a440f87d658aabb3d6b744a86a3c025f866b4d19e7f"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e77c90ab5997e85901da85131fd36acd0ed2221368199b65f0d11bca44549711"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:979c24cbefaf2420c4e377ecd1f165ea08cc3d1fbb44bdc51bccbbf7c66a2cb4"}, + {file = "regex-2023.10.3-cp38-cp38-win32.whl", hash = "sha256:58837f9d221744d4c92d2cf7201c6acd19623b50c643b56992cbd2b745485d3d"}, + {file = "regex-2023.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:c55853684fe08d4897c37dfc5faeff70607a5f1806c8be148f1695be4a63414b"}, + {file = "regex-2023.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c54e23836650bdf2c18222c87f6f840d4943944146ca479858404fedeb9f9af"}, + {file = "regex-2023.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69c0771ca5653c7d4b65203cbfc5e66db9375f1078689459fe196fe08b7b4930"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ac965a998e1388e6ff2e9781f499ad1eaa41e962a40d11c7823c9952c77123e"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c0e8fae5b27caa34177bdfa5a960c46ff2f78ee2d45c6db15ae3f64ecadde14"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c56c3d47da04f921b73ff9415fbaa939f684d47293f071aa9cbb13c94afc17d"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ef1e014eed78ab650bef9a6a9cbe50b052c0aebe553fb2881e0453717573f52"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d29338556a59423d9ff7b6eb0cb89ead2b0875e08fe522f3e068b955c3e7b59b"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9c6d0ced3c06d0f183b73d3c5920727268d2201aa0fe6d55c60d68c792ff3588"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:994645a46c6a740ee8ce8df7911d4aee458d9b1bc5639bc968226763d07f00fa"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:66e2fe786ef28da2b28e222c89502b2af984858091675044d93cb50e6f46d7af"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:11175910f62b2b8c055f2b089e0fedd694fe2be3941b3e2633653bc51064c528"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:06e9abc0e4c9ab4779c74ad99c3fc10d3967d03114449acc2c2762ad4472b8ca"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fb02e4257376ae25c6dd95a5aec377f9b18c09be6ebdefa7ad209b9137b73d48"}, + {file = "regex-2023.10.3-cp39-cp39-win32.whl", hash = "sha256:3b2c3502603fab52d7619b882c25a6850b766ebd1b18de3df23b2f939360e1bd"}, + {file = "regex-2023.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:adbccd17dcaff65704c856bd29951c58a1bd4b2b0f8ad6b826dbd543fe740988"}, + {file = "regex-2023.10.3.tar.gz", hash = "sha256:3fef4f844d2290ee0ba57addcec17eec9e3df73f10a2748485dfd6a3a188cc0f"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rpds-py" +version = "0.15.2" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:337a8653fb11d2fbe7157c961cc78cb3c161d98cf44410ace9a3dc2db4fad882"}, + {file = "rpds_py-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:813a65f95bfcb7c8f2a70dd6add9b51e9accc3bdb3e03d0ff7a9e6a2d3e174bf"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:082e0e55d73690ffb4da4352d1b5bbe1b5c6034eb9dc8c91aa2a3ee15f70d3e2"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5595c80dd03d7e6c6afb73f3594bf3379a7d79fa57164b591d012d4b71d6ac4c"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb10bb720348fe1647a94eb605accb9ef6a9b1875d8845f9e763d9d71a706387"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53304cc14b1d94487d70086e1cb0cb4c29ec6da994d58ae84a4d7e78c6a6d04d"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d64a657de7aae8db2da60dc0c9e4638a0c3893b4d60101fd564a3362b2bfeb34"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ee40206d1d6e95eaa2b7b919195e3689a5cf6ded730632de7f187f35a1b6052c"}, + {file = "rpds_py-0.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1607cda6129f815493a3c184492acb5ae4aa6ed61d3a1b3663aa9824ed26f7ac"}, + {file = "rpds_py-0.15.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3e6e2e502c4043c52a99316d89dc49f416acda5b0c6886e0dd8ea7bb35859e8"}, + {file = "rpds_py-0.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:044f6f46d62444800402851afa3c3ae50141f12013060c1a3a0677e013310d6d"}, + {file = "rpds_py-0.15.2-cp310-none-win32.whl", hash = "sha256:c827a931c6b57f50f1bb5de400dcfb00bad8117e3753e80b96adb72d9d811514"}, + {file = "rpds_py-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3bbc89ce2a219662ea142f0abcf8d43f04a41d5b1880be17a794c39f0d609cb0"}, + {file = "rpds_py-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:1fd0f0b1ccd7d537b858a56355a250108df692102e08aa2036e1a094fd78b2dc"}, + {file = "rpds_py-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b414ef79f1f06fb90b5165db8aef77512c1a5e3ed1b4807da8476b7e2c853283"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c31272c674f725dfe0f343d73b0abe8c878c646967ec1c6106122faae1efc15b"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6945c2d61c42bb7e818677f43638675b8c1c43e858b67a96df3eb2426a86c9d"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02744236ac1895d7be837878e707a5c35fb8edc5137602f253b63623d7ad5c8c"}, + 
{file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2181e86d4e1cdf49a7320cb72a36c45efcb7670d0a88f09fd2d3a7967c0540fd"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a8ff8e809da81363bffca2b965cb6e4bf6056b495fc3f078467d1f8266fe27f"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97532802f14d383f37d603a56e226909f825a83ff298dc1b6697de00d2243999"}, + {file = "rpds_py-0.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:13716e53627ad97babf72ac9e01cf9a7d4af2f75dd5ed7b323a7a9520e948282"}, + {file = "rpds_py-0.15.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2f1f295a5c28cfa74a7d48c95acc1c8a7acd49d7d9072040d4b694fe11cd7166"}, + {file = "rpds_py-0.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8ec464f20fe803ae00419bd1610934e3bda963aeba1e6181dfc9033dc7e8940c"}, + {file = "rpds_py-0.15.2-cp311-none-win32.whl", hash = "sha256:b61d5096e75fd71018b25da50b82dd70ec39b5e15bb2134daf7eb7bbbc103644"}, + {file = "rpds_py-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:9d41ebb471a6f064c0d1c873c4f7dded733d16ca5db7d551fb04ff3805d87802"}, + {file = "rpds_py-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:13ff62d3561a23c17341b4afc78e8fcfd799ab67c0b1ca32091d71383a98ba4b"}, + {file = "rpds_py-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b70b45a40ad0798b69748b34d508259ef2bdc84fb2aad4048bc7c9cafb68ddb3"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ecbba7efd82bd2a4bb88aab7f984eb5470991c1347bdd1f35fb34ea28dba6e"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9d38494a8d21c246c535b41ecdb2d562c4b933cf3d68de03e8bc43a0d41be652"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13152dfe7d7c27c40df8b99ac6aab12b978b546716e99f67e8a67a1d441acbc3"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:164fcee32f15d04d61568c9cb0d919e37ff3195919cd604039ff3053ada0461b"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a5122b17a4faf5d7a6d91fa67b479736c0cacc7afe791ddebb7163a8550b799"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:46b4f3d47d1033db569173be62365fbf7808c2bd3fb742314d251f130d90d44c"}, + {file = "rpds_py-0.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c61e42b4ceb9759727045765e87d51c1bb9f89987aca1fcc8a040232138cad1c"}, + {file = "rpds_py-0.15.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d2aa3ca9552f83b0b4fa6ca8c6ce08da6580f37e3e0ab7afac73a1cfdc230c0e"}, + {file = "rpds_py-0.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ec19e823b4ccd87bd69e990879acbce9e961fc7aebe150156b8f4418d4b27b7f"}, + {file = "rpds_py-0.15.2-cp312-none-win32.whl", hash = "sha256:afeabb382c1256a7477b739820bce7fe782bb807d82927102cee73e79b41b38b"}, + {file = "rpds_py-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:422b0901878a31ef167435c5ad46560362891816a76cc0d150683f3868a6f0d1"}, + {file = "rpds_py-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:baf744e5f9d5ee6531deea443be78b36ed1cd36c65a0b95ea4e8d69fa0102268"}, + {file = "rpds_py-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7e072f5da38d6428ba1fc1115d3cc0dae895df671cb04c70c019985e8c7606be"}, + {file = 
"rpds_py-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f138f550b83554f5b344d6be35d3ed59348510edc3cb96f75309db6e9bfe8210"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2a4cd924d0e2f4b1a68034abe4cadc73d69ad5f4cf02db6481c0d4d749f548f"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5eb05b654a41e0f81ab27a7c3e88b6590425eb3e934e1d533ecec5dc88a6ffff"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ee066a64f0d2ba45391cac15b3a70dcb549e968a117bd0500634754cfe0e5fc"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c51a899792ee2c696072791e56b2020caff58b275abecbc9ae0cb71af0645c95"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac2ac84a4950d627d84b61f082eba61314373cfab4b3c264b62efab02ababe83"}, + {file = "rpds_py-0.15.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:62b292fff4739c6be89e6a0240c02bda5a9066a339d90ab191cf66e9fdbdc193"}, + {file = "rpds_py-0.15.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:98ee201a52a7f65608e5494518932e1473fd43535f12cade0a1b4ab32737fe28"}, + {file = "rpds_py-0.15.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3d40fb3ca22e3d40f494d577441b263026a3bd8c97ae6ce89b2d3c4b39ac9581"}, + {file = "rpds_py-0.15.2-cp38-none-win32.whl", hash = "sha256:30479a9f1fce47df56b07460b520f49fa2115ec2926d3b1303c85c81f8401ed1"}, + {file = "rpds_py-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:2df3d07a16a3bef0917b28cd564778fbb31f3ffa5b5e33584470e2d1b0f248f0"}, + {file = "rpds_py-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:56b51ba29a18e5f5810224bcf00747ad931c0716e3c09a76b4a1edd3d4aba71f"}, + {file = "rpds_py-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c11bc5814554b018f6c5d6ae0969e43766f81e995000b53a5d8c8057055e886"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2faa97212b0dc465afeedf49045cdd077f97be1188285e646a9f689cb5dfff9e"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:86c01299942b0f4b5b5f28c8701689181ad2eab852e65417172dbdd6c5b3ccc8"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd7d3608589072f63078b4063a6c536af832e76b0b3885f1bfe9e892abe6c207"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:938518a11780b39998179d07f31a4a468888123f9b00463842cd40f98191f4d3"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dccc623725d0b298f557d869a68496a2fd2a9e9c41107f234fa5f7a37d278ac"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d46ee458452727a147d7897bb33886981ae1235775e05decae5d5d07f537695a"}, + {file = "rpds_py-0.15.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d9d7ebcd11ea76ba0feaae98485cd8e31467c3d7985210fab46983278214736b"}, + {file = "rpds_py-0.15.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8a5f574b92b3ee7d254e56d56e37ec0e1416acb1ae357c4956d76a1788dc58fb"}, + {file = "rpds_py-0.15.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3db0c998c92b909d7c90b66c965590d4f3cd86157176a6cf14aa1f867b77b889"}, + {file = "rpds_py-0.15.2-cp39-none-win32.whl", hash = "sha256:bbc7421cbd28b4316d1d017db338039a7943f945c6f2bb15e1439b14b5682d28"}, + 
{file = "rpds_py-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:1c24e30d720c0009b6fb2e1905b025da56103c70a8b31b99138e4ed1c2a6c5b0"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e6fcd0a0f62f2997107f758bb372397b8d5fd5f39cc6dcb86f7cb98a2172d6c"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d800a8e2ac62db1b9ea5d6d1724f1a93c53907ca061de4d05ed94e8dfa79050c"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e09d017e3f4d9bd7d17a30d3f59e4d6d9ba2d2ced280eec2425e84112cf623f"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b88c3ab98556bc351b36d6208a6089de8c8db14a7f6e1f57f82a334bd2c18f0b"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f333bfe782a2d05a67cfaa0cc9cd68b36b39ee6acfe099f980541ed973a7093"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b629db53fe17e6ce478a969d30bd1d0e8b53238c46e3a9c9db39e8b65a9ef973"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485fbdd23becb822804ed05622907ee5c8e8a5f43f6f43894a45f463b2217045"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:893e38d0f4319dfa70c0f36381a37cc418985c87b11d9784365b1fff4fa6973b"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8ffdeb7dbd0160d4e391e1f857477e4762d00aa2199c294eb95dfb9451aa1d9f"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:fc33267d58dfbb2361baed52668c5d8c15d24bc0372cecbb79fed77339b55e0d"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:2e7e5633577b3bd56bf3af2ef6ae3778bbafb83743989d57f0e7edbf6c0980e4"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8b9650f92251fdef843e74fc252cdfd6e3c700157ad686eeb0c6d7fdb2d11652"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:07a2e1d78d382f7181789713cdf0c16edbad4fe14fe1d115526cb6f0eef0daa3"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03f9c5875515820633bd7709a25c3e60c1ea9ad1c5d4030ce8a8c203309c36fd"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:580182fa5b269c2981e9ce9764367cb4edc81982ce289208d4607c203f44ffde"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa1e626c524d2c7972c0f3a8a575d654a3a9c008370dc2a97e46abd0eaa749b9"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae9d83a81b09ce3a817e2cbb23aabc07f86a3abc664c613cd283ce7a03541e95"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9235be95662559141934fced8197de6fee8c58870f36756b0584424b6d708393"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a72e00826a2b032dda3eb25aa3e3579c6d6773d22d8446089a57a123481cc46c"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ab095edf1d840a6a6a4307e1a5b907a299a94e7b90e75436ee770b8c35d22a25"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = 
"sha256:3b79c63d29101cbaa53a517683557bb550462394fb91044cc5998dd2acff7340"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:911e600e798374c0d86235e7ef19109cf865d1336942d398ff313375a25a93ba"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3cd61e759c4075510052d1eca5cddbd297fe1164efec14ef1fce3f09b974dfe4"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d2ae79f31da5143e020a8d4fc74e1f0cbcb8011bdf97453c140aa616db51406"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e99d6510c8557510c220b865d966b105464740dcbebf9b79ecd4fbab30a13d9"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c43e1b89099279cc03eb1c725c5de12af6edcd2f78e2f8a022569efa639ada3"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7187bee72384b9cfedf09a29a3b2b6e8815cc64c095cdc8b5e6aec81e9fd5f"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3423007fc0661827e06f8a185a3792c73dda41f30f3421562f210cf0c9e49569"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2974e6dff38afafd5ccf8f41cb8fc94600b3f4fd9b0a98f6ece6e2219e3158d5"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:93c18a1696a8e0388ed84b024fe1a188a26ba999b61d1d9a371318cb89885a8c"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c7cd0841a586b7105513a7c8c3d5c276f3adc762a072d81ef7fae80632afad1e"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:709dc11af2f74ba89c68b1592368c6edcbccdb0a06ba77eb28c8fe08bb6997da"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:fc066395e6332da1e7525d605b4c96055669f8336600bef8ac569d5226a7c76f"}, + {file = "rpds_py-0.15.2.tar.gz", hash = "sha256:373b76eeb79e8c14f6d82cb1d4d5293f9e4059baec6c1b16dca7ad13b6131b39"}, +] + +[[package]] +name = "ruff" +version = "0.0.292" +description = "An extremely fast Python linter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.0.292-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96"}, + {file = "ruff-0.0.292-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_i686.whl", hash = "sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016"}, + {file = "ruff-0.0.292-py3-none-win32.whl", hash = "sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003"}, + {file = "ruff-0.0.292-py3-none-win_amd64.whl", hash = "sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c"}, + {file = "ruff-0.0.292-py3-none-win_arm64.whl", hash = "sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68"}, + {file = "ruff-0.0.292.tar.gz", hash = "sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac"}, +] + +[[package]] +name = "selenium" +version = "4.15.2" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "selenium-4.15.2-py3-none-any.whl", hash = "sha256:9e82cd1ac647fb73cf0d4a6e280284102aaa3c9d94f0fa6e6cc4b5db6a30afbf"}, + {file = "selenium-4.15.2.tar.gz", hash = "sha256:22eab5a1724c73d51b240a69ca702997b717eee4ba1f6065bf5d6b44dba01d48"}, +] + +[package.dependencies] +certifi = ">=2021.10.8" +trio = ">=0.17,<1.0" +trio-websocket = ">=0.9,<1.0" +urllib3 = {version = ">=1.26,<3", extras = ["socks"]} + +[[package]] +name = "sentry-sdk" +version = "1.34.0" +description = "Python client for Sentry (https://sentry.io)" +optional = false +python-versions = "*" +files = [ + {file = "sentry-sdk-1.34.0.tar.gz", hash = "sha256:e5d0d2b25931d88fa10986da59d941ac6037f742ab6ff2fce4143a27981d60c3"}, + {file = "sentry_sdk-1.34.0-py2.py3-none-any.whl", hash = 
"sha256:76dd087f38062ac6c1e30ed6feb533ee0037ff9e709974802db7b5dbf2e5db21"}, +] + +[package.dependencies] +certifi = "*" +urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} + +[package.extras] +aiohttp = ["aiohttp (>=3.5)"] +arq = ["arq (>=0.23)"] +asyncpg = ["asyncpg (>=0.23)"] +beam = ["apache-beam (>=2.12)"] +bottle = ["bottle (>=0.12.13)"] +celery = ["celery (>=3)"] +chalice = ["chalice (>=1.16.0)"] +clickhouse-driver = ["clickhouse-driver (>=0.2.0)"] +django = ["django (>=1.8)"] +falcon = ["falcon (>=1.4)"] +fastapi = ["fastapi (>=0.79.0)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] +grpcio = ["grpcio (>=1.21.1)"] +httpx = ["httpx (>=0.16.0)"] +huey = ["huey (>=2)"] +loguru = ["loguru (>=0.5)"] +opentelemetry = ["opentelemetry-distro (>=0.35b0)"] +opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] +pure-eval = ["asttokens", "executing", "pure-eval"] +pymongo = ["pymongo (>=3.1)"] +pyspark = ["pyspark (>=2.4.4)"] +quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] +rq = ["rq (>=0.6)"] +sanic = ["sanic (>=0.8)"] +sqlalchemy = ["sqlalchemy (>=1.2)"] +starlette = ["starlette (>=0.19.1)"] +starlite = ["starlite (>=1.48)"] +tornado = ["tornado (>=5)"] + +[[package]] +name = "setuptools" +version = "69.0.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, + {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version 
= "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +optional = false +python-versions = "*" +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + +[[package]] +name = "sounddevice" +version = "0.4.6" +description = "Play and Record Sound with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sounddevice-0.4.6-py3-none-any.whl", hash = "sha256:5de768ba6fe56ad2b5aaa2eea794b76b73e427961c95acad2ee2ed7f866a4b20"}, + {file = "sounddevice-0.4.6-py3-none-macosx_10_6_x86_64.macosx_10_6_universal2.whl", hash = "sha256:8b0b806c205dd3e3cd5a97262b2482624fd21db7d47083b887090148a08051c8"}, + {file = "sounddevice-0.4.6-py3-none-win32.whl", hash = "sha256:e3ba6e674ffa8f79a591d744a1d4ab922fe5bdfd4faf8b25069a08e051010b7b"}, + {file = "sounddevice-0.4.6-py3-none-win_amd64.whl", hash = "sha256:7830d4f8f8570f2e5552942f81d96999c5fcd9a0b682d6fc5d5c5529df23be2c"}, + {file = "sounddevice-0.4.6.tar.gz", hash = "sha256:3236b78f15f0415bdf006a620cef073d0c0522851d66f4a961ed6d8eb1482fe9"}, +] + +[package.dependencies] +CFFI = ">=1.0" + +[package.extras] +numpy = ["NumPy"] + +[[package]] +name = "soundfile" +version = "0.12.1" +description = "An audio library based on libsndfile, CFFI and NumPy" +optional = false +python-versions = "*" +files = [ + {file = "soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"}, + {file = "soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"}, + {file = "soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"}, + {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"}, + {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"}, + {file = "soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"}, + {file = "soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"}, + {file = "soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"}, +] + +[package.dependencies] 
+cffi = ">=1.0" + +[package.extras] +numpy = ["numpy"] + +[[package]] +name = "termcolor" +version = "2.3.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.7" +files = [ + {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"}, + {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "tiktoken" +version = "0.4.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tiktoken-0.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:176cad7f053d2cc82ce7e2a7c883ccc6971840a4b5276740d0b732a2b2011f8a"}, + {file = "tiktoken-0.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:450d504892b3ac80207700266ee87c932df8efea54e05cefe8613edc963c1285"}, + {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d662de1e7986d129139faf15e6a6ee7665ee103440769b8dedf3e7ba6ac37f"}, + {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5727d852ead18b7927b8adf558a6f913a15c7766725b23dbe21d22e243041b28"}, + {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c06cd92b09eb0404cedce3702fa866bf0d00e399439dad3f10288ddc31045422"}, + {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9ec161e40ed44e4210d3b31e2ff426b4a55e8254f1023e5d2595cb60044f8ea6"}, + {file = "tiktoken-0.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:1e8fa13cf9889d2c928b9e258e9dbbbf88ab02016e4236aae76e3b4f82dd8288"}, + {file = "tiktoken-0.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb2341836b725c60d0ab3c84970b9b5f68d4b733a7bcb80fb25967e5addb9920"}, + {file = "tiktoken-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ca30367ad750ee7d42fe80079d3092bd35bb266be7882b79c3bd159b39a17b0"}, + {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dc3df19ddec79435bb2a94ee46f4b9560d0299c23520803d851008445671197"}, + {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d980fa066e962ef0f4dad0222e63a484c0c993c7a47c7dafda844ca5aded1f3"}, + {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:329f548a821a2f339adc9fbcfd9fc12602e4b3f8598df5593cfc09839e9ae5e4"}, + {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b1a038cee487931a5caaef0a2e8520e645508cde21717eacc9af3fbda097d8bb"}, + {file = "tiktoken-0.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:08efa59468dbe23ed038c28893e2a7158d8c211c3dd07f2bbc9a30e012512f1d"}, + {file = "tiktoken-0.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3020350685e009053829c1168703c346fb32c70c57d828ca3742558e94827a9"}, + {file = "tiktoken-0.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba16698c42aad8190e746cd82f6a06769ac7edd415d62ba027ea1d99d958ed93"}, + {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c15d9955cc18d0d7ffcc9c03dc51167aedae98542238b54a2e659bd25fe77ed"}, + {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64e1091c7103100d5e2c6ea706f0ec9cd6dc313e6fe7775ef777f40d8c20811e"}, + {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:e87751b54eb7bca580126353a9cf17a8a8eaadd44edaac0e01123e1513a33281"}, + {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e063b988b8ba8b66d6cc2026d937557437e79258095f52eaecfafb18a0a10c03"}, + {file = "tiktoken-0.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9c6dd439e878172dc163fced3bc7b19b9ab549c271b257599f55afc3a6a5edef"}, + {file = "tiktoken-0.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8d1d97f83697ff44466c6bef5d35b6bcdb51e0125829a9c0ed1e6e39fb9a08fb"}, + {file = "tiktoken-0.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b6bce7c68aa765f666474c7c11a7aebda3816b58ecafb209afa59c799b0dd2d"}, + {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a73286c35899ca51d8d764bc0b4d60838627ce193acb60cc88aea60bddec4fd"}, + {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0394967d2236a60fd0aacef26646b53636423cc9c70c32f7c5124ebe86f3093"}, + {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:dae2af6f03ecba5f679449fa66ed96585b2fa6accb7fd57d9649e9e398a94f44"}, + {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:55e251b1da3c293432179cf7c452cfa35562da286786be5a8b1ee3405c2b0dd2"}, + {file = "tiktoken-0.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:c835d0ee1f84a5aa04921717754eadbc0f0a56cf613f78dfc1cf9ad35f6c3fea"}, + {file = "tiktoken-0.4.0.tar.gz", hash = "sha256:59b20a819969735b48161ced9b92f05dc4519c17be4015cfb73b65270a243620"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tqdm" +version = "4.66.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, + {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "trio" +version = "0.23.2" +description = "A friendly Python library for async concurrency and I/O" +optional = false +python-versions = ">=3.8" +files = [ + {file = "trio-0.23.2-py3-none-any.whl", hash = "sha256:5a0b566fa5d50cf231cfd6b08f3b03aa4179ff004b8f3144059587039e2b26d3"}, + {file = "trio-0.23.2.tar.gz", hash = "sha256:da1d35b9a2b17eb32cae2e763b16551f9aa6703634735024e32f325c9285069e"}, +] + +[package.dependencies] +attrs = ">=20.1.0" +cffi = {version = ">=1.14", markers = "os_name == \"nt\" and implementation_name != \"pypy\""} +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +idna = "*" +outcome = "*" +sniffio = ">=1.3.0" +sortedcontainers = "*" + +[[package]] +name = "trio-websocket" +version = "0.11.1" +description = "WebSocket library for Trio" +optional = false 
+python-versions = ">=3.7" +files = [ + {file = "trio-websocket-0.11.1.tar.gz", hash = "sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f"}, + {file = "trio_websocket-0.11.1-py3-none-any.whl", hash = "sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638"}, +] + +[package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +trio = ">=0.11" +wsproto = ">=0.14" + +[[package]] +name = "typing-extensions" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." +optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "urllib3" +version = "2.1.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, + {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, +] + +[package.dependencies] +pysocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""} + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uuid" +version = "1.30" +description = "UUID object and generation functions (Python 2.3 or higher)" +optional = false +python-versions = "*" +files = [ + {file = "uuid-1.30.tar.gz", hash = "sha256:1f87cc004ac5120466f36c5beae48b4c48cc411968eed0eaecd3da82aa96193f"}, +] + +[[package]] +name = "wcwidth" +version = "0.2.12" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.12-py2.py3-none-any.whl", hash = "sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c"}, + {file = "wcwidth-0.2.12.tar.gz", hash = "sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02"}, +] + +[[package]] +name = "webdriver-manager" +version = "4.0.1" +description = "Library provides the way to automatically manage drivers for different browsers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "webdriver_manager-4.0.1-py2.py3-none-any.whl", hash = "sha256:d7970052295bb9cda2c1a24cf0b872dd2c41ababcc78f7b6b8dc37a41e979a7e"}, + {file = "webdriver_manager-4.0.1.tar.gz", hash = "sha256:25ec177c6a2ce9c02fb8046f1b2732701a9418d6a977967bb065d840a3175d87"}, +] + +[package.dependencies] +packaging = "*" +python-dotenv = "*" +requests = "*" + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol 
implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "d5fa1008c5330905c0a36bf1283c5b8c53c1b3f57802ea776a9c6eb1e848dce4" diff --git a/pyproject.toml b/pyproject.toml index d5628225a..6191d746a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,54 @@ +[tool.poetry] +name = "mentat" +version = "0.1.0" +description = "" +authors = ["bio_bootloader "] +readme = "README.md" + +[tool.poetry.scripts] +mentat = 'mentat.terminal.client:start' + +[tool.poetry.dependencies] +python = "^3.10" +attrs = "^23.1.0" +backoff = "^2.2.1" +fire = "^0.5.0" +jinja2 = "^3.1.2" +jsonschema = ">=4.17.0" +numpy = "^1.26.0" +openai = "^1.3.0" +pillow = "^10.1.0" +prompt-toolkit = "^3.0.39" +Pygments = "^2.15.1" +pytest = "^7.4.0" +pytest-asyncio = "^0.21.1" +pytest-mock = "^3.11.1" +pytest-reportlog = "^0.4.0" +python-dotenv = "^1.0.0" +selenium = "4.15.2" +sentry-sdk = "1.34.0" +sounddevice = "0.4.6" +soundfile = "0.12.1" +termcolor = "2.3.0" +tiktoken = "0.4.0" +typing_extensions = "4.8.0" +tqdm = "4.66.1" +webdriver_manager = "4.0.1" +dataclasses-json = "^0.6.3" +pyyaml = "^6.0.1" +click = "^8.1.7" +uuid = "^1.30" + +[tool.poetry.group.dev.dependencies] +aiomultiprocess = "^0.9.0" +black = "^23.9.1" +gitpython = "^3.1.37" +isort = "^5.12.0" +pip-licenses = "^4.3.3" +pyright = "^1.1.339" +pytest-xdist = "^3.3.1" +ruff = "^0.0.292" + [tool.isort] profile = "black" known_first_party = "mentat" @@ -12,3 +63,7 @@ addopts = "--ignore=vscode/bundled --ignore=benchmark_repos" [tool.black] preview = "true" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 1110a6015..000000000 --- a/requirements.txt +++ /dev/null @@ -1,25 +0,0 @@ -attrs==23.1.0 -backoff==2.2.1 -fire==0.5.0 -gitpython==3.1.37 -jinja2==3.1.2 -jsonschema>=4.17.0 -numpy==1.26.0 -openai==1.3.0 -pillow==10.1.0 -prompt-toolkit==3.0.39 -Pygments==2.15.1 -pytest==7.4.0 -pytest-asyncio==0.21.1 -pytest-mock==3.11.1 -pytest-reportlog==0.4.0 -python-dotenv==1.0.0 -selenium==4.15.2 -sentry-sdk==1.34.0 -sounddevice==0.4.6 -soundfile==0.12.1 -termcolor==2.3.0 -tiktoken==0.4.0 -typing_extensions==4.8.0 -tqdm==4.66.1 -webdriver_manager==4.0.1 diff --git a/scripts/git_log_to_transcripts.py b/scripts/git_log_to_transcripts.py index 13f9639ca..75e592d50 100755 --- a/scripts/git_log_to_transcripts.py +++ b/scripts/git_log_to_transcripts.py @@ -159,12 +159,10 @@ async def translate_commits_to_transcripts(repo, count=10): "args": {}, "prompt": prompt, "expected_edits": llmResponse, - "edited_features": list( - { - str(f.relative_to(git_root)) - for f in bound_files(parsedLLMResponse.file_edits, padding=0) - } - ), + "edited_features": list({ + str(f.relative_to(git_root)) + for f in bound_files(parsedLLMResponse.file_edits, padding=0) + }), "selected_features": [], } try: diff --git a/setup.py b/setup.py deleted file mode 100644 index 19dfe6fcf..000000000 --- a/setup.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -from pathlib import Path - -import pkg_resources -from setuptools import find_packages, setup - -from mentat import __version__ - -readme_path = 
os.path.join(Path(__file__).parent, "README.md") -with open(readme_path, "r", encoding="utf-8") as f: - long_description = f.read() - - -setup( - name="mentat", - version=__version__, - python_requires=">=3.10", - packages=find_packages(include=["mentat", "mentat.*"]), - install_requires=[ - str(r) - for r in pkg_resources.parse_requirements( - open(os.path.join(os.path.dirname(__file__), "requirements.txt")) - ) - ], - entry_points={ - "console_scripts": [ - "mentat=mentat.terminal.client:run_cli", - ], - }, - description="AI coding assistant on your command line", - long_description=long_description, - long_description_content_type="text/markdown", - license="Apache-2.0", - include_package_data=True, - extras_require={ - "dev": [ - str(r) - for r in pkg_resources.parse_requirements( - open(os.path.join(os.path.dirname(__file__), "dev-requirements.txt")) - ) - ] - }, -) diff --git a/tests/benchmarks/benchmarks/mentat/license_update.py b/tests/benchmarks/benchmarks/mentat/license_update.py index 7e63f2744..b356c7cd9 100644 --- a/tests/benchmarks/benchmarks/mentat/license_update.py +++ b/tests/benchmarks/benchmarks/mentat/license_update.py @@ -34,19 +34,17 @@ def verify(): import benchmark_repos.mentat.tests.license_check as license_check importlib.reload(license_check) - return set(license_check.accepted_licenses) == set( - [ - "BSD License", - "Apache Software License", - "MIT License", - "MIT", - "Mozilla Public License 2.0 (MPL 2.0)", - "Python Software Foundation License", - "Apache 2.0", - "BSD 3-Clause", - "ISC License (ISCL)", - "HPND", - ] - ) + return set(license_check.accepted_licenses) == set([ + "BSD License", + "Apache Software License", + "MIT License", + "MIT", + "Mozilla Public License 2.0 (MPL 2.0)", + "Python Software Foundation License", + "Apache 2.0", + "BSD 3-Clause", + "ISC License (ISCL)", + "HPND", + ]) except IndentationError: return False diff --git a/tests/code_context_test.py b/tests/code_context_test.py index 41877d665..dc939e0b9 100644 --- a/tests/code_context_test.py +++ b/tests/code_context_test.py @@ -6,8 +6,9 @@ import pytest +import mentat from mentat.code_context import CodeContext -from mentat.config import Config +from mentat.config import load_config from mentat.errors import ContextSizeInsufficient from mentat.feature_filters.default_filter import DefaultFilter from mentat.git_handler import get_non_gitignored_files @@ -76,9 +77,9 @@ async def test_bracket_file(temp_testbed, mock_code_context): @pytest.mark.asyncio async def test_config_glob_exclude(mocker, temp_testbed, mock_code_context): # Makes sure glob exclude config works - mocker.patch.object( - Config, "file_exclude_glob_list", new=[os.path.join("glob_test", "**", "*.py")] - ) + config = mentat.user_session.get("config") + config.run.file_exclude_glob_list = [Path("glob_test") / "**" / "*.py"] + mentat.user_session.set("config", config) glob_exclude_path = os.path.join("glob_test", "bagel", "apple", "exclude_me.py") glob_include_path = os.path.join("glob_test", "bagel", "apple", "include_me.ts") @@ -110,6 +111,9 @@ async def test_config_glob_exclude(mocker, temp_testbed, mock_code_context): @pytest.mark.asyncio async def test_glob_include(temp_testbed, mock_code_context): + # reset the config context + load_config() + # Make sure glob include works glob_include_path = os.path.join("glob_test", "bagel", "apple", "include_me.py") glob_include_path2 = os.path.join("glob_test", "bagel", "apple", "include_me2.py") @@ -188,6 +192,10 @@ async def test_text_encoding_checking(temp_testbed, 
mock_session_context): @pytest.mark.asyncio @pytest.mark.clear_testbed async def test_max_auto_tokens(mocker, temp_testbed, mock_session_context): + config = mentat.user_session.get("config") + config.ai.maximum_context = 8000 + mentat.user_session.set("config", config) + with open("file_1.py", "w") as f: f.write(dedent("""\ def func_1(x, y): @@ -214,7 +222,7 @@ def func_4(string): ) code_context.include("file_1.py") code_context.use_llm = False - mock_session_context.config.auto_context_tokens = 8000 + filter_mock = AsyncMock(side_effect=lambda features: features) mocker.patch.object(DefaultFilter, "filter", side_effect=filter_mock) @@ -222,7 +230,7 @@ async def _count_max_tokens_where(tokens_used: int) -> int: code_message = await code_context.get_code_message(tokens_used, prompt="prompt") return count_tokens(code_message, "gpt-4", full_message=True) - assert await _count_max_tokens_where(0) == 89 # Code + assert await _count_max_tokens_where(0) == 46 # Code with pytest.raises(ContextSizeInsufficient): await _count_max_tokens_where(1e6) @@ -258,8 +266,12 @@ def test_get_all_features(temp_testbed, mock_code_context): @pytest.mark.asyncio async def test_get_code_message_ignore(mocker, temp_testbed, mock_session_context): - mock_session_context.config.auto_context_tokens = 8000 - mocker.patch.object(Config, "maximum_context", new=7000) + + config = mentat.user_session.get("config") + config.ai.maximum_context = 7000 + config.run.auto_context_tokens = 8000 + mentat.user_session.set("config", config) + filter_mock = AsyncMock(side_effect=lambda features: features) mocker.patch.object(DefaultFilter, "filter", side_effect=filter_mock) code_context = CodeContext( diff --git a/tests/code_file_manager_test.py b/tests/code_file_manager_test.py index 9131b7330..b43324245 100644 --- a/tests/code_file_manager_test.py +++ b/tests/code_file_manager_test.py @@ -62,16 +62,14 @@ async def test_run_from_subdirectory( """Run mentat from a subdirectory of the git root""" # Change to the subdirectory os.chdir("multifile_calculator") - mock_collect_user_input.set_stream_messages( - [ - ( - "Insert the comment # Hello on the first line of" - " multifile_calculator/calculator.py and scripts/echo.py" - ), - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + ( + "Insert the comment # Hello on the first line of" + " multifile_calculator/calculator.py and scripts/echo.py" + ), + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment in both files. @@ -119,16 +117,14 @@ async def test_run_from_superdirectory( ): """Run mentat from outside the git root""" # Change to the subdirectory - mock_collect_user_input.set_stream_messages( - [ - ( - "Insert the comment # Hello on the first line of" - " multifile_calculator/calculator.py and scripts/echo.py" - ), - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + ( + "Insert the comment # Hello on the first line of" + " multifile_calculator/calculator.py and scripts/echo.py" + ), + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment in both files. 
@@ -175,13 +171,11 @@ async def test_change_after_creation( mock_call_llm_api, ): file_name = Path("hello_world.py") - mock_collect_user_input.set_stream_messages( - [ - "Conversation", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Conversation", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/commands_test.py b/tests/commands_test.py index e5c4d4a61..46d1ca652 100644 --- a/tests/commands_test.py +++ b/tests/commands_test.py @@ -4,6 +4,7 @@ import pytest +import mentat from mentat.code_feature import CodeFeature from mentat.command.command import Command, InvalidCommand from mentat.command.commands.context import ContextCommand @@ -29,12 +30,10 @@ async def test_commit_command(temp_testbed, mock_collect_user_input): with open(file_name, "w") as f: f.write("# Commit me!") - mock_collect_user_input.set_stream_messages( - [ - "/commit", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "/commit", + "q", + ]) session = Session(cwd=temp_testbed, paths=[]) session.start() @@ -46,12 +45,10 @@ async def test_commit_command(temp_testbed, mock_collect_user_input): # TODO: test without git @pytest.mark.asyncio async def test_include_command(temp_testbed, mock_collect_user_input): - mock_collect_user_input.set_stream_messages( - [ - "/include scripts", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "/include scripts", + "q", + ]) session = Session(cwd=temp_testbed) session.start() @@ -66,12 +63,10 @@ async def test_include_command(temp_testbed, mock_collect_user_input): # TODO: test without git @pytest.mark.asyncio async def test_exclude_command(temp_testbed, mock_collect_user_input): - mock_collect_user_input.set_stream_messages( - [ - "/exclude scripts", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "/exclude scripts", + "q", + ]) session = Session(cwd=temp_testbed, paths=["scripts"]) session.start() @@ -89,14 +84,12 @@ async def test_undo_command(temp_testbed, mock_collect_user_input, mock_call_llm # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "Edit the file", - "y", - "/undo", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Edit the file", + "y", + "/undo", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -132,15 +125,13 @@ async def test_redo_command(temp_testbed, mock_collect_user_input, mock_call_llm # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "Edit the file", - "y", - "/undo", - "/redo", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Edit the file", + "y", + "/undo", + "/redo", + "q", + ]) new_file_name = "new_temp.py" mock_call_llm_api.set_streamed_values([dedent(f"""\ @@ -195,14 +186,12 @@ async def test_undo_all_command( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "", - "y", - "/undo-all", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "", + "y", + "/undo-all", + "q", + ]) # TODO: Make a way to set multiple return values for call_llm_api and reset multiple edits at once mock_call_llm_api.set_streamed_values([dedent(f"""\ @@ -233,13 +222,11 @@ async def test_undo_all_command( @pytest.mark.asyncio async def test_clear_command(temp_testbed, mock_collect_user_input, mock_call_llm_api): - mock_collect_user_input.set_stream_messages( - [ - "Request", - "/clear", - "q", - ] - ) + 
mock_collect_user_input.set_stream_messages([ + "Request", + "/clear", + "q", + ]) mock_call_llm_api.set_streamed_values(["Answer"]) session = Session(cwd=Path.cwd()) @@ -255,13 +242,11 @@ async def test_clear_command(temp_testbed, mock_collect_user_input, mock_call_ll async def test_search_command( mocker, temp_testbed, mock_call_llm_api, mock_collect_user_input ): - mock_collect_user_input.set_stream_messages( - [ - "Request", - "/search Query", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Request", + "/search Query", + "q", + ]) mock_call_llm_api.set_streamed_values(["Answer"]) mock_feature = CodeFeature( Path(temp_testbed) / "multifile_calculator" / "calculator.py" @@ -290,16 +275,16 @@ async def test_context_command(temp_testbed, mock_call_llm_api): @pytest.mark.asyncio async def test_config_command(mock_call_llm_api): session_context = SESSION_CONTEXT.get() - config = session_context.config stream = session_context.stream command = Command.create_command("config") await command.apply("test") assert stream.messages[-1].data == "Unrecognized config option: test" await command.apply("model") assert stream.messages[-1].data.startswith("model: ") - await command.apply("model", "test") - assert stream.messages[-1].data == "model set to test" - assert config.model == "test" + await command.apply("model", "gpt-4-32k") + assert stream.messages[-1].data == "model set to gpt-4-32k" + config = mentat.user_session.get("config") + assert config.ai.model == "gpt-4-32k" await command.apply("model", "test", "lol") assert stream.messages[-1].data == "Too many arguments" @@ -310,11 +295,11 @@ async def test_screenshot_command(mocker): session_context = SESSION_CONTEXT.get() mock_vision_manager = mocker.MagicMock() session_context.vision_manager = mock_vision_manager - config = session_context.config + config = mentat.user_session.get("config") stream = session_context.stream conversation = session_context.conversation - assert config.model != "gpt-4-vision-preview" + assert config.ai.model != "gpt-4-vision-preview" mock_vision_manager.screenshot.return_value = "fake_image_data" @@ -322,7 +307,7 @@ async def test_screenshot_command(mocker): await screenshot_command.apply("fake_path") mock_vision_manager.screenshot.assert_called_once_with("fake_path") - assert config.model == "gpt-4-vision-preview" + assert config.ai.model == "gpt-4-vision-preview" assert stream.messages[-1].data == "Screenshot taken for: fake_path." 
assert conversation._messages[-1] == { "role": "user", @@ -333,11 +318,11 @@ async def test_screenshot_command(mocker): } # Test non-gpt models aren't changed - config.model = "test" + config.ai.model = "test" await screenshot_command.apply("fake_path") - assert config.model == "test" + assert config.ai.model == "test" # Test other models containing vision aren't changed - config.model = "gpt-vision" + config.ai.model = "gpt-vision" await screenshot_command.apply("fake_path") - assert config.model == "gpt-vision" + assert config.ai.model == "gpt-vision" diff --git a/tests/config_test.py b/tests/config_test.py index 85ce02d11..982be3ec6 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,96 +1,36 @@ -import argparse -from pathlib import Path -from textwrap import dedent +from io import StringIO +from unittest.mock import MagicMock import pytest +import yaml import mentat.config -from mentat.config import Config, config_file_name -from mentat.parsers.replacement_parser import ReplacementParser +from mentat import config @pytest.fixture -def mock_config_errors(mocker): - errors = [] - mocker.patch.object(Config, "error", lambda self, message: errors.append(message)) - return errors +def mock_open(mocker): + mock_open = mocker.patch("builtins.open", new_callable=MagicMock) + return mock_open @pytest.mark.asyncio -async def test_config_creation(): - "This test verifies the Config adds the parameters to the argparse object." - "Those take precedence over the config files and the project config takes" - "precedence over the user config." - parser = argparse.ArgumentParser() - Config.add_fields_to_argparse(parser) - args = parser.parse_args( - [ - "--model", - "model", - "--temperature", - "0.2", - "--maximum-context", - "1", - "-a", - "2000", - ] - ) - assert args.model == "model" - assert args.temperature == 0.2 - assert args.maximum_context == "1" - assert args.parser is None - assert args.auto_context_tokens == 2000 - - with open(config_file_name, "w") as project_config_file: - project_config_file.write(dedent("""\ - { - "input_style": [[ "project", "yes" ]] - }""")) - - mentat.config.user_config_path = Path(str(config_file_name) + "1") - with open(mentat.config.user_config_path, "w") as user_config_file: - user_config_file.write(dedent("""\ - { - "model": "test", - "parser": "replacement", - "input_style": [[ "user", "yes" ]] - }""")) - - config = Config.create(Path.cwd(), args) - - assert config.model == "model" - assert config.temperature == 0.2 - assert config.maximum_context == 1 - assert type(config.parser) == ReplacementParser - assert config.auto_context_tokens == 2000 - assert config.input_style == [["project", "yes"]] +async def test_load_yaml(mock_open): + data = {"test_key": "test_value"} + mock_open.return_value.__enter__.return_value = StringIO(yaml.dump(data)) + assert config.load_yaml("test_path") == data + mock_open.assert_called_with("test_path", "r") @pytest.mark.asyncio -async def test_invalid_config(mock_config_errors): - # If invalid config file is found, it should use next config - with open(config_file_name, "w") as project_config_file: - project_config_file.write(dedent("""\ - { - "model": "project", - "format": "I have a trailing comma", - }""")) +async def test_default_config(): + "This test verifies that a config is created with default settings required for the run." 
+ config = mentat.user_session.get("config") + + assert config.ai.model == "gpt-4-1106-preview" + assert config.ai.maximum_context == 16000 - mentat.config.user_config_path = Path(str(config_file_name) + "1") - with open(mentat.config.user_config_path, "w") as user_config_file: - user_config_file.write(dedent("""\ - { - "model": "test", - "foobar": "Not a real setting" - }""")) + assert config.run.auto_tokens == 8000 + assert config.run.auto_context is False - config = Config.create(cwd=Path.cwd()) - assert ( - mock_config_errors[0] - == "Warning: Config .mentat_config.json1 contains unrecognized setting: foobar" - ) - assert ( - "contains invalid json; ignoring user configuration file" - in mock_config_errors[1] - ) - assert config.model == "test" + assert config.parser.parser_type == "block" diff --git a/tests/conftest.py b/tests/conftest.py index 1d4b2bc9b..c8a036128 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -16,16 +16,17 @@ from openai.types.chat.chat_completion_chunk import Choice as AsyncChoice from openai.types.chat.chat_completion_chunk import ChoiceDelta -from mentat import config +import mentat from mentat.agent_handler import AgentHandler from mentat.auto_completer import AutoCompleter from mentat.code_context import CodeContext from mentat.code_file_manager import CodeFileManager -from mentat.config import Config, config_file_name +from mentat.config import config_file_name, load_config from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.git_handler import get_git_root_for_path from mentat.llm_api_handler import LlmApiHandler +from mentat.parsers.block_parser import BlockParser from mentat.sampler.sampler import Sampler from mentat.session_context import SESSION_CONTEXT, SessionContext from mentat.session_stream import SessionStream, StreamMessage, StreamMessageSource @@ -205,6 +206,7 @@ def set_unstreamed_values(value): Choice( finish_reason="stop", index=0, + logprobs=None, message=ChatCompletionMessage( content=value, role="assistant", @@ -260,14 +262,25 @@ def mock_session_context(temp_testbed): set by a Session if the test creates a Session. If you create a Session or Client in your test, do NOT use this SessionContext! 
""" + # reset the config context + load_config() + + # autoset some settings to conform to tests + config = mentat.user_session.get("config") + config.root = temp_testbed + config.run.file_exclude_glob_list = [] + config.ai.maximum_context = 16000 + config.ai.load_prompts("text") + config.parser.parser_type = "block" + config.parser.parser = BlockParser() + mentat.user_session.set("config", config) + git_root = get_git_root_for_path(temp_testbed, raise_error=False) stream = SessionStream() cost_tracker = CostTracker() - config = Config() - llm_api_handler = LlmApiHandler() code_context = CodeContext(stream, git_root) @@ -288,7 +301,6 @@ def mock_session_context(temp_testbed): stream, llm_api_handler, cost_tracker, - config, code_context, code_file_manager, conversation, @@ -381,7 +393,9 @@ def temp_testbed(monkeypatch, get_marks): # it will be unset unless a specific test wants to make a config in the testbed @pytest.fixture(autouse=True) def mock_user_config(mocker): + config = mentat.user_session.get("config") config.user_config_path = Path(config_file_name) + mentat.user_session.set("config", config) @pytest.fixture(autouse=True) diff --git a/tests/conversation_test.py b/tests/conversation_test.py index 6106c44b8..d5cf18b23 100644 --- a/tests/conversation_test.py +++ b/tests/conversation_test.py @@ -1,3 +1,4 @@ +import mentat from mentat.parsers.block_parser import BlockParser from mentat.parsers.replacement_parser import ReplacementParser from mentat.session_context import SESSION_CONTEXT @@ -5,15 +6,22 @@ def test_midconveration_parser_change(mock_call_llm_api): session_context = SESSION_CONTEXT.get() - config = session_context.config conversation = session_context.conversation - config.parser = "block" + config = mentat.user_session.get("config") + + config.parser.parser_type = "block" + config.parser.parser = BlockParser() + mentat.user_session.set("config", config) + assert ( conversation.get_messages()[0]["content"] == BlockParser().get_system_prompt() ) - config.parser = "replacement" + config.parser.parser_type = "replacement" + config.parser.parser = ReplacementParser() + mentat.user_session.set("config", config) + assert ( conversation.get_messages()[0]["content"] == ReplacementParser().get_system_prompt() @@ -22,11 +30,12 @@ def test_midconveration_parser_change(mock_call_llm_api): def test_no_parser_prompt(mock_call_llm_api): session_context = SESSION_CONTEXT.get() - config = session_context.config + config = mentat.user_session.get("config") conversation = session_context.conversation assert len(conversation.get_messages()) == 1 - config.no_parser_prompt = True + config.ai.no_parser_prompt = True + mentat.user_session.set("config", config) assert len(conversation.get_messages()) == 0 diff --git a/tests/embeddings_test.py b/tests/embeddings_test.py index 12bb99180..c1401e5ff 100644 --- a/tests/embeddings_test.py +++ b/tests/embeddings_test.py @@ -19,14 +19,12 @@ async def test_get_feature_similarity_scores(mocker, mock_call_embedding_api): _make_code_feature(Path(f"file{i}.txt").resolve(), f"File {i}") for i in range(3) ] - mock_call_embedding_api.set_embedding_values( - [ - [0.7, 0.7, 0.7], # The prompt - [0.4, 0.4, 0.4], - [0.5, 0.6, 0.7], - [0.69, 0.7, 0.71], - ] - ) + mock_call_embedding_api.set_embedding_values([ + [0.7, 0.7, 0.7], # The prompt + [0.4, 0.4, 0.4], + [0.5, 0.6, 0.7], + [0.69, 0.7, 0.71], + ]) result = await get_feature_similarity_scores(prompt, features) assert len(result) == 3 assert max(result) == result[0] # The first feature is most similar diff --git 
a/tests/llm_api_handler_test.py b/tests/llm_api_handler_test.py index 31fd1d691..fcbe8137f 100644 --- a/tests/llm_api_handler_test.py +++ b/tests/llm_api_handler_test.py @@ -23,11 +23,9 @@ def test_prompt_tokens(): img_base64 = base64.b64encode(buffer.getvalue()).decode() image_url = f"data:image/png;base64,{img_base64}" - messages.append( - { - "role": "user", - "content": [{"type": "image_url", "image_url": {"url": image_url}}], - } - ) + messages.append({ + "role": "user", + "content": [{"type": "image_url", "image_url": {"url": image_url}}], + }) assert prompt_tokens(messages, model) == 24 + 6 * 170 + 85 + 5 diff --git a/tests/parser_tests/block_format_error_test.py b/tests/parser_tests/block_format_error_test.py index 9be29dd9a..a45366159 100644 --- a/tests/parser_tests/block_format_error_test.py +++ b/tests/parser_tests/block_format_error_test.py @@ -3,14 +3,14 @@ import pytest -from mentat.config import Config +from mentat.config import ParserSettings from mentat.parsers.block_parser import BlockParser from mentat.session import Session @pytest.fixture(autouse=True) def block_parser(mocker): - mocker.patch.object(Config, "parser", new=BlockParser()) + mocker.patch.object(ParserSettings, "parser", new=BlockParser()) temp_file_name = "temp.py" @@ -54,13 +54,11 @@ async def error_test_template( with open(temp_file_name, "w") as f: f.write("") - mock_collect_user_input.set_stream_messages( - [ - "Go!", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Go!", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([changes]) session = Session(cwd=Path.cwd(), paths=[temp_file_name]) diff --git a/tests/parser_tests/block_format_test.py b/tests/parser_tests/block_format_test.py index 3310aab75..cec5bc853 100644 --- a/tests/parser_tests/block_format_test.py +++ b/tests/parser_tests/block_format_test.py @@ -4,7 +4,7 @@ import pytest -from mentat.config import Config +from mentat.config import ParserSettings from mentat.parsers.block_parser import BlockParser from mentat.session import Session from tests.parser_tests.inverse import verify_inverse @@ -12,7 +12,7 @@ @pytest.fixture def block_parser(mocker): - mocker.patch.object(Config, "parser", new=BlockParser()) + mocker.patch.object(ParserSettings, "parser", new=BlockParser()) @pytest.mark.asyncio @@ -24,13 +24,11 @@ async def test_insert(mock_call_llm_api, mock_collect_user_input, block_parser): # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment between both lines. 
@@ -72,13 +70,11 @@ async def test_replace(mock_call_llm_api, mock_collect_user_input, block_parser) # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will replace both lines with one comment @@ -120,13 +116,11 @@ async def test_delete(mock_call_llm_api, mock_collect_user_input, block_parser): # with 4 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will delete the middle two lines @@ -160,13 +154,11 @@ async def test_delete(mock_call_llm_api, mock_collect_user_input, block_parser): async def test_create_file(mock_call_llm_api, mock_collect_user_input, block_parser): # Create a temporary file temp_file_name = "new_dir/temp.py" - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will create a new file called temp.py @@ -201,14 +193,12 @@ async def test_delete_file(mock_call_llm_api, mock_collect_user_input, block_par with open(temp_file_name, "w") as f: f.write("# I am not long for this world") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will delete the file @@ -239,13 +229,11 @@ async def test_rename_file(mock_call_llm_api, mock_collect_user_input, block_par with open(temp_file_name, "w") as f: f.write("# Move me!") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ I will rename the file @@ -280,13 +268,11 @@ async def test_change_then_rename_file( with open(temp_file_name, "w") as f: f.write("# Move me!") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ I will insert a comment then rename the file @@ -333,13 +319,11 @@ async def test_rename_file_then_change( with open(temp_file_name, "w") as f: f.write("# Move me!") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ I will rename the file then insert a comment @@ -389,13 +373,11 @@ async def test_multiple_blocks( # just for # good measure""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment between the first two lines @@ -455,13 +437,11 @@ async def test_json_strings(mock_call_llm_api, mock_collect_user_input, block_pa f.write(dedent("""\ # This is a temporary file""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) 
mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment at the start. diff --git a/tests/parser_tests/replacement_format_error_test.py b/tests/parser_tests/replacement_format_error_test.py index f6e0013d7..105e70ca1 100644 --- a/tests/parser_tests/replacement_format_error_test.py +++ b/tests/parser_tests/replacement_format_error_test.py @@ -3,14 +3,15 @@ import pytest -from mentat.config import Config +import mentat +from mentat.config import ParserSettings from mentat.parsers.replacement_parser import ReplacementParser from mentat.session import Session @pytest.fixture(autouse=True) def replacement_parser(mocker): - mocker.patch.object(Config, "parser", new=ReplacementParser()) + mocker.patch.object(ParserSettings, "parser", new=ReplacementParser()) @pytest.mark.asyncio @@ -19,18 +20,22 @@ async def test_invalid_line_numbers( mock_collect_user_input, ): temp_file_name = "temp.py" - with open(temp_file_name, "w") as f: + temp_file_location = Path.cwd() / temp_file_name + + config = mentat.user_session.get("config") + config.parser.parser = ReplacementParser() + mentat.user_session.set("config", config) + + with open(temp_file_location, "w") as f: f.write(dedent("""\ # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -44,15 +49,16 @@ async def test_invalid_line_numbers( # I also will not be used @""")]) - session = Session(cwd=Path.cwd(), paths=[temp_file_name]) + session = Session(cwd=Path.cwd(), paths=[Path(temp_file_location)]) session.start() await session.stream.recv(channel="client_exit") - with open(temp_file_name, "r") as f: + with open(temp_file_location, "r") as f: content = f.read() expected_content = dedent("""\ # This is a temporary file # I inserted this comment # with 2 lines""") + assert content == expected_content @@ -61,19 +67,21 @@ async def test_invalid_special_line( mock_call_llm_api, mock_collect_user_input, ): + config = mentat.user_session.get("config") + config.parser.parser = ReplacementParser() + mentat.user_session.set("config", config) + temp_file_name = "temp.py" with open(temp_file_name, "w") as f: f.write(dedent("""\ # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/parser_tests/replacement_format_test.py b/tests/parser_tests/replacement_format_test.py index 3d57dd6e2..3fccc692b 100644 --- a/tests/parser_tests/replacement_format_test.py +++ b/tests/parser_tests/replacement_format_test.py @@ -3,15 +3,17 @@ import pytest -from mentat.config import Config +import mentat from mentat.parsers.replacement_parser import ReplacementParser from mentat.session import Session from tests.parser_tests.inverse import verify_inverse -@pytest.fixture -def replacement_parser(mocker): - mocker.patch.object(Config, "parser", new=ReplacementParser()) +@pytest.fixture() +def replacement_parser(): + config = mentat.user_session.get("config") + config.parser.parser = ReplacementParser() + mentat.user_session.set("config", config) @pytest.mark.asyncio @@ -22,13 +24,11 @@ async def test_insert(mock_call_llm_api, mock_collect_user_input, replacement_pa # This is a temporary file # with 2 lines""")) - 
mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -56,13 +56,11 @@ async def test_delete(mock_call_llm_api, mock_collect_user_input, replacement_pa # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -87,13 +85,11 @@ async def test_replace(mock_call_llm_api, mock_collect_user_input, replacement_p # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -117,13 +113,11 @@ async def test_create_file( mock_call_llm_api, mock_collect_user_input, replacement_parser ): temp_file_name = "temp.py" - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -152,14 +146,12 @@ async def test_delete_file( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -182,13 +174,11 @@ async def test_rename_file( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -217,13 +207,11 @@ async def test_change_then_rename_then_change( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/parser_tests/unified_diff_format_error_test.py b/tests/parser_tests/unified_diff_format_error_test.py index c4a955cf1..e9714f23f 100644 --- a/tests/parser_tests/unified_diff_format_error_test.py +++ b/tests/parser_tests/unified_diff_format_error_test.py @@ -1,7 +1,7 @@ from pathlib import Path from textwrap import dedent -from mentat.config import Config +from mentat.config import ParserSettings from mentat.parsers.unified_diff_parser import UnifiedDiffParser from mentat.session import Session from tests.conftest import pytest @@ -9,7 +9,7 @@ @pytest.fixture(autouse=True) def unified_diff_parser(mocker): - mocker.patch.object(Config, "parser", new=UnifiedDiffParser()) + mocker.patch.object(ParserSettings, "parser", new=UnifiedDiffParser()) @pytest.mark.asyncio @@ -26,13 +26,11 @@ async def test_not_matching( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -72,13 +70,11 @@ async def test_no_prefix( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - 
"y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/parser_tests/unified_diff_format_test.py b/tests/parser_tests/unified_diff_format_test.py index 54feb3d0a..58de902b5 100644 --- a/tests/parser_tests/unified_diff_format_test.py +++ b/tests/parser_tests/unified_diff_format_test.py @@ -3,14 +3,16 @@ import pytest -from mentat.config import Config +import mentat from mentat.parsers.unified_diff_parser import UnifiedDiffParser from mentat.session import Session @pytest.fixture(autouse=True) -def unified_diff_parser(mocker): - mocker.patch.object(Config, "parser", new=UnifiedDiffParser()) +def unified_diff_parser(): + config = mentat.user_session.get("config") + config.parser.parser = UnifiedDiffParser() + mentat.user_session.set("config", config) @pytest.mark.asyncio @@ -18,6 +20,7 @@ async def test_replacement( mock_call_llm_api, mock_collect_user_input, ): + temp_file_name = Path("temp.py").absolute() with open(temp_file_name, "w") as f: f.write(dedent("""\ @@ -26,13 +29,11 @@ async def test_replacement( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -55,6 +56,7 @@ async def test_replacement( # This is # your captain speaking # 4 lines""") + assert content == expected_content @@ -75,13 +77,11 @@ async def test_multiple_replacements( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -135,13 +135,11 @@ async def test_multiple_replacement_spots( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -190,13 +188,11 @@ async def test_little_context_addition( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -238,13 +234,11 @@ async def test_empty_file( with open(temp_file_name, "w") as f: f.write("") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -274,13 +268,11 @@ async def test_creation( ): temp_file_name = Path("temp.py") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -318,14 +310,12 @@ async def test_deletion( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -352,13 +342,11 @@ async def test_no_ending_marker( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + 
mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/record_benchmark.py b/tests/record_benchmark.py index 051d1cfc9..16f275684 100644 --- a/tests/record_benchmark.py +++ b/tests/record_benchmark.py @@ -26,14 +26,12 @@ def main(run=False, threshold=0.75, count=1): if run: for _ in range(count): os.environ["MENTAT_BENCHMARKS_RUNNING"] = "true" - pytest.main( - [ - benchmark_location, - "--benchmark", - "--report-log", - benchmark_log_location, - ] - ) + pytest.main([ + benchmark_location, + "--benchmark", + "--report-log", + benchmark_log_location, + ]) os.environ["MENTAT_BENCHMARKS_RUNNING"] = "false" print() nodes = [] diff --git a/tests/sampler_test.py b/tests/sampler_test.py index 173289b4e..804f694d3 100644 --- a/tests/sampler_test.py +++ b/tests/sampler_test.py @@ -10,6 +10,7 @@ ChatCompletionUserMessageParam, ) +import mentat from mentat.errors import SampleError from mentat.git_handler import get_git_diff from mentat.parsers.block_parser import BlockParser @@ -35,7 +36,13 @@ async def test_sample_from_context( mock_session_context, mock_collect_user_input, ): - mock_session_context.config.sample_repo = "test_sample_repo" + mentat.user_session.set( + "sampler_settings", + { + "repo": "test_sample_repo", + "merge_base_target": "", + }, + ) mocker.patch( "mentat.conversation.Conversation.get_messages", @@ -62,14 +69,12 @@ async def test_sample_from_context( with open("test_file.py", "w") as f: f.write("test_file_content\n") - mock_collect_user_input.set_stream_messages( - [ - "", - "test_title", - "test_description", - "test_test_command", - ] - ) + mock_collect_user_input.set_stream_messages([ + "", + "test_title", + "test_description", + "test_test_command", + ]) sampler = Sampler() sample = await sampler.create_sample() assert sample.title == "test_title" @@ -99,19 +104,25 @@ def is_sha1(string: str) -> bool: @pytest.mark.asyncio async def test_sample_command(temp_testbed, mock_collect_user_input, mock_call_llm_api): - mock_collect_user_input.set_stream_messages( - [ - "Request", - "y", - f"/sample {temp_testbed.as_posix()}", - "", - "test_url", - "test_title", - "test_description", - "test_test_command", - "q", - ] + mentat.user_session.set( + "sampler_settings", + { + "repo": None, + "merge_base_target": None, + }, ) + + mock_collect_user_input.set_stream_messages([ + "Request", + "y", + f"/sample {temp_testbed.as_posix()}", + "", + "test_url", + "test_title", + "test_description", + "test_test_command", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment in both files. 
@@ -327,7 +338,9 @@ def get_updates_as_parsed_llm_message(cwd): async def test_sampler_integration( temp_testbed, mock_session_context, mock_call_llm_api ): - # Setup the environemnt + mentat.user_session.set("sampler_settings", {"repo": None}) + + # Setup the environment repo = Repo(temp_testbed) (temp_testbed / "test_file.py").write_text("permanent commit") repo.git.add("test_file.py") @@ -403,6 +416,7 @@ async def test_sampler_integration( # Evaluate the sample using Mentat sample_files = list(temp_testbed.glob("sample_*.json")) assert len(sample_files) == 1 + sample = Sample.load(sample_files[0]) assert sample.title == "test_title" assert sample.description == "test_description" diff --git a/tests/system_test.py b/tests/system_test.py index 353252b7f..55f1e50c7 100644 --- a/tests/system_test.py +++ b/tests/system_test.py @@ -15,14 +15,12 @@ async def test_system(mock_call_llm_api, mock_collect_user_input): with open(temp_file_name, "w") as f: f.write("# This is a temporary file.") - mock_collect_user_input.set_stream_messages( - [ - "Add changes to the file", - "i", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Add changes to the file", + "i", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will add a print statement. @@ -70,16 +68,14 @@ async def test_interactive_change_selection(mock_call_llm_api, mock_collect_user with open(temp_file_name, "w") as f: f.write("# This is a temporary file for interactive test.") - mock_collect_user_input.set_stream_messages( - [ - "Add changes to the file", - "i", - "y", - "n", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Add changes to the file", + "i", + "y", + "n", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will make three changes to the file. @@ -179,13 +175,11 @@ async def test_sub_directory( with monkeypatch.context() as m: m.chdir("scripts") file_name = "calculator.py" - mock_collect_user_input.set_stream_messages( - [ - "Add changes to the file", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Add changes to the file", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -237,11 +231,9 @@ async def test_recursive_git_repositories(temp_testbed, mock_collect_user_input) f.write("") files.append(temp_testbed / file_path) - mock_collect_user_input.set_stream_messages( - [ - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "q", + ]) session = Session(cwd=temp_testbed, paths=[Path(".")]) session.start()