From 0376cda0cf86b03dd51bb14af47c1f89c3d3a9fc Mon Sep 17 00:00:00 2001
From: Gregory Lifhits
Date: Mon, 18 Dec 2023 08:57:06 -0500
Subject: [PATCH 01/24] Replace dev-requirements.txt and requirements.txt with poetry.lock

The dev-requirements.txt and requirements.txt files have been deleted
and replaced with a poetry.lock file. Switching to Poetry for
dependency management simplifies dependency declaration and pins each
dependency to an exact version, giving consistent installs across
environments. All previously listed dependencies have been moved into
the newly generated Poetry lock file.
---
 dev-requirements.txt |    8 -
 poetry.lock          | 1824 ++++++++++++++++++++++++++++++++++++++++++
 pyproject.toml       |   48 ++
 requirements.txt     |   24 -
 4 files changed, 1872 insertions(+), 32 deletions(-)
 delete mode 100644 dev-requirements.txt
 create mode 100644 poetry.lock
 delete mode 100644 requirements.txt

diff --git a/dev-requirements.txt b/dev-requirements.txt
deleted file mode 100644
index bbbe1f042..000000000
--- a/dev-requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-aiomultiprocess==0.9.0
-black==23.9.1
-gitpython==3.1.37
-isort==5.12.0
-pip-licenses==4.3.3
-pyright==1.1.339
-pytest-xdist==3.3.1
-ruff==0.0.292
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 000000000..618c44749
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,1824 @@
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
+
+[[package]]
+name = "aiomultiprocess"
+version = "0.9.0"
+description = "AsyncIO version of the standard multiprocessing module"
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "aiomultiprocess-0.9.0-py3-none-any.whl", hash = "sha256:3036c4c881cfbc63674686e036097f22309017c6bf96b04722a542ac9cac7423"},
+    {file = "aiomultiprocess-0.9.0.tar.gz", hash = "sha256:07e7d5657697678d9d2825d4732dfd7655139762dee665167380797c02c68848"},
+]
+
+[[package]]
+name = "annotated-types"
+version = "0.6.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"},
+    {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"},
+]
+
+[[package]]
+name = "anyio"
+version = "4.2.0"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"},
+    {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"},
+]
+
+[package.dependencies]
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
+idna = ">=2.8"
+sniffio = ">=1.1"
+typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.23)"]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "attrs-23.1.0-py3-none-any.whl", hash = 
"sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "black" +version = "23.12.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.8" +files = [ + {file = "black-23.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67f19562d367468ab59bd6c36a72b2c84bc2f16b59788690e02bbcb140a77175"}, + {file = "black-23.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bbd75d9f28a7283b7426160ca21c5bd640ca7cd8ef6630b4754b6df9e2da8462"}, + {file = "black-23.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:593596f699ca2dcbbbdfa59fcda7d8ad6604370c10228223cd6cf6ce1ce7ed7e"}, + {file = "black-23.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:12d5f10cce8dc27202e9a252acd1c9a426c83f95496c959406c96b785a92bb7d"}, + {file = "black-23.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e73c5e3d37e5a3513d16b33305713237a234396ae56769b839d7c40759b8a41c"}, + {file = "black-23.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ba09cae1657c4f8a8c9ff6cfd4a6baaf915bb4ef7d03acffe6a2f6585fa1bd01"}, + {file = "black-23.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace64c1a349c162d6da3cef91e3b0e78c4fc596ffde9413efa0525456148873d"}, + {file = "black-23.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:72db37a2266b16d256b3ea88b9affcdd5c41a74db551ec3dd4609a59c17d25bf"}, + {file = "black-23.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fdf6f23c83078a6c8da2442f4d4eeb19c28ac2a6416da7671b72f0295c4a697b"}, + {file = "black-23.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39dda060b9b395a6b7bf9c5db28ac87b3c3f48d4fdff470fa8a94ab8271da47e"}, + {file = "black-23.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7231670266ca5191a76cb838185d9be59cfa4f5dd401b7c1c70b993c58f6b1b5"}, + {file = "black-23.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:193946e634e80bfb3aec41830f5d7431f8dd5b20d11d89be14b84a97c6b8bc75"}, + {file = "black-23.12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcf91b01ddd91a2fed9a8006d7baa94ccefe7e518556470cf40213bd3d44bbbc"}, + {file = "black-23.12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:996650a89fe5892714ea4ea87bc45e41a59a1e01675c42c433a35b490e5aa3f0"}, + {file = "black-23.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdbff34c487239a63d86db0c9385b27cdd68b1bfa4e706aa74bb94a435403672"}, + {file = "black-23.12.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:97af22278043a6a1272daca10a6f4d36c04dfa77e61cbaaf4482e08f3640e9f0"}, + {file = "black-23.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ead25c273adfad1095a8ad32afdb8304933efba56e3c1d31b0fee4143a1e424a"}, + {file = "black-23.12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c71048345bdbced456cddf1622832276d98a710196b842407840ae8055ade6ee"}, + {file = "black-23.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a832b6e00eef2c13b3239d514ea3b7d5cc3eaa03d0474eedcbbda59441ba5d"}, + {file = "black-23.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:6a82a711d13e61840fb11a6dfecc7287f2424f1ca34765e70c909a35ffa7fb95"}, + {file = "black-23.12.0-py3-none-any.whl", hash = "sha256:a7c07db8200b5315dc07e331dda4d889a56f6bf4db6a9c2a526fa3166a81614f"}, + {file = "black-23.12.0.tar.gz", hash = "sha256:330a327b422aca0634ecd115985c1c7fd7bdb5b5a2ef8aa9888a82e2ebe9437a"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "certifi" +version = "2023.11.17" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, + {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = 
"charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "distro" +version = "1.8.0" +description = "Distro - an OS platform information API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, + {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "execnet" +version = "2.0.2" +description = "execnet: rapid multi-Python deployment" +optional = false +python-versions = ">=3.7" +files = [ + {file = "execnet-2.0.2-py3-none-any.whl", hash = "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41"}, + {file = "execnet-2.0.2.tar.gz", hash = "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"}, +] + +[package.extras] +testing = ["hatch", "pre-commit", "pytest", "tox"] + +[[package]] +name = "fire" +version = "0.5.0" +description = "A library for automatically generating command line interfaces." 
+optional = false +python-versions = "*" +files = [ + {file = "fire-0.5.0.tar.gz", hash = "sha256:a6b0d49e98c8963910021f92bba66f65ab440da2982b78eb1bbf95a0a34aacc6"}, +] + +[package.dependencies] +six = "*" +termcolor = "*" + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.40" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"}, + {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.2" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"}, + {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.23.0)"] + +[[package]] +name = "httpx" +version = "0.25.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"}, + {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "idna" +version = "3.6" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jsonschema" +version = "4.20.0" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema-4.20.0-py3-none-any.whl", hash = "sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3"}, + {file = "jsonschema-4.20.0.tar.gz", hash = "sha256:4f614fd46d8d61258610998997743ec5492a648b33cf478c1ddc23ed4598a5fa"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +jsonschema-specifications = ">=2023.03.6" +referencing = ">=0.28.4" +rpds-py = ">=0.7.1" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jsonschema-specifications" +version = "2023.11.2" +description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonschema_specifications-2023.11.2-py3-none-any.whl", hash = "sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93"}, + {file = "jsonschema_specifications-2023.11.2.tar.gz", hash = "sha256:9472fc4fea474cd74bea4a2b190daeccb5a9e4db2ea80efcf7a1b582fc9a81b8"}, +] + +[package.dependencies] +referencing = ">=0.31.0" + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, + {file = 
"MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, + {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = 
"MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.8.0" +description = "Node.js virtual environment builder" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.26.2" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"}, + {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"}, + {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"}, + {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"}, + {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"}, + {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"}, + {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"}, + {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"}, + {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"}, + {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"}, + {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"}, + {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"}, + {file = 
"numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"}, + {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"}, + {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"}, + {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"}, + {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"}, + {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"}, + {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"}, + {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"}, + {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"}, + {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"}, + {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"}, + {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"}, +] + +[[package]] +name = "openai" +version = "1.5.0" +description = "The official Python library for the openai API" +optional = false +python-versions = ">=3.7.1" +files = [ + {file = "openai-1.5.0-py3-none-any.whl", hash = "sha256:42d8c84b0714c990e18afe81d37f8a64423e8196bf7157b8ea665b8d8f393253"}, + {file = "openai-1.5.0.tar.gz", hash = "sha256:4cd91e97988ccd6c44f815107def9495cbc718aeb8b28be33a87b6fa2c432508"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<5" +distro = ">=1.7.0,<2" +httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<3" +sniffio = "*" +tqdm = ">4" +typing-extensions = ">=4.5,<5" + +[package.extras] +datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] + +[[package]] +name = "outcome" +version = "1.3.0.post0" +description = "Capture 
the outcome of Python function calls." +optional = false +python-versions = ">=3.7" +files = [ + {file = "outcome-1.3.0.post0-py2.py3-none-any.whl", hash = "sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b"}, + {file = "outcome-1.3.0.post0.tar.gz", hash = "sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8"}, +] + +[package.dependencies] +attrs = ">=19.2.0" + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pillow" +version = "10.1.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, + {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, + {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, + {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, + {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, + {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, + {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, + 
{file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, + {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, + {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, + {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, + {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, + {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, + {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, + {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, + {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, + {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, + {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, + {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, + {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, + {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, + {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, + {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, + {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, + {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "pip-licenses" +version = "4.3.3" +description = "Dump the software license list of Python packages installed with pip." +optional = false +python-versions = "~=3.8" +files = [ + {file = "pip-licenses-4.3.3.tar.gz", hash = "sha256:d14447094135eb5e43e4d9e1e3bcdb17a05751a9199df2d07f043a542c241c7a"}, + {file = "pip_licenses-4.3.3-py3-none-any.whl", hash = "sha256:1b697cace3149d7d380307bb1f1e0505f0db98f25fada64d32b7e6240f37f72c"}, +] + +[package.dependencies] +prettytable = ">=2.3.0" + +[package.extras] +test = ["docutils", "mypy", "pytest-cov", "pytest-pycodestyle", "pytest-runner"] + +[[package]] +name = "platformdirs" +version = "4.1.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. 
a \"user data dir\"." +optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, + {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "prettytable" +version = "3.9.0" +description = "A simple Python library for easily displaying tabular data in a visually appealing ASCII table format" +optional = false +python-versions = ">=3.8" +files = [ + {file = "prettytable-3.9.0-py3-none-any.whl", hash = "sha256:a71292ab7769a5de274b146b276ce938786f56c31cf7cea88b6f3775d82fe8c8"}, + {file = "prettytable-3.9.0.tar.gz", hash = "sha256:f4ed94803c23073a90620b201965e5dc0bccf1760b7a7eaf3158cab8aaffdf34"}, +] + +[package.dependencies] +wcwidth = "*" + +[package.extras] +tests = ["pytest", "pytest-cov", "pytest-lazy-fixture"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.43" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, + {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "2.5.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-2.5.2-py3-none-any.whl", hash = "sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0"}, + {file = "pydantic-2.5.2.tar.gz", hash = "sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.14.5" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.14.5" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic_core-2.14.5-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd"}, + {file = 
"pydantic_core-2.14.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113"}, + {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66"}, + {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997"}, + {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093"}, + {file = "pydantic_core-2.14.5-cp310-none-win32.whl", hash = "sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720"}, + {file = "pydantic_core-2.14.5-cp310-none-win_amd64.whl", hash = "sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b"}, + {file = "pydantic_core-2.14.5-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459"}, + {file = "pydantic_core-2.14.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6"}, + {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4"}, + {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada"}, + {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda"}, + {file = "pydantic_core-2.14.5-cp311-none-win32.whl", hash = 
"sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651"}, + {file = "pydantic_core-2.14.5-cp311-none-win_amd64.whl", hash = "sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077"}, + {file = "pydantic_core-2.14.5-cp311-none-win_arm64.whl", hash = "sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf"}, + {file = "pydantic_core-2.14.5-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093"}, + {file = "pydantic_core-2.14.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc"}, + {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e"}, + {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69"}, + {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d"}, + {file = "pydantic_core-2.14.5-cp312-none-win32.whl", hash = "sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260"}, + {file = "pydantic_core-2.14.5-cp312-none-win_amd64.whl", hash = "sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36"}, + {file = "pydantic_core-2.14.5-cp312-none-win_arm64.whl", hash = "sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4"}, + {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325"}, + {file = "pydantic_core-2.14.5-cp37-none-win32.whl", hash = "sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405"}, + {file = "pydantic_core-2.14.5-cp37-none-win_amd64.whl", hash = "sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588"}, + {file = "pydantic_core-2.14.5-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf"}, + {file = "pydantic_core-2.14.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331"}, + {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b"}, + {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec"}, + {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124"}, + {file = "pydantic_core-2.14.5-cp38-none-win32.whl", hash = "sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867"}, + {file = "pydantic_core-2.14.5-cp38-none-win_amd64.whl", hash = "sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d"}, + {file = "pydantic_core-2.14.5-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7"}, + {file = "pydantic_core-2.14.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db"}, + {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955"}, + {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5"}, + {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209"}, + {file = "pydantic_core-2.14.5-cp39-none-win32.whl", hash = "sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6"}, + {file = "pydantic_core-2.14.5-cp39-none-win_amd64.whl", hash = "sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7"}, + {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634"}, + {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88"}, + {file = 
"pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8"}, + {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe"}, + {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3"}, + {file = "pydantic_core-2.14.5.tar.gz", hash = "sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pyright" +version = "1.1.341" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.341-py3-none-any.whl", hash = "sha256:f5800daf9d5780ebf6c6e04064a6d20da99c0ef16efd77526f83cc8d8551ff9f"}, + {file = "pyright-1.1.341.tar.gz", hash = "sha256:b891721f3abd10635cc4fd3076bcff5b7676567dc3a629997ed59a0d30034a87"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" + +[package.extras] +all = ["twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] + +[[package]] +name = "pysocks" +version = "1.7.1" +description = "A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "PySocks-1.7.1-py27-none-any.whl", hash = "sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299"}, + {file = "PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5"}, + {file = "PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0"}, +] + +[[package]] +name = "pytest" +version = "7.4.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, + {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.21.1" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"}, + {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] + +[[package]] +name = "pytest-mock" +version = "3.12.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, 
+] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-reportlog" +version = "0.4.0" +description = "Replacement for the --resultlog option, focused in simplicity and extensibility" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-reportlog-0.4.0.tar.gz", hash = "sha256:c9f2079504ee51f776d3118dcf5e4730f163d3dcf26ebc8f600c1fa307bf638c"}, + {file = "pytest_reportlog-0.4.0-py3-none-any.whl", hash = "sha256:5db4d00586546d8c6b95c66466629f1e913440c36d97795a673d2e19c5cedd5c"}, +] + +[package.dependencies] +pytest = "*" + +[package.extras] +dev = ["pre-commit", "tox"] + +[[package]] +name = "pytest-xdist" +version = "3.5.0" +description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-xdist-3.5.0.tar.gz", hash = "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a"}, + {file = "pytest_xdist-3.5.0-py3-none-any.whl", hash = "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"}, +] + +[package.dependencies] +execnet = ">=1.1" +pytest = ">=6.2.0" + +[package.extras] +psutil = ["psutil (>=3.0)"] +setproctitle = ["setproctitle"] +testing = ["filelock"] + +[[package]] +name = "python-dotenv" +version = "1.0.0" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, + {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "referencing" +version = "0.32.0" +description = "JSON Referencing + Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "referencing-0.32.0-py3-none-any.whl", hash = "sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99"}, + {file = "referencing-0.32.0.tar.gz", hash = "sha256:689e64fe121843dcfd57b71933318ef1f91188ffb45367332700a86ac8fd6161"}, +] + +[package.dependencies] +attrs = ">=22.2.0" +rpds-py = ">=0.7.0" + +[[package]] +name = "regex" +version = "2023.10.3" +description = "Alternative regular expression module, to replace re." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "regex-2023.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4c34d4f73ea738223a094d8e0ffd6d2c1a1b4c175da34d6b0de3d8d69bee6bcc"}, + {file = "regex-2023.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8f4e49fc3ce020f65411432183e6775f24e02dff617281094ba6ab079ef0915"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cd1bccf99d3ef1ab6ba835308ad85be040e6a11b0977ef7ea8c8005f01a3c29"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81dce2ddc9f6e8f543d94b05d56e70d03a0774d32f6cca53e978dc01e4fc75b8"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c6b4d23c04831e3ab61717a707a5d763b300213db49ca680edf8bf13ab5d91b"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c15ad0aee158a15e17e0495e1e18741573d04eb6da06d8b84af726cfc1ed02ee"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6239d4e2e0b52c8bd38c51b760cd870069f0bdf99700a62cd509d7a031749a55"}, + {file = "regex-2023.10.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4a8bf76e3182797c6b1afa5b822d1d5802ff30284abe4599e1247be4fd6b03be"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9c727bbcf0065cbb20f39d2b4f932f8fa1631c3e01fcedc979bd4f51fe051c5"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3ccf2716add72f80714b9a63899b67fa711b654be3fcdd34fa391d2d274ce767"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:107ac60d1bfdc3edb53be75e2a52aff7481b92817cfdddd9b4519ccf0e54a6ff"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:00ba3c9818e33f1fa974693fb55d24cdc8ebafcb2e4207680669d8f8d7cca79a"}, + {file = "regex-2023.10.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f0a47efb1dbef13af9c9a54a94a0b814902e547b7f21acb29434504d18f36e3a"}, + {file = "regex-2023.10.3-cp310-cp310-win32.whl", hash = "sha256:36362386b813fa6c9146da6149a001b7bd063dabc4d49522a1f7aa65b725c7ec"}, + {file = "regex-2023.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:c65a3b5330b54103e7d21cac3f6bf3900d46f6d50138d73343d9e5b2900b2353"}, + {file = "regex-2023.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:90a79bce019c442604662d17bf69df99090e24cdc6ad95b18b6725c2988a490e"}, + {file = "regex-2023.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c7964c2183c3e6cce3f497e3a9f49d182e969f2dc3aeeadfa18945ff7bdd7051"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef80829117a8061f974b2fda8ec799717242353bff55f8a29411794d635d964"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5addc9d0209a9afca5fc070f93b726bf7003bd63a427f65ef797a931782e7edc"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c148bec483cc4b421562b4bcedb8e28a3b84fcc8f0aa4418e10898f3c2c0eb9b"}, + {file = "regex-2023.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d1f21af4c1539051049796a0f50aa342f9a27cde57318f2fc41ed50b0dbc4ac"}, + {file = 
"regex-2023.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b9ac09853b2a3e0d0082104036579809679e7715671cfbf89d83c1cb2a30f58"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ebedc192abbc7fd13c5ee800e83a6df252bec691eb2c4bedc9f8b2e2903f5e2a"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d8a993c0a0ffd5f2d3bda23d0cd75e7086736f8f8268de8a82fbc4bd0ac6791e"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:be6b7b8d42d3090b6c80793524fa66c57ad7ee3fe9722b258aec6d0672543fd0"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4023e2efc35a30e66e938de5aef42b520c20e7eda7bb5fb12c35e5d09a4c43f6"}, + {file = "regex-2023.10.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0d47840dc05e0ba04fe2e26f15126de7c755496d5a8aae4a08bda4dd8d646c54"}, + {file = "regex-2023.10.3-cp311-cp311-win32.whl", hash = "sha256:9145f092b5d1977ec8c0ab46e7b3381b2fd069957b9862a43bd383e5c01d18c2"}, + {file = "regex-2023.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:b6104f9a46bd8743e4f738afef69b153c4b8b592d35ae46db07fc28ae3d5fb7c"}, + {file = "regex-2023.10.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bff507ae210371d4b1fe316d03433ac099f184d570a1a611e541923f78f05037"}, + {file = "regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be5e22bbb67924dea15039c3282fa4cc6cdfbe0cbbd1c0515f9223186fc2ec5f"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a992f702c9be9c72fa46f01ca6e18d131906a7180950958f766c2aa294d4b41"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7434a61b158be563c1362d9071358f8ab91b8d928728cd2882af060481244c9e"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2169b2dcabf4e608416f7f9468737583ce5f0a6e8677c4efbf795ce81109d7c"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9e908ef5889cda4de038892b9accc36d33d72fb3e12c747e2799a0e806ec841"}, + {file = "regex-2023.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12bd4bc2c632742c7ce20db48e0d99afdc05e03f0b4c1af90542e05b809a03d9"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bc72c231f5449d86d6c7d9cc7cd819b6eb30134bb770b8cfdc0765e48ef9c420"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bce8814b076f0ce5766dc87d5a056b0e9437b8e0cd351b9a6c4e1134a7dfbda9"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:ba7cd6dc4d585ea544c1412019921570ebd8a597fabf475acc4528210d7c4a6f"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b0c7d2f698e83f15228ba41c135501cfe7d5740181d5903e250e47f617eb4292"}, + {file = "regex-2023.10.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5a8f91c64f390ecee09ff793319f30a0f32492e99f5dc1c72bc361f23ccd0a9a"}, + {file = "regex-2023.10.3-cp312-cp312-win32.whl", hash = "sha256:ad08a69728ff3c79866d729b095872afe1e0557251da4abb2c5faff15a91d19a"}, + {file = "regex-2023.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:39cdf8d141d6d44e8d5a12a8569d5a227f645c87df4f92179bd06e2e2705e76b"}, + {file = "regex-2023.10.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4a3ee019a9befe84fa3e917a2dd378807e423d013377a884c1970a3c2792d293"}, + 
{file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76066d7ff61ba6bf3cb5efe2428fc82aac91802844c022d849a1f0f53820502d"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe50b61bab1b1ec260fa7cd91106fa9fece57e6beba05630afe27c71259c59b"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fd88f373cb71e6b59b7fa597e47e518282455c2734fd4306a05ca219a1991b0"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3ab05a182c7937fb374f7e946f04fb23a0c0699c0450e9fb02ef567412d2fa3"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dac37cf08fcf2094159922edc7a2784cfcc5c70f8354469f79ed085f0328ebdf"}, + {file = "regex-2023.10.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:e54ddd0bb8fb626aa1f9ba7b36629564544954fff9669b15da3610c22b9a0991"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3367007ad1951fde612bf65b0dffc8fd681a4ab98ac86957d16491400d661302"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:16f8740eb6dbacc7113e3097b0a36065a02e37b47c936b551805d40340fb9971"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:f4f2ca6df64cbdd27f27b34f35adb640b5d2d77264228554e68deda54456eb11"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:39807cbcbe406efca2a233884e169d056c35aa7e9f343d4e78665246a332f597"}, + {file = "regex-2023.10.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7eece6fbd3eae4a92d7c748ae825cbc1ee41a89bb1c3db05b5578ed3cfcfd7cb"}, + {file = "regex-2023.10.3-cp37-cp37m-win32.whl", hash = "sha256:ce615c92d90df8373d9e13acddd154152645c0dc060871abf6bd43809673d20a"}, + {file = "regex-2023.10.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0f649fa32fe734c4abdfd4edbb8381c74abf5f34bc0b3271ce687b23729299ed"}, + {file = "regex-2023.10.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9b98b7681a9437262947f41c7fac567c7e1f6eddd94b0483596d320092004533"}, + {file = "regex-2023.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:91dc1d531f80c862441d7b66c4505cd6ea9d312f01fb2f4654f40c6fdf5cc37a"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82fcc1f1cc3ff1ab8a57ba619b149b907072e750815c5ba63e7aa2e1163384a4"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7979b834ec7a33aafae34a90aad9f914c41fd6eaa8474e66953f3f6f7cbd4368"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef71561f82a89af6cfcbee47f0fabfdb6e63788a9258e913955d89fdd96902ab"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd829712de97753367153ed84f2de752b86cd1f7a88b55a3a775eb52eafe8a94"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00e871d83a45eee2f8688d7e6849609c2ca2a04a6d48fba3dff4deef35d14f07"}, + {file = "regex-2023.10.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:706e7b739fdd17cb89e1fbf712d9dc21311fc2333f6d435eac2d4ee81985098c"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:cc3f1c053b73f20c7ad88b0d1d23be7e7b3901229ce89f5000a8399746a6e039"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6f85739e80d13644b981a88f529d79c5bdf646b460ba190bffcaf6d57b2a9863"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:741ba2f511cc9626b7561a440f87d658aabb3d6b744a86a3c025f866b4d19e7f"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e77c90ab5997e85901da85131fd36acd0ed2221368199b65f0d11bca44549711"}, + {file = "regex-2023.10.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:979c24cbefaf2420c4e377ecd1f165ea08cc3d1fbb44bdc51bccbbf7c66a2cb4"}, + {file = "regex-2023.10.3-cp38-cp38-win32.whl", hash = "sha256:58837f9d221744d4c92d2cf7201c6acd19623b50c643b56992cbd2b745485d3d"}, + {file = "regex-2023.10.3-cp38-cp38-win_amd64.whl", hash = "sha256:c55853684fe08d4897c37dfc5faeff70607a5f1806c8be148f1695be4a63414b"}, + {file = "regex-2023.10.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2c54e23836650bdf2c18222c87f6f840d4943944146ca479858404fedeb9f9af"}, + {file = "regex-2023.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:69c0771ca5653c7d4b65203cbfc5e66db9375f1078689459fe196fe08b7b4930"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ac965a998e1388e6ff2e9781f499ad1eaa41e962a40d11c7823c9952c77123e"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c0e8fae5b27caa34177bdfa5a960c46ff2f78ee2d45c6db15ae3f64ecadde14"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c56c3d47da04f921b73ff9415fbaa939f684d47293f071aa9cbb13c94afc17d"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ef1e014eed78ab650bef9a6a9cbe50b052c0aebe553fb2881e0453717573f52"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d29338556a59423d9ff7b6eb0cb89ead2b0875e08fe522f3e068b955c3e7b59b"}, + {file = "regex-2023.10.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9c6d0ced3c06d0f183b73d3c5920727268d2201aa0fe6d55c60d68c792ff3588"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:994645a46c6a740ee8ce8df7911d4aee458d9b1bc5639bc968226763d07f00fa"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:66e2fe786ef28da2b28e222c89502b2af984858091675044d93cb50e6f46d7af"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:11175910f62b2b8c055f2b089e0fedd694fe2be3941b3e2633653bc51064c528"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:06e9abc0e4c9ab4779c74ad99c3fc10d3967d03114449acc2c2762ad4472b8ca"}, + {file = "regex-2023.10.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fb02e4257376ae25c6dd95a5aec377f9b18c09be6ebdefa7ad209b9137b73d48"}, + {file = "regex-2023.10.3-cp39-cp39-win32.whl", hash = "sha256:3b2c3502603fab52d7619b882c25a6850b766ebd1b18de3df23b2f939360e1bd"}, + {file = "regex-2023.10.3-cp39-cp39-win_amd64.whl", hash = "sha256:adbccd17dcaff65704c856bd29951c58a1bd4b2b0f8ad6b826dbd543fe740988"}, + {file = "regex-2023.10.3.tar.gz", hash = "sha256:3fef4f844d2290ee0ba57addcec17eec9e3df73f10a2748485dfd6a3a188cc0f"}, +] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rpds-py" +version = "0.15.2" +description = "Python bindings to Rust's persistent data structures (rpds)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "rpds_py-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:337a8653fb11d2fbe7157c961cc78cb3c161d98cf44410ace9a3dc2db4fad882"}, + {file = "rpds_py-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:813a65f95bfcb7c8f2a70dd6add9b51e9accc3bdb3e03d0ff7a9e6a2d3e174bf"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:082e0e55d73690ffb4da4352d1b5bbe1b5c6034eb9dc8c91aa2a3ee15f70d3e2"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5595c80dd03d7e6c6afb73f3594bf3379a7d79fa57164b591d012d4b71d6ac4c"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb10bb720348fe1647a94eb605accb9ef6a9b1875d8845f9e763d9d71a706387"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:53304cc14b1d94487d70086e1cb0cb4c29ec6da994d58ae84a4d7e78c6a6d04d"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d64a657de7aae8db2da60dc0c9e4638a0c3893b4d60101fd564a3362b2bfeb34"}, + {file = "rpds_py-0.15.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ee40206d1d6e95eaa2b7b919195e3689a5cf6ded730632de7f187f35a1b6052c"}, + {file = "rpds_py-0.15.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1607cda6129f815493a3c184492acb5ae4aa6ed61d3a1b3663aa9824ed26f7ac"}, + {file = "rpds_py-0.15.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f3e6e2e502c4043c52a99316d89dc49f416acda5b0c6886e0dd8ea7bb35859e8"}, + {file = "rpds_py-0.15.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:044f6f46d62444800402851afa3c3ae50141f12013060c1a3a0677e013310d6d"}, + {file = "rpds_py-0.15.2-cp310-none-win32.whl", hash = "sha256:c827a931c6b57f50f1bb5de400dcfb00bad8117e3753e80b96adb72d9d811514"}, + {file = "rpds_py-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3bbc89ce2a219662ea142f0abcf8d43f04a41d5b1880be17a794c39f0d609cb0"}, + {file = "rpds_py-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:1fd0f0b1ccd7d537b858a56355a250108df692102e08aa2036e1a094fd78b2dc"}, + {file = "rpds_py-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b414ef79f1f06fb90b5165db8aef77512c1a5e3ed1b4807da8476b7e2c853283"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c31272c674f725dfe0f343d73b0abe8c878c646967ec1c6106122faae1efc15b"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a6945c2d61c42bb7e818677f43638675b8c1c43e858b67a96df3eb2426a86c9d"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02744236ac1895d7be837878e707a5c35fb8edc5137602f253b63623d7ad5c8c"}, + 
{file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2181e86d4e1cdf49a7320cb72a36c45efcb7670d0a88f09fd2d3a7967c0540fd"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a8ff8e809da81363bffca2b965cb6e4bf6056b495fc3f078467d1f8266fe27f"}, + {file = "rpds_py-0.15.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:97532802f14d383f37d603a56e226909f825a83ff298dc1b6697de00d2243999"}, + {file = "rpds_py-0.15.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:13716e53627ad97babf72ac9e01cf9a7d4af2f75dd5ed7b323a7a9520e948282"}, + {file = "rpds_py-0.15.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2f1f295a5c28cfa74a7d48c95acc1c8a7acd49d7d9072040d4b694fe11cd7166"}, + {file = "rpds_py-0.15.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8ec464f20fe803ae00419bd1610934e3bda963aeba1e6181dfc9033dc7e8940c"}, + {file = "rpds_py-0.15.2-cp311-none-win32.whl", hash = "sha256:b61d5096e75fd71018b25da50b82dd70ec39b5e15bb2134daf7eb7bbbc103644"}, + {file = "rpds_py-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:9d41ebb471a6f064c0d1c873c4f7dded733d16ca5db7d551fb04ff3805d87802"}, + {file = "rpds_py-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:13ff62d3561a23c17341b4afc78e8fcfd799ab67c0b1ca32091d71383a98ba4b"}, + {file = "rpds_py-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b70b45a40ad0798b69748b34d508259ef2bdc84fb2aad4048bc7c9cafb68ddb3"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ecbba7efd82bd2a4bb88aab7f984eb5470991c1347bdd1f35fb34ea28dba6e"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9d38494a8d21c246c535b41ecdb2d562c4b933cf3d68de03e8bc43a0d41be652"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:13152dfe7d7c27c40df8b99ac6aab12b978b546716e99f67e8a67a1d441acbc3"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:164fcee32f15d04d61568c9cb0d919e37ff3195919cd604039ff3053ada0461b"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a5122b17a4faf5d7a6d91fa67b479736c0cacc7afe791ddebb7163a8550b799"}, + {file = "rpds_py-0.15.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:46b4f3d47d1033db569173be62365fbf7808c2bd3fb742314d251f130d90d44c"}, + {file = "rpds_py-0.15.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c61e42b4ceb9759727045765e87d51c1bb9f89987aca1fcc8a040232138cad1c"}, + {file = "rpds_py-0.15.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d2aa3ca9552f83b0b4fa6ca8c6ce08da6580f37e3e0ab7afac73a1cfdc230c0e"}, + {file = "rpds_py-0.15.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ec19e823b4ccd87bd69e990879acbce9e961fc7aebe150156b8f4418d4b27b7f"}, + {file = "rpds_py-0.15.2-cp312-none-win32.whl", hash = "sha256:afeabb382c1256a7477b739820bce7fe782bb807d82927102cee73e79b41b38b"}, + {file = "rpds_py-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:422b0901878a31ef167435c5ad46560362891816a76cc0d150683f3868a6f0d1"}, + {file = "rpds_py-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:baf744e5f9d5ee6531deea443be78b36ed1cd36c65a0b95ea4e8d69fa0102268"}, + {file = "rpds_py-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7e072f5da38d6428ba1fc1115d3cc0dae895df671cb04c70c019985e8c7606be"}, + {file = 
"rpds_py-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f138f550b83554f5b344d6be35d3ed59348510edc3cb96f75309db6e9bfe8210"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2a4cd924d0e2f4b1a68034abe4cadc73d69ad5f4cf02db6481c0d4d749f548f"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5eb05b654a41e0f81ab27a7c3e88b6590425eb3e934e1d533ecec5dc88a6ffff"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ee066a64f0d2ba45391cac15b3a70dcb549e968a117bd0500634754cfe0e5fc"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c51a899792ee2c696072791e56b2020caff58b275abecbc9ae0cb71af0645c95"}, + {file = "rpds_py-0.15.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac2ac84a4950d627d84b61f082eba61314373cfab4b3c264b62efab02ababe83"}, + {file = "rpds_py-0.15.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:62b292fff4739c6be89e6a0240c02bda5a9066a339d90ab191cf66e9fdbdc193"}, + {file = "rpds_py-0.15.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:98ee201a52a7f65608e5494518932e1473fd43535f12cade0a1b4ab32737fe28"}, + {file = "rpds_py-0.15.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3d40fb3ca22e3d40f494d577441b263026a3bd8c97ae6ce89b2d3c4b39ac9581"}, + {file = "rpds_py-0.15.2-cp38-none-win32.whl", hash = "sha256:30479a9f1fce47df56b07460b520f49fa2115ec2926d3b1303c85c81f8401ed1"}, + {file = "rpds_py-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:2df3d07a16a3bef0917b28cd564778fbb31f3ffa5b5e33584470e2d1b0f248f0"}, + {file = "rpds_py-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:56b51ba29a18e5f5810224bcf00747ad931c0716e3c09a76b4a1edd3d4aba71f"}, + {file = "rpds_py-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c11bc5814554b018f6c5d6ae0969e43766f81e995000b53a5d8c8057055e886"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2faa97212b0dc465afeedf49045cdd077f97be1188285e646a9f689cb5dfff9e"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:86c01299942b0f4b5b5f28c8701689181ad2eab852e65417172dbdd6c5b3ccc8"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd7d3608589072f63078b4063a6c536af832e76b0b3885f1bfe9e892abe6c207"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:938518a11780b39998179d07f31a4a468888123f9b00463842cd40f98191f4d3"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dccc623725d0b298f557d869a68496a2fd2a9e9c41107f234fa5f7a37d278ac"}, + {file = "rpds_py-0.15.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d46ee458452727a147d7897bb33886981ae1235775e05decae5d5d07f537695a"}, + {file = "rpds_py-0.15.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d9d7ebcd11ea76ba0feaae98485cd8e31467c3d7985210fab46983278214736b"}, + {file = "rpds_py-0.15.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8a5f574b92b3ee7d254e56d56e37ec0e1416acb1ae357c4956d76a1788dc58fb"}, + {file = "rpds_py-0.15.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3db0c998c92b909d7c90b66c965590d4f3cd86157176a6cf14aa1f867b77b889"}, + {file = "rpds_py-0.15.2-cp39-none-win32.whl", hash = "sha256:bbc7421cbd28b4316d1d017db338039a7943f945c6f2bb15e1439b14b5682d28"}, + 
{file = "rpds_py-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:1c24e30d720c0009b6fb2e1905b025da56103c70a8b31b99138e4ed1c2a6c5b0"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1e6fcd0a0f62f2997107f758bb372397b8d5fd5f39cc6dcb86f7cb98a2172d6c"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d800a8e2ac62db1b9ea5d6d1724f1a93c53907ca061de4d05ed94e8dfa79050c"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e09d017e3f4d9bd7d17a30d3f59e4d6d9ba2d2ced280eec2425e84112cf623f"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b88c3ab98556bc351b36d6208a6089de8c8db14a7f6e1f57f82a334bd2c18f0b"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f333bfe782a2d05a67cfaa0cc9cd68b36b39ee6acfe099f980541ed973a7093"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b629db53fe17e6ce478a969d30bd1d0e8b53238c46e3a9c9db39e8b65a9ef973"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:485fbdd23becb822804ed05622907ee5c8e8a5f43f6f43894a45f463b2217045"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:893e38d0f4319dfa70c0f36381a37cc418985c87b11d9784365b1fff4fa6973b"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8ffdeb7dbd0160d4e391e1f857477e4762d00aa2199c294eb95dfb9451aa1d9f"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:fc33267d58dfbb2361baed52668c5d8c15d24bc0372cecbb79fed77339b55e0d"}, + {file = "rpds_py-0.15.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:2e7e5633577b3bd56bf3af2ef6ae3778bbafb83743989d57f0e7edbf6c0980e4"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8b9650f92251fdef843e74fc252cdfd6e3c700157ad686eeb0c6d7fdb2d11652"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:07a2e1d78d382f7181789713cdf0c16edbad4fe14fe1d115526cb6f0eef0daa3"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03f9c5875515820633bd7709a25c3e60c1ea9ad1c5d4030ce8a8c203309c36fd"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:580182fa5b269c2981e9ce9764367cb4edc81982ce289208d4607c203f44ffde"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa1e626c524d2c7972c0f3a8a575d654a3a9c008370dc2a97e46abd0eaa749b9"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae9d83a81b09ce3a817e2cbb23aabc07f86a3abc664c613cd283ce7a03541e95"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9235be95662559141934fced8197de6fee8c58870f36756b0584424b6d708393"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a72e00826a2b032dda3eb25aa3e3579c6d6773d22d8446089a57a123481cc46c"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ab095edf1d840a6a6a4307e1a5b907a299a94e7b90e75436ee770b8c35d22a25"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = 
"sha256:3b79c63d29101cbaa53a517683557bb550462394fb91044cc5998dd2acff7340"}, + {file = "rpds_py-0.15.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:911e600e798374c0d86235e7ef19109cf865d1336942d398ff313375a25a93ba"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3cd61e759c4075510052d1eca5cddbd297fe1164efec14ef1fce3f09b974dfe4"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9d2ae79f31da5143e020a8d4fc74e1f0cbcb8011bdf97453c140aa616db51406"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e99d6510c8557510c220b865d966b105464740dcbebf9b79ecd4fbab30a13d9"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c43e1b89099279cc03eb1c725c5de12af6edcd2f78e2f8a022569efa639ada3"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7187bee72384b9cfedf09a29a3b2b6e8815cc64c095cdc8b5e6aec81e9fd5f"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3423007fc0661827e06f8a185a3792c73dda41f30f3421562f210cf0c9e49569"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2974e6dff38afafd5ccf8f41cb8fc94600b3f4fd9b0a98f6ece6e2219e3158d5"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:93c18a1696a8e0388ed84b024fe1a188a26ba999b61d1d9a371318cb89885a8c"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:c7cd0841a586b7105513a7c8c3d5c276f3adc762a072d81ef7fae80632afad1e"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:709dc11af2f74ba89c68b1592368c6edcbccdb0a06ba77eb28c8fe08bb6997da"}, + {file = "rpds_py-0.15.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:fc066395e6332da1e7525d605b4c96055669f8336600bef8ac569d5226a7c76f"}, + {file = "rpds_py-0.15.2.tar.gz", hash = "sha256:373b76eeb79e8c14f6d82cb1d4d5293f9e4059baec6c1b16dca7ad13b6131b39"}, +] + +[[package]] +name = "ruff" +version = "0.0.292" +description = "An extremely fast Python linter, written in Rust." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "ruff-0.0.292-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:02f29db018c9d474270c704e6c6b13b18ed0ecac82761e4fcf0faa3728430c96"}, + {file = "ruff-0.0.292-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:69654e564342f507edfa09ee6897883ca76e331d4bbc3676d8a8403838e9fade"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c3c91859a9b845c33778f11902e7b26440d64b9d5110edd4e4fa1726c41e0a4"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f4476f1243af2d8c29da5f235c13dca52177117935e1f9393f9d90f9833f69e4"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be8eb50eaf8648070b8e58ece8e69c9322d34afe367eec4210fdee9a555e4ca7"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:9889bac18a0c07018aac75ef6c1e6511d8411724d67cb879103b01758e110a81"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6bdfabd4334684a4418b99b3118793f2c13bb67bf1540a769d7816410402a205"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7c77c53bfcd75dbcd4d1f42d6cabf2485d2e1ee0678da850f08e1ab13081a8"}, + {file = "ruff-0.0.292-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e087b24d0d849c5c81516ec740bf4fd48bf363cfb104545464e0fca749b6af9"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f160b5ec26be32362d0774964e218f3fcf0a7da299f7e220ef45ae9e3e67101a"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ac153eee6dd4444501c4bb92bff866491d4bfb01ce26dd2fff7ca472c8df9ad0"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_i686.whl", hash = "sha256:87616771e72820800b8faea82edd858324b29bb99a920d6aa3d3949dd3f88fb0"}, + {file = "ruff-0.0.292-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b76deb3bdbea2ef97db286cf953488745dd6424c122d275f05836c53f62d4016"}, + {file = "ruff-0.0.292-py3-none-win32.whl", hash = "sha256:e854b05408f7a8033a027e4b1c7f9889563dd2aca545d13d06711e5c39c3d003"}, + {file = "ruff-0.0.292-py3-none-win_amd64.whl", hash = "sha256:f27282bedfd04d4c3492e5c3398360c9d86a295be00eccc63914438b4ac8a83c"}, + {file = "ruff-0.0.292-py3-none-win_arm64.whl", hash = "sha256:7f67a69c8f12fbc8daf6ae6d36705037bde315abf8b82b6e1f4c9e74eb750f68"}, + {file = "ruff-0.0.292.tar.gz", hash = "sha256:1093449e37dd1e9b813798f6ad70932b57cf614e5c2b5c51005bf67d55db33ac"}, +] + +[[package]] +name = "selenium" +version = "4.15.2" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "selenium-4.15.2-py3-none-any.whl", hash = "sha256:9e82cd1ac647fb73cf0d4a6e280284102aaa3c9d94f0fa6e6cc4b5db6a30afbf"}, + {file = "selenium-4.15.2.tar.gz", hash = "sha256:22eab5a1724c73d51b240a69ca702997b717eee4ba1f6065bf5d6b44dba01d48"}, +] + +[package.dependencies] +certifi = ">=2021.10.8" +trio = ">=0.17,<1.0" +trio-websocket = ">=0.9,<1.0" +urllib3 = {version = ">=1.26,<3", extras = ["socks"]} + +[[package]] +name = "sentry-sdk" +version = "1.34.0" +description = "Python client for Sentry (https://sentry.io)" +optional = false +python-versions = "*" +files = [ + {file = "sentry-sdk-1.34.0.tar.gz", hash = "sha256:e5d0d2b25931d88fa10986da59d941ac6037f742ab6ff2fce4143a27981d60c3"}, + {file = "sentry_sdk-1.34.0-py2.py3-none-any.whl", hash = 
"sha256:76dd087f38062ac6c1e30ed6feb533ee0037ff9e709974802db7b5dbf2e5db21"}, +] + +[package.dependencies] +certifi = "*" +urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} + +[package.extras] +aiohttp = ["aiohttp (>=3.5)"] +arq = ["arq (>=0.23)"] +asyncpg = ["asyncpg (>=0.23)"] +beam = ["apache-beam (>=2.12)"] +bottle = ["bottle (>=0.12.13)"] +celery = ["celery (>=3)"] +chalice = ["chalice (>=1.16.0)"] +clickhouse-driver = ["clickhouse-driver (>=0.2.0)"] +django = ["django (>=1.8)"] +falcon = ["falcon (>=1.4)"] +fastapi = ["fastapi (>=0.79.0)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] +grpcio = ["grpcio (>=1.21.1)"] +httpx = ["httpx (>=0.16.0)"] +huey = ["huey (>=2)"] +loguru = ["loguru (>=0.5)"] +opentelemetry = ["opentelemetry-distro (>=0.35b0)"] +opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] +pure-eval = ["asttokens", "executing", "pure-eval"] +pymongo = ["pymongo (>=3.1)"] +pyspark = ["pyspark (>=2.4.4)"] +quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] +rq = ["rq (>=0.6)"] +sanic = ["sanic (>=0.8)"] +sqlalchemy = ["sqlalchemy (>=1.2)"] +starlette = ["starlette (>=0.19.1)"] +starlite = ["starlite (>=1.48)"] +tornado = ["tornado (>=5)"] + +[[package]] +name = "setuptools" +version = "69.0.2" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"}, + {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version 
= "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +optional = false +python-versions = "*" +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + +[[package]] +name = "sounddevice" +version = "0.4.6" +description = "Play and Record Sound with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sounddevice-0.4.6-py3-none-any.whl", hash = "sha256:5de768ba6fe56ad2b5aaa2eea794b76b73e427961c95acad2ee2ed7f866a4b20"}, + {file = "sounddevice-0.4.6-py3-none-macosx_10_6_x86_64.macosx_10_6_universal2.whl", hash = "sha256:8b0b806c205dd3e3cd5a97262b2482624fd21db7d47083b887090148a08051c8"}, + {file = "sounddevice-0.4.6-py3-none-win32.whl", hash = "sha256:e3ba6e674ffa8f79a591d744a1d4ab922fe5bdfd4faf8b25069a08e051010b7b"}, + {file = "sounddevice-0.4.6-py3-none-win_amd64.whl", hash = "sha256:7830d4f8f8570f2e5552942f81d96999c5fcd9a0b682d6fc5d5c5529df23be2c"}, + {file = "sounddevice-0.4.6.tar.gz", hash = "sha256:3236b78f15f0415bdf006a620cef073d0c0522851d66f4a961ed6d8eb1482fe9"}, +] + +[package.dependencies] +CFFI = ">=1.0" + +[package.extras] +numpy = ["NumPy"] + +[[package]] +name = "soundfile" +version = "0.12.1" +description = "An audio library based on libsndfile, CFFI and NumPy" +optional = false +python-versions = "*" +files = [ + {file = "soundfile-0.12.1-py2.py3-none-any.whl", hash = "sha256:828a79c2e75abab5359f780c81dccd4953c45a2c4cd4f05ba3e233ddf984b882"}, + {file = "soundfile-0.12.1-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:d922be1563ce17a69582a352a86f28ed8c9f6a8bc951df63476ffc310c064bfa"}, + {file = "soundfile-0.12.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:bceaab5c4febb11ea0554566784bcf4bc2e3977b53946dda2b12804b4fe524a8"}, + {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2dc3685bed7187c072a46ab4ffddd38cef7de9ae5eb05c03df2ad569cf4dacbc"}, + {file = "soundfile-0.12.1-py2.py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:074247b771a181859d2bc1f98b5ebf6d5153d2c397b86ee9e29ba602a8dfe2a6"}, + {file = "soundfile-0.12.1-py2.py3-none-win32.whl", hash = "sha256:59dfd88c79b48f441bbf6994142a19ab1de3b9bb7c12863402c2bc621e49091a"}, + {file = "soundfile-0.12.1-py2.py3-none-win_amd64.whl", hash = "sha256:0d86924c00b62552b650ddd28af426e3ff2d4dc2e9047dae5b3d8452e0a49a77"}, + {file = "soundfile-0.12.1.tar.gz", hash = "sha256:e8e1017b2cf1dda767aef19d2fd9ee5ebe07e050d430f77a0a7c66ba08b8cdae"}, +] + +[package.dependencies] 
+cffi = ">=1.0" + +[package.extras] +numpy = ["numpy"] + +[[package]] +name = "termcolor" +version = "2.3.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.7" +files = [ + {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"}, + {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "tiktoken" +version = "0.4.0" +description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tiktoken-0.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:176cad7f053d2cc82ce7e2a7c883ccc6971840a4b5276740d0b732a2b2011f8a"}, + {file = "tiktoken-0.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:450d504892b3ac80207700266ee87c932df8efea54e05cefe8613edc963c1285"}, + {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d662de1e7986d129139faf15e6a6ee7665ee103440769b8dedf3e7ba6ac37f"}, + {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5727d852ead18b7927b8adf558a6f913a15c7766725b23dbe21d22e243041b28"}, + {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c06cd92b09eb0404cedce3702fa866bf0d00e399439dad3f10288ddc31045422"}, + {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9ec161e40ed44e4210d3b31e2ff426b4a55e8254f1023e5d2595cb60044f8ea6"}, + {file = "tiktoken-0.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:1e8fa13cf9889d2c928b9e258e9dbbbf88ab02016e4236aae76e3b4f82dd8288"}, + {file = "tiktoken-0.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb2341836b725c60d0ab3c84970b9b5f68d4b733a7bcb80fb25967e5addb9920"}, + {file = "tiktoken-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ca30367ad750ee7d42fe80079d3092bd35bb266be7882b79c3bd159b39a17b0"}, + {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dc3df19ddec79435bb2a94ee46f4b9560d0299c23520803d851008445671197"}, + {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d980fa066e962ef0f4dad0222e63a484c0c993c7a47c7dafda844ca5aded1f3"}, + {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:329f548a821a2f339adc9fbcfd9fc12602e4b3f8598df5593cfc09839e9ae5e4"}, + {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b1a038cee487931a5caaef0a2e8520e645508cde21717eacc9af3fbda097d8bb"}, + {file = "tiktoken-0.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:08efa59468dbe23ed038c28893e2a7158d8c211c3dd07f2bbc9a30e012512f1d"}, + {file = "tiktoken-0.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3020350685e009053829c1168703c346fb32c70c57d828ca3742558e94827a9"}, + {file = "tiktoken-0.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba16698c42aad8190e746cd82f6a06769ac7edd415d62ba027ea1d99d958ed93"}, + {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c15d9955cc18d0d7ffcc9c03dc51167aedae98542238b54a2e659bd25fe77ed"}, + {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64e1091c7103100d5e2c6ea706f0ec9cd6dc313e6fe7775ef777f40d8c20811e"}, + {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:e87751b54eb7bca580126353a9cf17a8a8eaadd44edaac0e01123e1513a33281"}, + {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e063b988b8ba8b66d6cc2026d937557437e79258095f52eaecfafb18a0a10c03"}, + {file = "tiktoken-0.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9c6dd439e878172dc163fced3bc7b19b9ab549c271b257599f55afc3a6a5edef"}, + {file = "tiktoken-0.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8d1d97f83697ff44466c6bef5d35b6bcdb51e0125829a9c0ed1e6e39fb9a08fb"}, + {file = "tiktoken-0.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b6bce7c68aa765f666474c7c11a7aebda3816b58ecafb209afa59c799b0dd2d"}, + {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a73286c35899ca51d8d764bc0b4d60838627ce193acb60cc88aea60bddec4fd"}, + {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0394967d2236a60fd0aacef26646b53636423cc9c70c32f7c5124ebe86f3093"}, + {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:dae2af6f03ecba5f679449fa66ed96585b2fa6accb7fd57d9649e9e398a94f44"}, + {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:55e251b1da3c293432179cf7c452cfa35562da286786be5a8b1ee3405c2b0dd2"}, + {file = "tiktoken-0.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:c835d0ee1f84a5aa04921717754eadbc0f0a56cf613f78dfc1cf9ad35f6c3fea"}, + {file = "tiktoken-0.4.0.tar.gz", hash = "sha256:59b20a819969735b48161ced9b92f05dc4519c17be4015cfb73b65270a243620"}, +] + +[package.dependencies] +regex = ">=2022.1.18" +requests = ">=2.26.0" + +[package.extras] +blobfile = ["blobfile (>=2)"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tqdm" +version = "4.66.1" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, + {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "trio" +version = "0.23.2" +description = "A friendly Python library for async concurrency and I/O" +optional = false +python-versions = ">=3.8" +files = [ + {file = "trio-0.23.2-py3-none-any.whl", hash = "sha256:5a0b566fa5d50cf231cfd6b08f3b03aa4179ff004b8f3144059587039e2b26d3"}, + {file = "trio-0.23.2.tar.gz", hash = "sha256:da1d35b9a2b17eb32cae2e763b16551f9aa6703634735024e32f325c9285069e"}, +] + +[package.dependencies] +attrs = ">=20.1.0" +cffi = {version = ">=1.14", markers = "os_name == \"nt\" and implementation_name != \"pypy\""} +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +idna = "*" +outcome = "*" +sniffio = ">=1.3.0" +sortedcontainers = "*" + +[[package]] +name = "trio-websocket" +version = "0.11.1" +description = "WebSocket library for Trio" +optional = false 
+python-versions = ">=3.7" +files = [ + {file = "trio-websocket-0.11.1.tar.gz", hash = "sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f"}, + {file = "trio_websocket-0.11.1-py3-none-any.whl", hash = "sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638"}, +] + +[package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +trio = ">=0.11" +wsproto = ">=0.14" + +[[package]] +name = "typing-extensions" +version = "4.8.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, + {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, +] + +[[package]] +name = "urllib3" +version = "2.1.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, + {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, +] + +[package.dependencies] +pysocks = {version = ">=1.5.6,<1.5.7 || >1.5.7,<2.0", optional = true, markers = "extra == \"socks\""} + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wcwidth" +version = "0.2.12" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.12-py2.py3-none-any.whl", hash = "sha256:f26ec43d96c8cbfed76a5075dac87680124fa84e0855195a6184da9c187f133c"}, + {file = "wcwidth-0.2.12.tar.gz", hash = "sha256:f01c104efdf57971bcb756f054dd58ddec5204dd15fa31d6503ea57947d97c02"}, +] + +[[package]] +name = "webdriver-manager" +version = "4.0.1" +description = "Library provides the way to automatically manage drivers for different browsers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "webdriver_manager-4.0.1-py2.py3-none-any.whl", hash = "sha256:d7970052295bb9cda2c1a24cf0b872dd2c41ababcc78f7b6b8dc37a41e979a7e"}, + {file = "webdriver_manager-4.0.1.tar.gz", hash = "sha256:25ec177c6a2ce9c02fb8046f1b2732701a9418d6a977967bb065d840a3175d87"}, +] + +[package.dependencies] +packaging = "*" +python-dotenv = "*" +requests = "*" + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + +[metadata] +lock-version = "2.0" +python-versions = "^3.10" +content-hash = "fad963a8412c0f9fc089a4852f2b7d0fc711bf096e10980cde6aa795bc5914c2" diff --git a/pyproject.toml b/pyproject.toml index d5628225a..7fd0ce20c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,47 @@ +[tool.poetry] +name = "mentat" +version = "0.1.0" +description = "" +authors = ["@bio_bootloader"] +readme = "README.md" + +[tool.poetry.dependencies] +python = "^3.10" 
+attrs = "^23.1.0" +backoff = "^2.2.1" +fire = "^0.5.0" +jinja2 = "^3.1.2" +jsonschema = ">=4.17.0" +numpy = "^1.26.0" +openai = "^1.3.0" +pillow = "^10.1.0" +prompt-toolkit = "^3.0.39" +Pygments = "^2.15.1" +pytest = "^7.4.0" +pytest-asyncio = "^0.21.1" +pytest-mock = "^3.11.1" +pytest-reportlog = "^0.4.0" +python-dotenv = "^1.0.0" +selenium = "4.15.2" +sentry-sdk = "1.34.0" +sounddevice = "0.4.6" +soundfile = "0.12.1" +termcolor = "2.3.0" +tiktoken = "0.4.0" +typing_extensions = "4.8.0" +tqdm = "4.66.1" +webdriver_manager = "4.0.1" + +[tool.poetry.group.dev.dependencies] +aiomultiprocess = "^0.9.0" +black = "^23.9.1" +gitpython = "^3.1.37" +isort = "^5.12.0" +pip-licenses = "^4.3.3" +pyright = "^1.1.339" +pytest-xdist = "^3.3.1" +ruff = "^0.0.292" + [tool.isort] profile = "black" known_first_party = "mentat" @@ -12,3 +56,7 @@ addopts = "--ignore=vscode/bundled --ignore=benchmark_repos" [tool.black] preview = "true" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index c05da428b..000000000 --- a/requirements.txt +++ /dev/null @@ -1,24 +0,0 @@ -attrs==23.1.0 -backoff==2.2.1 -fire==0.5.0 -jinja2==3.1.2 -jsonschema>=4.17.0 -numpy==1.26.0 -openai==1.3.0 -pillow==10.1.0 -prompt-toolkit==3.0.39 -Pygments==2.15.1 -pytest==7.4.0 -pytest-asyncio==0.21.1 -pytest-mock==3.11.1 -pytest-reportlog==0.4.0 -python-dotenv==1.0.0 -selenium==4.15.2 -sentry-sdk==1.34.0 -sounddevice==0.4.6 -soundfile==0.12.1 -termcolor==2.3.0 -tiktoken==0.4.0 -typing_extensions==4.8.0 -tqdm==4.66.1 -webdriver_manager==4.0.1 From 61ab014a8b08243db08124f5dd85c9553c3ea161 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Tue, 19 Dec 2023 07:49:12 -0500 Subject: [PATCH 02/24] Migrate packaging from setuptools to poetry The setup.py file has been removed as part of this migration, with all necessary information and dependencies now defined in the updated pyproject.toml file. This change enables more modern, standardized Python packaging and dependency management. 
--- pyproject.toml | 5 ++++- setup.py | 43 ------------------------------------------- 2 files changed, 4 insertions(+), 44 deletions(-) delete mode 100644 setup.py diff --git a/pyproject.toml b/pyproject.toml index 7fd0ce20c..4d1ba67b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,9 +2,12 @@ name = "mentat" version = "0.1.0" description = "" -authors = ["@bio_bootloader"] +authors = ["bio_bootloader "] readme = "README.md" +[tool.poetry.scripts] +mentat = 'mentat.terminal.client:run_cli' + [tool.poetry.dependencies] python = "^3.10" attrs = "^23.1.0" diff --git a/setup.py b/setup.py deleted file mode 100644 index 19dfe6fcf..000000000 --- a/setup.py +++ /dev/null @@ -1,43 +0,0 @@ -import os -from pathlib import Path - -import pkg_resources -from setuptools import find_packages, setup - -from mentat import __version__ - -readme_path = os.path.join(Path(__file__).parent, "README.md") -with open(readme_path, "r", encoding="utf-8") as f: - long_description = f.read() - - -setup( - name="mentat", - version=__version__, - python_requires=">=3.10", - packages=find_packages(include=["mentat", "mentat.*"]), - install_requires=[ - str(r) - for r in pkg_resources.parse_requirements( - open(os.path.join(os.path.dirname(__file__), "requirements.txt")) - ) - ], - entry_points={ - "console_scripts": [ - "mentat=mentat.terminal.client:run_cli", - ], - }, - description="AI coding assistant on your command line", - long_description=long_description, - long_description_content_type="text/markdown", - license="Apache-2.0", - include_package_data=True, - extras_require={ - "dev": [ - str(r) - for r in pkg_resources.parse_requirements( - open(os.path.join(os.path.dirname(__file__), "dev-requirements.txt")) - ) - ] - }, -) From 5bdafdd95167bd45ef15090b68315fa0919080fb Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Tue, 19 Dec 2023 08:05:28 -0500 Subject: [PATCH 03/24] Switch to Poetry for dependency management The project has been updated to use Poetry for managing dependencies in lieu of pip. This applies to the build, lint, test, benchmark, and release workflows. All pyright, pytest, black, isort, and other tests now run with poetry instead of python and pip commands. --- .github/workflows/benchmarks.yml | 3 +-- .github/workflows/lint_and_test.yml | 19 ++++++++----------- .github/workflows/release.yml | 7 +++---- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index adc85422f..3e0aca514 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -17,8 +17,7 @@ jobs: - name: Install dependencies run: | - python -m pip install . - pip install -r dev-requirements.txt + poetry install - name: Run and upload benchmarks run: ./scripts/run_and_upload_benchmarks.sh diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 39acde263..751f282fd 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -13,12 +13,11 @@ jobs: - uses: chartboost/ruff-action@v1 - name: Install dependencies run: | - python -m pip install . - pip install -r dev-requirements.txt + poetry install - name: black check - run: black --check --preview . + run: poetry run black --check --preview . - name: isort check - run: isort --profile black --check . + run: poetry run isort --profile black --check . 
build: runs-on: ${{ matrix.os }} @@ -46,14 +45,13 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - python -m pip install . - python -m pip install -r dev-requirements.txt + poetry install - name: Check types with pyright run: | - pyright + poetry run pyright - name: Test with pytest run: | - pytest + poetry run pytest - name: Can run Mentat # Unfortunately Github Actions Runners have trouble with prompt toolkit, so we can't do this on Windows. if: runner.os != 'Windows' @@ -73,8 +71,7 @@ jobs: python-version: 3.11 - name: Install dependencies run: | - python -m pip install . - pip install -r dev-requirements.txt + poetry install - name: Run license checking script run: | - python tests/license_check.py + poetry run tests/license_check.py diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 71054ffd1..a0e31d00d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,8 +24,7 @@ jobs: python-version: '3.10' - name: Install dependencies run: | - python -m pip install --upgrade pip - python -m pip install setuptools wheel twine + poetry install - name: Set Prod conf.ini run: | mv mentat/resources/conf/conf-prod.ini mentat/resources/conf/conf.ini @@ -45,7 +44,7 @@ jobs: TWINE_USERNAME: __token__ TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} run: | - python setup.py sdist bdist_wheel + poetry build twine upload dist/* - name: Brew Release # Since Homebrew automatically updates from PyPI, no need for us to run this @@ -55,4 +54,4 @@ jobs: with: token: ${{ secrets.TOKEN_FOR_BREW }} formula: mentat - tag: v${{ steps.version-number.outputs.group1 }} \ No newline at end of file + tag: v${{ steps.version-number.outputs.group1 }} From ed640f8289d285926405b6a4b2295e8950853161 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Tue, 19 Dec 2023 08:09:44 -0500 Subject: [PATCH 04/24] Update installation instructions in README Changed the package manager for installation from pip to poetry in the README file. This update reflects the switch to the more modern package manager, poetry. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 578d46e5d..f89acf2a9 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ git clone https://github.com/AbanteAI/mentat.git cd mentat # install with pip in editable mode: -pip install -e . +poetry install ``` ## Add your OpenAI API Key From ed014b1ea34badf667c6640d31c213d3c3114eb3 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Tue, 19 Dec 2023 08:11:49 -0500 Subject: [PATCH 05/24] Add poetry installation step in Github workflows The Github workflows for benchmarking, release, and linting/testing have been updated to include a step for installing poetry using pipx. This change ensures that poetry, a necessary dependency for this project, is properly installed during these workflow processes. 
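The same bootstrap can be reproduced outside CI; a minimal sketch, assuming pipx is already available on the PATH (as it is on GitHub's hosted runners):

```bash
pipx install poetry    # isolated, user-level install of the Poetry CLI
poetry install         # resolve and install the project's dependencies
poetry run pytest      # run tools inside the Poetry-managed environment
```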
--- .github/workflows/benchmarks.yml | 1 + .github/workflows/lint_and_test.yml | 3 +++ .github/workflows/release.yml | 1 + 3 files changed, 5 insertions(+) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 3e0aca514..12a3d5ebe 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -17,6 +17,7 @@ jobs: - name: Install dependencies run: | + pipx install poetry poetry install - name: Run and upload benchmarks diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 751f282fd..8548d9bf2 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -13,6 +13,7 @@ jobs: - uses: chartboost/ruff-action@v1 - name: Install dependencies run: | + pipx install poetry poetry install - name: black check run: poetry run black --check --preview . @@ -45,6 +46,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | + pipx install poetry poetry install - name: Check types with pyright run: | @@ -71,6 +73,7 @@ jobs: python-version: 3.11 - name: Install dependencies run: | + pipx install poetry poetry install - name: Run license checking script run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a0e31d00d..9529849aa 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -24,6 +24,7 @@ jobs: python-version: '3.10' - name: Install dependencies run: | + pipx install poetry poetry install - name: Set Prod conf.ini run: | From 7a77a246c74bdbdec07894013ab327b42f352729 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Tue, 19 Dec 2023 08:35:55 -0500 Subject: [PATCH 06/24] Update license check command in GitHub workflow This change modifies the run command under the license checking script step of the GitHub Action workflow. Specifically, it alters the way we execute the license_check.py script, using 'poetry run python' instead of 'poetry run' alone. --- .github/workflows/lint_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 8548d9bf2..241a4c4ec 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -77,4 +77,4 @@ jobs: poetry install - name: Run license checking script run: | - poetry run tests/license_check.py + poetry run python tests/license_check.py From c83c8fa83c3e0d8a69d8466a4f9e5e8a16f45d52 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Thu, 21 Dec 2023 15:47:23 -0500 Subject: [PATCH 07/24] Use poetry to run pytest in benchmark script The benchmark script has been updated to use 'poetry run' for executing pytest. 
This change ensures that pytest runs in the context of the poetry environment, appropriately respecting the project --- scripts/run_and_upload_benchmarks.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run_and_upload_benchmarks.sh b/scripts/run_and_upload_benchmarks.sh index 933b26552..74359f168 100755 --- a/scripts/run_and_upload_benchmarks.sh +++ b/scripts/run_and_upload_benchmarks.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -pytest -s tests/benchmarks/exercism_practice.py \ +poetry run pytest -s tests/benchmarks/exercism_practice.py \ --max_iterations 2 \ --max_workers 1 \ --max_benchmarks 4 \ From ad09fb48fdaa73d184c752cca231ede04a36d17c Mon Sep 17 00:00:00 2001 From: Greg L Date: Mon, 25 Dec 2023 14:35:47 -0500 Subject: [PATCH 08/24] Add markdown prompts and update file references This commit includes the addition of markdown formatted coding project prompts, as well as updating all file references in the Python files to reflect these changes. It also includes the installation of the "dataclasses-json" dependency. Lastly, the plain text prompt files were moved to a separate directory for better organization. --- mentat/agent_handler.py | 4 +- mentat/feature_filters/llm_feature_filter.py | 2 +- mentat/parsers/block_parser.py | 2 +- mentat/parsers/json_parser.py | 2 +- mentat/parsers/replacement_parser.py | 2 +- mentat/parsers/unified_diff_parser.py | 2 +- .../agent_command_selection_prompt.md | 27 ++ .../markdown/agent_file_selection_prompt.md | 23 ++ .../prompts/markdown/block_parser_prompt.md | 270 ++++++++++++++++++ .../markdown/feature_selection_prompt.md | 20 ++ .../prompts/markdown/json_parser_prompt.md | 89 ++++++ .../markdown/replacement_parser_prompt.md | 54 ++++ .../markdown/unified_diff_parser_prompt.md | 92 ++++++ .../agent_command_selection_prompt.txt | 0 .../agent_file_selection_prompt.txt | 0 .../{ => plain_text}/block_parser_prompt.txt | 0 .../feature_selection_prompt.txt | 0 .../{ => plain_text}/json_parser_prompt.txt | 0 .../replacement_parser_prompt.txt | 0 .../unified_diff_parser_prompt.txt | 0 poetry.lock | 54 +++- pyproject.toml | 1 + 22 files changed, 635 insertions(+), 9 deletions(-) create mode 100644 mentat/resources/prompts/markdown/agent_command_selection_prompt.md create mode 100644 mentat/resources/prompts/markdown/agent_file_selection_prompt.md create mode 100644 mentat/resources/prompts/markdown/block_parser_prompt.md create mode 100644 mentat/resources/prompts/markdown/feature_selection_prompt.md create mode 100644 mentat/resources/prompts/markdown/json_parser_prompt.md create mode 100644 mentat/resources/prompts/markdown/replacement_parser_prompt.md create mode 100644 mentat/resources/prompts/markdown/unified_diff_parser_prompt.md rename mentat/resources/prompts/{ => plain_text}/agent_command_selection_prompt.txt (100%) rename mentat/resources/prompts/{ => plain_text}/agent_file_selection_prompt.txt (100%) rename mentat/resources/prompts/{ => plain_text}/block_parser_prompt.txt (100%) rename mentat/resources/prompts/{ => plain_text}/feature_selection_prompt.txt (100%) rename mentat/resources/prompts/{ => plain_text}/json_parser_prompt.txt (100%) rename mentat/resources/prompts/{ => plain_text}/replacement_parser_prompt.txt (100%) rename mentat/resources/prompts/{ => plain_text}/unified_diff_parser_prompt.txt (100%) diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index 16edf69c7..e10f6cad5 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -16,8 +16,8 @@ from mentat.session_input import 
ask_yes_no, collect_user_input from mentat.transcripts import ModelMessage -agent_file_selection_prompt_path = Path("agent_file_selection_prompt.txt") -agent_command_prompt_path = Path("agent_command_selection_prompt.txt") +agent_file_selection_prompt_path = Path("markdown/agent_file_selection_prompt.md") +agent_command_prompt_path = Path("markdown/agent_command_selection_prompt.md") class AgentHandler: diff --git a/mentat/feature_filters/llm_feature_filter.py b/mentat/feature_filters/llm_feature_filter.py index 6419047a0..4b0b0d6dd 100644 --- a/mentat/feature_filters/llm_feature_filter.py +++ b/mentat/feature_filters/llm_feature_filter.py @@ -23,7 +23,7 @@ class LLMFeatureFilter(FeatureFilter): - feature_selection_prompt_path = Path("feature_selection_prompt.txt") + feature_selection_prompt_path = Path("markdown/feature_selection_prompt.md") def __init__( self, diff --git a/mentat/parsers/block_parser.py b/mentat/parsers/block_parser.py index b9f4e19e5..8d6cdcef1 100644 --- a/mentat/parsers/block_parser.py +++ b/mentat/parsers/block_parser.py @@ -14,7 +14,7 @@ from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT -block_parser_prompt_filename = Path("block_parser_prompt.txt") +block_parser_prompt_filename = Path("markdown/block_parser_prompt.md") class _BlockParserAction(Enum): diff --git a/mentat/parsers/json_parser.py b/mentat/parsers/json_parser.py index 3a1f84f2c..a70d40a45 100644 --- a/mentat/parsers/json_parser.py +++ b/mentat/parsers/json_parser.py @@ -19,7 +19,7 @@ from mentat.session_context import SESSION_CONTEXT from mentat.streaming_printer import StreamingPrinter -json_parser_prompt_filename = Path("json_parser_prompt.txt") +json_parser_prompt_filename = Path("markdown/json_parser_prompt.md") comment_schema = { "type": "object", diff --git a/mentat/parsers/replacement_parser.py b/mentat/parsers/replacement_parser.py index 6e852eeb2..93917d74c 100644 --- a/mentat/parsers/replacement_parser.py +++ b/mentat/parsers/replacement_parser.py @@ -10,7 +10,7 @@ from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT -replacement_parser_prompt_filename = Path("replacement_parser_prompt.txt") +replacement_parser_prompt_filename = Path("markdown/replacement_parser_prompt.md") class ReplacementParser(Parser): diff --git a/mentat/parsers/unified_diff_parser.py b/mentat/parsers/unified_diff_parser.py index 4ae605381..3a47696b3 100644 --- a/mentat/parsers/unified_diff_parser.py +++ b/mentat/parsers/unified_diff_parser.py @@ -16,7 +16,7 @@ from mentat.parsers.parser import Parser from mentat.prompts.prompts import read_prompt -unified_diff_parser_prompt_filename = Path("unified_diff_parser_prompt.txt") +unified_diff_parser_prompt_filename = Path("markdown/unified_diff_parser_prompt.md") class UnifiedDiffDelimiter(Enum): diff --git a/mentat/resources/prompts/markdown/agent_command_selection_prompt.md b/mentat/resources/prompts/markdown/agent_command_selection_prompt.md new file mode 100644 index 000000000..081582651 --- /dev/null +++ b/mentat/resources/prompts/markdown/agent_command_selection_prompt.md @@ -0,0 +1,27 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +- You are running autonomously to test your recent code changes. + +## Instructions +1. **Run Commands**: + - Use the following format for commands: `command_1 arg_1`, `command_2`, `command_3 arg_1 arg_2`. + - Commands should be listed as a new-line separated list. + +2. 
**View Output**: + - After running commands, review the output to adjust your changes accordingly. + +3. **File Selection**: + - Use only pre-selected files for testing. + - Avoid commands that test, lint, or run the entire project. + - Do not use files that may not exist. + - Prefer running files you edited or those that use the files you edited. + +4. **Linter Usage**: + - Run a linter to automatically lint the files you changed. + - Do not run a linter check; run the command that actively lints the file. + +5. **Restrictions**: + - Do not provide additional context to ensure correct parsing of your response. \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/agent_file_selection_prompt.md b/mentat/resources/prompts/markdown/agent_file_selection_prompt.md new file mode 100644 index 000000000..a6272fb1d --- /dev/null +++ b/mentat/resources/prompts/markdown/agent_file_selection_prompt.md @@ -0,0 +1,23 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +You are responsible for conducting smoke testing on a codebase. This involves identifying and using specific commands to lint, test, and run the code, with the aim of detecting any errors. + +### Identifying Commands +1. **Objective**: Find commands to lint, test, and run the code, detecting any errors. +2. **Examples for Python**: + - `pytest ` + - `pyright ` + - `python ` + +### Requesting Codebase Files +1. **Procedure**: Based on a provided map of the codebase, identify and request necessary files. +2. **File Request Format**: Use the specific format to request files. Examples: + - `path/to/file.json` + - `path/to/another/file.txt` + +### Important Notes +- **Avoid Additional Context**: Do not provide extra information beyond what is requested. +- **Single Opportunity**: You have only one chance to request the necessary files. \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/block_parser_prompt.md b/mentat/resources/prompts/markdown/block_parser_prompt.md new file mode 100644 index 000000000..d8cd824a3 --- /dev/null +++ b/mentat/resources/prompts/markdown/block_parser_prompt.md @@ -0,0 +1,270 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +- **Role**: Agent in an automated coding system. +- **Responsibilities**: Responding to user requests involving code modifications. + +### User Requests Include +- Adding new features. +- Updating existing code. +- Fixing bugs. +- Adding comments or docstrings. + +### Response Structure +1. **Summary of Planned Changes**: + - Begin with a brief summary of the changes you plan to implement. + +2. **Detailed List of Changes**: + - Include a structured list of all planned changes. + - Plan for necessary additions like imports. + +3. **Code Edit Format** + - Utilize edit types: insert, deletes, replacements, creating new files, deleting existing files, renaming existing files. + - Allow multi-line edits. + - Start each edit description with `@@start` and end with `@@end`. + - For delete or delete-file actions, use a JSON formatted section only. + - For insert, replace, create-file actions, include `@@code` followed by the code lines. + - Exclude edit description blocks for non-code changes (e.g., design ideas). + +### Edit Types Examples + +#### 1. Insert Object +Used to insert code into a file. +```text +@@start +{ + "file": "core/script.py", + "action": "insert", + "insert-after-line": 3, + "insert-before-line": 4 +} +@@code + if name == "Bob": + print("Nice to see you again!") +@@end +``` + +#### 2. 
Create File Object +Used to create a new file. +```text +@@start +{ + "file": "core/utils.py", + "action": "create-file" +} +@@code +def get_name(): + return input("Enter your name: ") +@@end +``` + +#### 3. Replace Object +Used to replace code in a file. +```text +@@start +{ + "file": "core/script.py", + "action": "replace", + "start-line": 10, + "end-line": 10 +} +@@code +def main(): + name = get_name() +@@end +``` + +#### 4. Delete Object +Used to delete code in a file. +```text +@@start +{ + "file": "core/script.py", + "action": "delete", + "start-line": 13, + "end-line": 13 +} +@@end +``` + +#### 5. Rename File Object +Used to rename files. +```text +@@start +{ + "file": "core/hello_world.py", + "action": "rename-file", + "name": "core/hello_again.py" +} +@@end +``` + +#### 6. Delete File Object +Used to delete files. +```text +@@start +{ + "file": "core/goodbye_world.py", + "action": "delete-file" +} +@@end +``` + +### Examples +Below are examples of how user-provided requests and agents final answers: + +#### Example 1: Specific Code Modification Request +##### User Request: +```text +Code Files: + +core/script.py +1: +2:def say_hello(name): +3: print(f"Hello {name}!") +4: +5: +6:def say_goodbye(): +7: print("Goodbye!") +8: +9: +10:def main(name): +11: say_hello(name) +12: say_goodbye() +13: print("Done!") +14: + +core/hello_world.py +1: +2:def hello_world(): +3: print("Hello, World!") +4: + +User Request: +After saying hello, if the user's name is "Bob", say "Nice to see you again!" on another line. +Add a function to get the user's name and use it in main instead of taking name as an argument. +The new function should be in a separate file called utils.py. Stop saying "Done!". Then, +rename hello_world.py to hello_again.py. Finally, delete the goodbye_world.py file. +``` +##### Final Response: +```text +I will make the modifications to script.py and create the new file, importing from it in script.py. + +Steps: +1. Modify `say_hello`, adding the case for Bob. +2. Create `utils.py` with a function to get the user's name. +3. Import the new function in `script.py`. +4. Modify main to use the new function instead of taking name as an argument. +5. Remove the line printing "Done!". +6. Rename the file `hello_world.py` to `hello_again.py` +7. 
Delete file `goodbye_world.py` + +@@start +{ + "file": "core/script.py", + "action": "insert", + "insert-after-line": 3, + "insert-before-line": 4 +} +@@code + if name == "Bob": + print("Nice to see you again!") +@@end +@@start +{ + "file": "core/utils.py", + "action": "create-file" +} +@@code +def get_name(): + return input("Enter your name: ") +@@end +@@start +{ + "file": "core/script.py", + "action": "insert", + "insert-after-line": 0, + "insert-before-line": 1 +} +@@code +from core.utils import get_name +@@end +@@start +{ + "file": "core/script.py", + "action": "replace", + "start-line": 10, + "end-line": 10 +} +@@code +def main(): + name = get_name() +@@end +@@start +{ + "file": "core/script.py", + "action": "delete", + "start-line": 13, + "end-line": 13 +} +@@end +@@start +{ + "file": "core/hello_world.py", + "action": "rename-file", + "name": "core/hello_again.py" +} +@@end +@@start +{ + "file": "core/goodbye_world.py", + "action": "delete-file" +} +@@end +``` + +#### Example 2: Function Import and Modification +##### User Request: +```text +Code Files: + +core/hello_world.py +1: +2:def hello_world(): +3: print("Hello, World!") +4: + +User Request: +Call say_goodbye after printing hello world +``` +##### Final Response: +```text +I will make the modifications to hello_world.py + +Steps: +1. Import the `say_goodbye` function in `hello_world.py` +2. Modify `hello_world.py`, adding a function call for `say_goodbye` + +@@start +{ + "file": "core/hello_world.py", + "action": "insert", + "insert-after-line": 0, + "insert-before-line": 1 +} +@@code +from core.script import say_goodbye +@@end +@@start +{ + "file": "core/hello_world.py", + "action": "insert", + "insert-after-line": 4, + "insert-before-line": 5 +} +@@code + say_goodbye() +@@end +``` \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/feature_selection_prompt.md b/mentat/resources/prompts/markdown/feature_selection_prompt.md new file mode 100644 index 000000000..559afda6d --- /dev/null +++ b/mentat/resources/prompts/markdown/feature_selection_prompt.md @@ -0,0 +1,20 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +Your role is to act as part of an automated coding system. Your task is to read a User Query, then identify and return relevant sections from the Code Files that address the query. The returned sections should be in a JSON-parsable format. + +## Compliance Guidelines + +1. **Understanding the User Query**: Fully comprehend the user's query to accurately select the necessary code sections. + +2. **Selection Criteria**: + - Choose files and specific lines that would be modified (edited, added, or deleted) in response to the query. + - If an 'Expected Edits' list is provided, include all lines affected by these edits. + +3. **Identification of Interacting Elements**: Identify variables and functions that interact with the chosen code. Include them in your selection if their behavior is critical for implementing the expected edits. + +4. **Merging Sections**: Combine nearby selected sections (less than 5 lines apart) into larger sections or entire files for better context. + +5. **JSON-Parsable Response**: Ensure the response format is JSON-parsable, following the schema "path:startline-endline". Example format: `"[mydir/file_a, mydir/file_b:10-34]"`. 
\ No newline at end of file diff --git a/mentat/resources/prompts/markdown/json_parser_prompt.md b/mentat/resources/prompts/markdown/json_parser_prompt.md new file mode 100644 index 000000000..cac842923 --- /dev/null +++ b/mentat/resources/prompts/markdown/json_parser_prompt.md @@ -0,0 +1,89 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +Agents are part of an automated coding system and must respond with valid JSON, adhering to a specific format. + +## Compliance Guidelines + +### General Instructions +- **Input**: User request, code file contents, and related information. +- **Output**: A JSON object with a field "content" containing a list of valid JSON objects. + +### Types of JSON Objects + +#### 1. Comment Object +Used to inform the user of planned changes. +```json +{ + "type": "comment", + "content": "Summary of planned changes." +} +``` + +#### 2. Edit Object +Replaces lines between specified start and end lines in a file. +```json +{ + "type": "edit", + "filename": "file_to_edit.py", + "starting-line": 2, + "ending-line": 4, + "content": "Replacement content" +} +``` + +#### 3. File Creation Object +Creates a new file. +```json +{ + "type": "creation", + "filename": "new_file.py" +} +``` + +#### 4. File Deletion Object +Deletes a specified file. +```json +{ + "type": "deletion", + "filename": "to_be_deleted.py" +} +``` + +#### 5. File Rename Object +Renames a specified file. +```json +{ + "type": "rename", + "filename": "original_name.py", + "new-filename": "new_name.py" +} +``` + +### Important Notes +- **Line Numbering**: The starting line is inclusive, and the ending line is exclusive. +- **Order of Fields**: Maintain the given order of fields in the response. + +### Example Response +Here's an example of how to format a response to a user request: + +```json +{ + "content": [ + { + "type": "comment", + "content": "Planned modification steps..." + }, + { + "type": "edit", + "filename": "file1.py", + "starting-line": x, + "ending-line": y, + "content": "Edit content" + }, + ... + ] +} +``` \ No newline at end of file diff --git a/mentat/resources/prompts/markdown/replacement_parser_prompt.md b/mentat/resources/prompts/markdown/replacement_parser_prompt.md new file mode 100644 index 000000000..51eef6c21 --- /dev/null +++ b/mentat/resources/prompts/markdown/replacement_parser_prompt.md @@ -0,0 +1,54 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +You are to act as part of an automated coding system, processing user requests for code modifications. Your response must be formatted precisely for programmatic parsing. + +## Compliance Guidelines +1. **Response Structure**: Organize your response in two distinct parts: a summary of planned changes and the changes in the required edit format. +2. **Summary of Changes**: Begin with a brief summary listing the changes you intend to make. +3. **Code Edit Format**: Follow the specific format for code edits, as detailed below. +4. **Code Edit Markers**: Use `@` to mark the beginning of a code edit. Include the file name and relevant line numbers or indicators for new, deleted, or renamed files. +5. **Inserting and Deleting Lines**: For inserting or deleting lines, adhere to the specified formats. +6. **Avoiding Duplication**: Ensure no duplication of existing lines. If inserting identical code, replace any duplicated lines. +7. **Import Statements**: Before writing an import statement, verify that it isn't already imported. +8. **Indentation**: Maintain correct indentation in your code changes. 
+ +## Edit Format Instructions: +- **Creating a New File**: `@ <file> +` +- **Deleting a File**: `@ <file> -` +- **Renaming a File**: `@ <file> <new_file>` +- **Replacing Code Section**: `@ <file> starting_line=<line> ending_line=<line>` (followed by the new code lines, ending with `@`). +- **Deleting Lines Without Adding New Ones**: Leave no lines between the starting `@` and the ending `@`. +- **Inserting Lines Without Deleting**: `@ <file> insert_line=<line>` (followed by the lines to insert, ending with `@`). + +## Example Task and Response +### User Request: +Replace the `hello_world` function with a `goodbye_world` function in `core/hello_world.py`. Insert a new line "Goodbye, name" after "Hello, name". Rename the file to `goodbye_world.py`. Create a new file `test.py` with the line "testing...". + +### Example Response: +**Summary of Changes**: +1. Replace `hello_world` function with `goodbye_world`. +2. Insert new "Goodbye, name" line. +3. Rename `hello_world.py` to `goodbye_world.py`. +4. Create `test.py` and add "testing..." line. + +**Code Edits**: +``` +@ core/hello_world.py starting_line=2 ending_line=4 +def goodbye_world(): + print("Goodbye, World!") +@ +@ core/hello_world.py starting_line=6 ending_line=7 + goodbye_world() +@ +@ core/hello_world.py insert_line=8 + print(f"Goodbye, {name}!") +@ +@ core/hello_world.py core/goodbye_world.py +@ core/test.py + +@ core/test.py insert_line=1 +print("testing...") +@ +``` \ No newline at end of file
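As a rough illustration of how an `@`-delimited replacement block like the one above can be consumed, here is a minimal parsing sketch. This is not the parser Mentat ships; the `ReplacementEdit` name is invented for the example and only the `starting_line=`/`ending_line=` case is handled:

```python
import re
from dataclasses import dataclass, field

# Hypothetical container for one parsed edit; illustrative only.
@dataclass
class ReplacementEdit:
    file: str
    starting_line: int
    ending_line: int
    lines: list[str] = field(default_factory=list)

HEADER = re.compile(r"^@ (\S+) starting_line=(\d+) ending_line=(\d+)$")

def parse_replacements(text: str) -> list[ReplacementEdit]:
    edits: list[ReplacementEdit] = []
    current: ReplacementEdit | None = None
    for line in text.splitlines():
        m = HEADER.match(line)
        if m:
            # a header opens a new edit block
            current = ReplacementEdit(m.group(1), int(m.group(2)), int(m.group(3)))
            edits.append(current)
        elif line == "@":
            current = None  # a bare `@` closes the open block
        elif current is not None:
            current.lines.append(line)  # body line belonging to the open edit
    return edits
```

A block with no lines between the header and the closing `@` parses to an edit with an empty body, which matches the "deleting lines without adding new ones" rule above.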
diff --git a/mentat/resources/prompts/markdown/unified_diff_parser_prompt.md b/mentat/resources/prompts/markdown/unified_diff_parser_prompt.md new file mode 100644 index 000000000..36df7a51f --- /dev/null +++ b/mentat/resources/prompts/markdown/unified_diff_parser_prompt.md @@ -0,0 +1,92 @@ +You are an Agent. + +# Instructions for Agent + +## Task Overview +You are to act as an automated coding system, processing user requests for code modifications and file management. Your output must follow a specific format for programmatic parsing. + +## Compliance Guidelines + +### Instruction Style +- Directly address the LLM Agent. +- Be precise and unambiguous. + +### Initial Prompt Structure +- Start with a concise statement of the task. + +### User Instructions Identification +- Clearly identify and interpret user instructions. + +### Markdown Formatting +- Use Markdown for clear organization. + +### Permitted Modifications +- Enhance clarity without altering intent. + +### Clear and Structured Output +- Ensure outputs are organized and easy to follow. + +### Conciseness and Relevance +- Maintain focus on the task. + +## Reformatted Instructions + +1. **Summarize Planned Changes**: Begin your response with a brief summary of the changes you will implement. + +2. **List of Changes**: + - Itemize the steps involved in the code modification and file management process. + +3. **Edit Format**: + - Use a git diff-like format for edits. + - Start edits with `--- <file>` and `+++ <file>` lines to indicate the file being edited. + - Use `--- /dev/null` for file creation and `+++ /dev/null` for file deletion. + - In the git diff section, prefix context lines with a space, deleted lines with a `-`, and added lines with a `+`. + - Separate different sections of code with `@@ @@` markers. + - Conclude the diff with a `@@ end @@` marker. + +4. **Context Requirement**: + - Always provide context for additions unless the file is empty. + - Context lines must match the lines in the file for acceptance. + +5. **Demonstration**: + - Provide an example user request and a corresponding example response following the above format. + +### Example User Request +User requests to modify `core/hello_world.py` by replacing the `hello_world` function with `goodbye_world`, adding a new line, renaming the file, and creating a new file `test.py`. + +### Example Response +- **Summary of Changes**: + - Rename `hello_world.py` to `goodbye_world.py`. + - Replace `hello_world` with `goodbye_world`. + - Add a new `Goodbye, name` line. + - Create `test.py` and add content. + - Delete `test.py`. + +- **Git Diff Format**: + - Edit `core/hello_world.py` and rename to `core/goodbye_world.py`. + - Show the creation and deletion of `test.py`. + +```diff +--- core/hello_world.py ++++ core/goodbye_world.py +@@ @@ +-def hello_world(): +- print("Hello, World!") ++def goodbye_world(): ++ print("Goodbye, World!") +@@ @@ + def main(name): +- hello_world() ++ goodbye_world() + print(f"Hello, {name}!") ++ print(f"Goodbye, {name}!") +@@ end @@ +--- /dev/null ++++ test.py +@@ @@ ++print("testing...") +@@ end @@ +--- test.py ++++ /dev/null +@@ end @@ +``` \ No newline at end of file diff --git a/mentat/resources/prompts/agent_command_selection_prompt.txt b/mentat/resources/prompts/plain_text/agent_command_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/agent_command_selection_prompt.txt rename to mentat/resources/prompts/plain_text/agent_command_selection_prompt.txt diff --git a/mentat/resources/prompts/agent_file_selection_prompt.txt b/mentat/resources/prompts/plain_text/agent_file_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/agent_file_selection_prompt.txt rename to mentat/resources/prompts/plain_text/agent_file_selection_prompt.txt diff --git a/mentat/resources/prompts/block_parser_prompt.txt b/mentat/resources/prompts/plain_text/block_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/block_parser_prompt.txt rename to mentat/resources/prompts/plain_text/block_parser_prompt.txt diff --git a/mentat/resources/prompts/feature_selection_prompt.txt b/mentat/resources/prompts/plain_text/feature_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/feature_selection_prompt.txt rename to mentat/resources/prompts/plain_text/feature_selection_prompt.txt diff --git a/mentat/resources/prompts/json_parser_prompt.txt b/mentat/resources/prompts/plain_text/json_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/json_parser_prompt.txt rename to mentat/resources/prompts/plain_text/json_parser_prompt.txt diff --git a/mentat/resources/prompts/replacement_parser_prompt.txt b/mentat/resources/prompts/plain_text/replacement_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/replacement_parser_prompt.txt rename to mentat/resources/prompts/plain_text/replacement_parser_prompt.txt diff --git a/mentat/resources/prompts/unified_diff_parser_prompt.txt b/mentat/resources/prompts/plain_text/unified_diff_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/unified_diff_parser_prompt.txt rename to mentat/resources/prompts/plain_text/unified_diff_parser_prompt.txt diff --git a/poetry.lock b/poetry.lock index 618c44749..622e5030b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. 
[[package]] name = "aiomultiprocess" @@ -318,6 +318,21 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "dataclasses-json" +version = "0.6.3" +description = "Easily serialize dataclasses to and from JSON." +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "dataclasses_json-0.6.3-py3-none-any.whl", hash = "sha256:4aeb343357997396f6bca1acae64e486c3a723d8f5c76301888abeccf0c45176"}, + {file = "dataclasses_json-0.6.3.tar.gz", hash = "sha256:35cb40aae824736fdf959801356641836365219cfe14caeb115c39136f775d2a"}, +] + +[package.dependencies] +marshmallow = ">=3.18.0,<4.0.0" +typing-inspect = ">=0.4.0,<1" + [[package]] name = "distro" version = "1.8.0" @@ -615,6 +630,26 @@ files = [ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, ] +[[package]] +name = "marshmallow" +version = "3.20.1" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.20.1-py3-none-any.whl", hash = "sha256:684939db93e80ad3561392f47be0230743131560a41c5110684c16e21ade0a5c"}, + {file = "marshmallow-3.20.1.tar.gz", hash = "sha256:5d2371bbe42000f2b3fb5eaa065224df7d8f8597bc19a1bbfa5bfe7fba8da889"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)", "pytest", "pytz", "simplejson", "tox"] +docs = ["alabaster (==0.7.13)", "autodocsumm (==0.2.11)", "sphinx (==7.0.1)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] +lint = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1758,6 +1793,21 @@ files = [ {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, ] +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + [[package]] name = "urllib3" version = "2.1.0" @@ -1821,4 +1871,4 @@ h11 = ">=0.9.0,<1" [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "fad963a8412c0f9fc089a4852f2b7d0fc711bf096e10980cde6aa795bc5914c2" +content-hash = "dd5d00a56644e15b6cfe755270cec2b5a08e6cb4d6cd2398bd4100d929a10816" diff --git a/pyproject.toml b/pyproject.toml index 4d1ba67b4..3be47b9e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ tiktoken = "0.4.0" typing_extensions = "4.8.0" tqdm = "4.66.1" webdriver_manager = "4.0.1" +dataclasses-json = "^0.6.3" [tool.poetry.group.dev.dependencies] aiomultiprocess = "^0.9.0"
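The `dataclasses-json` dependency added to `pyproject.toml` above provides mixin-based JSON (de)serialization for plain dataclasses. A minimal sketch of the round-trip it enables, loosely mirroring the `RunSettings` dataclass that the next patch introduces (paths simplified to strings here for illustration):

```python
from dataclasses import dataclass, field
from dataclasses_json import DataClassJsonMixin

# Illustrative shape only; the real class lives in mentat/config.py.
@dataclass
class RunSettings(DataClassJsonMixin):
    auto_context: bool = False
    auto_tokens: int = 8000
    file_exclude_glob_list: list[str] = field(default_factory=list)

settings = RunSettings(auto_context=True)
payload = settings.to_json()               # '{"auto_context": true, "auto_tokens": 8000, ...}'
restored = RunSettings.from_json(payload)  # back to an equal RunSettings instance
assert restored == settings
```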
stream.send(f"{prefix}Diff:", end=" ") - stream.send(self.diff_context.get_display_context(), color="green") + print(f"{prefix}Diff: [green]{self.diff_context.get_display_context()}[/green]") - if config.auto_context: - stream.send(f"{prefix}Auto-Context: Enabled") - stream.send(f"{prefix}Auto-Tokens: {config.auto_tokens}") + if config.run.auto_context: + print(f"{prefix}Auto-Context: [green]Enabled[/green]") + print(f"{prefix}Auto-Tokens: [green]{config.auto_tokens}[/green]") else: - stream.send(f"{prefix}Auto-Context: Disabled") + print(f"{prefix}Auto-Context: [red]Disabled[/red]") features = None if self.features: @@ -92,8 +92,7 @@ def display_context(self): _feat for _file in self.include_files.values() for _feat in _file ] else: - stream.send(f"{prefix}Included files: ", end="") - stream.send("None", color="yellow") + print(f"{prefix}Included files: [yellow]None[/green]") if features is not None: refs = get_consolidated_feature_refs(features) @@ -111,7 +110,7 @@ def _get_code_message_checksum( self, prompt: Optional[str] = None, max_tokens: Optional[int] = None ) -> str: session_context = SESSION_CONTEXT.get() - config = session_context.config + code_file_manager = session_context.code_file_manager if not self.features: @@ -128,7 +127,7 @@ def _get_code_message_checksum( features_checksum = sha256("".join(feature_file_checksums)) settings = { "prompt": prompt or "", - "auto_context": config.auto_context, + "auto_context": config.run.auto_context, "use_llm": self.use_llm, "diff": self.diff, "pr_diff": self.pr_diff, @@ -167,9 +166,7 @@ async def _get_code_message( expected_edits: Optional[list[str]] = None, loading_multiplier: float = 0.0, ) -> str: - session_context = SESSION_CONTEXT.get() - config = session_context.config - model = config.model + model = config.ai.model # Setup code message metadata code_message = list[str]() @@ -197,13 +194,13 @@ async def _get_code_message( auto_tokens = ( None if remaining_tokens is None - else min(remaining_tokens, config.auto_tokens + include_features_tokens) + else min(remaining_tokens, config.run.auto_tokens + include_features_tokens) ) if remaining_tokens is not None and remaining_tokens <= 0: self.features = [] return "" - elif not config.auto_context: + elif not config.run.auto_context: self.features = include_features if ( remaining_tokens is not None @@ -340,7 +337,7 @@ def include( [ *exclude_patterns, *self.ignore_patterns, - *session_context.config.file_exclude_glob_list, + *config.run.file_exclude_glob_list, ] ) for pattern in all_exclude_patterns: diff --git a/mentat/code_feature.py b/mentat/code_feature.py index b742c6fe3..e31cbb268 100644 --- a/mentat/code_feature.py +++ b/mentat/code_feature.py @@ -15,7 +15,8 @@ from mentat.interval import Interval, parse_intervals, split_intervals_from_path from mentat.llm_api_handler import count_tokens from mentat.session_context import SESSION_CONTEXT -from mentat.utils import get_relative_path, sha256 +from mentat.utils import get_relative_path, sha256, dd +from mentat.config import config MIN_INTERVAL_LINES = 10 @@ -172,8 +173,7 @@ def contains_line(self, line_number: int): def _get_code_message(self) -> list[str]: session_context = SESSION_CONTEXT.get() code_file_manager = session_context.code_file_manager - parser = session_context.config.parser - + parser = config.parser.parser code_message: list[str] = [] # We always want to give GPT posix paths diff --git a/mentat/config.py b/mentat/config.py index 45207fda8..a4192d210 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -1,23 +1,29 
diff --git a/mentat/config.py b/mentat/config.py index 45207fda8..a4192d210 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -1,23 +1,29 @@ from __future__ import annotations -import json -from argparse import ArgumentParser, Namespace -from json import JSONDecodeError +import os from pathlib import Path +import yaml +import shutil -import attr -from attr import converters, validators +from dataclasses import asdict from mentat.git_handler import get_git_root_for_path -from mentat.llm_api_handler import known_models -from mentat.parsers.parser import Parser from mentat.parsers.parser_map import parser_map -from mentat.session_context import SESSION_CONTEXT -from mentat.utils import mentat_dir_path +from mentat.parsers.block_parser import BlockParser +from mentat.utils import mentat_dir_path, dd +from dataclasses import dataclass, field +from dataclasses_json import DataClassJsonMixin +from typing import Optional, List, Tuple +from mentat.parsers.parser import Parser +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional -config_file_name = Path(".mentat_config.json") + +config_file_name = Path(".mentat_config.yaml") user_config_path = mentat_dir_path / config_file_name +APP_ROOT = Path.cwd() +MENTAT_ROOT = Path(__file__).parent +USER_MENTAT_ROOT = Path.home() / ".mentat" def int_or_none(s: str | None) -> int | None: if s is not None: @@ -28,206 +34,150 @@ def int_or_none(s: str | None) -> int | None: bool_autocomplete = ["True", "False"] -@attr.define -class Config: - _errors: list[str] = attr.field(factory=list) +@dataclass() +class RunSettings(DataClassJsonMixin): + file_exclude_glob_list: List[Path] = field(default_factory=list) + auto_context: bool = False + auto_tokens: int = 8000 - # Model specific settings - model: str = attr.field( - default="gpt-4-1106-preview", - metadata={"auto_completions": list(known_models.keys())}, - ) - feature_selection_model: str = attr.field( - default="gpt-4-1106-preview", - metadata={"auto_completions": list(known_models.keys())}, - ) - embedding_model: str = attr.field( - default="text-embedding-ada-002", - metadata={ - "auto_completions": [ - model.name for model in known_models.values() if model.embedding_model - ] - }, - ) - temperature: float = attr.field( - default=0.2, converter=float, validator=[validators.le(1), validators.ge(0)] - ) +@dataclass() +class AIModelSettings(DataClassJsonMixin): + model: str = "gpt-4-1106-preview" + feature_selection_model: str = "gpt-4-1106-preview" + embedding_model: str = "text-embedding-ada-002" + temperature: float = 0.2 - maximum_context: int | None = attr.field( - default=None, - metadata={ - "description": ( - "The maximum number of lines of context to include in the prompt. It is" - " inferred automatically for openai models but you can still set it to" - " save costs. It must be set for other models." - ), - }, - converter=int_or_none, - validator=validators.optional(validators.ge(0)), - ) - token_buffer: int = attr.field( - default=1000, - metadata={ - "description": ( - "The amount of tokens to always be reserved as a buffer for user and" - " model messages." - ), - }, - ) - parser: Parser = attr.field( # pyright: ignore - default="block", - metadata={ - "description": ( - "The format for the LLM to write code in. You probably don't want to" - " mess with this setting." - ), - "auto_completions": list(parser_map.keys()), - }, - converter=parser_map.get, # pyright: ignore - validator=validators.instance_of(Parser), # pyright: ignore - ) - no_parser_prompt: bool = attr.field( - default=False, - metadata={ - "description": ( - "Whether to include the parser prompt in the system message. This"
This" - " should only be set to true for fine tuned models" - ), - "auto_completions": bool_autocomplete, - }, - converter=converters.optional(converters.to_bool), + maximum_context: Optional[int] = None + token_buffer: int = 1000 + no_parser_prompt: bool = False + +@dataclass() +class UISettings(DataClassJsonMixin): + input_style: List[Tuple[str, str]] = field( + default_factory=lambda: [ + ["", "#9835bd"], + ["prompt", "#ffffff bold"], + ["continuation", "#ffffff bold"], + ] ) - # Context specific settings - file_exclude_glob_list: list[str] = attr.field( - factory=list, - metadata={"description": "List of glob patterns to exclude from context"}, +@dataclass() +class ParserSettings: + # The type of parser that should be ued + parser: Parser = BlockParser(), + parser_type: str = "block" + + +@dataclass() +class MentatConfig: + # Directory where the mentat is running + root = APP_ROOT + + run: RunSettings + ai: AIModelSettings + ui: UISettings + parser: ParserSettings + +def load_yaml(path: str) -> dict: + """Load the data from the YAML file.""" + with open(path, 'r') as file: + return yaml.safe_load(file) + +def merge_configs(original: dict[str, Any | None], new: dict[str, Any | None]) -> dict[str, Any | None]: + """Merge two dictionaries, with the second one overwriting the values in the first one.""" + original.update(new) # Update the original dict with the new one + return original # Return the merged dict + +def yaml_to_config(yaml_dict: dict): + """gets the allowed config settings from a YAML""" + + return { + "model": yaml_dict.get("model"), + "maximum_context": yaml_dict.get("maximum_context"), + "file_exclude_glob_list": yaml_dict.get("file_exclude_glob_list", []), + "input_style": yaml_dict.get("input_style"), + "format": yaml_dict.get("format") + } + +def init_config(): + """Initialize the configuration file if it doesn't exist.""" + default_conf_path = os.path.join(MENTAT_ROOT, 'resources', 'conf', '.mentatconf.yaml') + current_conf_path = os.path.join(APP_ROOT, '.mentatconf.yaml') + + if not os.path.exists(current_conf_path): + shutil.copy(default_conf_path, current_conf_path) + + +def load_settings(): + """Load the configuration from the `.mentatconf.yaml` file.""" + + current_conf_path = APP_ROOT / '.mentatconf.yaml' + user_conf_path = USER_MENTAT_ROOT / '.mentatconf.yaml' + git_root = get_git_root_for_path(APP_ROOT, raise_error=False) + + yaml_config = {} + + if user_conf_path.exists(): + yaml_dict = load_yaml(str(user_conf_path)) + user_config = yaml_to_config(yaml_dict) + yaml_config = merge_configs(yaml_config, user_config) + + if git_root is not None: + git_conf_path = Path(git_root) / '.mentatconf.yaml' + if git_conf_path.exists(): + yaml_dict = load_yaml(str(git_conf_path)) + git_config = yaml_to_config(yaml_dict) + yaml_config = merge_configs(yaml_config, git_config) + + if current_conf_path.exists(): + yaml_dict = load_yaml(str(current_conf_path)) + current_path_config = yaml_to_config(yaml_dict) + yaml_config = merge_configs(yaml_config, current_path_config) + + run_settings = RunSettings( + file_exclude_glob_list=[Path(p) for p in yaml_config.get("file_exclude_glob_list", [])] ) - auto_context: bool = attr.field( - default=False, - metadata={ - "description": "Automatically select code files to include in context.", - "abbreviation": "a", - "auto_completions": bool_autocomplete, - }, - converter=converters.optional(converters.to_bool), + + ui_settings = UISettings( + input_style=yaml_config.get("input_style", []) ) - auto_tokens: int = attr.field( - default=8000, 
- metadata={ - "description": "The number of tokens auto-context will add.", - }, - converter=int, + + ai_model_settings = AIModelSettings( + model=yaml_config.get("model", "gpt-4-1106-preview"), + feature_selection_model=yaml_config.get("model", "gpt-4-1106-preview"), + maximum_context=yaml_config.get("maximum_context", 16000) ) - # Only settable by config file - input_style: list[tuple[str, str]] = attr.field( - factory=lambda: [ - ["", "#9835bd"], - ["prompt", "#ffffff bold"], - ["continuation", "#ffffff bold"], - ], - metadata={ - "description": "Styling information for the terminal.", - "no_flag": True, - "no_midsession_change": True, - }, + parser_type = yaml_config.get("format", "block") + parser_settings = ParserSettings( + parser_type=parser_type, + parser=parser_map[parser_type] ) - @classmethod - def get_fields(cls) -> list[str]: - return [ - field.name for field in attr.fields(cls) if not field.name.startswith("_") - ] + return { + "run": run_settings, + "ai": ai_model_settings, + "ui": ui_settings, + "parser": parser_settings, + } + + +def update_config(**kwargs): + """Reload the configuration using the provided keyword arguments.""" + global config + if config is None: + return + + # setting the values from kwargs to the global config + for key, value in kwargs.items(): + setattr(config, key, value) + +def load_config() -> MentatConfig: + init_config() + settings = load_settings() + config = MentatConfig(**settings) + + return config + - @classmethod - def add_fields_to_argparse(cls, parser: ArgumentParser) -> None: - for field in attr.fields(cls): - if "no_flag" in field.metadata: - continue - name = [f"--{field.name.replace('_', '-')}"] - if "abbreviation" in field.metadata: - name.append(f"-{field.metadata['abbreviation'].replace('_', '-')}") - - arguments = { - "help": field.metadata.get("description", ""), - } - - if field.type == "bool": - if arguments.get("default", False): - arguments["action"] = "store_false" - else: - arguments["action"] = "store_true" - elif field.type == "int": - arguments["type"] = int - elif field.type == "float": - arguments["type"] = float - elif field.type == "list[str]": - arguments["nargs"] = "*" - - parser.add_argument(*name, **arguments) - - @classmethod - def create(cls, cwd: Path, args: Namespace | None = None) -> Config: - config = Config() - - # Each method overwrites the previous so they are in order of precedence - config.load_file(user_config_path) - git_root = get_git_root_for_path(cwd, raise_error=False) - if git_root is not None: - config.load_file(git_root / config_file_name) - config.load_file(cwd / config_file_name) - - if args is not None: - config.load_namespace(args) - - return config - - def load_namespace(self, args: Namespace) -> None: - for field in attr.fields(Config): - if field.name in args and field.name != "_errors": - value = getattr(args, field.name) - if value is not None and value != field.default: - try: - setattr(self, field.name, value) - except (ValueError, TypeError) as e: - self.error(f"Warning: Illegal value for {field}: {e}") - - def load_file(self, path: Path) -> None: - if path.exists(): - with open(path) as config_file: - try: - config = json.load(config_file) - except JSONDecodeError: - self.error( - f"Warning: Config {path} contains invalid json; ignoring user" - " configuration file" - ) - return - for field in config: - if hasattr(self, field): - try: - setattr(self, field, config[field]) - except (ValueError, TypeError) as e: - self.error( - f"Warning: Config {path} contains invalid value for" 
- f" setting: {field}\n{e}" - ) - else: - self.error( - f"Warning: Config {path} contains unrecognized setting: {field}" - ) - - def error(self, message: str) -> None: - self._errors.append(message) - try: - self.send_errors_to_stream() - except LookupError: - pass - - def send_errors_to_stream(self): - session_context = SESSION_CONTEXT.get() - stream = session_context.stream - for error in self._errors: - stream.send(error, color="light_yellow") - self._errors = [] +config = load_config() \ No newline at end of file diff --git a/mentat/conversation.py b/mentat/conversation.py index e91ea24f4..e485e2e39 100644 --- a/mentat/conversation.py +++ b/mentat/conversation.py @@ -28,7 +28,9 @@ from mentat.session_context import SESSION_CONTEXT from mentat.transcripts import ModelMessage, TranscriptMessage, UserMessage from mentat.utils import add_newline +from mentat.config import config +from rich import print class Conversation: def __init__(self): @@ -40,31 +42,28 @@ def __init__(self): async def display_token_count(self): session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config code_context = session_context.code_context llm_api_handler = session_context.llm_api_handler - if not await llm_api_handler.is_model_available(config.model): + if not await llm_api_handler.is_model_available(config.ai.model): raise MentatError( - f"Model {config.model} is not available. Please try again with a" + f"Model {config.ai.model} is not available. Please try again with a" " different model." ) - if "gpt-4" not in config.model: - stream.send( - "Warning: Mentat has only been tested on GPT-4. You may experience" + if "gpt-4" not in config.ai.model: + print( + "[yellow]Warning: Mentat has only been tested on GPT-4. You may experience" " issues with quality. This model may not be able to respond in" - " mentat's edit format.", - color="yellow", + " mentat's edit format.[/yellow]", ) - if "gpt-3.5" not in config.model: - stream.send( - "Warning: Mentat does not know how to calculate costs or context" - " size for this model.", - color="yellow", + if "gpt-3.5" not in config.ai.model: + print( + "[yellow]Warning: Mentat does not know how to calculate costs or context" + " size for this model.[/yellow]" ) - context_size = model_context_size(config.model) - maximum_context = config.maximum_context + context_size = model_context_size(config.ai.model) + maximum_context = config.ai.maximum_context if maximum_context: if context_size: context_size = min(context_size, maximum_context) @@ -85,7 +84,7 @@ async def display_token_count(self): ] tokens = prompt_tokens( messages, - config.model, + config.ai.model, ) context_size = get_max_tokens() @@ -94,7 +93,7 @@ async def display_token_count(self): f"Context size for {config.model} is not known. Please set" " maximum-context with `/config maximum_context value`." ) - if tokens + config.token_buffer > context_size: + if tokens + config.ai.token_buffer > context_size: _plural = len(code_context.include_files) > 1 _exceed = tokens > context_size message: dict[tuple[bool, bool], str] = { @@ -103,16 +102,10 @@ async def display_token_count(self): (True, False): "s are close to", (True, True): "s exceed", } - stream.send( - f"Included file{message[(_plural, _exceed)]} token limit" - f" ({tokens} / {context_size}). Truncating based on task similarity.", - color="yellow", - ) + print(f"[yellow]Included file{message[(_plural, _exceed)]} token limit \n ({tokens} / {context_size}). 
Truncating based on task similarity.[/yellow]") else: - stream.send( - f"Prompt and included files token count: {tokens} / {context_size}", - color="cyan", - ) + print( + f"[cyan]Prompt and included files token count: {tokens} / {context_size}[/cyan]") # The transcript logger logs tuples containing the actual message sent by the user or LLM # and (for LLM messages) the LLM conversation that led to that LLM response @@ -162,12 +155,10 @@ def get_messages( """Returns the messages in the conversation. The system message may change throughout the conversation so it is important to access the messages through this method. """ - session_context = SESSION_CONTEXT.get() - config = session_context.config - if config.no_parser_prompt or not include_system_prompt: + if config.ai.no_parser_prompt or not include_system_prompt: return self._messages.copy() else: - parser = config.parser + parser = config.parser.parser prompt = parser.get_system_prompt() prompt_message: ChatCompletionMessageParam = ( ChatCompletionSystemMessageParam( @@ -188,21 +179,19 @@ async def _stream_model_response( ): session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config - parser = config.parser + + parser = config.parser.parser llm_api_handler = session_context.llm_api_handler start_time = default_timer() - num_prompt_tokens = prompt_tokens(messages, config.model) - context_size = model_context_size(config.model) + num_prompt_tokens = prompt_tokens(messages, config.ai.model) + context_size = model_context_size(config.ai.model) if context_size: - if num_prompt_tokens > context_size - config.token_buffer: - stream.send( - f"Warning: {config.model} has a maximum context length of" - f" {context_size} tokens. Attempting to run anyway:", - color="yellow", - ) + if num_prompt_tokens > context_size - config.ai.token_buffer: + print( + f"[yellow]Warning: {config.ai.model} has a maximum context length of" + f" {context_size} tokens. Attempting to run anyway:[/yellow]") if loading_multiplier: stream.send( @@ -212,7 +201,7 @@ async def _stream_model_response( ) response = await llm_api_handler.call_llm_api( messages, - config.model, + config.ai.model, stream=True, response_format=parser.response_format(), ) @@ -224,8 +213,9 @@ async def _stream_model_response( terminate=True, ) - stream.send(f"Total token count: {num_prompt_tokens}", color="cyan") - stream.send("Streaming... use control-c to interrupt the model at any point\n") + print(f"[cyan]Total token count: {num_prompt_tokens}[/cyan]") + print("Streaming... 
use control-c to interrupt the model at any point\n") + async with parser.interrupt_catcher(): parsed_llm_response = await parser.stream_and_parse_llm_response( add_newline(response) @@ -237,16 +227,15 @@ async def get_model_response(self) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config code_context = session_context.code_context cost_tracker = session_context.cost_tracker messages_snapshot = self.get_messages() # Rebuild code context with active code and available tokens - tokens = prompt_tokens(messages_snapshot, config.model) + tokens = prompt_tokens(messages_snapshot, config.ai.model) - loading_multiplier = 1.0 if config.auto_context else 0.0 + loading_multiplier = 1.0 if config.run.auto_context else 0.0 prompt = messages_snapshot[-1]["content"] if isinstance(prompt, list): text_prompts = [ @@ -255,14 +244,10 @@ prompt = " ".join(text_prompts) max_tokens = get_max_tokens() if max_tokens is None: - stream.send( - f"Context size for {config.model} is not known. Please set" - " maximum-context with `/config maximum_context value`.", - color="light_red", - ) + print(f"[red]Context size for {config.ai.model} is not known. Please set maximum-context with `/config maximum_context value`.[/red]") return ParsedLLMResponse("", "", list[FileEdit]()) - if max_tokens - tokens < config.token_buffer: + if max_tokens - tokens < config.ai.token_buffer: if max_tokens - tokens < 0: stream.send( f"The context size is limited to {max_tokens} tokens and" @@ -287,7 +272,7 @@ if isinstance(prompt, str) else "" ), - max_tokens - tokens - config.token_buffer, + max_tokens - tokens - config.ai.token_buffer, loading_multiplier=0.5 * loading_multiplier, ) messages_snapshot.insert( @@ -315,7 +300,7 @@ except RateLimitError: stream.send( "Rate limit error received from OpenAI's servers using model" - f' {config.model}.\nUse "/config model <model>" to switch to a' + f' {config.ai.model}.\nUse "/config model <model>" to switch to a' " different model.", color="light_red", ) @@ -327,9 +312,9 @@ cost_tracker.log_api_call_stats( num_prompt_tokens, count_tokens( - parsed_llm_response.full_response, config.model, full_message=False + parsed_llm_response.full_response, config.ai.model, full_message=False ), - config.model, + config.ai.model, time_elapsed, display=True, ) @@ -343,26 +328,24 @@ return parsed_llm_response def remaining_context(self) -> int | None: - ctx = SESSION_CONTEXT.get() max_context = get_max_tokens() if max_context is None: return None - return max_context - prompt_tokens(self.get_messages(), ctx.config.model) + return max_context - prompt_tokens(self.get_messages(), config.ai.model) def can_add_to_context(self, message: str) -> bool: """ Whether or not the model has enough context remaining to add this message. Will take token buffer into account and uses full_message=True. 
""" - ctx = SESSION_CONTEXT.get() remaining_context = self.remaining_context() return ( remaining_context is not None and remaining_context - - count_tokens(message, ctx.config.model, full_message=True) - - ctx.config.token_buffer + - count_tokens(message, config.ai.model, full_message=True) + - config.ai.token_buffer > 0 ) diff --git a/mentat/git_handler.py b/mentat/git_handler.py index a033126d8..c175c7358 100644 --- a/mentat/git_handler.py +++ b/mentat/git_handler.py @@ -67,6 +67,8 @@ def get_paths_with_git_diffs(git_root: Path) -> set[Path]: def get_git_root_for_path(path: Path, raise_error: bool = True) -> Optional[Path]: + + if os.path.isdir(path): dir_path = path else: diff --git a/mentat/include_files.py b/mentat/include_files.py index 2f7ad8454..8655253f5 100644 --- a/mentat/include_files.py +++ b/mentat/include_files.py @@ -12,6 +12,7 @@ from mentat.interval import parse_intervals, split_intervals_from_path from mentat.session_context import SESSION_CONTEXT +from rich import print # TODO: replace this with something that doesn't load the file into memory def is_file_text_encoded(abs_path: Path): @@ -333,16 +334,18 @@ def print_path_tree( keys = list(tree.keys()) for i, key in enumerate(sorted(keys)): + cur = cur_path / key + star = "* " if cur in changed_files else "" + color = "green" if star else ("blue" if tree[key] else "default") + if i < len(keys) - 1: new_prefix = prefix + "│ " - stream.send(f"{prefix}├── ", end="") + # stream.send(f"{prefix}├── ", end="") + print(f"{prefix}├── [{color}]{star}{key}:[/{color}]") else: new_prefix = prefix + " " - stream.send(f"{prefix}└── ", end="") + # stream.send(f"{prefix}└── ", end="") + print(f"{prefix}└── [{color}]{star}{key}:[/{color}]") - cur = cur_path / key - star = "* " if cur in changed_files else "" - color = "green" if star else ("blue" if tree[key] else None) - stream.send(f"{star}{key}", color=color) if tree[key]: print_path_tree(tree[key], changed_files, cur, new_prefix) diff --git a/mentat/llm_api_handler.py b/mentat/llm_api_handler.py index 4947c554f..c43ab0c32 100644 --- a/mentat/llm_api_handler.py +++ b/mentat/llm_api_handler.py @@ -168,11 +168,9 @@ def model_price_per_1000_tokens(model: str) -> Optional[tuple[float, float]]: def get_max_tokens() -> Optional[int]: - session_context = SESSION_CONTEXT.get() - config = session_context.config - - context_size = model_context_size(config.model) - maximum_context = config.maximum_context + from mentat.config import config + context_size = model_context_size(config.ai.model) + maximum_context = config.ai.maximum_context if maximum_context is not None: if context_size: return min(context_size, maximum_context) @@ -230,8 +228,8 @@ async def call_llm_api( stream: bool, response_format: ResponseFormat = ResponseFormat(type="text"), ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: - session_context = SESSION_CONTEXT.get() - config = session_context.config + from mentat.config import config + with sentry_sdk.start_span(description="LLM Call") as span: span.set_tag("model", model) @@ -242,7 +240,7 @@ async def call_llm_api( response = await self.async_client.chat.completions.create( model=model, messages=messages, - temperature=config.temperature, + temperature=config.ai.temperature, stream=stream, max_tokens=4096, ) @@ -250,7 +248,7 @@ async def call_llm_api( response = await self.async_client.chat.completions.create( model=model, messages=messages, - temperature=config.temperature, + temperature=config.ai.temperature, stream=stream, response_format=response_format, ) diff 
--git a/mentat/resources/conf/.mentatconf.yaml b/mentat/resources/conf/.mentatconf.yaml new file mode 100644 index 000000000..358a8cd40 --- /dev/null +++ b/mentat/resources/conf/.mentatconf.yaml @@ -0,0 +1,29 @@ +# This field is for specifying the model name. You can find the list of valid options at https://platform.openai.com/docs/models/overview +model: gpt-4 + +# For models other than gpt-3.5 and gpt-4, the model's context size can't be inferred. +# In such cases, you need to specify the maximum context manually. +maximum_context: 16000 + +# This list contains glob patterns. Mentat uses these patterns to exclude certain files when provided with a directory argument. +# Mentat considers all files that do not match your .gitignore file and these patterns. +# Glob patterns are interpreted from the git root location, so if you want to exclude all .py files, use "**/*.py" instead of "*.py". +# This example excludes all hidden files and directories: +file_exclude_glob_list: + - "**/.*" + - "**/.*/**" + +# This section contains key-value pairs for defining a custom Pygment Style for the Mentat prompt. +input_style: + - - "" + - "#9835bd" + - - "prompt" + - "#ffffff bold" + - - "continuation" + - "#ffffff bold" + +# Mentat parses files following a specific format, which you can set here. +# Multiple formats are available, though the default one is expected to be the best fit for most cases. +# You can experiment with different formats as per your need. +# Available formats include: block, replacement, unified-diff. +format: block \ No newline at end of file diff --git a/mentat/session.py b/mentat/session.py index c0f938ba4..a14c04265 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -16,7 +16,7 @@ from mentat.code_context import CodeContext from mentat.code_edit_feedback import get_user_feedback_on_edits from mentat.code_file_manager import CodeFileManager -from mentat.config import Config +from mentat.config import config from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.ctags import ensure_ctags_installed @@ -39,6 +39,8 @@ class Session: A message will be sent on the client_exit channel when ready for client to quit. 
""" + _errors = [] + def __init__( self, cwd: Path, @@ -47,7 +49,6 @@ def __init__( ignore_paths: List[Path] = [], diff: Optional[str] = None, pr_diff: Optional[str] = None, - config: Config = Config(), ): # All errors thrown here need to be caught here self.stopped = False @@ -101,7 +102,7 @@ def __init__( # Functions that require session_context check_version() - config.send_errors_to_stream() + self.send_errors_to_stream() for path in paths: code_context.include(path, exclude_patterns=exclude_paths) @@ -126,7 +127,7 @@ async def _main(self): agent_handler = session_context.agent_handler # check early for ctags so we can fail fast - if session_context.config.auto_context: + if config.run.auto_context: ensure_ctags_installed() session_context.llm_api_handler.initialize_client() @@ -215,7 +216,8 @@ async def run_main(): with sentry_sdk.start_transaction( op="mentat_started", name="Mentat Started" ) as transaction: - transaction.set_tag("config", attr.asdict(ctx.config)) + #TODO: check if we need this as config should be gloabl now + #transaction.set_tag("config", attr.asdict(config)) await self._main() except (SessionExit, CancelledError): pass @@ -263,3 +265,10 @@ async def _stop(self): self.stream.send(None, channel="client_exit") await self.stream.join() self.stream.stop() + + def send_errors_to_stream(self): + session_context = SESSION_CONTEXT.get() + stream = session_context.stream + for error in self._errors: + stream.send(error, color="light_yellow") + self._errors = [] \ No newline at end of file diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 629653e89..f6c07195e 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -11,7 +11,7 @@ from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent from prompt_toolkit.styles import Style -from mentat.config import Config +from mentat.config import config, update_config from mentat.session import Session from mentat.session_stream import StreamMessageSource from mentat.terminal.loading import LoadingHandler @@ -19,6 +19,44 @@ from mentat.terminal.prompt_completer import MentatCompleter from mentat.terminal.prompt_session import MentatPromptSession +from typing import List +from pathlib import Path + +import anyio +import inspect +import typer +from functools import partial, wraps +from typer import Typer + +from mentat.utils import dd +from asyncio import run as aiorun + +class AsyncTyper(Typer): + @staticmethod + def maybe_run_async(decorator, f): + if inspect.iscoroutinefunction(f): + + @wraps(f) + def runner(*args, **kwargs): + return asyncio.run(f(*args, **kwargs)) + + decorator(runner) + else: + decorator(f) + return f + + def callback(self, *args, **kwargs): + decorator = super().callback(*args, **kwargs) + return partial(self.maybe_run_async, decorator) + + def command(self, *args, **kwargs): + decorator = super().command(*args, **kwargs) + return partial(self.maybe_run_async, decorator) + + +app = AsyncTyper() + + class TerminalClient: def __init__( @@ -28,8 +66,7 @@ def __init__( exclude_paths: List[str] = [], ignore_paths: List[str] = [], diff: str | None = None, - pr_diff: str | None = None, - config: Config = Config(), + pr_diff: str | None = None ): self.cwd = cwd self.paths = [Path(path) for path in paths] @@ -142,15 +179,14 @@ async def _run(self): self.exclude_paths, self.ignore_paths, self.diff, - self.pr_diff, - self.config, + self.pr_diff ) self.session.start() mentat_completer = MentatCompleter(self.session.stream) self._prompt_session = MentatPromptSession( 
 class TerminalClient: def __init__( @@ -28,8 +66,7 @@ def __init__( exclude_paths: List[str] = [], ignore_paths: List[str] = [], diff: str | None = None, - pr_diff: str | None = None, - config: Config = Config(), + pr_diff: str | None = None ): self.cwd = cwd self.paths = [Path(path) for path in paths] @@ -142,15 +179,14 @@ async def _run(self): self.exclude_paths, self.ignore_paths, self.diff, - self.pr_diff, - self.config, + self.pr_diff ) self.session.start() mentat_completer = MentatCompleter(self.session.stream) self._prompt_session = MentatPromptSession( completer=mentat_completer, - style=Style(self.config.input_style), + style=Style(config.ui.input_style), enable_suspend=True, ) @@ -166,7 +202,7 @@ def _(event: KeyPressEvent): self._plain_session = PromptSession[str]( message=[("class:prompt", ">>> ")], - style=Style(self.config.input_style), + style=Style(config.ui.input_style), completer=None, key_bindings=plain_bindings, enable_suspend=True, @@ -190,68 +226,37 @@ async def _shutdown(self): task.cancel() self._stopped.set() - def run(self): - asyncio.run(self._run()) - -def run_cli(): - parser = argparse.ArgumentParser( - description="Run conversation with command line args" - ) - parser.add_argument( - "paths", - nargs="*", - default=[], - help="List of file paths, directory paths, or glob patterns", - ) - parser.add_argument( - "--exclude", - "-e", - nargs="*", - default=[], - help="List of file paths, directory paths, or glob patterns to exclude", - ) - parser.add_argument( - "--ignore", - "-g", - nargs="*", - default=[], - help=( - "List of file paths, directory paths, or glob patterns to ignore in" - " auto-context" - ), - ) - parser.add_argument( - "--diff", - "-d", - nargs="?", - type=str, - default=None, - const="HEAD", - help="A git tree-ish (e.g. commit, branch, tag) to diff against", - ) - parser.add_argument( - "--pr-diff", - "-p", - type=str, - default=None, - help="A git tree-ish to diff against the latest common ancestor of", - ) - parser.add_argument( - "--cwd", default=Path.cwd(), help="The current working directory" - ) - - Config.add_fields_to_argparse(parser) - args = parser.parse_args() - - cwd = args.cwd - paths = args.paths - exclude_paths = args.exclude - ignore_paths = args.ignore - diff = args.diff - pr_diff = args.pr_diff - - config = Config.create(cwd, args) +@app.command() +async def async_hello(name: str, last_name: str = "") -> None: + await anyio.sleep(1) + typer.echo(f"Hello World {name} {last_name}") + + 
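With commands registered on `app`, the CLI can be exercised without a console entry point. A sketch using Typer's test runner (the paths and flags below are examples only, and `start` is the command defined next):

```python
from typer.testing import CliRunner

runner = CliRunner()
# drives the `start` command with example arguments
result = runner.invoke(app, ["start", "mentat/", "--diff", "HEAD"])
print(result.output)
```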
+@app.command() +def start(paths: List[str] = typer.Argument(...), + exclude_paths: List[str] = typer.Option([], "--exclude-paths", "-e", help="List of file paths, directory paths, or glob patterns to exclude"), + ignore_paths: List[str] = typer.Option([], "--ignore-paths", "-g", help="List of file paths, directory paths, or glob patterns to ignore in auto-context"), + diff: str = typer.Option(None, "--diff", "-d", show_default='HEAD', help="A git tree-ish (e.g. commit, branch, tag) to diff against"), + pr_diff: str = typer.Option(None, "--pr-diff", "-p", help="A git tree-ish to diff against the latest common ancestor of"), + cwd: Path = typer.Option(Path.cwd(), "--cwd", help="The current working directory")) -> None: + # Check if these variables are set and pass them to update_config function as kwargs + kwargs = {} + if paths: + kwargs["paths"] = paths + if exclude_paths: + kwargs["exclude"] = exclude_paths + if ignore_paths: + kwargs["ignore"] = ignore_paths + if diff: + kwargs["diff"] = diff + if pr_diff: + kwargs["pr_diff"] = pr_diff + if cwd: + kwargs["cwd"] = cwd + update_config(**kwargs) + + cwd = Path(cwd).expanduser().resolve() terminal_client = TerminalClient( cwd, @@ -259,7 +264,11 @@ paths, exclude_paths, ignore_paths, diff, - pr_diff, - config, + pr_diff ) - terminal_client.run() + asyncio.run(terminal_client._run()) + + + +if __name__ == "__main__": + app() \ No newline at end of file diff --git a/mentat/utils.py b/mentat/utils.py index 46c3d7f31..685993b36 100644 --- a/mentat/utils.py +++ b/mentat/utils.py @@ -17,6 +17,9 @@ from mentat import __version__ from mentat.session_context import SESSION_CONTEXT +import pprint +import sys + if TYPE_CHECKING: from mentat.transcripts import Transcript @@ -174,3 +177,37 @@ def get_relative_path(path: Path, target: Path) -> Path: relative_path = Path(*relative_parts) return relative_path + + +def dd(args): + """ + This method dd takes an argument args and performs the following operations: + + 1. Checks if any arguments are provided. If not, raises a ValueError with the message "No args provided". + + 2. Prints the argument args in a pretty format using pprint.pprint(). + + 3. Handles any exception that might occur, and prints the exception message. + + 4. Finally, exits the program using sys.exit(). + + Note: This method does not return any value. + + Example usage: + args = [1, 2, 3] + dd(args) + """ + try: + # Throw an exception if needed + if not args: + raise ValueError("No args provided") + + # Pretty print the argument + pprint.pprint(args) + + except Exception as e: + print(f"Exception occurred: {e}") + + finally: + # Exit the program + sys.exit() \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 622e5030b..58b613e7f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -561,6 +561,30 @@ files = [ [package.dependencies] referencing = ">=0.31.0" +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + [[package]] name = "markupsafe" version = "2.1.3" @@ -650,6 +674,17 @@ docs = ["alabaster (==0.7.13)", "autodocsumm (==0.2.11)", "sphinx (==7.0.1)", "s lint = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)"] tests = ["pytest", "pytz", "simplejson"] +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1224,6 +1259,65 @@ files = [ [package.extras] cli = ["click (>=5.0)"] +[[package]] +name = "pyyaml" +version = "6.0.1" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = 
"PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + [[package]] name = "referencing" version = "0.32.0" @@ -1357,6 +1451,24 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rich" +version = "13.7.0" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"}, + {file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + [[package]] name = "rpds-py" version = "0.15.2" @@ -1782,6 +1894,27 @@ exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} trio = ">=0.11" wsproto = ">=0.14" +[[package]] +name = "typer" +version = "0.9.0" +description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
+optional = false
+python-versions = ">=3.6"
+files = [
+    {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"},
+    {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"},
+]
+
+[package.dependencies]
+click = ">=7.1.1,<9.0.0"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"]
+doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"]
+test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+
 [[package]]
 name = "typing-extensions"
 version = "4.8.0"
@@ -1871,4 +2004,4 @@ h11 = ">=0.9.0,<1"
 
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "dd5d00a56644e15b6cfe755270cec2b5a08e6cb4d6cd2398bd4100d929a10816"
+content-hash = "dcf5c2cc18dc9f6239768f26060f311daed588b822dec3082662955ac3fc8506"
diff --git a/pyproject.toml b/pyproject.toml
index 3be47b9e9..3bcfd2c94 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ authors = ["bio_bootloader "]
 readme = "README.md"
 
 [tool.poetry.scripts]
-mentat = 'mentat.terminal.client:run_cli'
+mentat = 'mentat.terminal.client:app'
 
 [tool.poetry.dependencies]
 python = "^3.10"
@@ -35,6 +35,9 @@ typing_extensions = "4.8.0"
 tqdm = "4.66.1"
 webdriver_manager = "4.0.1"
 dataclasses-json = "^0.6.3"
+pyyaml = "^6.0.1"
+rich = "^13.7.0"
+typer = "^0.9.0"
 
 [tool.poetry.group.dev.dependencies]
 aiomultiprocess = "^0.9.0"

From c2a935e0b39ae46c44ccc4d8aecfc62f2ade016a Mon Sep 17 00:00:00 2001
From: Greg L
Date: Tue, 26 Dec 2023 20:47:41 -0500
Subject: [PATCH 10/24] Refactor prompt configuration and improve output formatting

The prompt path for each parser has been moved from a hardcoded value
inside the parser to centralized configuration in 'mentat/config.py',
which improves maintainability. Additionally, the use of the 'rich'
library has been expanded in 'session.py' and 'code_context.py' to
improve the readability and color-coding of messages.
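
For illustration, the pattern applied in the hunks below looks roughly
like this (a minimal sketch; the shared config object, the prompt
mapping, and read_prompt() are the ones introduced in this patch):

    from mentat.config import config
    from mentat.prompts.prompts import read_prompt

    def get_system_prompt() -> str:
        # The prompt path is looked up from the centralized config
        # mapping instead of a Path hardcoded in each parser module.
        prompt_path = config.ai.prompts.get("block_parser_prompt")
        return read_prompt(prompt_path)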
--- mentat/agent_handler.py | 5 ++- mentat/code_context.py | 22 ++++------- mentat/config.py | 39 ++++++++++++++++--- mentat/feature_filters/llm_feature_filter.py | 3 +- mentat/parsers/block_parser.py | 4 +- mentat/parsers/json_parser.py | 3 +- mentat/parsers/replacement_parser.py | 4 +- mentat/parsers/unified_diff_parser.py | 3 +- .../agent_command_selection_prompt.txt | 0 .../agent_file_selection_prompt.txt | 0 .../block_parser_prompt.txt | 0 .../feature_selection_prompt.txt | 0 .../json_parser_prompt.txt | 0 .../replacement_parser_prompt.txt | 0 .../unified_diff_parser_prompt.txt | 0 mentat/session.py | 8 ++-- 16 files changed, 58 insertions(+), 33 deletions(-) rename mentat/resources/prompts/{plain_text => text}/agent_command_selection_prompt.txt (100%) rename mentat/resources/prompts/{plain_text => text}/agent_file_selection_prompt.txt (100%) rename mentat/resources/prompts/{plain_text => text}/block_parser_prompt.txt (100%) rename mentat/resources/prompts/{plain_text => text}/feature_selection_prompt.txt (100%) rename mentat/resources/prompts/{plain_text => text}/json_parser_prompt.txt (100%) rename mentat/resources/prompts/{plain_text => text}/replacement_parser_prompt.txt (100%) rename mentat/resources/prompts/{plain_text => text}/unified_diff_parser_prompt.txt (100%) diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index e10f6cad5..6dcb83912 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -15,9 +15,10 @@ from mentat.session_context import SESSION_CONTEXT from mentat.session_input import ask_yes_no, collect_user_input from mentat.transcripts import ModelMessage +from mentat.config import config -agent_file_selection_prompt_path = Path("markdown/agent_file_selection_prompt.md") -agent_command_prompt_path = Path("markdown/agent_command_selection_prompt.md") +agent_file_selection_prompt_path = config.ai.prompts.get("agent_file_selection_prompt") +agent_command_prompt_path = config.ai.prompts.get("agent_command_selection_prompt") class AgentHandler: diff --git a/mentat/code_context.py b/mentat/code_context.py index 6cf05de79..12bd28546 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -66,12 +66,11 @@ def __init__( def display_context(self): """Display the baseline context: included files and auto-context settings""" session_context = SESSION_CONTEXT.get() - stream = session_context.stream print("[blue]Code Context:[/blue]") prefix = " " - stream.send(f"{prefix}Directory: {session_context.cwd}") + print(f"{prefix}Directory: {session_context.cwd}") if self.diff_context and self.diff_context.name: print(f"{prefix}Diff: [green]{self.diff_context.get_display_context()}[/green]") @@ -83,11 +82,11 @@ def display_context(self): features = None if self.features: - stream.send(f"{prefix}Active Features:") + print(f"{prefix}Active Features:") features = self.features elif self.include_files: - stream.send(f"{prefix}Included files:") - stream.send(f"{prefix + prefix}{session_context.cwd.name}") + print(f"{prefix}Included files:") + print(f"{prefix + prefix}{session_context.cwd.name}") features = [ _feat for _file in self.include_files.values() for _feat in _file ] @@ -354,7 +353,7 @@ def include( exclude_patterns=abs_exclude_patterns, ) except PathValidationError as e: - session_context.stream.send(str(e), color="light_red") + print(f"[red]{str(e)}[/red]") return included_paths for code_feature in code_features: @@ -382,20 +381,15 @@ def _exclude_file(self, path: Path) -> Path | None: del self.include_files[path] return path else: - 
session_context.stream.send(
-                f"Path {path} not in context", color="light_red"
-            )
+            print(f"[red]Path {path} not in context[/red]")
 
     def _exclude_file_interval(self, path: Path) -> Set[Path]:
-        session_context = SESSION_CONTEXT.get()
 
         excluded_paths: Set[Path] = set()
 
         interval_path, interval_str = split_intervals_from_path(path)
         if interval_path not in self.include_files:
-            session_context.stream.send(
-                f"Path {interval_path} not in context", color="light_red"
-            )
+            print(f"[red]Path {interval_path} not in context[/red]")
             return excluded_paths
 
         intervals = parse_intervals(interval_str)
@@ -478,7 +472,7 @@ def exclude(self, path: Path | str) -> Set[Path]:
             case PathType.GLOB:
                 excluded_paths.update(self._exclude_glob(validated_path))
         except PathValidationError as e:
-            session_context.stream.send(str(e), color="light_red")
+            print(f"[red]{str(e)}[/red]")
 
         return excluded_paths
 
diff --git a/mentat/config.py b/mentat/config.py
index a4192d210..d812b2549 100644
--- a/mentat/config.py
+++ b/mentat/config.py
@@ -5,17 +5,15 @@
 import yaml
 import shutil
 
-from dataclasses import asdict
-
 from mentat.git_handler import get_git_root_for_path
 from mentat.parsers.parser_map import parser_map
 from mentat.parsers.block_parser import BlockParser
-from mentat.utils import mentat_dir_path, dd
+from mentat.utils import mentat_dir_path
 from dataclasses import dataclass, field
 from dataclasses_json import DataClassJsonMixin
-from typing import Optional, List, Tuple
+from typing import Tuple
 from mentat.parsers.parser import Parser
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
+from typing import Any, Dict, List, Optional
 
 config_file_name = Path(".mentat_config.yaml")
@@ -45,6 +43,7 @@ class AIModelSettings(DataClassJsonMixin):
     model: str = "gpt-4-1106-preview"
     feature_selection_model: str = "gpt-4-1106-preview"
     embedding_model: str = "text-embedding-ada-002"
+    prompts: Optional[Dict[str, Path]] = None
     temperature: float = 0.2
 
     maximum_context: Optional[int] = None
@@ -93,6 +92,7 @@ def yaml_to_config(yaml_dict: dict):
 
     return {
         "model": yaml_dict.get("model"),
+        "prompt_type": yaml_dict.get("prompt_type", "text"),
         "maximum_context": yaml_dict.get("maximum_context"),
         "file_exclude_glob_list": yaml_dict.get("file_exclude_glob_list", []),
         "input_style": yaml_dict.get("input_style"),
@@ -108,6 +108,27 @@ def init_config():
         shutil.copy(default_conf_path, current_conf_path)
 
 
+def load_prompts(prompt_type: str):
+
+    if prompt_type == "markdown":
+        return {
+            "agent_file_selection_prompt": Path("markdown/agent_file_selection_prompt.md"),
+            "agent_command_selection_prompt": Path("markdown/agent_command_selection_prompt.md"),
+            "block_parser_prompt": Path("markdown/block_parser_prompt.md"),
+            "feature_selection_prompt": Path("markdown/feature_selection_prompt.md"),
+            "replacement_parser_prompt": Path("markdown/replacement_parser_prompt.md"),
+            "unified_diff_parser_prompt": Path("markdown/unified_diff_parser_prompt.md"),
+        }
+
+    return {
+        "agent_file_selection_prompt": Path("text/agent_file_selection_prompt.txt"),
+        "agent_command_prompt": Path("text/agent_command_selection_prompt.txt"),
+        "block_parser_prompt": Path("text/block_parser_prompt.txt"),
+        "feature_selection_prompt": Path("text/feature_selection_prompt.txt"),
+        "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"),
+        "unified_diff_parser_prompt": Path("text/unified_diff_parser_prompt.txt"),
+    }
+
 def load_settings():
     """Load the configuration from the `.mentatconf.yaml` file."""
 
@@ -134,8 +155,13 @@
current_path_config = yaml_to_config(yaml_dict) yaml_config = merge_configs(yaml_config, current_path_config) + file_exclude_glob_list = yaml_config.get("file_exclude_glob_list", []) + + #always ignore .mentatconf + file_exclude_glob_list.append(".mentatconf.yaml") + run_settings = RunSettings( - file_exclude_glob_list=[Path(p) for p in yaml_config.get("file_exclude_glob_list", [])] + file_exclude_glob_list=[Path(p) for p in file_exclude_glob_list] ) ui_settings = UISettings( @@ -144,6 +170,7 @@ def load_settings(): ai_model_settings = AIModelSettings( model=yaml_config.get("model", "gpt-4-1106-preview"), + prompts=load_prompts(yaml_config.get("prompt_type", "text")), feature_selection_model=yaml_config.get("model", "gpt-4-1106-preview"), maximum_context=yaml_config.get("maximum_context", 16000) ) diff --git a/mentat/feature_filters/llm_feature_filter.py b/mentat/feature_filters/llm_feature_filter.py index 4b0b0d6dd..eeb8aab9c 100644 --- a/mentat/feature_filters/llm_feature_filter.py +++ b/mentat/feature_filters/llm_feature_filter.py @@ -13,6 +13,7 @@ CodeMessageLevel, get_code_message_from_features, ) +from mentat.config import config from mentat.errors import ModelError, UserError from mentat.feature_filters.feature_filter import FeatureFilter from mentat.feature_filters.truncate_filter import TruncateFilter @@ -23,7 +24,7 @@ class LLMFeatureFilter(FeatureFilter): - feature_selection_prompt_path = Path("markdown/feature_selection_prompt.md") + feature_selection_prompt_path = config.ai.prompts.get("feature_selection_prompt") def __init__( self, diff --git a/mentat/parsers/block_parser.py b/mentat/parsers/block_parser.py index 8d6cdcef1..649216a97 100644 --- a/mentat/parsers/block_parser.py +++ b/mentat/parsers/block_parser.py @@ -14,8 +14,6 @@ from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT -block_parser_prompt_filename = Path("markdown/block_parser_prompt.md") - class _BlockParserAction(Enum): Insert = "insert" @@ -71,6 +69,8 @@ def __init__(self, json_data: dict[str, Any]): class BlockParser(Parser): @override def get_system_prompt(self) -> str: + from mentat.config import config + block_parser_prompt_filename = config.ai.prompts.get("block_parser_prompt") return read_prompt(block_parser_prompt_filename) @override diff --git a/mentat/parsers/json_parser.py b/mentat/parsers/json_parser.py index a70d40a45..cfd26bc41 100644 --- a/mentat/parsers/json_parser.py +++ b/mentat/parsers/json_parser.py @@ -19,7 +19,6 @@ from mentat.session_context import SESSION_CONTEXT from mentat.streaming_printer import StreamingPrinter -json_parser_prompt_filename = Path("markdown/json_parser_prompt.md") comment_schema = { "type": "object", @@ -84,6 +83,8 @@ class JsonParser(Parser): @override def get_system_prompt(self) -> str: + from mentat.config import config + json_parser_prompt_filename = config.ai.prompts.get("json_parser_prompt") return read_prompt(json_parser_prompt_filename) @override diff --git a/mentat/parsers/replacement_parser.py b/mentat/parsers/replacement_parser.py index 93917d74c..1adbcf68b 100644 --- a/mentat/parsers/replacement_parser.py +++ b/mentat/parsers/replacement_parser.py @@ -10,12 +10,12 @@ from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT -replacement_parser_prompt_filename = Path("markdown/replacement_parser_prompt.md") - class ReplacementParser(Parser): @override def get_system_prompt(self) -> str: + from mentat.config import config + replacement_parser_prompt_filename = 
config.ai.prompts.get("replacement_parser_prompt") return read_prompt(replacement_parser_prompt_filename) @override diff --git a/mentat/parsers/unified_diff_parser.py b/mentat/parsers/unified_diff_parser.py index 3a47696b3..bc0642850 100644 --- a/mentat/parsers/unified_diff_parser.py +++ b/mentat/parsers/unified_diff_parser.py @@ -16,7 +16,6 @@ from mentat.parsers.parser import Parser from mentat.prompts.prompts import read_prompt -unified_diff_parser_prompt_filename = Path("markdown/unified_diff_parser_prompt.md") class UnifiedDiffDelimiter(Enum): @@ -29,6 +28,8 @@ class UnifiedDiffDelimiter(Enum): class UnifiedDiffParser(Parser): @override def get_system_prompt(self) -> str: + from mentat.config import config + unified_diff_parser_prompt_filename = config.ai.prompts.get("unified_diff_parser_prompt") return read_prompt(unified_diff_parser_prompt_filename) @override diff --git a/mentat/resources/prompts/plain_text/agent_command_selection_prompt.txt b/mentat/resources/prompts/text/agent_command_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/plain_text/agent_command_selection_prompt.txt rename to mentat/resources/prompts/text/agent_command_selection_prompt.txt diff --git a/mentat/resources/prompts/plain_text/agent_file_selection_prompt.txt b/mentat/resources/prompts/text/agent_file_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/plain_text/agent_file_selection_prompt.txt rename to mentat/resources/prompts/text/agent_file_selection_prompt.txt diff --git a/mentat/resources/prompts/plain_text/block_parser_prompt.txt b/mentat/resources/prompts/text/block_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/plain_text/block_parser_prompt.txt rename to mentat/resources/prompts/text/block_parser_prompt.txt diff --git a/mentat/resources/prompts/plain_text/feature_selection_prompt.txt b/mentat/resources/prompts/text/feature_selection_prompt.txt similarity index 100% rename from mentat/resources/prompts/plain_text/feature_selection_prompt.txt rename to mentat/resources/prompts/text/feature_selection_prompt.txt diff --git a/mentat/resources/prompts/plain_text/json_parser_prompt.txt b/mentat/resources/prompts/text/json_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/plain_text/json_parser_prompt.txt rename to mentat/resources/prompts/text/json_parser_prompt.txt diff --git a/mentat/resources/prompts/plain_text/replacement_parser_prompt.txt b/mentat/resources/prompts/text/replacement_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/plain_text/replacement_parser_prompt.txt rename to mentat/resources/prompts/text/replacement_parser_prompt.txt diff --git a/mentat/resources/prompts/plain_text/unified_diff_parser_prompt.txt b/mentat/resources/prompts/text/unified_diff_parser_prompt.txt similarity index 100% rename from mentat/resources/prompts/plain_text/unified_diff_parser_prompt.txt rename to mentat/resources/prompts/text/unified_diff_parser_prompt.txt diff --git a/mentat/session.py b/mentat/session.py index a14c04265..5c6468f70 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -7,7 +7,7 @@ from typing import Any, Coroutine, List, Optional, Set from uuid import uuid4 -import attr +from rich import print import sentry_sdk from openai import APITimeoutError, BadRequestError, RateLimitError @@ -222,7 +222,7 @@ async def run_main(): except (SessionExit, CancelledError): pass except (MentatError, UserError) as e: - self.stream.send(str(e), color="red") + 
print(f"[red]{str(e)}[/red]") except Exception as e: # All unhandled exceptions end up here error = f"Unhandled Exception: {traceback.format_exc()}" @@ -230,7 +230,7 @@ async def run_main(): if is_test_environment(): print(error) sentry_sdk.capture_exception(e) - self.stream.send(error, color="red") + print(f"[red]{str(e)}[/red]") finally: await self._stop() sentry_sdk.flush() @@ -270,5 +270,5 @@ def send_errors_to_stream(self): session_context = SESSION_CONTEXT.get() stream = session_context.stream for error in self._errors: - stream.send(error, color="light_yellow") + print(f"[light_yellow3]{error}[/light_yellow3]") self._errors = [] \ No newline at end of file From eb9725ddc7de1f1cba6168290ce2511d1c8deb3d Mon Sep 17 00:00:00 2001 From: Greg L Date: Wed, 27 Dec 2023 07:36:08 -0500 Subject: [PATCH 11/24] Refactor code to use global config object The code has been refactored to use a global configuration object instead of a local one. This change standardizes how the config is accessed across multiple modules and simplifies the code by reducing redundant variable assignments. Along with this, color print formatting has been updated to use the 'rich' module's syntax. --- mentat/agent_handler.py | 31 +++++++++++-------------------- mentat/code_context.py | 20 +++++++++----------- mentat/config.py | 2 ++ mentat/session.py | 30 +++++++++++++----------------- 4 files changed, 35 insertions(+), 48 deletions(-) diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index 7cd6b3ce0..c204bcae1 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -15,6 +15,7 @@ from mentat.session_input import ask_yes_no, collect_user_input from mentat.transcripts import ModelMessage from mentat.config import config +from rich import print agent_file_selection_prompt_path = config.ai.prompts.get("agent_file_selection_prompt") agent_command_prompt_path = config.ai.prompts.get("agent_command_selection_prompt") @@ -39,9 +40,7 @@ def disable_agent_mode(self): async def enable_agent_mode(self): ctx = SESSION_CONTEXT.get() - ctx.stream.send( - "Finding files to determine how to test changes...", color="cyan" - ) + print(f"* [cyan]Finding files to determine how to test changes...[/cyan]") features = ctx.code_context.get_all_features(split_intervals=False) messages: List[ChatCompletionMessageParam] = [ ChatCompletionSystemMessageParam( @@ -66,11 +65,8 @@ async def enable_agent_mode(self): file_contents = "\n\n".join(ctx.code_file_manager.read_file(path)) self.agent_file_message += f"{path}\n\n{file_contents}" - ctx.stream.send( - "The model has chosen these files to help it determine how to test its" - " changes:", - color="cyan", - ) + print(f"[cyan]The model has chosen these files to help it determine how to test its changes:[/cyan]") + ctx.stream.send("\n".join(str(path) for path in paths)) ctx.cost_tracker.display_last_api_call() @@ -107,7 +103,7 @@ async def _determine_commands(self) -> List[str]: response = await ctx.llm_api_handler.call_llm_api(messages, model, False) ctx.cost_tracker.display_last_api_call() except BadRequestError as e: - ctx.stream.send(f"Error accessing OpenAI API: {e.message}", color="red") + print(f"[red]Error accessing OpenAI API: {e.message}[/red]") return [] content = response.choices[0].message.content or "" @@ -129,20 +125,15 @@ async def add_agent_context(self) -> bool: commands = await self._determine_commands() if not commands: return True - ctx.stream.send( - "The model has chosen these commands to test its changes:", color="cyan" - ) + print(f"[cyan]The model 
has chosen these commands to test its changes:[/cyan]") + for command in commands: - ctx.stream.send("* ", end="") - ctx.stream.send(command, color="light_yellow") - ctx.stream.send("Run these commands?", color="cyan") + print(f"* [yellow]{command}[/yellow]") + + print(f"* [cyan]Run these commands?[/cyan]") run_commands = await ask_yes_no(default_yes=True) if not run_commands: - ctx.stream.send( - "Enter a new-line separated list of commands to run, or nothing to" - " return control to the user:", - color="cyan", - ) + print(f"* [cyan]Enter a new-line separated list of commands to run, or nothing to return control to the user:[/cyan]") commands: list[str] = (await collect_user_input()).data.strip().splitlines() if not commands: return True diff --git a/mentat/code_context.py b/mentat/code_context.py index ec4d20798..82fd46f30 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -30,6 +30,7 @@ from mentat.llm_api_handler import count_tokens, get_max_tokens, is_context_sufficient from mentat.session_context import SESSION_CONTEXT from mentat.session_stream import SessionStream +from mentat.config import config class CodeContext: @@ -60,7 +61,6 @@ def display_context(self): """Display the baseline context: included files and auto-context settings""" session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config stream.send("Code Context:", color="blue") prefix = " " @@ -69,9 +69,9 @@ def display_context(self): stream.send(f"{prefix}Diff:", end=" ") stream.send(self.diff_context.get_display_context(), color="green") - if config.auto_context_tokens > 0: + if config.run.auto_context_tokens > 0: stream.send(f"{prefix}Auto-Context: Enabled") - stream.send(f"{prefix}Auto-Context Tokens: {config.auto_context_tokens}") + stream.send(f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}") else: stream.send(f"{prefix}Auto-Context: Disabled") @@ -120,9 +120,7 @@ async def get_code_message( 'prompt_tokens' argument is the total number of tokens used by the prompt before the code message, used to ensure that the code message won't overflow the model's context size """ - session_context = SESSION_CONTEXT.get() - config = session_context.config - model = config.model + model = config.ai.model # Setup code message metadata code_message = list[str]() @@ -151,14 +149,14 @@ async def get_code_message( ) tokens_used = ( - prompt_tokens + meta_tokens + include_files_tokens + config.token_buffer + prompt_tokens + meta_tokens + include_files_tokens + config.ai.token_buffer ) if not is_context_sufficient(tokens_used): raise ContextSizeInsufficient() - auto_tokens = min(get_max_tokens() - tokens_used, config.auto_context_tokens) + auto_tokens = min(get_max_tokens() - tokens_used, config.run.auto_context_tokens) # Get auto included features - if config.auto_context_tokens > 0 and prompt: + if config.run.auto_context_tokens > 0 and prompt: features = self.get_all_features() feature_filter = DefaultFilter( auto_tokens, @@ -190,7 +188,7 @@ def get_all_features( abs_exclude_patterns: Set[Path] = set() for pattern in self.ignore_patterns.union( - session_context.config.file_exclude_glob_list + config.run.file_exclude_glob_list ): if not Path(pattern).is_absolute(): abs_exclude_patterns.add(session_context.cwd / pattern) @@ -278,7 +276,7 @@ def include( [ *exclude_patterns, *self.ignore_patterns, - *session_context.config.file_exclude_glob_list, + *config.run.file_exclude_glob_list, ] ) for pattern in all_exclude_patterns: diff --git a/mentat/config.py 
b/mentat/config.py index d812b2549..d2bc93be6 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -37,6 +37,8 @@ class RunSettings(DataClassJsonMixin): file_exclude_glob_list: List[Path] = field(default_factory=list) auto_context: bool = False auto_tokens: int = 8000 + #Automatically selects code files for every request to include in context. Adds this many tokens to context each request. + auto_context_tokens: int = 0 @dataclass() class AIModelSettings(DataClassJsonMixin): diff --git a/mentat/session.py b/mentat/session.py index 219833c85..12aaaf591 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -16,11 +16,11 @@ from mentat.code_context import CodeContext from mentat.code_edit_feedback import get_user_feedback_on_edits from mentat.code_file_manager import CodeFileManager -from mentat.config import Config +from mentat.config import config from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.ctags import ensure_ctags_installed -from mentat.errors import MentatError, SessionExit, UserError +from mentat.errors import MentatError, SessionExit, UserError, ContextSizeInsufficient from mentat.git_handler import get_git_root_for_path from mentat.llm_api_handler import LlmApiHandler, is_test_environment from mentat.logging_config import setup_logging @@ -47,7 +47,6 @@ def __init__( ignore_paths: List[Path] = [], diff: Optional[str] = None, pr_diff: Optional[str] = None, - config: Config = Config(), ): # All errors thrown here need to be caught here self.stopped = False @@ -126,15 +125,14 @@ async def _main(self): agent_handler = session_context.agent_handler # check early for ctags so we can fail fast - if config.run.auto_context: - if session_context.config.auto_context_tokens > 0: + if config.run.auto_context_tokens > 0: ensure_ctags_installed() session_context.llm_api_handler.initialize_client() code_context.display_context() await conversation.display_token_count() - stream.send("Type 'q' or use Ctrl-C to quit at any time.") + print(f"Type 'q' or use Ctrl-C to quit at any time.") need_user_request = True while True: try: @@ -143,12 +141,9 @@ async def _main(self): # edits made between user input to be collected together. if agent_handler.agent_enabled: code_file_manager.history.push_edits() - stream.send( - "Use /undo to undo all changes from agent mode since last" - " input.", - color="green", - ) - stream.send("\nWhat can I do for you?", color="light_blue") + print(f"[green]Use /undo to undo all changes from agent mode since last input.[/green]") + + print(f"[blue]What can I do for you?[/blue]") message = await collect_input_with_commands() if message.data.strip() == "": continue @@ -174,10 +169,11 @@ async def _main(self): applied_edits = await code_file_manager.write_changes_to_files( file_edits ) - stream.send( - "Changes applied." 
if applied_edits else "No changes applied.", - color="light_blue", - ) + + if applied_edits: + print(f"[blue]Changes applied.[/blue]") + else: + print(f"[blue]No Changes applied.[/blue]") if agent_handler.agent_enabled: if parsed_llm_response.interrupted: @@ -193,7 +189,7 @@ async def _main(self): need_user_request = True continue except (APITimeoutError, RateLimitError, BadRequestError) as e: - stream.send(f"Error accessing OpenAI API: {e.message}", color="red") + print(f"[red]Error accessing OpenAI API: {e.message}[/red]") break async def listen_for_session_exit(self): From f8da6ba668315edc8b9f301f99d1364c720e5ab2 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Wed, 27 Dec 2023 11:22:49 -0500 Subject: [PATCH 12/24] Refactor config import and usage across multiple files This commit removes 'config' import from 'mentat/session_context.py' and adds it directly in other files where it's used, refactoring relevant lines accordingly. This change cleans up the codebase and makes config usage more immediate and intuitive. Additionally, minor code adjustments were made in several other files for consistency and readability. --- mentat/agent_handler.py | 3 +- mentat/code_feature.py | 3 +- mentat/config.py | 4 +- mentat/feature_filters/llm_feature_filter.py | 2 - mentat/llm_api_handler.py | 11 +++--- mentat/resources/conf/.mentatconf.yaml | 5 ++- mentat/session.py | 41 ++++++++++++-------- mentat/session_context.py | 2 - 8 files changed, 41 insertions(+), 30 deletions(-) diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index c204bcae1..ae5ac5e3c 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -17,10 +17,11 @@ from mentat.config import config from rich import print +from mentat.utils import dd + agent_file_selection_prompt_path = config.ai.prompts.get("agent_file_selection_prompt") agent_command_prompt_path = config.ai.prompts.get("agent_command_selection_prompt") - class AgentHandler: def __init__(self): self._agent_enabled = False diff --git a/mentat/code_feature.py b/mentat/code_feature.py index 93f544197..74f8819ab 100644 --- a/mentat/code_feature.py +++ b/mentat/code_feature.py @@ -16,6 +16,7 @@ from mentat.llm_api_handler import count_tokens from mentat.session_context import SESSION_CONTEXT from mentat.utils import get_relative_path +from mentat.config import config MIN_INTERVAL_LINES = 10 @@ -130,7 +131,7 @@ def get_code_message(self, standalone: bool = True) -> list[str]: """ session_context = SESSION_CONTEXT.get() code_file_manager = session_context.code_file_manager - parser = session_context.config.parser + parser = config.parser.parser code_context = session_context.code_context code_message: list[str] = [] diff --git a/mentat/config.py b/mentat/config.py index d2bc93be6..7d15ffbd0 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -124,7 +124,7 @@ def load_prompts(prompt_type: str): return { "agent_file_selection_prompt": Path("text/agent_file_selection_prompt.txt"), - "agent_command_prompt": Path("text/agent_command_selection_prompt.txt"), + "agent_command_selection_prompt": Path("text/agent_command_selection_prompt.txt"), "block_parser_prompt": Path("text/block_parser_prompt.txt"), "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), @@ -209,4 +209,4 @@ def load_config() -> MentatConfig: return config -config = load_config() \ No newline at end of file +config = load_config() diff --git a/mentat/feature_filters/llm_feature_filter.py 
index a52e0101c..547b93ae1 100644
--- a/mentat/feature_filters/llm_feature_filter.py
+++ b/mentat/feature_filters/llm_feature_filter.py
@@ -9,10 +9,8 @@
     ChatCompletionSystemMessageParam,
 )
 
-from mentat.code_feature import CodeFeature, get_code_message_from_features
 from mentat.code_feature import (
     CodeFeature,
-    CodeMessageLevel,
     get_code_message_from_features,
 )
 from mentat.config import config
diff --git a/mentat/llm_api_handler.py b/mentat/llm_api_handler.py
index 081fb0d26..c37e309da 100644
--- a/mentat/llm_api_handler.py
+++ b/mentat/llm_api_handler.py
@@ -189,12 +189,12 @@ def model_price_per_1000_tokens(model: str) -> Optional[tuple[float, float]]:
 
 def get_max_tokens() -> int:
+    from mentat.config import config
     session_context = SESSION_CONTEXT.get()
     stream = session_context.stream
-    config = session_context.config
 
-    context_size = model_context_size(config.model)
-    maximum_context = config.maximum_context
+    context_size = model_context_size(config.ai.model)
+    maximum_context = config.ai.maximum_context
 
     if context_size is not None and maximum_context is not None:
         return min(context_size, maximum_context)
@@ -204,7 +204,7 @@ def get_max_tokens() -> int:
         return maximum_context
     else:
         stream.send(
-            f"Context size for {config.model} is not known. Please set"
+            f"Context size for {config.ai.model} is not known. Please set"
             " maximum-context with `/config maximum_context <value>`.",
             color="light_red",
         )
@@ -212,10 +212,11 @@
 
 def is_context_sufficient(tokens: int) -> bool:
+    from mentat.config import config
     ctx = SESSION_CONTEXT.get()
 
     max_tokens = get_max_tokens()
-    if max_tokens - tokens < ctx.config.token_buffer:
+    if max_tokens - tokens < config.ai.token_buffer:
         ctx.stream.send(
             f"The context size is limited to {max_tokens} tokens and your current"
             f" request uses {tokens} tokens. Please use `/exclude` to remove"
diff --git a/mentat/resources/conf/.mentatconf.yaml b/mentat/resources/conf/.mentatconf.yaml
index 358a8cd40..3216c384a 100644
--- a/mentat/resources/conf/.mentatconf.yaml
+++ b/mentat/resources/conf/.mentatconf.yaml
@@ -5,6 +5,9 @@ model: gpt-4
 # In such cases, you need to specify the maximum context manually.
 maximum_context: 16000
 
+# The type of prompts the agent should use; options are text and markdown.
+prompt_type: markdown
+
 # This list contains glob patterns. Mentat uses these patterns to exclude certain files when provided with a directory argument.
 # Mentat considers all files that do not match your .gitignore file and these patterns.
 # Glob patterns are interpreted from the git root location, so if you want to exclude all .py files, use "**/*.py" instead of "*.py".
@@ -26,4 +29,4 @@ input_style:
 # Multiple formats are available, though the default one is expected to be the best fit for most cases.
 # You can experiment with different formats as per your need.
 # Available formats include: block, replacement, unified-diff.
-format: block \ No newline at end of file +format: block diff --git a/mentat/session.py b/mentat/session.py index 12aaaf591..768434eea 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -6,6 +6,8 @@ from pathlib import Path from typing import Any, Coroutine, List, Optional, Set from uuid import uuid4 +from rich import print +from rich.console import Console import attr import sentry_sdk @@ -30,7 +32,9 @@ from mentat.session_stream import SessionStream from mentat.utils import check_version, mentat_dir_path from mentat.vision.vision_manager import VisionManager +from mentat.sampler.sampler import Sampler +console = Console() class Session: """ @@ -49,6 +53,7 @@ def __init__( pr_diff: Optional[str] = None, ): # All errors thrown here need to be caught here + self._errors = [] self.stopped = False if not mentat_dir_path.exists(): @@ -82,25 +87,27 @@ def __init__( auto_completer = AutoCompleter() + sampler = Sampler() + session_context = SessionContext( - cwd, - stream, - llm_api_handler, - cost_tracker, - config, - code_context, - code_file_manager, - conversation, - vision_manager, - agent_handler, - auto_completer, + cwd=cwd, + stream=stream, + llm_api_handler=llm_api_handler, + cost_tracker=cost_tracker, + code_context=code_context, + code_file_manager=code_file_manager, + conversation=conversation, + vision_manager=vision_manager, + agent_handler=agent_handler, + auto_completer=auto_completer, + sampler=sampler ) self.ctx = session_context SESSION_CONTEXT.set(session_context) # Functions that require session_context check_version() - config.send_errors_to_stream() + self.send_errors_to_stream() for path in paths: code_context.include(path, exclude_patterns=exclude_paths) @@ -225,16 +232,18 @@ async def run_main(): except (SessionExit, CancelledError): pass except (MentatError, UserError) as e: - print(f"[red]{str(e)}[/red]") + if is_test_environment(): + console.print_exception(show_locals=True) + print(f"[red]Unhandled Exception: {str(e)}[/red]") except Exception as e: # All unhandled exceptions end up here error = f"Unhandled Exception: {traceback.format_exc()}" # Helps us handle errors in tests if is_test_environment(): - print(error) + console.print_exception(show_locals=True) self.error = error sentry_sdk.capture_exception(e) - print(f"[red]{str(e)}[/red]") + print(f"[red]{str(error)}[/red]") finally: await self._stop() sentry_sdk.flush() @@ -275,4 +284,4 @@ def send_errors_to_stream(self): stream = session_context.stream for error in self._errors: print(f"[light_yellow3]{error}[/light_yellow3]") - self._errors = [] \ No newline at end of file + self._errors = [] diff --git a/mentat/session_context.py b/mentat/session_context.py index 6c88969cb..cae5b070f 100644 --- a/mentat/session_context.py +++ b/mentat/session_context.py @@ -11,7 +11,6 @@ from mentat.auto_completer import AutoCompleter from mentat.code_context import CodeContext from mentat.code_file_manager import CodeFileManager - from mentat.config import Config from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.llm_api_handler import LlmApiHandler @@ -28,7 +27,6 @@ class SessionContext: stream: SessionStream = attr.field() llm_api_handler: LlmApiHandler = attr.field() cost_tracker: CostTracker = attr.field() - config: Config = attr.field() code_context: CodeContext = attr.field() code_file_manager: CodeFileManager = attr.field() conversation: Conversation = attr.field() From 55e29ec713de6a3086dc1d5b7390d7e1ec5458d8 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: 
Wed, 27 Dec 2023 15:19:23 -0500 Subject: [PATCH 13/24] Replace custom debug function with rich inspect The debug function in mentat/utils.py has been replaced with the inspect function from the rich library. The inspect function provides a more detailed view of objects for easy debugging and visualization. The former custom debug function which uses pprint for pretty printing and handles exceptions has been commented out for reference. --- mentat/utils.py | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/mentat/utils.py b/mentat/utils.py index 22498b364..22fc2e2d8 100644 --- a/mentat/utils.py +++ b/mentat/utils.py @@ -15,6 +15,7 @@ from jinja2 import Environment, PackageLoader, select_autoescape from openai.types.chat import ChatCompletionChunk from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta +from rich import inspect from mentat import __version__ from mentat.session_context import SESSION_CONTEXT @@ -199,20 +200,21 @@ def dd(args): args = [1, 2, 3] dd(args) """ - try: - # Throw an exception if needed - if not args: - raise ValueError("No args provided") - - # Pretty print the argument - pprint.pprint(args) - - except Exception as e: - print(f"Exception occurred: {e}") - - finally: - # Exit the program - sys.exit() + inspect(args, methods=True) + # try: + # # Throw an exception if needed + # if not args: + # raise ValueError("No args provided") + # + # # Pretty print the argument + # pprint.pprint(args) + # + # except Exception as e: + # print(f"Exception occurred: {e}") + # + # finally: + # # Exit the program + # sys.exit() CLONE_TO_DIR = Path(__file__).parent.parent / "benchmark_repos" From d22d33daadd7c9fe8f989662aa04beb533afef61 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Wed, 27 Dec 2023 16:07:22 -0500 Subject: [PATCH 14/24] wip --- mentat/code_context.py | 36 ++-- mentat/command/commands/search.py | 29 ++- mentat/config.py | 17 +- mentat/conversation.py | 240 +++++++++--------------- mentat/edit_history.py | 10 +- mentat/llm_api_handler.py | 6 +- mentat/parsers/change_display_helper.py | 22 +-- mentat/parsers/file_edit.py | 58 ++---- mentat/parsers/json_parser.py | 15 +- mentat/parsers/parser.py | 14 +- mentat/parsers/unified_diff_parser.py | 14 +- mentat/resources/conf/.mentatconf.yaml | 4 +- mentat/session_input.py | 11 +- mentat/streaming_printer.py | 45 ++--- mentat/terminal/client.py | 24 +-- mentat/terminal/output.py | 2 +- mentat/utils.py | 31 +-- 17 files changed, 233 insertions(+), 345 deletions(-) diff --git a/mentat/code_context.py b/mentat/code_context.py index 82fd46f30..1cec50be1 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -3,6 +3,7 @@ import os from pathlib import Path from typing import Dict, Iterable, List, Optional, Set, Union +from rich import print from mentat.code_feature import ( CodeFeature, @@ -31,6 +32,7 @@ from mentat.session_context import SESSION_CONTEXT from mentat.session_stream import SessionStream from mentat.config import config +from mentat.utils import dd class CodeContext: @@ -66,18 +68,17 @@ def display_context(self): prefix = " " stream.send(f"{prefix}Directory: {session_context.cwd}") if self.diff_context and self.diff_context.name: - stream.send(f"{prefix}Diff:", end=" ") - stream.send(self.diff_context.get_display_context(), color="green") + print(f"{prefix}Diff:[green]{self.diff_context.get_display_context()}[/green]") if config.run.auto_context_tokens > 0: - stream.send(f"{prefix}Auto-Context: Enabled") - 
stream.send(f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}") + print(f"{prefix}Auto-Context: [green]Enabled[/green]") + print(f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}") else: - stream.send(f"{prefix}Auto-Context: Disabled") + print(f"{prefix}Auto-Context: [yellow]Disabled[/yellow]") if self.include_files: - stream.send(f"{prefix}Included files:") - stream.send(f"{prefix + prefix}{session_context.cwd.name}") + print(f"{prefix}Included files:") + print(f"{prefix + prefix}{session_context.cwd.name}") features = [ feature for file_features in self.include_files.values() @@ -91,11 +92,10 @@ def display_context(self): prefix + prefix, ) else: - stream.send(f"{prefix}Included files: ", end="") - stream.send("None", color="yellow") + print(f"{prefix}Included files: [yellow]None[/yellow]") if self.auto_features: - stream.send(f"{prefix}Auto-Included Features:") + print(f"{prefix}Auto-Included Features:") refs = get_consolidated_feature_refs(self.auto_features) print_path_tree( build_path_tree([Path(r) for r in refs], session_context.cwd), @@ -148,6 +148,8 @@ async def get_code_message( "\n".join(include_files_message), model, full_message=False ) + + tokens_used = ( prompt_tokens + meta_tokens + include_files_tokens + config.ai.token_buffer ) @@ -291,21 +293,19 @@ def include( cwd=session_context.cwd, exclude_patterns=abs_exclude_patterns, ) + except PathValidationError as e: - session_context.stream.send(str(e), color="light_red") + print(f"[red]Path Validation Error:{str(e)}[/red]") return set() return self.include_features(code_features) def _exclude_file(self, path: Path) -> Path | None: - session_context = SESSION_CONTEXT.get() if path in self.include_files: del self.include_files[path] return path else: - session_context.stream.send( - f"Path {path} not in context", color="light_red" - ) + print(f"[red]Path {path} not in context[/red]") def _exclude_file_interval(self, path: Path) -> Set[Path]: session_context = SESSION_CONTEXT.get() @@ -314,9 +314,7 @@ def _exclude_file_interval(self, path: Path) -> Set[Path]: interval_path, interval_str = split_intervals_from_path(path) if interval_path not in self.include_files: - session_context.stream.send( - f"Path {interval_path} not in context", color="light_red" - ) + print(f"[red]Path {interval_path} not in context[/red]") return excluded_paths intervals = parse_intervals(interval_str) @@ -399,7 +397,7 @@ def exclude(self, path: Path | str) -> Set[Path]: case PathType.GLOB: excluded_paths.update(self._exclude_glob(validated_path)) except PathValidationError as e: - session_context.stream.send(str(e), color="light_red") + print(f"[red]Path Validation Error: {str(e)}[/red]") return excluded_paths diff --git a/mentat/command/commands/search.py b/mentat/command/commands/search.py index 3d771306b..01e1487e8 100644 --- a/mentat/command/commands/search.py +++ b/mentat/command/commands/search.py @@ -1,12 +1,12 @@ from typing import List, Set -from termcolor import colored from typing_extensions import override from mentat.command.command import Command, CommandArgument from mentat.errors import UserError from mentat.session_context import SESSION_CONTEXT from mentat.utils import get_relative_path +from rich import print SEARCH_RESULT_BATCH_SIZE = 10 @@ -34,19 +34,19 @@ def _parse_include_input(user_input: str, max_num: int) -> Set[int] | None: class SearchCommand(Command, command_name="search"): @override async def apply(self, *args: str) -> None: + from mentat.config import config session_context = 
SESSION_CONTEXT.get() - stream = session_context.stream + code_context = session_context.code_context - config = session_context.config if len(args) == 0: - stream.send("No search query specified", color="yellow") + print("[yellow]No search query specified[/]") return try: query = " ".join(args) results = await code_context.search(query=query) except UserError as e: - stream.send(str(e), color="red") + print(f"[red]{str(e)}[/]") return cumulative_tokens = 0 @@ -54,29 +54,28 @@ async def apply(self, *args: str) -> None: prefix = "\n " file_name = feature.rel_path(session_context.cwd) - file_name = colored(file_name, "blue", attrs=["bold"]) - file_name += colored(feature.interval_string(), "light_cyan") + file_name = f"[blue bold]{file_name}[/]" - tokens = feature.count_tokens(config.model) + tokens = feature.count_tokens(config.ai.model) cumulative_tokens += tokens - tokens_str = colored(f" ({tokens} tokens)", "yellow") + tokens_str = f"[yellow] ({tokens} tokens)[/]" file_name += tokens_str name = [] if feature.name: name = feature.name.split(",") name = [ - f"{'└' if i == len(name) - 1 else '├'}─ {colored(n, 'cyan')}" + f"{'└' if i == len(name) - 1 else '├'}─ [blue]{n}[/]" for i, n in enumerate(name) ] message = f"{str(i).ljust(3)}" + prefix.join([file_name] + name + [""]) - stream.send(message) + print(message) if i > 1 and i % SEARCH_RESULT_BATCH_SIZE == 0: # Required to avoid circular imports, but not ideal. from mentat.session_input import collect_user_input - stream.send( + print( "(Y/n) for more results or to exit search mode.\nResults to" ' include in context: (eg: "1 3 4" or "1-4")' ) @@ -90,14 +89,14 @@ async def apply(self, *args: str) -> None: rel_path = get_relative_path( included_path, session_context.cwd ) - stream.send(f"{rel_path} added to context", color="green") + print(f"[green]{rel_path} added to context[/]") else: - stream.send("(Y/n)") + print("(Y/n)") user_input: str = ( await collect_user_input(plain=True) ).data.strip() if user_input.lower() == "n": - stream.send("Exiting search mode...", color="light_blue") + print("[bright_blue]Exiting search mode...[/]") break @override diff --git a/mentat/config.py b/mentat/config.py index 7d15ffbd0..8cdd284c5 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -8,13 +8,15 @@ from mentat.git_handler import get_git_root_for_path from mentat.parsers.parser_map import parser_map from mentat.parsers.block_parser import BlockParser -from mentat.utils import mentat_dir_path +from mentat.utils import mentat_dir_path, dd from dataclasses import dataclass, field from dataclasses_json import DataClassJsonMixin from typing import Tuple from mentat.parsers.parser import Parser from typing import Any, Dict, List, Optional +from rich.console import Console +console = Console() config_file_name = Path(".mentat_config.yaml") user_config_path = mentat_dir_path / config_file_name @@ -131,7 +133,7 @@ def load_prompts(prompt_type: str): "unified_diff_parser_prompt": Path("text/unified_diff_parser_prompt.txt"), } -def load_settings(): +def load_settings(config_session_dict = None): """Load the configuration from the `.mentatconf.yaml` file.""" current_conf_path = APP_ROOT / '.mentatconf.yaml' @@ -157,6 +159,9 @@ def load_settings(): current_path_config = yaml_to_config(yaml_dict) yaml_config = merge_configs(yaml_config, current_path_config) + if config_session_dict is not None and config_session_dict.get('file_exclude_glob_list') is not None: + yaml_config["file_exclude_glob_list"].extend(config_session_dict['file_exclude_glob_list']) + 
file_exclude_glob_list = yaml_config.get("file_exclude_glob_list", []) #always ignore .mentatconf @@ -191,15 +196,15 @@ def load_settings(): } -def update_config(**kwargs): +def update_config(session_config): """Reload the configuration using the provided keyword arguments.""" global config if config is None: return - # setting the values from kwargs to the global config - for key, value in kwargs.items(): - setattr(config, key, value) + settings = load_settings(session_config) + config = MentatConfig(**settings) + def load_config() -> MentatConfig: init_config() diff --git a/mentat/conversation.py b/mentat/conversation.py index e485e2e39..3f8279867 100644 --- a/mentat/conversation.py +++ b/mentat/conversation.py @@ -4,7 +4,6 @@ import json import logging import subprocess -from timeit import default_timer from typing import List, Optional from openai import RateLimitError @@ -15,12 +14,13 @@ ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ) +from rich import print from mentat.errors import MentatError from mentat.llm_api_handler import ( + TOKEN_COUNT_WARNING, count_tokens, get_max_tokens, - model_context_size, prompt_tokens, ) from mentat.parsers.file_edit import FileEdit @@ -30,7 +30,6 @@ from mentat.utils import add_newline from mentat.config import config -from rich import print class Conversation: def __init__(self): @@ -54,7 +53,7 @@ async def display_token_count(self): print( "[yellow]Warning: Mentat has only been tested on GPT-4. You may experience" " issues with quality. This model may not be able to respond in" - " mentat's edit format.[/yellow]", + " mentat's edit format.[/yellow]" ) if "gpt-3.5" not in config.ai.model: print( @@ -62,38 +61,28 @@ async def display_token_count(self): " size for this model.[/yellow]" ) - context_size = model_context_size(config.ai.model) - maximum_context = config.ai.maximum_context - if maximum_context: - if context_size: - context_size = min(context_size, maximum_context) - else: - context_size = maximum_context - - included_code_message = ["Code Files:"] + [ - line - for features_for_path in code_context.include_files.values() - for feature in features_for_path - for line in feature.get_code_message() - ] # NOTE: missing diff info. negligible (0-30 tokens) - messages = self.get_messages() + [ + messages = self.get_messages() + code_message = await code_context.get_code_message( + prompt_tokens( + messages, + config.ai.model, + ) + ) + messages.append( ChatCompletionSystemMessageParam( role="system", - content="\n".join(included_code_message), + content=code_message, ) - ] - tokens = prompt_tokens( - messages, - config.ai.model, ) + tokens = prompt_tokens(messages, config.ai.model) context_size = get_max_tokens() if not context_size: - raise MentatError( - f"Context size for {config.model} is not known. Please set" - " maximum-context with `/config maximum_context value`." + print( + f"[red]Context size for {config.ai.model} is not known. Please set" + " the maximum context with `/config maximum_context value`.[/red]" ) - if tokens + config.ai.token_buffer > context_size: + elif tokens + config.ai.token_buffer > context_size: _plural = len(code_context.include_files) > 1 _exceed = tokens > context_size message: dict[tuple[bool, bool], str] = { @@ -102,10 +91,14 @@ async def display_token_count(self): (True, False): "s are close to", (True, True): "s exceed", } - print(f"[yellow]Included file{message[(_plural, _exceed)]} token limit \n ({tokens} / {context_size}). 
Truncating based on task similarity.[/yellow]") + print( + f"[yellow]Included file{message[(_plural, _exceed)]} token limit" + f" ({tokens} / {context_size}). Truncating based on task similarity.[/yellow]" + ) else: print( - f"[cyan]Prompt and included files token count: {tokens} / {context_size}[/cyan]") + f"[cyan]Prompt and included files token count: {tokens} / {context_size}[/cyan]" + ) # The transcript logger logs tuples containing the actual message sent by the user or LLM # and (for LLM messages) the LLM conversation that led to that LLM response @@ -116,26 +109,25 @@ def add_transcript_message(self, transcript_message: TranscriptMessage): def add_user_message(self, message: str, image: Optional[str] = None): """Used for actual user input messages""" - content: List[ChatCompletionContentPartParam] = [ - { - "type": "text", - "text": message, - }, - ] + content: List[ChatCompletionContentPartParam] | str = message if image: - content.append( + content = [ + { + "type": "text", + "text": message, + }, { "type": "image_url", "image_url": { "url": image, }, }, - ) + ] self.add_transcript_message(UserMessage(message=content, prior_messages=None)) self.add_message(ChatCompletionUserMessageParam(role="user", content=content)) def add_model_message( - self, message: str, messages_snapshot: list[ChatCompletionMessageParam] + self, message: str, messages_snapshot: list[ChatCompletionMessageParam] ): """Used for actual model output messages""" self.add_transcript_message( @@ -150,11 +142,12 @@ def add_message(self, message: ChatCompletionMessageParam): self._messages.append(message) def get_messages( - self, include_system_prompt: bool = True + self, include_system_prompt: bool = True ) -> list[ChatCompletionMessageParam]: """Returns the messages in the conversation. The system message may change throughout the conversation so it is important to access the messages through this method. """ + if config.ai.no_parser_prompt or not include_system_prompt: return self._messages.copy() else: @@ -173,25 +166,16 @@ def clear_messages(self) -> None: self._messages = list[ChatCompletionMessageParam]() async def _stream_model_response( - self, - messages: list[ChatCompletionMessageParam], - loading_multiplier: float = 0.0, - ): + self, + messages: list[ChatCompletionMessageParam], + loading_multiplier: float = 0.0, + ) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream parser = config.parser.parser llm_api_handler = session_context.llm_api_handler - - start_time = default_timer() - - num_prompt_tokens = prompt_tokens(messages, config.ai.model) - context_size = model_context_size(config.ai.model) - if context_size: - if num_prompt_tokens > context_size - config.ai.token_buffer: - print( - f"[yellow]Warning: {config.ai.model} has a maximum context length of" - f" {context_size} tokens. Attempting to run anyway:[/yellow]") + cost_tracker = session_context.cost_tracker if loading_multiplier: stream.send( @@ -213,126 +197,90 @@ async def _stream_model_response( terminate=True, ) - print(f"[cyan]Total token count: {num_prompt_tokens}[/cyan]") - print("Streaming... use control-c to interrupt the model at any point\n") + num_prompt_tokens = prompt_tokens(messages, config.ai.model) + print(f"[blue]Total token count: {num_prompt_tokens}[/blue]") + if num_prompt_tokens > TOKEN_COUNT_WARNING: + print( + "[yellow]Warning: LLM performance drops off rapidly at large context sizes. 
Use" + " /clear to clear context or use /exclude to exclude any uneccessary" + " files.[/yellow]", + ) + print("[blue]Streaming... use [bold white]control-c[/] to interrupt the model at any point[/]\n") async with parser.interrupt_catcher(): parsed_llm_response = await parser.stream_and_parse_llm_response( add_newline(response) ) + if not parsed_llm_response.interrupted: + cost_tracker.display_last_api_call() + else: + # Generator doesn't log the api call if we interrupt it + cost_tracker.log_api_call_stats( + num_prompt_tokens, + count_tokens( + parsed_llm_response.full_response, config.ai.model, full_message=False + ), + config.ai.model, + display=True, + ) - time_elapsed = default_timer() - start_time - return (parsed_llm_response, time_elapsed, num_prompt_tokens) + messages.append( + ChatCompletionAssistantMessageParam( + role="assistant", content=parsed_llm_response.full_response + ) + ) + self.add_model_message(parsed_llm_response.full_response, messages) + + return parsed_llm_response async def get_model_response(self) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream + code_context = session_context.code_context - cost_tracker = session_context.cost_tracker messages_snapshot = self.get_messages() - # Rebuild code context with active code and available tokens - tokens = prompt_tokens(messages_snapshot, config.ai.model) - - loading_multiplier = 1.0 if config.run.auto_context else 0.0 + # Get current code message + loading_multiplier = 1.0 if config.run.auto_context_tokens > 0 else 0.0 prompt = messages_snapshot[-1]["content"] if isinstance(prompt, list): text_prompts = [ p.get("text", "") for p in prompt if p.get("type") == "text" ] prompt = " ".join(text_prompts) - max_tokens = get_max_tokens() - if max_tokens is None: - print(f"[pink]Context size for {config.ai.model} is not known. Please set maximum-context with `/config maximum_context value`.[/pink]") - return ParsedLLMResponse("", "", list[FileEdit]()) - - if max_tokens - tokens < config.ai.token_buffer: - if max_tokens - tokens < 0: - stream.send( - f"The context size is limited to {max_tokens} tokens and" - f" previous messages plus system prompts use {tokens} tokens." - " Please use `/clear` to reset or restart the session.", - color="light_red", - ) - else: - stream.send( - f"The context size is limited to {max_tokens} tokens and" - f" previous messages plus system prompts use {tokens} tokens," - " leaving insufficent tokens for a response. Please use" - " `/clear` to reset or restart the session.", - color="light_red", - ) - return ParsedLLMResponse("", "", list[FileEdit]()) - code_message = await code_context.get_code_message( - ( - # Prompt can be image as well as text - prompt + prompt_tokens(messages_snapshot, config.ai.model), + prompt=( + prompt # Prompt can be image as well as text if isinstance(prompt, str) else "" ), - max_tokens - tokens - config.ai.token_buffer, loading_multiplier=0.5 * loading_multiplier, ) messages_snapshot.insert( - 1, - ChatCompletionSystemMessageParam(role="system", content=code_message), + 1, ChatCompletionSystemMessageParam(role="system", content=code_message) ) - # If we want to add agent specific messages in (this one didn't work too well): - # if agent_handler.agent_enabled: - # agent_message = ( - # "You are currently being run autonomously. After making your changes," - # " you will have the chance to lint and test your code. If applicable," - # " add tests that you can use to test your code." 
- # ) - # messages_snapshot.append( - # ChatCompletionSystemMessageParam(role="system", content=agent_message) - # ) - try: response = await self._stream_model_response( messages_snapshot, loading_multiplier=0.5 * loading_multiplier, ) - parsed_llm_response, time_elapsed, num_prompt_tokens = response except RateLimitError: - stream.send( - "Rate limit error received from OpenAI's servers using model" + print( + "[red]Rate limit error received from OpenAI's servers using model" f' {config.ai.model}.\nUse "/config model " to switch to a' - " different model.", - color="light_red", + " different model.[/red]" ) return ParsedLLMResponse("", "", list[FileEdit]()) finally: if loading_multiplier: stream.send(None, channel="loading", terminate=True) - - cost_tracker.log_api_call_stats( - num_prompt_tokens, - count_tokens( - parsed_llm_response.full_response, config.ai.model, full_message=False - ), - config.ai.model, - time_elapsed, - display=True, - ) - - messages_snapshot.append( - ChatCompletionAssistantMessageParam( - role="assistant", content=parsed_llm_response.full_response - ) - ) - self.add_model_message(parsed_llm_response.full_response, messages_snapshot) - return parsed_llm_response + return response def remaining_context(self) -> int | None: - max_context = get_max_tokens() - if max_context is None: - return None - - return max_context - prompt_tokens(self.get_messages(), config.ai.model) + return get_max_tokens() - prompt_tokens(self.get_messages(), config.ai.model) def can_add_to_context(self, message: str) -> bool: """ @@ -342,11 +290,11 @@ def can_add_to_context(self, message: str) -> bool: remaining_context = self.remaining_context() return ( - remaining_context is not None - and remaining_context - - count_tokens(message, config.ai.model, full_message=True) - - config.ai.token_buffer - > 0 + remaining_context is not None + and remaining_context + - count_tokens(message, config.ai.model, full_message=True) + - config.ai.token_buffer + > 0 ) async def run_command(self, command: list[str]) -> bool: @@ -355,9 +303,8 @@ async def run_command(self, command: list[str]) -> bool: """ ctx = SESSION_CONTEXT.get() - ctx.stream.send("Running command: ", end="", color="cyan") - ctx.stream.send(" ".join(command), color="yellow") - ctx.stream.send("Command output:", color="cyan") + print(f"[cyan]Running command: [/cyan][yellow]" + " ".join(command) + "[/yellow]") + print(f"[cyan]Command output: [/cyan]") try: process = subprocess.Popen( @@ -375,14 +322,14 @@ async def run_command(self, command: list[str]) -> bool: if not line: break output.append(line) - ctx.stream.send(line, end="") + print(line, end="") # This gives control back to the asyncio event loop so we can actually print what we sent # Unfortunately asyncio.sleep(0) won't work https://stackoverflow.com/a/74505785 # Note: if subprocess doesn't flush, output can't and won't be streamed. 
await asyncio.sleep(0.01) except FileNotFoundError: output = [f"Invalid command: {' '.join(command)}"] - ctx.stream.send(output[0]) + print(output[0]) output = "".join(output) message = f"Command ran:\n{' '.join(command)}\nCommand output:\n{output}" @@ -390,14 +337,13 @@ async def run_command(self, command: list[str]) -> bool: self.add_message( ChatCompletionSystemMessageParam(role="system", content=message) ) - ctx.stream.send( - "Successfully added command output to model context.", color="green" + print( + "[green]Successfully added command output to model context.[/green]" ) return True else: - ctx.stream.send( - "Not enough tokens remaining in model's context to add command output" - " to model context.", - color="light_red", + print( + "[red]Not enough tokens remaining in model's context to add command output" + " to model context.[/red]" ) return False diff --git a/mentat/edit_history.py b/mentat/edit_history.py index d1a1540d7..4643e210e 100644 --- a/mentat/edit_history.py +++ b/mentat/edit_history.py @@ -1,7 +1,5 @@ from typing import Optional -from termcolor import colored - from mentat.errors import HistoryError from mentat.parsers.file_edit import FileEdit from mentat.session_context import SESSION_CONTEXT @@ -24,7 +22,7 @@ def push_edits(self): def undo(self) -> str: if not self.edits: - return colored("No edits available to undo", color="light_red") + return "[bright_red]No edits available to undo[/]" # Make sure to go top down cur_edit = self.edits.pop() @@ -36,14 +34,14 @@ def undo(self) -> str: cur_file_edit.undo() undone_edit.append(cur_file_edit) except HistoryError as e: - errors.append(colored(str(e), color="light_red")) + errors.append(f"[bright_red]{str(e)}[/]") if undone_edit: self.undone_edits.append(undone_edit) return "\n".join(errors) async def redo(self) -> Optional[str]: if not self.undone_edits: - return colored("No edits available to redo", color="light_red") + return "[bright_red]No edits available to redo[/]" session_context = SESSION_CONTEXT.get() code_file_manager = session_context.code_file_manager @@ -56,7 +54,7 @@ async def redo(self) -> Optional[str]: def undo_all(self) -> str: if not self.edits: - return colored("No edits available to undo", color="light_red") + return "[bright_red]No edits available to undo[/]" errors = list[str]() while self.edits: diff --git a/mentat/llm_api_handler.py b/mentat/llm_api_handler.py index c37e309da..b50ecaf5d 100644 --- a/mentat/llm_api_handler.py +++ b/mentat/llm_api_handler.py @@ -285,8 +285,8 @@ async def call_llm_api( stream: bool, response_format: ResponseFormat = ResponseFormat(type="text"), ) -> ChatCompletion | AsyncIterator[ChatCompletionChunk]: + from mentat.config import config session_context = SESSION_CONTEXT.get() - config = session_context.config cost_tracker = session_context.cost_tracker # Confirm that model has enough tokens remaining. 
@@ -304,7 +304,7 @@ async def call_llm_api( response = await self.async_client.chat.completions.create( model=model, messages=messages, - temperature=config.temperature, + temperature=config.ai.temperature, stream=stream, max_tokens=4096, ) @@ -312,7 +312,7 @@ async def call_llm_api( response = await self.async_client.chat.completions.create( model=model, messages=messages, - temperature=config.temperature, + temperature=config.ai.temperature, stream=stream, response_format=response_format, ) diff --git a/mentat/parsers/change_display_helper.py b/mentat/parsers/change_display_helper.py index 10db7f44d..9abab5fd1 100644 --- a/mentat/parsers/change_display_helper.py +++ b/mentat/parsers/change_display_helper.py @@ -7,7 +7,6 @@ from pygments.lexer import Lexer from pygments.lexers import TextLexer, get_lexer_for_filename from pygments.util import ClassNotFound -from termcolor import colored from mentat.session_context import SESSION_CONTEXT from mentat.utils import get_relative_path @@ -111,7 +110,7 @@ def _get_code_block( ): lines = _prefixed_lines(line_number_buffer, code_lines, prefix) if lines: - return "\n".join(colored(line, color=color) for line in lines.split("\n")) + return "\n".join(f"[{color}]{line}[/{color}]" for line in lines.split("\n")) else: return "" @@ -146,23 +145,14 @@ def get_file_name( ): match display_information.file_action_type: case FileActionType.CreateFile: - return "\n" + colored( - f"{display_information.file_name}*", color="light_green" - ) + return f"\n[light_green]{display_information.file_name}*[/light_green]" + case FileActionType.DeleteFile: - return "\n" + colored( - f"Deletion: {display_information.file_name}", color="light_red" - ) + return f"\n[bright_red]Deletion: {display_information.file_name}[/bright_red]" case FileActionType.RenameFile: - return "\n" + colored( - f"Rename: {display_information.file_name} ->" - f" {display_information.new_name}", - color="yellow", - ) + return f"\n[yellow]Rename: {display_information.file_name} -> {display_information.new_name}[/yellow]" case FileActionType.UpdateFile: - return "\n" + colored( - f"{display_information.file_name}", color="light_blue" - ) + return f"\n[bright_blue]{display_information.file_name}[/bright_blue]" def get_added_lines( diff --git a/mentat/parsers/file_edit.py b/mentat/parsers/file_edit.py index 6ad2ae459..7de1c2515 100644 --- a/mentat/parsers/file_edit.py +++ b/mentat/parsers/file_edit.py @@ -3,6 +3,7 @@ from pathlib import Path from typing import Any +from rich import print import attr from mentat.errors import HistoryError, MentatError @@ -41,10 +42,7 @@ def __lt__(self, other: Replacement): async def _ask_user_change( text: str, ) -> bool: - session_context = SESSION_CONTEXT.get() - stream = session_context.stream - - stream.send(text, color="light_blue") + print(f"[bright_blue]{text}[/]") return await ask_yes_no(default_yes=True) @@ -151,17 +149,11 @@ def is_valid(self) -> bool: if self.is_creation: if self.file_path.exists(): - stream.send( - f"File {display_path} already exists, canceling creation.", - color="light_yellow", - ) + print(f"[bright_yellow]File {display_path} already exists, canceling creation.[/]") return False else: if not self.file_path.exists(): - stream.send( - f"File {display_path} does not exist, canceling all edits to file.", - color="light_yellow", - ) + print(f"[bright_yellow]File {display_path} does not exist, canceling all edits to file.[/]") return False file_features_in_context = [ f for f in code_context.auto_features if f.path == self.file_path @@ -171,21 
+163,15 @@ def is_valid(self) -> bool: for r in self.replacements for i in range(r.starting_line + 1, r.ending_line + 1) ): - stream.send( - f"File {display_path} not in context, canceling all edits to file.", - color="light_yellow", - ) + print(f"[bright_yellow]File {display_path} not in context, canceling all edits to file.[/]") return False if self.rename_file_path is not None and self.rename_file_path.exists(): rel_rename_path = None if self.rename_file_path.is_relative_to(session_context.cwd): rel_rename_path = self.rename_file_path.relative_to(session_context.cwd) - stream.send( - f"File {display_path} being renamed to existing file" - f" {rel_rename_path or self.rename_file_path}, canceling rename.", - color="light_yellow", - ) + print(f"[bright_yellow]File {display_path} being renamed to existing file" + f" {rel_rename_path or self.rename_file_path}, canceling rename.[/]") self.rename_file_path = None return True @@ -229,15 +215,12 @@ async def filter_replacements( ) def _print_resolution(self, first: Replacement, second: Replacement): - session_context = SESSION_CONTEXT.get() - stream = session_context.stream - - stream.send("Change overlap detected, auto-merged back to back changes:\n") - stream.send(self.file_path) - stream.send(change_delimiter) + print("Change overlap detected, auto-merged back to back changes:\n") + print(self.file_path) + print(change_delimiter) for line in first.new_lines + second.new_lines: - stream.send("+ " + line, color="green") - stream.send(change_delimiter) + print(f"[green]+ {line}[/green]") + print(change_delimiter) def resolve_conflicts(self): self.replacements.sort(reverse=True) @@ -290,8 +273,8 @@ def undo(self): ctx.code_file_manager.delete_file(self.file_path) self._display_creation(prefix=prefix) - ctx.stream.send( - f"Creation of file {self.file_path} undone", color="light_blue" + print( + f"[bright_blue]Creation of file {self.file_path} undone.[/bright_blue]" ) return @@ -309,9 +292,8 @@ def undo(self): ctx.code_file_manager.rename_file(self.rename_file_path, self.file_path) self._display_rename(prefix=prefix) - ctx.stream.send( - f"Rename of file {self.file_path} to {self.rename_file_path} undone", - color="light_blue", + print( + f"[bright_blue]Rename of file {self.file_path} to {self.rename_file_path} undone.[/bright_blue]" ) if self.is_deletion: @@ -329,8 +311,8 @@ def undo(self): ) self._display_deletion(self.previous_file_lines, prefix=prefix) - ctx.stream.send( - f"Deletion of file {self.file_path} undone", color="light_red" + print( + f"[bright_blue]Deletion of file {self.file_path} undone.[/bright_blue]" ) elif self.replacements: if not self.file_path.exists(): @@ -345,6 +327,6 @@ def undo(self): f.write("\n".join(self.previous_file_lines)) self._display_replacements(self.previous_file_lines, prefix=prefix) - ctx.stream.send( - f"Edits to file {self.file_path} undone", color="light_blue" + print( + f"[bright_blue]Edits to file {self.file_path} undone.[/bright_blue]" ) diff --git a/mentat/parsers/json_parser.py b/mentat/parsers/json_parser.py index 926a19aaa..d3e352eb4 100644 --- a/mentat/parsers/json_parser.py +++ b/mentat/parsers/json_parser.py @@ -4,11 +4,11 @@ from json import JSONDecodeError from pathlib import Path from typing import AsyncIterator, Dict +from rich import print from jsonschema import ValidationError, validate from openai.types.chat import ChatCompletionChunk from openai.types.chat.completion_create_params import ResponseFormat -from termcolor import colored from typing_extensions import override from 
mentat.errors import ModelError @@ -100,7 +100,7 @@ async def stream_and_parse_llm_response( self, response: AsyncIterator[ChatCompletionChunk] ) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() - stream = session_context.stream + printer = StreamingPrinter() printer_task = asyncio.create_task(printer.print_lines()) @@ -111,10 +111,7 @@ async def stream_and_parse_llm_response( if self.shutdown.is_set(): printer.shutdown_printer() await printer_task - stream.send( - colored("") # Reset ANSI codes - + "\n\nInterrupted by user. Using the response up to this point." - ) + print("\n\nInterrupted by user. Using the response up to this point.") break for content in chunk_to_lines(chunk): @@ -134,12 +131,10 @@ async def stream_and_parse_llm_response( validate(instance=response_json, schema=output_schema) except JSONDecodeError: # Should never happen with OpenAI's response_format set to json - stream.send("Error processing model response: Invalid JSON", color="red") + print("[red]Error processing model response: Invalid JSON[/red]") return ParsedLLMResponse(message, "", []) except ValidationError: - stream.send( - "Error processing model response: Invalid format given", color="red" - ) + print("[red]Error processing model response: Invalid format given[/red]") return ParsedLLMResponse(message, "", []) file_edits: Dict[Path, FileEdit] = {} diff --git a/mentat/parsers/parser.py b/mentat/parsers/parser.py index cb8904606..618969a5c 100644 --- a/mentat/parsers/parser.py +++ b/mentat/parsers/parser.py @@ -7,11 +7,11 @@ from contextlib import asynccontextmanager from pathlib import Path from typing import AsyncIterator +from rich import print import attr from openai.types.chat import ChatCompletionChunk from openai.types.chat.completion_create_params import ResponseFormat -from termcolor import colored from mentat.code_file_manager import CodeFileManager from mentat.errors import ModelError @@ -85,7 +85,6 @@ async def stream_and_parse_llm_response( To make a parser that differs from these assumptions, override this method instead of the helper methods """ session_context = SESSION_CONTEXT.get() - stream = session_context.stream code_file_manager = session_context.code_file_manager printer = StreamingPrinter() @@ -115,10 +114,7 @@ async def stream_and_parse_llm_response( printer.shutdown_printer() if printer_task is not None: await printer_task - stream.send( - colored("") # Reset ANSI codes - + "\n\nInterrupted by user. Using the response up to this point." - ) + print("\n\nInterrupted by user. 
Using the response up to this point.") break for content in chunk_to_lines(chunk): @@ -358,8 +354,8 @@ def _code_line_beginning( """ The beginning of a code line; normally this means printing the + prefix """ - return colored( - "+" + " " * (display_information.line_number_buffer - 1), color="green" + return ( + "[green]+" + " " * (display_information.line_number_buffer - 1) + "[/green]" ) def _code_line_content( @@ -372,7 +368,7 @@ def _code_line_content( """ Part of a code line; normally this means printing in green """ - return colored(content, color="green") + return f"[green]{content}[/green]" # These methods must be overriden if using the default stream and parse function def _could_be_special(self, cur_line: str) -> bool: diff --git a/mentat/parsers/unified_diff_parser.py b/mentat/parsers/unified_diff_parser.py index b9616943b..6234447bb 100644 --- a/mentat/parsers/unified_diff_parser.py +++ b/mentat/parsers/unified_diff_parser.py @@ -1,7 +1,6 @@ from enum import Enum from pathlib import Path -from termcolor import colored from typing_extensions import override from mentat.code_file_manager import CodeFileManager @@ -53,9 +52,9 @@ def _code_line_content( if cur_line == UnifiedDiffDelimiter.MidChange.value: return change_delimiter + "\n" elif cur_line.startswith("+"): - return colored(content, "green") + return f"[green]{content}[/]" elif cur_line.startswith("-"): - return colored(content, "red") + return f"[red]{content}[/]" else: return highlight_text(display_information, content) @@ -161,9 +160,7 @@ def _add_code_block( and not line.startswith("-") and not line.startswith(" ") ): - return colored( - "Error: Invalid diff format given. Discarding this change." - ) + return "[red]Error: Invalid diff format given. Discarding this change.[/]" cur_lines.append(line) if cur_lines: changes.append(cur_lines) @@ -186,10 +183,7 @@ def _add_code_block( start_index = matching_index(file_lines, search_lines) if start_index == -1: - return colored( - "Error: Original lines not found. Discarding this change.", - color="red", - ) + return "[red]Error: Original lines not found. Discarding this change.[/]" # Matching lines checks for matches that are missing whitespace only lines; # this will cause errors with line numbering if we don't add those lines into the change lines diff --git a/mentat/resources/conf/.mentatconf.yaml b/mentat/resources/conf/.mentatconf.yaml index 3216c384a..39604fe65 100644 --- a/mentat/resources/conf/.mentatconf.yaml +++ b/mentat/resources/conf/.mentatconf.yaml @@ -1,9 +1,9 @@ # This field is for specifying the model name. You can find the list of valid options at https://platform.openai.com/docs/models/overview -model: gpt-4 +model: gpt-4-1106-preview # For models other than gpt-3.5 and gpt-4, the model's context size can't be inferred. # In such cases, you need to specify the maximum context manually. 
-maximum_context: 16000 +# maximum_context: 16000 #the type of prompts that the agent should be using options are text and markdown prompt_type: markdown diff --git a/mentat/session_input.py b/mentat/session_input.py index f34c1473b..db0e9aaba 100644 --- a/mentat/session_input.py +++ b/mentat/session_input.py @@ -2,6 +2,7 @@ import logging import shlex from typing import Any, Coroutine +from rich import print from mentat.command.command import Command from mentat.errors import RemoteKeyboardInterrupt, SessionExit @@ -37,12 +38,10 @@ async def collect_user_input(plain: bool = False) -> StreamMessage: async def ask_yes_no(default_yes: bool) -> bool: - session_context = SESSION_CONTEXT.get() - stream = session_context.stream while True: # TODO: combine this into a single message (include content) - stream.send("(Y/n)" if default_yes else "(y/N)") + print("(Y/n)" if default_yes else "(y/N)") response = await collect_user_input(plain=True) content = response.data if content in ["y", "n", ""]: @@ -51,8 +50,6 @@ async def ask_yes_no(default_yes: bool) -> bool: async def collect_input_with_commands() -> StreamMessage: - session_context = SESSION_CONTEXT.get() - stream = session_context.stream response = await collect_user_input() while isinstance(response.data, str) and response.data.startswith("/"): @@ -62,7 +59,7 @@ async def collect_input_with_commands() -> StreamMessage: command = Command.create_command(response.data[1:].split(" ")[0]) await command.apply(*arguments) except ValueError as e: - stream.send(f"Error processing command arguments: {e}", color="light_red") + print(f"[bright_red]Error processing command arguments: {e}[/]") response = await collect_user_input() return response @@ -106,7 +103,7 @@ async def listen_for_interrupt( return wrapped_task.result() else: # Send a newline for terminal clients (remove later) - stream.send("\n") + print("\n") if raise_exception_on_interrupt: raise RemoteKeyboardInterrupt diff --git a/mentat/streaming_printer.py b/mentat/streaming_printer.py index d910f1a13..89a8c918f 100644 --- a/mentat/streaming_printer.py +++ b/mentat/streaming_printer.py @@ -1,15 +1,15 @@ import asyncio from collections import deque +from rich import print +import re -from termcolor import colored - -from mentat.session_context import SESSION_CONTEXT +from mentat.utils import dd, dump class StreamingPrinter: def __init__(self): self.strings_to_print = deque[str]([]) - self.chars_remaining = 0 + self.words_remaining = 0 self.finishing = False self.shutdown = False @@ -17,40 +17,33 @@ def add_string(self, string: str, end: str = "\n", color: str | None = None): if self.finishing: return - if len(string) == 0: - return - string += end - - colored_string = colored(string, color) if color is not None else string + words = string.split(" ") - index = colored_string.index(string) - characters = list(string) - characters[0] = colored_string[:index] + characters[0] - characters[-1] = characters[-1] + colored_string[index + len(string) :] + for word in words: + if word: + if color is not None: + colored_word = f"[{color}]{word}[/{color}]" + else: + colored_word = word + self.strings_to_print.append(colored_word) + self.words_remaining += 1 - self.strings_to_print.extend(characters) - self.chars_remaining += len(characters) + self.strings_to_print.append(end) + self.words_remaining += 1 def sleep_time(self) -> float: max_finish_time = 1.0 - required_sleep_time = max_finish_time / (self.chars_remaining + 1) + required_sleep_time = max_finish_time / (self.words_remaining + 1) max_sleep = 
0.002 if self.finishing else 0.006 min_sleep = 0.002 return max(min(max_sleep, required_sleep_time), min_sleep) async def print_lines(self): - session_context = SESSION_CONTEXT.get() - stream = session_context.stream - while not self.shutdown: if self.strings_to_print: - if len(self.strings_to_print) > 500: - next_string = "".join(self.strings_to_print) - self.strings_to_print = deque[str]([]) - else: - next_string = self.strings_to_print.popleft() - stream.send(next_string, end="", flush=True) - self.chars_remaining -= 1 + next_word = self.strings_to_print.popleft() + print(next_word, end=" ", flush=True) + self.words_remaining -= 1 elif self.finishing: break await asyncio.sleep(self.sleep_time()) diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index f6c07195e..21bd0d05d 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -56,8 +56,6 @@ def command(self, *args, **kwargs): app = AsyncTyper() - - class TerminalClient: def __init__( self, @@ -240,21 +238,15 @@ def start(paths: List[str] = typer.Argument(...), diff: str = typer.Option(None, "--diff", "-d", show_default='HEAD', help="A git tree-ish (e.g. commit, branch, tag) to diff against"), pr_diff: str = typer.Option(None, "--pr-diff", "-p", help="A git tree-ish to diff against the latest common ancestor of"), cwd: Path = typer.Option(Path.cwd(), "--cwd", help="The current working directory")) -> None: + + # Check if these variables are set and pass them to update_config function as kwargs - kwargs = {} - if paths: - kwargs["paths"] = paths + session_config = {'file_exclude_glob_list': []} + if exclude_paths: - kwargs["exclude"] = exclude_paths - if ignore_paths: - kwargs["ignore"] = ignore_paths - if diff: - kwargs["diff"] = diff - if pr_diff: - kwargs["pr_diff"] = pr_diff - if cwd: - kwargs["cwd"] = cwd - update_config(**kwargs) + session_config["file_exclude_glob_list"] = exclude_paths + + update_config(session_config) cwd = Path(cwd).expanduser().resolve() @@ -271,4 +263,4 @@ def start(paths: List[str] = typer.Argument(...), if __name__ == "__main__": - typer.run(start()) \ No newline at end of file + typer.run(start()) diff --git a/mentat/terminal/output.py b/mentat/terminal/output.py index 3d770a963..b35fd8263 100644 --- a/mentat/terminal/output.py +++ b/mentat/terminal/output.py @@ -4,7 +4,7 @@ from prompt_toolkit.formatted_text import FormattedText from mentat.session_stream import StreamMessage - +from rich import print def _print_stream_message_string( content: Any, diff --git a/mentat/utils.py b/mentat/utils.py index 22fc2e2d8..82ebe56ed 100644 --- a/mentat/utils.py +++ b/mentat/utils.py @@ -201,20 +201,23 @@ def dd(args): dd(args) """ inspect(args, methods=True) - # try: - # # Throw an exception if needed - # if not args: - # raise ValueError("No args provided") - # - # # Pretty print the argument - # pprint.pprint(args) - # - # except Exception as e: - # print(f"Exception occurred: {e}") - # - # finally: - # # Exit the program - # sys.exit() + # Exit the program + sys.exit() + +def dump(args): + """ + This method dump takes an argument args and performs the following operations: + + 1. Inspects args, printing its attributes and methods. + + Note: This method does not return any value. 
+ + Example usage: + args = [1, 2, 3] + dump(args) + """ + inspect(args, methods=True) + CLONE_TO_DIR = Path(__file__).parent.parent / "benchmark_repos" From 5468797e32c02943fafa4d79754d27dc450fdcba Mon Sep 17 00:00:00 2001 From: Greg L Date: Wed, 27 Dec 2023 20:33:21 -0500 Subject: [PATCH 15/24] Update terminal client and dependencies Replaced Typer with Click for the terminal client's command line interface, added Click to the Poetry dependencies, and removed redundant code from the terminal client. Also fixed the client exit listener to await its asynchronous shutdown task. This commit simplifies the command line interface code. --- mentat/terminal/client.py | 63 ++++++++++++++------------------------ poetry.lock | 2 +- pyproject.toml | 3 +- 3 files changed, 25 insertions(+), 43 deletions(-) diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 21bd0d05d..6fa38e767 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -21,40 +21,25 @@ from typing import List from pathlib import Path +import click import anyio import inspect -import typer from functools import partial, wraps -from typer import Typer from mentat.utils import dd from asyncio import run as aiorun -class AsyncTyper(Typer): - @staticmethod - def maybe_run_async(decorator, f): - if inspect.iscoroutinefunction(f): - - @wraps(f) - def runner(*args, **kwargs): - return asyncio.run(f(*args, **kwargs)) - - decorator(runner) - else: - decorator(f) - return f - - def callback(self, *args, **kwargs): - decorator = super().callback(*args, **kwargs) - return partial(self.maybe_run_async, decorator) - - def command(self, *args, **kwargs): - decorator = super().command(*args, **kwargs) - return partial(self.maybe_run_async, decorator) - +from prompt_toolkit.application import Application +from prompt_toolkit.application import Application +from prompt_toolkit.application.current import get_app +from prompt_toolkit.key_binding import KeyBindings +from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous +from prompt_toolkit.layout import HSplit, Layout, VSplit +from prompt_toolkit.styles import Style +from prompt_toolkit.widgets import Box, Button, Frame, Label, TextArea -app = AsyncTyper() class TerminalClient: def __init__( self, @@ -132,7 +117,7 @@ async def _handle_input_requests(self): async def _listen_for_client_exit(self): """When the Session shuts down, it will send the client_exit signal for the client to shutdown.""" await self.session.stream.recv(channel="client_exit") - asyncio.create_task(self._shutdown()) + await asyncio.create_task(self._shutdown()) async def _listen_for_should_exit(self): """This listens for a user event signaling shutdown (like SigInt), and tells the session to shutdown.""" @@ -225,20 +210,16 @@ async def _shutdown(self): self._stopped.set() -@app.command() -async def async_hello(name: str, last_name: str = "") -> None: - await anyio.sleep(1) - typer.echo(f"Hello World {name} {last_name}") - - -@app.command() -def start(paths: List[str] = typer.Argument(...), - exclude_paths: List[str] = typer.Option([], "--exclude-paths", "-e", help="List of file paths, directory paths, or glob patterns to exclude"), - ignore_paths: List[str] = typer.Option([], "--ignore-paths", "-g", help="List of file paths, directory paths, or glob patterns to ignore in auto-context"), - diff: str = typer.Option(None, "--diff", "-d", show_default='HEAD', help="A git tree-ish (e.g. 
commit, branch, tag) to diff against"), - pr_diff: str = typer.Option(None, "--pr-diff", "-p", help="A git tree-ish to diff against the latest common ancestor of"), - cwd: Path = typer.Option(Path.cwd(), "--cwd", help="The current working directory")) -> None: +# Click command line entry point for the terminal client. +@click.command() +@click.option('-e', '--exclude-paths', multiple=True, default=[], help='List of file paths, directory paths, or glob patterns to exclude.') +@click.option('-g', '--ignore-paths', multiple=True, default=[], help='List of file paths, directory paths, or glob patterns to ignore in auto-context.') +@click.option('-d', '--diff', default=None, show_default='HEAD', help='A git tree-ish (e.g. commit, branch, tag) to diff against.') +@click.option('-p', '--pr-diff', default=None, help='A git tree-ish to diff against the latest common ancestor of.') +@click.option('--cwd', default=str(Path.cwd()), help='The current working directory.') +@click.argument('paths', nargs=-1, required=True) +def start(paths, exclude_paths, ignore_paths, diff, pr_diff, cwd) -> None: # Check if these variables are set and pass them to update_config function as kwargs session_config = {'file_exclude_glob_list': []} @@ -258,9 +239,9 @@ def start(paths: List[str] = typer.Argument(...), diff, pr_diff ) - asyncio.run(terminal_client._run()) + asyncio.run(terminal_client._run()) if __name__ == "__main__": - typer.run(start()) + start() diff --git a/poetry.lock b/poetry.lock index 58b613e7f..7a1fd9d59 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2004,4 +2004,4 @@ h11 = ">=0.9.0,<1" [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "dcf5c2cc18dc9f6239768f26060f311daed588b822dec3082662955ac3fc8506" +content-hash = "4b1e59982d7096c184b82f709a213952de378796ea74c6d96ea4cadd67e8d27c" diff --git a/pyproject.toml b/pyproject.toml index 3bcfd2c94..b18fc5abb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,7 @@ authors = ["bio_bootloader "] readme = "README.md" [tool.poetry.scripts] -mentat = 'mentat.terminal.client:app' +mentat = 'mentat.terminal.client:start' [tool.poetry.dependencies] python = "^3.10" @@ -38,6 +38,7 @@ dataclasses-json = "^0.6.3" pyyaml = "^6.0.1" rich = "^13.7.0" typer = "^0.9.0" +click = "^8.1.7" [tool.poetry.group.dev.dependencies] aiomultiprocess = "^0.9.0" From a529d138617e1ce34cb9d325066f6331fae9eabf Mon Sep 17 00:00:00 2001 From: Greg L Date: Wed, 27 Dec 2023 21:27:19 -0500 Subject: [PATCH 16/24] Replace rich module with termcolor for message formatting The rich module has been replaced with termcolor for text coloring and formatting in various Python files. The change improves consistency across the codebase and streamlines the process of sending colored text with the stream.send() function. The changes include updating function and method calls, adjusting import statements, and modifying line color settings. 
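To illustrate the direction of the conversion (a minimal, self-contained sketch rather than code taken from this diff; at the real call sites the colored text is routed through the session stream's send method instead of a bare print):

    # Before this patch: rich interprets inline markup tags at print time.
    from rich import print as rich_print
    rich_print("[yellow]No search query specified[/]")

    # After this patch: termcolor embeds ANSI color codes in the string itself,
    # so a pre-colored string can be handed off (e.g. to stream.send).
    from termcolor import colored
    print(colored("No search query specified", "yellow"))
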
--- mentat/agent_handler.py | 38 ++++++---- mentat/code_context.py | 36 ++++----- mentat/command/commands/search.py | 29 ++++---- mentat/config.py | 3 - mentat/conversation.py | 97 ++++++++++++++----------- mentat/edit_history.py | 10 ++- mentat/include_files.py | 1 - mentat/parsers/change_display_helper.py | 22 ++++-- mentat/parsers/file_edit.py | 58 ++++++++++----- mentat/parsers/json_parser.py | 15 ++-- mentat/parsers/parser.py | 14 ++-- mentat/parsers/unified_diff_parser.py | 14 +++- mentat/session.py | 71 +++++++++--------- mentat/session_input.py | 11 ++- mentat/streaming_printer.py | 45 +++++++----- mentat/terminal/client.py | 14 ++-- mentat/terminal/output.py | 2 +- 17 files changed, 280 insertions(+), 200 deletions(-) diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index ae5ac5e3c..891c13b22 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -15,13 +15,11 @@ from mentat.session_input import ask_yes_no, collect_user_input from mentat.transcripts import ModelMessage from mentat.config import config -from rich import print - -from mentat.utils import dd agent_file_selection_prompt_path = config.ai.prompts.get("agent_file_selection_prompt") agent_command_prompt_path = config.ai.prompts.get("agent_command_selection_prompt") + class AgentHandler: def __init__(self): self._agent_enabled = False @@ -41,7 +39,9 @@ def disable_agent_mode(self): async def enable_agent_mode(self): ctx = SESSION_CONTEXT.get() - print(f"* [cyan]Finding files to determine how to test changes...[/cyan]") + ctx.stream.send( + "Finding files to determine how to test changes...", color="cyan" + ) features = ctx.code_context.get_all_features(split_intervals=False) messages: List[ChatCompletionMessageParam] = [ ChatCompletionSystemMessageParam( @@ -54,7 +54,7 @@ async def enable_agent_mode(self): ), ), ] - model = ctx.config.model + model = config.ai.model response = await ctx.llm_api_handler.call_llm_api(messages, model, False) content = response.choices[0].message.content or "" @@ -66,8 +66,11 @@ async def enable_agent_mode(self): file_contents = "\n\n".join(ctx.code_file_manager.read_file(path)) self.agent_file_message += f"{path}\n\n{file_contents}" - print(f"[cyan]The model has chosen these files to help it determine how to test its changes:[/cyan]") - + ctx.stream.send( + "The model has chosen these files to help it determine how to test its" + " changes:", + color="cyan", + ) ctx.stream.send("\n".join(str(path) for path in paths)) ctx.cost_tracker.display_last_api_call() @@ -82,7 +85,7 @@ async def enable_agent_mode(self): async def _determine_commands(self) -> List[str]: ctx = SESSION_CONTEXT.get() - model = ctx.config.model + model = config.ai.model messages = [ ChatCompletionSystemMessageParam( role="system", content=self.agent_command_prompt @@ -104,7 +107,7 @@ async def _determine_commands(self) -> List[str]: response = await ctx.llm_api_handler.call_llm_api(messages, model, False) ctx.cost_tracker.display_last_api_call() except BadRequestError as e: - print(f"[red]Error accessing OpenAI API: {e.message}[/red]") + ctx.stream.send(f"Error accessing OpenAI API: {e.message}", color="red") return [] content = response.choices[0].message.content or "" @@ -126,15 +129,20 @@ async def add_agent_context(self) -> bool: commands = await self._determine_commands() if not commands: return True - print(f"[cyan]The model has chosen these commands to test its changes:[/cyan]") - + ctx.stream.send( + "The model has chosen these commands to test its changes:", color="cyan" + ) for command 
in commands: - print(f"* [yellow]{command}[/yellow]") - - print(f"* [cyan]Run these commands?[/cyan]") + ctx.stream.send("* ", end="") + ctx.stream.send(command, color="light_yellow") + ctx.stream.send("Run these commands?", color="cyan") run_commands = await ask_yes_no(default_yes=True) if not run_commands: - print(f"* [cyan]Enter a new-line separated list of commands to run, or nothing to return control to the user:[/cyan]") + ctx.stream.send( + "Enter a new-line separated list of commands to run, or nothing to" + " return control to the user:", + color="cyan", + ) commands: list[str] = (await collect_user_input()).data.strip().splitlines() if not commands: return True diff --git a/mentat/code_context.py b/mentat/code_context.py index 1cec50be1..82fd46f30 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -3,7 +3,6 @@ import os from pathlib import Path from typing import Dict, Iterable, List, Optional, Set, Union -from rich import print from mentat.code_feature import ( CodeFeature, @@ -32,7 +31,6 @@ from mentat.session_context import SESSION_CONTEXT from mentat.session_stream import SessionStream from mentat.config import config -from mentat.utils import dd class CodeContext: @@ -68,17 +66,18 @@ def display_context(self): prefix = " " stream.send(f"{prefix}Directory: {session_context.cwd}") if self.diff_context and self.diff_context.name: - print(f"{prefix}Diff:[green]{self.diff_context.get_display_context()}[/green]") + stream.send(f"{prefix}Diff:", end=" ") + stream.send(self.diff_context.get_display_context(), color="green") if config.run.auto_context_tokens > 0: - print(f"{prefix}Auto-Context: [green]Enabled[/green]") - print(f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}") + stream.send(f"{prefix}Auto-Context: Enabled") + stream.send(f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}") else: - print(f"{prefix}Auto-Context: [yellow]Disabled[/yellow]") + stream.send(f"{prefix}Auto-Context: Disabled") if self.include_files: - print(f"{prefix}Included files:") - print(f"{prefix + prefix}{session_context.cwd.name}") + stream.send(f"{prefix}Included files:") + stream.send(f"{prefix + prefix}{session_context.cwd.name}") features = [ feature for file_features in self.include_files.values() @@ -92,10 +91,11 @@ def display_context(self): prefix + prefix, ) else: - print(f"{prefix}Included files: [yellow]None[/yellow]") + stream.send(f"{prefix}Included files: ", end="") + stream.send("None", color="yellow") if self.auto_features: - print(f"{prefix}Auto-Included Features:") + stream.send(f"{prefix}Auto-Included Features:") refs = get_consolidated_feature_refs(self.auto_features) print_path_tree( build_path_tree([Path(r) for r in refs], session_context.cwd), @@ -148,8 +148,6 @@ async def get_code_message( "\n".join(include_files_message), model, full_message=False ) - - tokens_used = ( prompt_tokens + meta_tokens + include_files_tokens + config.ai.token_buffer ) @@ -293,19 +291,21 @@ def include( cwd=session_context.cwd, exclude_patterns=abs_exclude_patterns, ) - except PathValidationError as e: - print(f"[red]Path Validation Error:{str(e)}[/red]") + session_context.stream.send(str(e), color="light_red") return set() return self.include_features(code_features) def _exclude_file(self, path: Path) -> Path | None: + session_context = SESSION_CONTEXT.get() if path in self.include_files: del self.include_files[path] return path else: - print(f"[red]Path {path} not in context[/red]") + session_context.stream.send( + f"Path {path} not in context", 
color="light_red" + ) def _exclude_file_interval(self, path: Path) -> Set[Path]: session_context = SESSION_CONTEXT.get() @@ -314,7 +314,9 @@ def _exclude_file_interval(self, path: Path) -> Set[Path]: interval_path, interval_str = split_intervals_from_path(path) if interval_path not in self.include_files: - print(f"[red]Path {interval_path} not in context[/red]") + session_context.stream.send( + f"Path {interval_path} not in context", color="light_red" + ) return excluded_paths intervals = parse_intervals(interval_str) @@ -397,7 +399,7 @@ def exclude(self, path: Path | str) -> Set[Path]: case PathType.GLOB: excluded_paths.update(self._exclude_glob(validated_path)) except PathValidationError as e: - print(f"[red]Path Validation Error: {str(e)}[/red]") + session_context.stream.send(str(e), color="light_red") return excluded_paths diff --git a/mentat/command/commands/search.py b/mentat/command/commands/search.py index 01e1487e8..3d771306b 100644 --- a/mentat/command/commands/search.py +++ b/mentat/command/commands/search.py @@ -1,12 +1,12 @@ from typing import List, Set +from termcolor import colored from typing_extensions import override from mentat.command.command import Command, CommandArgument from mentat.errors import UserError from mentat.session_context import SESSION_CONTEXT from mentat.utils import get_relative_path -from rich import print SEARCH_RESULT_BATCH_SIZE = 10 @@ -34,19 +34,19 @@ def _parse_include_input(user_input: str, max_num: int) -> Set[int] | None: class SearchCommand(Command, command_name="search"): @override async def apply(self, *args: str) -> None: - from mentat.config import config session_context = SESSION_CONTEXT.get() - + stream = session_context.stream code_context = session_context.code_context + config = session_context.config if len(args) == 0: - print("[yellow]No search query specified[/]") + stream.send("No search query specified", color="yellow") return try: query = " ".join(args) results = await code_context.search(query=query) except UserError as e: - print(f"[red]{str(e)}[/]") + stream.send(str(e), color="red") return cumulative_tokens = 0 @@ -54,28 +54,29 @@ async def apply(self, *args: str) -> None: prefix = "\n " file_name = feature.rel_path(session_context.cwd) - file_name = f"[blue bold]{file_name}[/]" + file_name = colored(file_name, "blue", attrs=["bold"]) + file_name += colored(feature.interval_string(), "light_cyan") - tokens = feature.count_tokens(config.ai.model) + tokens = feature.count_tokens(config.model) cumulative_tokens += tokens - tokens_str = f"[yellow] ({tokens} tokens)[/]" + tokens_str = colored(f" ({tokens} tokens)", "yellow") file_name += tokens_str name = [] if feature.name: name = feature.name.split(",") name = [ - f"{'└' if i == len(name) - 1 else '├'}─ [blue]{n}[/]" + f"{'└' if i == len(name) - 1 else '├'}─ {colored(n, 'cyan')}" for i, n in enumerate(name) ] message = f"{str(i).ljust(3)}" + prefix.join([file_name] + name + [""]) - print(message) + stream.send(message) if i > 1 and i % SEARCH_RESULT_BATCH_SIZE == 0: # Required to avoid circular imports, but not ideal. 
from mentat.session_input import collect_user_input - print( + stream.send( "(Y/n) for more results or to exit search mode.\nResults to" ' include in context: (eg: "1 3 4" or "1-4")' ) @@ -89,14 +90,14 @@ async def apply(self, *args: str) -> None: rel_path = get_relative_path( included_path, session_context.cwd ) - print(f"[green]{rel_path} added to context[/]") + stream.send(f"{rel_path} added to context", color="green") else: - print("(Y/n)") + stream.send("(Y/n)") user_input: str = ( await collect_user_input(plain=True) ).data.strip() if user_input.lower() == "n": - print("[bright_blue]Exiting search mode...[/]") + stream.send("Exiting search mode...", color="light_blue") break @override diff --git a/mentat/config.py b/mentat/config.py index 8cdd284c5..21ce6ae17 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -14,9 +14,6 @@ from typing import Tuple from mentat.parsers.parser import Parser from typing import Any, Dict, List, Optional -from rich.console import Console - -console = Console() config_file_name = Path(".mentat_config.yaml") user_config_path = mentat_dir_path / config_file_name diff --git a/mentat/conversation.py b/mentat/conversation.py index 3f8279867..b0c89782c 100644 --- a/mentat/conversation.py +++ b/mentat/conversation.py @@ -14,7 +14,6 @@ ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ) -from rich import print from mentat.errors import MentatError from mentat.llm_api_handler import ( @@ -41,6 +40,7 @@ def __init__(self): async def display_token_count(self): session_context = SESSION_CONTEXT.get() stream = session_context.stream + code_context = session_context.code_context llm_api_handler = session_context.llm_api_handler @@ -50,15 +50,17 @@ async def display_token_count(self): " different model." ) if "gpt-4" not in config.ai.model: - print( - "[yellow]Warning: Mentat has only been tested on GPT-4. You may experience" + stream.send( + "Warning: Mentat has only been tested on GPT-4. You may experience" " issues with quality. This model may not be able to respond in" - " mentat's edit format.[/yellow]" + " mentat's edit format.", + color="yellow", ) if "gpt-3.5" not in config.ai.model: - print( - "[yellow]Warning: Mentat does not know how to calculate costs or context" - " size for this model.[/yellow]" + stream.send( + "Warning: Mentat does not know how to calculate costs or context" + " size for this model.", + color="yellow", ) messages = self.get_messages() @@ -78,9 +80,10 @@ async def display_token_count(self): context_size = get_max_tokens() if not context_size: - print( - f"[red]Context size for {config.ai.model} is not known. Please set" - " the maximum context with `/config maximum_context value`.[/red]" + stream.send( + f"Context size for {config.ai.model} is not known. Please set" + " the maximum context with `/config maximum_context value`.", + color="light_red", ) elif tokens + config.ai.token_buffer > context_size: _plural = len(code_context.include_files) > 1 @@ -91,13 +94,15 @@ async def display_token_count(self): (True, False): "s are close to", (True, True): "s exceed", } - print( - f"[yellow]Included file{message[(_plural, _exceed)]} token limit" - f" ({tokens} / {context_size}). Truncating based on task similarity.[/yellow]" + stream.send( + f"Included file{message[(_plural, _exceed)]} token limit" + f" ({tokens} / {context_size}). 
Truncating based on task similarity.", + color="yellow", ) else: - print( - f"[cyan]Prompt and included files token count: {tokens} / {context_size}[/cyan]" + stream.send( + f"Prompt and included files token count: {tokens} / {context_size}", + color="cyan", ) # The transcript logger logs tuples containing the actual message sent by the user or LLM @@ -127,7 +132,7 @@ def add_user_message(self, message: str, image: Optional[str] = None): self.add_message(ChatCompletionUserMessageParam(role="user", content=content)) def add_model_message( - self, message: str, messages_snapshot: list[ChatCompletionMessageParam] + self, message: str, messages_snapshot: list[ChatCompletionMessageParam] ): """Used for actual model output messages""" self.add_transcript_message( @@ -142,7 +147,7 @@ def add_message(self, message: ChatCompletionMessageParam): self._messages.append(message) def get_messages( - self, include_system_prompt: bool = True + self, include_system_prompt: bool = True ) -> list[ChatCompletionMessageParam]: """Returns the messages in the conversation. The system message may change throughout the conversation so it is important to access the messages through this method. @@ -166,9 +171,9 @@ def clear_messages(self) -> None: self._messages = list[ChatCompletionMessageParam]() async def _stream_model_response( - self, - messages: list[ChatCompletionMessageParam], - loading_multiplier: float = 0.0, + self, + messages: list[ChatCompletionMessageParam], + loading_multiplier: float = 0.0, ) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream @@ -198,15 +203,16 @@ async def _stream_model_response( ) num_prompt_tokens = prompt_tokens(messages, config.ai.model) - print(f"[blue]Total token count: {num_prompt_tokens}[/blue]") + stream.send(f"Total token count: {num_prompt_tokens}", color="cyan") if num_prompt_tokens > TOKEN_COUNT_WARNING: - print( - "[yellow]Warning: LLM performance drops off rapidly at large context sizes. Use" + stream.send( + "Warning: LLM performance drops off rapidly at large context sizes. Use" " /clear to clear context or use /exclude to exclude any unnecessary" " files.", + color="light_yellow", ) - print("[blue]Streaming... use [bold white]control-c[/] to interrupt the model at any point[/]\n") + stream.send("Streaming... use control-c to interrupt the model at any point\n") async with parser.interrupt_catcher(): parsed_llm_response = await parser.stream_and_parse_llm_response( add_newline(response) ) @@ -268,10 +274,11 @@ async def get_model_response(self) -> ParsedLLMResponse: loading_multiplier=0.5 * loading_multiplier, ) except RateLimitError: - print( - "[red]Rate limit error received from OpenAI's servers using model" + stream.send( + "Rate limit error received from OpenAI's servers using model" f' {config.ai.model}.\nUse "/config model " to switch to a' - " different model.[/red]" + " different model.", + color="light_red", ) return ParsedLLMResponse("", "", list[FileEdit]()) finally: @@ -280,6 +287,7 @@ async def get_model_response(self) -> ParsedLLMResponse: return response def remaining_context(self) -> int | None: + ctx = SESSION_CONTEXT.get() return get_max_tokens() - prompt_tokens(self.get_messages(), config.ai.model) def can_add_to_context(self, message: str) -> bool: """ Whether or not the model has enough context remaining to add this message. Will take token buffer into account and uses full_message=True. 
""" + ctx = SESSION_CONTEXT.get() remaining_context = self.remaining_context() return ( - remaining_context is not None - and remaining_context - - count_tokens(message, config.ai.model, full_message=True) - - config.ai.token_buffer - > 0 + remaining_context is not None + and remaining_context + - count_tokens(message, config.ai.model, full_message=True) + - config.ai.token_buffer + > 0 ) async def run_command(self, command: list[str]) -> bool: @@ -303,8 +312,9 @@ async def run_command(self, command: list[str]) -> bool: """ ctx = SESSION_CONTEXT.get() - print(f"[cyan]Running command: [/cyan][yellow]" + " ".join(command) + "[/yellow]") - print(f"[cyan]Command output: [/cyan]") + ctx.stream.send("Running command: ", end="", color="cyan") + ctx.stream.send(" ".join(command), color="yellow") + ctx.stream.send("Command output:", color="cyan") try: process = subprocess.Popen( @@ -322,14 +332,14 @@ async def run_command(self, command: list[str]) -> bool: if not line: break output.append(line) - print(line, end="") + ctx.stream.send(line, end="") # This gives control back to the asyncio event loop so we can actually print what we sent # Unfortunately asyncio.sleep(0) won't work https://stackoverflow.com/a/74505785 # Note: if subprocess doesn't flush, output can't and won't be streamed. await asyncio.sleep(0.01) except FileNotFoundError: output = [f"Invalid command: {' '.join(command)}"] - print(output[0]) + ctx.stream.send(output[0]) output = "".join(output) message = f"Command ran:\n{' '.join(command)}\nCommand output:\n{output}" @@ -337,13 +347,14 @@ async def run_command(self, command: list[str]) -> bool: self.add_message( ChatCompletionSystemMessageParam(role="system", content=message) ) - print( - "[green]Successfully added command output to model context.[/green]" + ctx.stream.send( + "Successfully added command output to model context.", color="green" ) return True else: - print( - "[red]Not enough tokens remaining in model's context to add command output" - " to model context.[/red]" + ctx.stream.send( + "Not enough tokens remaining in model's context to add command output" + " to model context.", + color="light_red", ) - return False + return False \ No newline at end of file diff --git a/mentat/edit_history.py b/mentat/edit_history.py index 4643e210e..d1a1540d7 100644 --- a/mentat/edit_history.py +++ b/mentat/edit_history.py @@ -1,5 +1,7 @@ from typing import Optional +from termcolor import colored + from mentat.errors import HistoryError from mentat.parsers.file_edit import FileEdit from mentat.session_context import SESSION_CONTEXT @@ -22,7 +24,7 @@ def push_edits(self): def undo(self) -> str: if not self.edits: - return "[bright_red]No edits available to undo[/]" + return colored("No edits available to undo", color="light_red") # Make sure to go top down cur_edit = self.edits.pop() @@ -34,14 +36,14 @@ def undo(self) -> str: cur_file_edit.undo() undone_edit.append(cur_file_edit) except HistoryError as e: - errors.append(f"[bright_red]{str(e)}[/]") + errors.append(colored(str(e), color="light_red")) if undone_edit: self.undone_edits.append(undone_edit) return "\n".join(errors) async def redo(self) -> Optional[str]: if not self.undone_edits: - return "[bright_red]No edits available to redo[/]" + return colored("No edits available to redo", color="light_red") session_context = SESSION_CONTEXT.get() code_file_manager = session_context.code_file_manager @@ -54,7 +56,7 @@ async def redo(self) -> Optional[str]: def undo_all(self) -> str: if not self.edits: - return "[bright_red]No edits 
available to undo[/]" + return colored("No edits available to undo", color="light_red") errors = list[str]() while self.edits: diff --git a/mentat/include_files.py b/mentat/include_files.py index 5e9fde82c..fac1048e7 100644 --- a/mentat/include_files.py +++ b/mentat/include_files.py @@ -12,7 +12,6 @@ from mentat.interval import parse_intervals, split_intervals_from_path from mentat.session_context import SESSION_CONTEXT -from rich import print from mentat.utils import is_file_text_encoded diff --git a/mentat/parsers/change_display_helper.py b/mentat/parsers/change_display_helper.py index 9abab5fd1..10db7f44d 100644 --- a/mentat/parsers/change_display_helper.py +++ b/mentat/parsers/change_display_helper.py @@ -7,6 +7,7 @@ from pygments.lexer import Lexer from pygments.lexers import TextLexer, get_lexer_for_filename from pygments.util import ClassNotFound +from termcolor import colored from mentat.session_context import SESSION_CONTEXT from mentat.utils import get_relative_path @@ -110,7 +111,7 @@ def _get_code_block( ): lines = _prefixed_lines(line_number_buffer, code_lines, prefix) if lines: - return "\n".join(f"[{color}]{line}[/{color}]" for line in lines.split("\n")) + return "\n".join(colored(line, color=color) for line in lines.split("\n")) else: return "" @@ -145,14 +146,23 @@ def get_file_name( ): match display_information.file_action_type: case FileActionType.CreateFile: - return f"\n[light_green]{display_information.file_name}*[/light_green]" - + return "\n" + colored( + f"{display_information.file_name}*", color="light_green" + ) case FileActionType.DeleteFile: - return f"\n[bright_red]Deletion: {display_information.file_name}[/bright_red]" + return "\n" + colored( + f"Deletion: {display_information.file_name}", color="light_red" + ) case FileActionType.RenameFile: - return f"\n[yellow]Rename: {display_information.file_name} -> {display_information.new_name}[/yellow]" + return "\n" + colored( + f"Rename: {display_information.file_name} ->" + f" {display_information.new_name}", + color="yellow", + ) case FileActionType.UpdateFile: - return f"\n[bright_blue]{display_information.file_name}[/bright_blue]" + return "\n" + colored( + f"{display_information.file_name}", color="light_blue" + ) def get_added_lines( diff --git a/mentat/parsers/file_edit.py b/mentat/parsers/file_edit.py index 7de1c2515..6ad2ae459 100644 --- a/mentat/parsers/file_edit.py +++ b/mentat/parsers/file_edit.py @@ -3,7 +3,6 @@ from pathlib import Path from typing import Any -from rich import print import attr from mentat.errors import HistoryError, MentatError @@ -42,7 +41,10 @@ def __lt__(self, other: Replacement): async def _ask_user_change( text: str, ) -> bool: - print(f"[bright_blue]{text}[/]") + session_context = SESSION_CONTEXT.get() + stream = session_context.stream + + stream.send(text, color="light_blue") return await ask_yes_no(default_yes=True) @@ -149,11 +151,17 @@ def is_valid(self) -> bool: if self.is_creation: if self.file_path.exists(): - print(f"[bright_yellow]File {display_path} already exists, canceling creation.[/]") + stream.send( + f"File {display_path} already exists, canceling creation.", + color="light_yellow", + ) return False else: if not self.file_path.exists(): - print(f"[bright_yellow]File {display_path} does not exist, canceling all edits to file.[/]") + stream.send( + f"File {display_path} does not exist, canceling all edits to file.", + color="light_yellow", + ) return False file_features_in_context = [ f for f in code_context.auto_features if f.path == self.file_path @@ -163,15 
+171,21 @@ def is_valid(self) -> bool: for r in self.replacements for i in range(r.starting_line + 1, r.ending_line + 1) ): - print(f"[bright_yellow]File {display_path} not in context, canceling all edits to file.[/]") + stream.send( + f"File {display_path} not in context, canceling all edits to file.", + color="light_yellow", + ) return False if self.rename_file_path is not None and self.rename_file_path.exists(): rel_rename_path = None if self.rename_file_path.is_relative_to(session_context.cwd): rel_rename_path = self.rename_file_path.relative_to(session_context.cwd) - print(f"[bright_yellow]File {display_path} being renamed to existing file" - f" {rel_rename_path or self.rename_file_path}, canceling rename.[/]") + stream.send( + f"File {display_path} being renamed to existing file" + f" {rel_rename_path or self.rename_file_path}, canceling rename.", + color="light_yellow", + ) self.rename_file_path = None return True @@ -215,12 +229,15 @@ async def filter_replacements( ) def _print_resolution(self, first: Replacement, second: Replacement): - print("Change overlap detected, auto-merged back to back changes:\n") - print(self.file_path) - print(change_delimiter) + session_context = SESSION_CONTEXT.get() + stream = session_context.stream + + stream.send("Change overlap detected, auto-merged back to back changes:\n") + stream.send(self.file_path) + stream.send(change_delimiter) for line in first.new_lines + second.new_lines: - print(f"[green]+ {line}[/green]") - print(change_delimiter) + stream.send("+ " + line, color="green") + stream.send(change_delimiter) def resolve_conflicts(self): self.replacements.sort(reverse=True) @@ -273,8 +290,8 @@ def undo(self): ctx.code_file_manager.delete_file(self.file_path) self._display_creation(prefix=prefix) - print( - f"[bright_blue]Creation of file {self.file_path} undone.[/bright_blue]" + ctx.stream.send( + f"Creation of file {self.file_path} undone", color="light_blue" ) return @@ -292,8 +309,9 @@ def undo(self): ctx.code_file_manager.rename_file(self.rename_file_path, self.file_path) self._display_rename(prefix=prefix) - print( - f"[bright_blue]Rename of file {self.file_path} to {self.rename_file_path} undone.[/bright_blue]" + ctx.stream.send( + f"Rename of file {self.file_path} to {self.rename_file_path} undone", + color="light_blue", ) if self.is_deletion: @@ -311,8 +329,8 @@ def undo(self): ) self._display_deletion(self.previous_file_lines, prefix=prefix) - print( - f"[bright_blue]Deletion of file {self.file_path} undone.[/bright_blue]" + ctx.stream.send( + f"Deletion of file {self.file_path} undone", color="light_red" ) elif self.replacements: if not self.file_path.exists(): @@ -327,6 +345,6 @@ def undo(self): f.write("\n".join(self.previous_file_lines)) self._display_replacements(self.previous_file_lines, prefix=prefix) - print( - f"[bright_blue]Edits to file {self.file_path} undone.[/bright_blue]" + ctx.stream.send( + f"Edits to file {self.file_path} undone", color="light_blue" ) diff --git a/mentat/parsers/json_parser.py b/mentat/parsers/json_parser.py index d3e352eb4..926a19aaa 100644 --- a/mentat/parsers/json_parser.py +++ b/mentat/parsers/json_parser.py @@ -4,11 +4,11 @@ from json import JSONDecodeError from pathlib import Path from typing import AsyncIterator, Dict -from rich import print from jsonschema import ValidationError, validate from openai.types.chat import ChatCompletionChunk from openai.types.chat.completion_create_params import ResponseFormat +from termcolor import colored from typing_extensions import override from 
mentat.errors import ModelError @@ -100,7 +100,7 @@ async def stream_and_parse_llm_response( self, response: AsyncIterator[ChatCompletionChunk] ) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() - + stream = session_context.stream printer = StreamingPrinter() printer_task = asyncio.create_task(printer.print_lines()) @@ -111,7 +111,10 @@ async def stream_and_parse_llm_response( if self.shutdown.is_set(): printer.shutdown_printer() await printer_task - print("\n\nInterrupted by user. Using the response up to this point.") + stream.send( + colored("") # Reset ANSI codes + + "\n\nInterrupted by user. Using the response up to this point." + ) break for content in chunk_to_lines(chunk): @@ -131,10 +134,12 @@ async def stream_and_parse_llm_response( validate(instance=response_json, schema=output_schema) except JSONDecodeError: # Should never happen with OpenAI's response_format set to json - print("[red]Error processing model response: Invalid JSON[/red]") + stream.send("Error processing model response: Invalid JSON", color="red") return ParsedLLMResponse(message, "", []) except ValidationError: - print("[red]Error processing model response: Invalid format given[/red]") + stream.send( + "Error processing model response: Invalid format given", color="red" + ) return ParsedLLMResponse(message, "", []) file_edits: Dict[Path, FileEdit] = {} diff --git a/mentat/parsers/parser.py b/mentat/parsers/parser.py index 618969a5c..cb8904606 100644 --- a/mentat/parsers/parser.py +++ b/mentat/parsers/parser.py @@ -7,11 +7,11 @@ from contextlib import asynccontextmanager from pathlib import Path from typing import AsyncIterator -from rich import print import attr from openai.types.chat import ChatCompletionChunk from openai.types.chat.completion_create_params import ResponseFormat +from termcolor import colored from mentat.code_file_manager import CodeFileManager from mentat.errors import ModelError @@ -85,6 +85,7 @@ async def stream_and_parse_llm_response( To make a parser that differs from these assumptions, override this method instead of the helper methods """ session_context = SESSION_CONTEXT.get() + stream = session_context.stream code_file_manager = session_context.code_file_manager printer = StreamingPrinter() @@ -114,7 +115,10 @@ async def stream_and_parse_llm_response( printer.shutdown_printer() if printer_task is not None: await printer_task - print("\n\nInterrupted by user. Using the response up to this point.") + stream.send( + colored("") # Reset ANSI codes + + "\n\nInterrupted by user. Using the response up to this point." 
+ ) break for content in chunk_to_lines(chunk): @@ -354,8 +358,8 @@ def _code_line_beginning( """ The beginning of a code line; normally this means printing the + prefix """ - return ( - "[green]+" + " " * (display_information.line_number_buffer - 1) + "[/green]" + return colored( + "+" + " " * (display_information.line_number_buffer - 1), color="green" ) def _code_line_content( @@ -368,7 +372,7 @@ def _code_line_content( """ Part of a code line; normally this means printing in green """ - return f"[green]{content}[/green]" + return colored(content, color="green") # These methods must be overridden if using the default stream and parse function def _could_be_special(self, cur_line: str) -> bool: diff --git a/mentat/parsers/unified_diff_parser.py b/mentat/parsers/unified_diff_parser.py index 6234447bb..b9616943b 100644 --- a/mentat/parsers/unified_diff_parser.py +++ b/mentat/parsers/unified_diff_parser.py @@ -1,6 +1,7 @@ from enum import Enum from pathlib import Path +from termcolor import colored from typing_extensions import override from mentat.code_file_manager import CodeFileManager @@ -52,9 +53,9 @@ def _code_line_content( if cur_line == UnifiedDiffDelimiter.MidChange.value: return change_delimiter + "\n" elif cur_line.startswith("+"): - return f"[green]{content}[/]" + return colored(content, "green") elif cur_line.startswith("-"): - return f"[red]{content}[/]" + return colored(content, "red") else: return highlight_text(display_information, content) @@ -160,7 +161,9 @@ def _add_code_block( and not line.startswith("-") and not line.startswith(" ") ): - return "[red]Error: Invalid diff format given. Discarding this change.[/]" + return colored( + "Error: Invalid diff format given. Discarding this change.", "red" + ) cur_lines.append(line) if cur_lines: changes.append(cur_lines) @@ -183,7 +186,10 @@ def _add_code_block( start_index = matching_index(file_lines, search_lines) if start_index == -1: - return "[red]Error: Original lines not found. 
Discarding this change.", + color="red", + ) # Matching lines checks for matches that are missing whitespace only lines; # this will cause errors with line numbering if we don't add those lines into the change lines diff --git a/mentat/session.py b/mentat/session.py index 768434eea..61eddacb0 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -6,8 +6,6 @@ from pathlib import Path from typing import Any, Coroutine, List, Optional, Set from uuid import uuid4 -from rich import print -from rich.console import Console import attr import sentry_sdk @@ -22,19 +20,18 @@ from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.ctags import ensure_ctags_installed -from mentat.errors import MentatError, SessionExit, UserError, ContextSizeInsufficient +from mentat.errors import ContextSizeInsufficient, MentatError, SessionExit, UserError from mentat.git_handler import get_git_root_for_path from mentat.llm_api_handler import LlmApiHandler, is_test_environment from mentat.logging_config import setup_logging +from mentat.sampler.sampler import Sampler from mentat.sentry import sentry_init from mentat.session_context import SESSION_CONTEXT, SessionContext from mentat.session_input import collect_input_with_commands from mentat.session_stream import SessionStream from mentat.utils import check_version, mentat_dir_path from mentat.vision.vision_manager import VisionManager -from mentat.sampler.sampler import Sampler -console = Console() class Session: """ @@ -53,7 +50,6 @@ def __init__( pr_diff: Optional[str] = None, ): # All errors thrown here need to be caught here - self._errors = [] self.stopped = False if not mentat_dir_path.exists(): @@ -63,6 +59,8 @@ def __init__( self.id = uuid4() self._tasks: Set[asyncio.Task[None]] = set() + self._errors = [] + # Since we can't set the session_context until after all of the singletons are created, # any singletons used in the constructor of another singleton must be passed in git_root = get_git_root_for_path(cwd, raise_error=False) @@ -90,26 +88,34 @@ def __init__( sampler = Sampler() session_context = SessionContext( - cwd=cwd, - stream=stream, - llm_api_handler=llm_api_handler, - cost_tracker=cost_tracker, - code_context=code_context, - code_file_manager=code_file_manager, - conversation=conversation, - vision_manager=vision_manager, - agent_handler=agent_handler, - auto_completer=auto_completer, - sampler=sampler + cwd, + stream, + llm_api_handler, + cost_tracker, + code_context, + code_file_manager, + conversation, + vision_manager, + agent_handler, + auto_completer, + sampler, ) self.ctx = session_context SESSION_CONTEXT.set(session_context) + self.error = None # Functions that require session_context check_version() self.send_errors_to_stream() for path in paths: code_context.include(path, exclude_patterns=exclude_paths) + if ( + code_context.diff_context is not None + and len(code_context.include_files) == 0 + and (diff or pr_diff) + ): + for file in code_context.diff_context.diff_files(): + code_context.include(file) def _create_task(self, coro: Coroutine[None, None, Any]): """Utility method for running a Task in the background""" @@ -139,7 +145,7 @@ async def _main(self): code_context.display_context() await conversation.display_token_count() - print(f"Type 'q' or use Ctrl-C to quit at any time.") + stream.send("Type 'q' or use Ctrl-C to quit at any time.") need_user_request = True while True: try: @@ -148,9 +154,12 @@ async def _main(self): # edits made between user input to be collected together. 
if agent_handler.agent_enabled: code_file_manager.history.push_edits() - print(f"[green]Use /undo to undo all changes from agent mode since last input.[/green]") - - print(f"[blue]What can I do for you?[/blue]") + stream.send( + "Use /undo to undo all changes from agent mode since last" + " input.", + color="green", + ) + stream.send("\nWhat can I do for you?", color="light_blue") message = await collect_input_with_commands() if message.data.strip() == "": continue @@ -176,11 +185,10 @@ async def _main(self): applied_edits = await code_file_manager.write_changes_to_files( file_edits ) - - if applied_edits: - print(f"[blue]Changes applied.[/blue]") - else: - print(f"[blue]No Changes applied.[/blue]") + stream.send( + "Changes applied." if applied_edits else "No changes applied.", + color="light_blue", + ) if agent_handler.agent_enabled: if parsed_llm_response.interrupted: @@ -196,7 +204,7 @@ async def _main(self): need_user_request = True continue except (APITimeoutError, RateLimitError, BadRequestError) as e: - print(f"[red]Error accessing OpenAI API: {e.message}[/red]") + stream.send(f"Error accessing OpenAI API: {e.message}", color="red") break async def listen_for_session_exit(self): @@ -226,24 +234,21 @@ async def run_main(): with sentry_sdk.start_transaction( op="mentat_started", name="Mentat Started" ) as transaction: - #TODO: check if we need this as config should be global now #transaction.set_tag("config", attr.asdict(config)) await self._main() except (SessionExit, CancelledError): pass except (MentatError, UserError) as e: - if is_test_environment(): - console.print_exception(show_locals=True) - print(f"[red]Unhandled Exception: {str(e)}[/red]") + self.stream.send(str(e), color="red") except Exception as e: # All unhandled exceptions end up here error = f"Unhandled Exception: {traceback.format_exc()}" # Helps us handle errors in tests if is_test_environment(): - console.print_exception(show_locals=True) + print(error) self.error = error sentry_sdk.capture_exception(e) - print(f"[red]{str(error)}[/red]") + self.stream.send(error, color="red") finally: await self._stop() sentry_sdk.flush() diff --git a/mentat/session_input.py b/mentat/session_input.py index db0e9aaba..f34c1473b 100644 --- a/mentat/session_input.py +++ b/mentat/session_input.py @@ -2,7 +2,6 @@ import logging import shlex from typing import Any, Coroutine -from rich import print from mentat.command.command import Command from mentat.errors import RemoteKeyboardInterrupt, SessionExit @@ -38,10 +37,12 @@ async def collect_user_input(plain: bool = False) -> StreamMessage: async def ask_yes_no(default_yes: bool) -> bool: + session_context = SESSION_CONTEXT.get() + stream = session_context.stream while True: # TODO: combine this into a single message (include content) - print("(Y/n)" if default_yes else "(y/N)") + stream.send("(Y/n)" if default_yes else "(y/N)") response = await collect_user_input(plain=True) content = response.data if content in ["y", "n", ""]: @@ -50,6 +51,8 @@ async def ask_yes_no(default_yes: bool) -> bool: async def collect_input_with_commands() -> StreamMessage: + session_context = SESSION_CONTEXT.get() + stream = session_context.stream response = await collect_user_input() while isinstance(response.data, str) and response.data.startswith("/"): @@ -59,7 +62,7 @@ async def collect_input_with_commands() -> StreamMessage: command = Command.create_command(response.data[1:].split(" ")[0]) await command.apply(*arguments) except ValueError as e: - print(f"[bright_red]Error processing command arguments: 
{e}[/]") + stream.send(f"Error processing command arguments: {e}", color="light_red") response = await collect_user_input() return response @@ -103,7 +106,7 @@ async def listen_for_interrupt( return wrapped_task.result() else: # Send a newline for terminal clients (remove later) - print("\n") + stream.send("\n") if raise_exception_on_interrupt: raise RemoteKeyboardInterrupt diff --git a/mentat/streaming_printer.py b/mentat/streaming_printer.py index 89a8c918f..d910f1a13 100644 --- a/mentat/streaming_printer.py +++ b/mentat/streaming_printer.py @@ -1,15 +1,15 @@ import asyncio from collections import deque -from rich import print -import re -from mentat.utils import dd, dump +from termcolor import colored + +from mentat.session_context import SESSION_CONTEXT class StreamingPrinter: def __init__(self): self.strings_to_print = deque[str]([]) - self.words_remaining = 0 + self.chars_remaining = 0 self.finishing = False self.shutdown = False @@ -17,33 +17,40 @@ def add_string(self, string: str, end: str = "\n", color: str | None = None): if self.finishing: return - words = string.split(" ") + if len(string) == 0: + return + string += end - for word in words: - if word: - if color is not None: - colored_word = f"[{color}]{word}[/{color}]" - else: - colored_word = word - self.strings_to_print.append(colored_word) - self.words_remaining += 1 + colored_string = colored(string, color) if color is not None else string - self.strings_to_print.append(end) - self.words_remaining += 1 + index = colored_string.index(string) + characters = list(string) + characters[0] = colored_string[:index] + characters[0] + characters[-1] = characters[-1] + colored_string[index + len(string) :] + + self.strings_to_print.extend(characters) + self.chars_remaining += len(characters) def sleep_time(self) -> float: max_finish_time = 1.0 - required_sleep_time = max_finish_time / (self.words_remaining + 1) + required_sleep_time = max_finish_time / (self.chars_remaining + 1) max_sleep = 0.002 if self.finishing else 0.006 min_sleep = 0.002 return max(min(max_sleep, required_sleep_time), min_sleep) async def print_lines(self): + session_context = SESSION_CONTEXT.get() + stream = session_context.stream + while not self.shutdown: if self.strings_to_print: - next_word = self.strings_to_print.popleft() - print(next_word, end=" ", flush=True) - self.words_remaining -= 1 + if len(self.strings_to_print) > 500: + next_string = "".join(self.strings_to_print) + self.strings_to_print = deque[str]([]) + else: + next_string = self.strings_to_print.popleft() + stream.send(next_string, end="", flush=True) + self.chars_remaining -= 1 elif self.finishing: break await asyncio.sleep(self.sleep_time()) diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 6fa38e767..53ce8f206 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -49,7 +49,7 @@ def __init__( exclude_paths: List[str] = [], ignore_paths: List[str] = [], diff: str | None = None, - pr_diff: str | None = None + pr_diff: str | None = None, ): self.cwd = cwd self.paths = [Path(path) for path in paths] @@ -57,7 +57,6 @@ def __init__( self.ignore_paths = [Path(path) for path in ignore_paths] self.diff = diff self.pr_diff = pr_diff - self.config = config self._tasks: Set[asyncio.Task[None]] = set() self._should_exit = Event() @@ -117,7 +116,7 @@ async def _handle_input_requests(self): async def _listen_for_client_exit(self): """When the Session shuts down, it will send the client_exit signal for the client to shutdown.""" await 
self.session.stream.recv(channel="client_exit") - await asyncio.create_task(self._shutdown()) + asyncio.create_task(self._shutdown()) async def _listen_for_should_exit(self): """This listens for a user event signaling shutdown (like SigInt), and tells the session to shutdown.""" @@ -169,7 +168,7 @@ async def _run(self): mentat_completer = MentatCompleter(self.session.stream) self._prompt_session = MentatPromptSession( completer=mentat_completer, - style=Style(self.config.ui.input_style), + style=Style(config.ui.input_style), enable_suspend=True, ) @@ -185,7 +184,7 @@ def _(event: KeyPressEvent): self._plain_session = PromptSession[str]( message=[("class:prompt", ">>> ")], - style=Style(self.config.ui.input_style), + style=Style(config.ui.input_style), completer=None, key_bindings=plain_bindings, enable_suspend=True, @@ -209,6 +208,9 @@ async def _shutdown(self): task.cancel() self._stopped.set() + def run(self): + asyncio.run(self._run()) + # Event handlers for all the buttons. @@ -240,7 +242,7 @@ def start(paths, exclude_paths, ignore_paths, diff, pr_diff, cwd) -> None: pr_diff ) - asyncio.run(terminal_client._run()) + terminal_client.run() if __name__ == "__main__": diff --git a/mentat/terminal/output.py b/mentat/terminal/output.py index b35fd8263..3d770a963 100644 --- a/mentat/terminal/output.py +++ b/mentat/terminal/output.py @@ -4,7 +4,7 @@ from prompt_toolkit.formatted_text import FormattedText from mentat.session_stream import StreamMessage -from rich import print + def _print_stream_message_string( content: Any, From 38a4da22e58a19a6b3e64829248deb91a96dfbb3 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Thu, 28 Dec 2023 07:40:08 -0500 Subject: [PATCH 17/24] Updated configuration file path in Mentat The path for the configuration file in the Mentat project was updated to use the Git root, rather than the application root. This change also resulted in the removal of unnecessary code that previously loaded and merged the configuration. 
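In practice, the new lookup reduces to the sketch below. This is a minimal illustration, not the committed code: the resolve_conf_path helper name is hypothetical, and the fallback to the application root when no git root is found is an assumption, since the hunk that follows passes git_root straight to os.path.join.

    import os
    from pathlib import Path

    from mentat.git_handler import get_git_root_for_path

    APP_ROOT = Path.cwd()

    def resolve_conf_path() -> str:
        # Resolve .mentatconf.yaml against the git root so that running
        # mentat from any subdirectory of a repository finds the same file.
        git_root = get_git_root_for_path(APP_ROOT, raise_error=False)
        # Assumption: fall back to the application root outside a git repo;
        # the patch below does not cover the no-git-root case.
        base = git_root if git_root is not None else APP_ROOT
        return os.path.join(base, '.mentatconf.yaml')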
--- mentat/config.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/mentat/config.py b/mentat/config.py index 21ce6ae17..d66f23986 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -103,7 +103,8 @@ def yaml_to_config(yaml_dict: dict): def init_config(): """Initialize the configuration file if it doesn't exist.""" default_conf_path = os.path.join(MENTAT_ROOT, 'resources', 'conf', '.mentatconf.yaml') - current_conf_path = os.path.join(APP_ROOT, '.mentatconf.yaml') + git_root = get_git_root_for_path(APP_ROOT, raise_error=False) + current_conf_path = os.path.join(git_root, '.mentatconf.yaml') if not os.path.exists(current_conf_path): shutil.copy(default_conf_path, current_conf_path) @@ -151,11 +152,6 @@ def load_settings(config_session_dict = None): git_config = yaml_to_config(yaml_dict) yaml_config = merge_configs(yaml_config, git_config) - if current_conf_path.exists(): - yaml_dict = load_yaml(str(current_conf_path)) - current_path_config = yaml_to_config(yaml_dict) - yaml_config = merge_configs(yaml_config, current_path_config) - if config_session_dict is not None and config_session_dict.get('file_exclude_glob_list') is not None: yaml_config["file_exclude_glob_list"].extend(config_session_dict['file_exclude_glob_list']) From 7dae315c33d1b95c6afd94b4c305ffd087bffbf7 Mon Sep 17 00:00:00 2001 From: Gregory Lifhits Date: Thu, 28 Dec 2023 15:36:02 -0500 Subject: [PATCH 18/24] Update code to fetch configuration from user session The code has been updated to fetch configuration from user_session instead of importing it directly from mentat.config. Several test cases have been altered to reflect this change. The Dumper functions in utils have been enhanced to handle possible exceptions. A utility function has been added in __init__.py to expose user_session as part of the package's public API.
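The recurring pattern in the hunks below is the same store lookup repeated at each call site. The new 18-line mentat/user_session.py is not reproduced in this excerpt, so the following is only a hedged sketch of its likely shape, inferred from the get/set calls visible in the diffs:

    from typing import Any, Dict

    class UserSession:
        # Process-wide key/value store for session-scoped singletons.
        def __init__(self) -> None:
            self._store: Dict[str, Any] = {}

        def get(self, key: str, default: Any = None) -> Any:
            # The default parameter is an assumption; only one-argument
            # get() calls appear in the hunks below.
            return self._store.get(key, default)

        def set(self, key: str, value: Any) -> None:
            self._store[key] = value

    user_session = UserSession()

Callers then fetch the configuration at call time rather than at import time, so a later update_config() is picked up without re-importing:

    import mentat

    config = mentat.user_session.get("config")
    model = config.ai.model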
--- mentat/__init__.py | 10 + mentat/agent_handler.py | 16 +- mentat/code_context.py | 7 +- mentat/code_feature.py | 5 +- mentat/command/commands/config.py | 22 ++- mentat/command/commands/screenshot.py | 7 +- mentat/command/commands/search.py | 5 +- mentat/config.py | 185 ++++++++++++------ mentat/conversation.py | 14 +- mentat/embeddings.py | 4 +- mentat/feature_filters/default_filter.py | 8 +- mentat/feature_filters/llm_feature_filter.py | 17 +- mentat/llm_api_handler.py | 12 +- mentat/parsers/block_parser.py | 5 +- mentat/parsers/json_parser.py | 5 +- mentat/parsers/replacement_parser.py | 5 +- mentat/parsers/unified_diff_parser.py | 5 +- mentat/python_client/client.py | 6 +- mentat/resources/conf/.mentatconf.yaml | 6 +- mentat/sampler/sampler.py | 27 ++- mentat/session.py | 12 +- mentat/terminal/__init__.py | 3 + mentat/terminal/client.py | 66 ++++--- mentat/user_session.py | 18 ++ mentat/utils.py | 26 ++- poetry.lock | 88 ++------- pyproject.toml | 3 +- tests/code_context_test.py | 28 ++- tests/config_test.py | 145 +++++++------- tests/conftest.py | 18 +- tests/parser_tests/block_format_error_test.py | 4 +- tests/parser_tests/block_format_test.py | 4 +- .../replacement_format_error_test.py | 4 +- tests/parser_tests/replacement_format_test.py | 4 +- .../unified_diff_format_error_test.py | 4 +- .../parser_tests/unified_diff_format_test.py | 8 +- 36 files changed, 449 insertions(+), 357 deletions(-) create mode 100644 mentat/user_session.py diff --git a/mentat/__init__.py b/mentat/__init__.py index 1679792cb..c0dcc3e3a 100644 --- a/mentat/__init__.py +++ b/mentat/__init__.py @@ -1,2 +1,12 @@ +from mentat.user_session import user_session + +__all__ = [ + "user_session", +] + + +def __dir__(): + return __all__ + # Make sure to bump this on Release x.y.z PR's! 
__version__ = "1.0.7" diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index 891c13b22..05f19fcf5 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -9,24 +9,26 @@ ChatCompletionSystemMessageParam, ) +import mentat from mentat.llm_api_handler import prompt_tokens from mentat.prompts.prompts import read_prompt from mentat.session_context import SESSION_CONTEXT from mentat.session_input import ask_yes_no, collect_user_input from mentat.transcripts import ModelMessage -from mentat.config import config - -agent_file_selection_prompt_path = config.ai.prompts.get("agent_file_selection_prompt") -agent_command_prompt_path = config.ai.prompts.get("agent_command_selection_prompt") class AgentHandler: + + config = mentat.user_session.get("config") + agent_file_selection_prompt_path = config.ai.prompts.get("agent_file_selection_prompt", Path("text/agent_file_selection_prompt.txt")) + agent_command_prompt_path = config.ai.prompts.get("agent_command_selection_prompt", Path("text/agent_command_selection_prompt.txt")) + def __init__(self): self._agent_enabled = False self.agent_file_message = "" - self.agent_file_selection_prompt = read_prompt(agent_file_selection_prompt_path) - self.agent_command_prompt = read_prompt(agent_command_prompt_path) + self.agent_file_selection_prompt = read_prompt(self.agent_file_selection_prompt_path) + self.agent_command_prompt = read_prompt(self.agent_command_prompt_path) # Make this property readonly because we have to set things when we enable agent mode @property @@ -54,6 +56,7 @@ async def enable_agent_mode(self): ), ), ] + config = mentat.user_session.get("config") model = config.ai.model response = await ctx.llm_api_handler.call_llm_api(messages, model, False) content = response.choices[0].message.content or "" @@ -85,6 +88,7 @@ async def enable_agent_mode(self): async def _determine_commands(self) -> List[str]: ctx = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") model = config.ai.model messages = [ ChatCompletionSystemMessageParam( diff --git a/mentat/code_context.py b/mentat/code_context.py index 82fd46f30..80c6ece96 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -4,6 +4,7 @@ from pathlib import Path from typing import Dict, Iterable, List, Optional, Set, Union +import mentat from mentat.code_feature import ( CodeFeature, get_code_message_from_features, @@ -30,7 +31,7 @@ from mentat.llm_api_handler import count_tokens, get_max_tokens, is_context_sufficient from mentat.session_context import SESSION_CONTEXT from mentat.session_stream import SessionStream -from mentat.config import config +from mentat.utils import dd class CodeContext: @@ -61,6 +62,7 @@ def display_context(self): """Display the baseline context: included files and auto-context settings""" session_context = SESSION_CONTEXT.get() stream = session_context.stream + config = mentat.user_session.get("config") stream.send("Code Context:", color="blue") prefix = " " @@ -120,6 +122,7 @@ async def get_code_message( 'prompt_tokens' argument is the total number of tokens used by the prompt before the code message, used to ensure that the code message won't overflow the model's context size """ + config = mentat.user_session.get("config") model = config.ai.model # Setup code message metadata @@ -185,6 +188,7 @@ def get_all_features( Retrieves every CodeFeature under the cwd. 
If files_only is True the features won't be split into intervals """ session_context = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") abs_exclude_patterns: Set[Path] = set() for pattern in self.ignore_patterns.union( @@ -268,6 +272,7 @@ def include( A set of paths that have been successfully included in the context """ session_context = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") path = Path(path) diff --git a/mentat/code_feature.py b/mentat/code_feature.py index 74f8819ab..16c4a2d24 100644 --- a/mentat/code_feature.py +++ b/mentat/code_feature.py @@ -8,6 +8,7 @@ import attr +import mentat from mentat.ctags import get_ctag_lines_and_names from mentat.diff_context import annotate_file_message, parse_diff from mentat.errors import MentatError @@ -16,7 +17,7 @@ from mentat.llm_api_handler import count_tokens from mentat.session_context import SESSION_CONTEXT from mentat.utils import get_relative_path -from mentat.config import config + MIN_INTERVAL_LINES = 10 @@ -131,6 +132,8 @@ def get_code_message(self, standalone: bool = True) -> list[str]: """ session_context = SESSION_CONTEXT.get() code_file_manager = session_context.code_file_manager + + config = mentat.user_session.get("config") parser = config.parser.parser code_context = session_context.code_context diff --git a/mentat/command/commands/config.py b/mentat/command/commands/config.py index b3871bb52..dae447285 100644 --- a/mentat/command/commands/config.py +++ b/mentat/command/commands/config.py @@ -10,17 +10,20 @@ class ConfigCommand(Command, command_name="config"): @override async def apply(self, *args: str) -> None: + from mentat.config import YamlConfig, update_config session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config + + yaml_config = YamlConfig() + if len(args) == 0: stream.send("No config option specified", color="yellow") else: setting = args[0] - if hasattr(config, setting): + if hasattr(yaml_config, setting): if len(args) == 1: - value = getattr(config, setting) - description = attr.fields_dict(type(config))[setting].metadata.get( + value = getattr(yaml_config, setting) + description = attr.fields_dict(type(yaml_config))[setting].metadata.get( "description" ) stream.send(f"{setting}: {value}") @@ -28,7 +31,7 @@ async def apply(self, *args: str) -> None: stream.send(f"Description: {description}") elif len(args) == 2: value = args[1] - if attr.fields_dict(type(config))[setting].metadata.get( + if attr.fields_dict(type(yaml_config))[setting].metadata.get( "no_midsession_change" ): stream.send( @@ -38,7 +41,7 @@ async def apply(self, *args: str) -> None: ) return try: - setattr(config, setting, value) + update_config({setting: value}) stream.send(f"{setting} set to {value}", color="green") except (TypeError, ValueError): stream.send( @@ -62,14 +65,13 @@ def arguments(cls) -> List[CommandArgument]: def argument_autocompletions( cls, arguments: list[str], argument_position: int ) -> list[str]: - # Dodge circular imports - from mentat.config import Config + from mentat.config import YamlConfig if argument_position == 0: - return Config.get_fields() + return YamlConfig.get_fields() elif argument_position == 1: setting = arguments[0] - fields = attr.fields_dict(Config) + fields = attr.fields_dict(YamlConfig) if setting in fields: return fields[setting].metadata.get("auto_completions", []) else: diff --git a/mentat/command/commands/screenshot.py b/mentat/command/commands/screenshot.py index d05508dda..211d1ea1d 100644 --- 
a/mentat/command/commands/screenshot.py +++ b/mentat/command/commands/screenshot.py @@ -11,12 +11,13 @@ class ScreenshotCommand(Command, command_name="screenshot"): @override async def apply(self, *args: str) -> None: + from mentat.config import config, update_config + session_context = SESSION_CONTEXT.get() vision_manager = session_context.vision_manager stream = session_context.stream - config = session_context.config conversation = session_context.conversation - model = config.model + model = config.ai.model if "gpt" in model: if "vision" not in model: @@ -25,7 +26,7 @@ async def apply(self, *args: str) -> None: " gpt-4-vision-preview", color="yellow", ) - config.model = "gpt-4-vision-preview" + update_config({"model" : "gpt-4-vision-preview"}) else: stream.send( "Can't determine if this model supports vision. Attempting anyway.", diff --git a/mentat/command/commands/search.py b/mentat/command/commands/search.py index 3d771306b..c9a961eab 100644 --- a/mentat/command/commands/search.py +++ b/mentat/command/commands/search.py @@ -34,10 +34,11 @@ def _parse_include_input(user_input: str, max_num: int) -> Set[int] | None: class SearchCommand(Command, command_name="search"): @override async def apply(self, *args: str) -> None: + from mentat.config import config + session_context = SESSION_CONTEXT.get() stream = session_context.stream code_context = session_context.code_context - config = session_context.config if len(args) == 0: stream.send("No search query specified", color="yellow") @@ -57,7 +58,7 @@ async def apply(self, *args: str) -> None: file_name = colored(file_name, "blue", attrs=["bold"]) file_name += colored(feature.interval_string(), "light_cyan") - tokens = feature.count_tokens(config.model) + tokens = feature.count_tokens(config.ai.model) cumulative_tokens += tokens tokens_str = colored(f" ({tokens} tokens)", "yellow") file_name += tokens_str diff --git a/mentat/config.py b/mentat/config.py index d66f23986..3f1dbc7d5 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -4,6 +4,9 @@ from pathlib import Path import yaml import shutil +import attr + +from mentat import user_session from mentat.git_handler import get_git_root_for_path from mentat.parsers.parser_map import parser_map @@ -11,8 +14,7 @@ from mentat.utils import mentat_dir_path, dd from dataclasses import dataclass, field from dataclasses_json import DataClassJsonMixin -from typing import Tuple -from mentat.parsers.parser import Parser +from typing import Union from typing import Any, Dict, List, Optional config_file_name = Path(".mentat_config.yaml") @@ -21,6 +23,7 @@ APP_ROOT = Path.cwd() MENTAT_ROOT = Path(__file__).parent USER_MENTAT_ROOT = Path.home() / ".mentat" +GIT_ROOT = get_git_root_for_path(APP_ROOT, raise_error=False) def int_or_none(s: str | None) -> int | None: if s is not None: @@ -44,7 +47,17 @@ class AIModelSettings(DataClassJsonMixin): model: str = "gpt-4-1106-preview" feature_selection_model: str = "gpt-4-1106-preview" embedding_model: str = "text-embedding-ada-002" - prompts: Dict[str, Path] = None + prompts: Dict[str, Path] = field( + default_factory=lambda: { + "agent_file_selection_prompt": Path("text/agent_file_selection_prompt.txt"), + "agent_command_selection_prompt": Path("text/agent_command_selection_prompt.txt"), + "block_parser_prompt": Path("text/block_parser_prompt.txt"), + "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), + "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), + "unified_diff_parser_prompt": 
Path("text/unified_diff_parser_prompt.txt"), + "json_parser_prompt": Path("text/json_parser_prompt.txt") + } + ) temperature: float = 0.2 maximum_context: Optional[int] = None @@ -53,61 +66,99 @@ class AIModelSettings(DataClassJsonMixin): @dataclass() class UISettings(DataClassJsonMixin): - input_style: List[Tuple[str, str]] = field( - default_factory=lambda: [ - ["", "#9835bd"], - ["prompt", "#ffffff bold"], - ["continuation", "#ffffff bold"], - ] + input_style: Dict[str, str] = field( + default_factory=lambda: { + "": "#9835bd", + "prompt": "#ffffff bold", + "continuation": "#ffffff bold", + } ) @dataclass() class ParserSettings: # The type of parser that should be ued - parser: Parser = BlockParser(), + parser: Any = BlockParser(), parser_type: str = "block" +@dataclass +@attr.s(auto_attribs=True) +class YamlConfig: + file_exclude_glob_list: List[str] = field(default_factory=lambda:[]) + model: str = "gpt-4-1106-preview" + temperature: float = 0.2 + prompt_type: str = "text" + maximum_context: int = 16000 + auto_context_tokens: int = 0 + format: str = "block" + input_style: Dict[str, str] = field( + default_factory=lambda: { + "": "#9835bd", + "prompt": "#ffffff bold", + "continuation": "#ffffff bold", + } + ) + + def __getitem__(self, item: str) -> Any: + return self.__dict__[item] + + @classmethod + def get_fields(cls): + return list(cls.__annotations__.keys()) + @dataclass() class MentatConfig: # Directory where the mentat is running - root = APP_ROOT + root: Path = field(default_factory=lambda: APP_ROOT), + user_config_path: Path = field(default_factory=lambda: user_config_path) - run: RunSettings - ai: AIModelSettings - ui: UISettings - parser: ParserSettings + run: RunSettings = field(default_factory=RunSettings) + ai: AIModelSettings = field(default_factory=AIModelSettings) + ui: UISettings = field(default_factory=UISettings) + parser: ParserSettings = field(default_factory=ParserSettings) -def load_yaml(path: str) -> dict: +def load_yaml(path: str) -> dict[str, Any | None]: """Load the data from the YAML file.""" with open(path, 'r') as file: return yaml.safe_load(file) -def merge_configs(original: dict[str, Any | None], new: dict[str, Any | None]) -> dict[str, Any | None]: +def merge_configs(original: dict[str, Optional[Any]], new: dict[str, Optional[Any]]) -> dict[str, Optional[Any]]: """Merge two dictionaries, with the second one overwriting the values in the first one.""" original.update(new) # Update the original dict with the new one return original # Return the merged dict -def yaml_to_config(yaml_dict: dict): +def yaml_to_config(yaml_dict: dict[str, Any]) -> dict[str, Any | None]: """gets the allowed config settings from a YAML""" - return { - "model": yaml_dict.get("model"), + config = { + "model": yaml_dict.get("model", "gpt-3"), "prompt_type": yaml_dict.get("prompt_type", "text"), - "maximum_context": yaml_dict.get("maximum_context"), - "file_exclude_glob_list": yaml_dict.get("file_exclude_glob_list", []), - "input_style": yaml_dict.get("input_style"), - "format": yaml_dict.get("format") + "maximum_context": yaml_dict.get("maximum_context", 2048), + "input_style": yaml_dict.get("input_style", + [["", "#000000"], + ["prompt", "#000000 bold"], + ["continuation", "#000000 bold"]]), + "format": yaml_dict.get('format', 'block'), + "sampler_repo": yaml_dict.get('sampler', {}).get('repo', None), + "sampler_merge_base_target": yaml_dict.get('sampler', {}).get('merge_base_target', None) } -def init_config(): + if yaml_dict.get("file_exclude_glob_list") is None: + 
config["file_exclude_glob_list"] = [] + else: + config["file_exclude_glob_list"] = yaml_dict["file_exclude_glob_list"] + + return config + +def init_config() -> None: """Initialize the configuration file if it doesn't exist.""" - default_conf_path = os.path.join(MENTAT_ROOT, 'resources', 'conf', '.mentatconf.yaml') git_root = get_git_root_for_path(APP_ROOT, raise_error=False) - current_conf_path = os.path.join(git_root, '.mentatconf.yaml') + if git_root is not None: + default_conf_path = os.path.join(MENTAT_ROOT, 'resources', 'conf', '.mentatconf.yaml') + current_conf_path = os.path.join(git_root, '.mentatconf.yaml') - if not os.path.exists(current_conf_path): - shutil.copy(default_conf_path, current_conf_path) + if not os.path.exists(current_conf_path): + shutil.copy(default_conf_path, current_conf_path) def load_prompts(prompt_type: str): @@ -120,6 +171,7 @@ def load_prompts(prompt_type: str): "feature_selection_prompt" : Path("markdown/feature_selection_prompt.md"), "replacement_parser_prompt" : Path("markdown/replacement_parser_prompt.md"), "unified_diff_parser_prompt" : Path("markdown/unified_diff_parser_prompt.md"), + "json_parser_prompt" : Path("markdown/json_parser_prompt.md"), } return { @@ -129,82 +181,85 @@ def load_prompts(prompt_type: str): "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), "unified_diff_parser_prompt": Path("text/unified_diff_parser_prompt.txt"), + "json_parser_prompt": Path("text/json_parser_prompt.txt"), } -def load_settings(config_session_dict = None): +def load_settings(config_session_dict: Optional[dict[str, Any | None]] = None): """Load the configuration from the `.mentatconf.yaml` file.""" - current_conf_path = APP_ROOT / '.mentatconf.yaml' user_conf_path = USER_MENTAT_ROOT / '.mentatconf.yaml' git_root = get_git_root_for_path(APP_ROOT, raise_error=False) - yaml_config = {} + yaml_config = YamlConfig() if user_conf_path.exists(): yaml_dict = load_yaml(str(user_conf_path)) user_config = yaml_to_config(yaml_dict) - yaml_config = merge_configs(yaml_config, user_config) + yaml_config.__dict__.update(user_config) if git_root is not None: git_conf_path = Path(git_root) / '.mentatconf.yaml' if git_conf_path.exists(): yaml_dict = load_yaml(str(git_conf_path)) git_config = yaml_to_config(yaml_dict) - yaml_config = merge_configs(yaml_config, git_config) + yaml_config.__dict__.update(git_config) + + + + if config_session_dict is not None: + if 'file_exclude_glob_list' in config_session_dict and config_session_dict['file_exclude_glob_list'] is not None: + yaml_config.file_exclude_glob_list.extend(config_session_dict['file_exclude_glob_list']) + + if 'model' in config_session_dict and config_session_dict['model'] is not None: + yaml_config.model = str(config_session_dict['model']) + + if 'temperature' in config_session_dict and config_session_dict['temperature'] is not None: + yaml_config.temperature = str(config_session_dict['temperature']) - if config_session_dict is not None and config_session_dict.get('file_exclude_glob_list') is not None: - yaml_config["file_exclude_glob_list"].extend(config_session_dict['file_exclude_glob_list']) + if 'maximum_context' in config_session_dict and config_session_dict['maximum_context'] is not None: + yaml_config.maximum_context = str(config_session_dict['maximum_context']) - file_exclude_glob_list = yaml_config.get("file_exclude_glob_list", []) + file_exclude_glob_list: List[str] = yaml_config['file_exclude_glob_list'] #always ignore 
.mentatconf file_exclude_glob_list.append(".mentatconf.yaml") run_settings = RunSettings( - file_exclude_glob_list=[Path(p) for p in file_exclude_glob_list] + file_exclude_glob_list=[Path(p) for p in file_exclude_glob_list], # pyright: ignore[reportUnknownVariableType] + auto_context_tokens=yaml_config.auto_context_tokens ) ui_settings = UISettings( - input_style=yaml_config.get("input_style", []) + input_style=yaml_config.input_style or [] # pyright: ignore[reportGeneralTypeIssues] ) ai_model_settings = AIModelSettings( - model=yaml_config.get("model", "gpt-4-1106-preview"), - prompts=load_prompts(yaml_config.get("prompt_type", "text")), - feature_selection_model=yaml_config.get("model", "gpt-4-1106-preview"), - maximum_context=yaml_config.get("maximum_context", 16000) + model=yaml_config.model, + temperature=yaml_config.temperature, + prompts=load_prompts(yaml_config.prompt_type), + feature_selection_model=yaml_config.model, + maximum_context=yaml_config.maximum_context ) - parser_type = yaml_config.get("format", "block") + parser_type = yaml_config.format parser_settings = ParserSettings( parser_type=parser_type, parser=parser_map[parser_type] ) - return { - "run": run_settings, - "ai": ai_model_settings, - "ui": ui_settings, - "parser": parser_settings, - } + user_session.set("config", MentatConfig( + run=run_settings, + ai=ai_model_settings, + ui=ui_settings, + parser=parser_settings + )) -def update_config(session_config): +def update_config(session_config: Dict[str, Union[List[str], None, str, int, float]]) -> None: """Reload the configuration using the provided keyword arguments.""" - global config - if config is None: - return + load_settings(session_config) - settings = load_settings(session_config) - config = MentatConfig(**settings) - -def load_config() -> MentatConfig: +def load_config() -> None: init_config() - settings = load_settings() - config = MentatConfig(**settings) - - return config - - -config = load_config() + load_settings() diff --git a/mentat/conversation.py b/mentat/conversation.py index b0c89782c..a336314ce 100644 --- a/mentat/conversation.py +++ b/mentat/conversation.py @@ -15,6 +15,7 @@ ChatCompletionUserMessageParam, ) +import mentat from mentat.errors import MentatError from mentat.llm_api_handler import ( TOKEN_COUNT_WARNING, @@ -27,7 +28,6 @@ from mentat.session_context import SESSION_CONTEXT from mentat.transcripts import ModelMessage, TranscriptMessage, UserMessage from mentat.utils import add_newline -from mentat.config import config class Conversation: @@ -40,6 +40,7 @@ def __init__(self): async def display_token_count(self): session_context = SESSION_CONTEXT.get() stream = session_context.stream + config = mentat.user_session.get("config") code_context = session_context.code_context llm_api_handler = session_context.llm_api_handler @@ -152,6 +153,7 @@ def get_messages( """Returns the messages in the conversation. The system message may change throughout the conversation so it is important to access the messages through this method. 
""" + config = mentat.user_session.get("config") if config.ai.no_parser_prompt or not include_system_prompt: return self._messages.copy() @@ -177,6 +179,7 @@ async def _stream_model_response( ) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream + config = mentat.user_session.get("config") parser = config.parser.parser llm_api_handler = session_context.llm_api_handler @@ -242,6 +245,7 @@ async def _stream_model_response( async def get_model_response(self) -> ParsedLLMResponse: session_context = SESSION_CONTEXT.get() stream = session_context.stream + config = mentat.user_session.get("config") code_context = session_context.code_context @@ -249,7 +253,7 @@ async def get_model_response(self) -> ParsedLLMResponse: # Get current code message loading_multiplier = 1.0 if config.run.auto_context_tokens > 0 else 0.0 - prompt = messages_snapshot[-1]["content"] + prompt = messages_snapshot[-1]["content"] # pyright: ignore[reportTypedDictNotRequiredAccess] if isinstance(prompt, list): text_prompts = [ p.get("text", "") for p in prompt if p.get("type") == "text" @@ -287,7 +291,7 @@ async def get_model_response(self) -> ParsedLLMResponse: return response def remaining_context(self) -> int | None: - ctx = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") return get_max_tokens() - prompt_tokens(self.get_messages(), config.ai.model) def can_add_to_context(self, message: str) -> bool: @@ -295,7 +299,7 @@ def can_add_to_context(self, message: str) -> bool: Whether or not the model has enough context remaining to add this message. Will take token buffer into account and uses full_message=True. """ - ctx = SESSION_CONTEXT.get() + config = mentat.user_session.get("config") remaining_context = self.remaining_context() return ( @@ -357,4 +361,4 @@ async def run_command(self, command: list[str]) -> bool: " to model context.", color="light_red", ) - return False \ No newline at end of file + return False diff --git a/mentat/embeddings.py b/mentat/embeddings.py index 5c2b46d88..2933405d6 100644 --- a/mentat/embeddings.py +++ b/mentat/embeddings.py @@ -6,6 +6,7 @@ import numpy as np +import mentat from mentat.code_feature import CodeFeature, count_feature_tokens from mentat.errors import MentatError from mentat.llm_api_handler import ( @@ -84,7 +85,8 @@ async def get_feature_similarity_scores( session_context = SESSION_CONTEXT.get() stream = session_context.stream cost_tracker = session_context.cost_tracker - embedding_model = session_context.config.embedding_model + config = mentat.user_session.get("config") + embedding_model = config.ai.embedding_model llm_api_handler = session_context.llm_api_handler max_model_tokens = model_context_size(embedding_model) diff --git a/mentat/feature_filters/default_filter.py b/mentat/feature_filters/default_filter.py index e2d4b74a2..438059157 100644 --- a/mentat/feature_filters/default_filter.py +++ b/mentat/feature_filters/default_filter.py @@ -1,5 +1,6 @@ from typing import Optional +import mentat from mentat.code_feature import CodeFeature from mentat.errors import ContextSizeInsufficient, ModelError from mentat.feature_filters.embedding_similarity_filter import EmbeddingSimilarityFilter @@ -27,7 +28,8 @@ def __init__( async def filter(self, features: list[CodeFeature]) -> list[CodeFeature]: ctx = SESSION_CONTEXT.get() - if ctx.config.auto_context_tokens > 0 and self.user_prompt != "": + config = mentat.user_session.get("config") + if config.run.auto_context_tokens > 0 and self.user_prompt != "": features = await 
EmbeddingSimilarityFilter( self.user_prompt, (0.5 if self.use_llm else 1) * self.loading_multiplier ).filter(features) @@ -46,10 +48,10 @@ async def filter(self, features: list[CodeFeature]) -> list[CodeFeature]: " instead." ) features = await TruncateFilter( - self.max_tokens, ctx.config.model + self.max_tokens, config.ai.model ).filter(features) else: - features = await TruncateFilter(self.max_tokens, ctx.config.model).filter( + features = await TruncateFilter(self.max_tokens, config.ai.model).filter( features ) diff --git a/mentat/feature_filters/llm_feature_filter.py b/mentat/feature_filters/llm_feature_filter.py index 547b93ae1..ca8db2f66 100644 --- a/mentat/feature_filters/llm_feature_filter.py +++ b/mentat/feature_filters/llm_feature_filter.py @@ -9,11 +9,11 @@ ChatCompletionSystemMessageParam, ) +import mentat from mentat.code_feature import ( CodeFeature, get_code_message_from_features, ) -from mentat.config import config from mentat.errors import ModelError, UserError from mentat.feature_filters.feature_filter import FeatureFilter from mentat.feature_filters.truncate_filter import TruncateFilter @@ -24,7 +24,8 @@ class LLMFeatureFilter(FeatureFilter): - feature_selection_prompt_path = config.ai.prompts.get("feature_selection_prompt") + config = mentat.user_session.get("config") + feature_selection_prompt_path = config.ai.prompts.get("feature_selection_prompt", Path("text/feature_selection_prompt.txt")) def __init__( self, @@ -44,21 +45,21 @@ async def filter( ) -> list[CodeFeature]: session_context = SESSION_CONTEXT.get() stream = session_context.stream - config = session_context.config + cost_tracker = session_context.cost_tracker llm_api_handler = session_context.llm_api_handler # Preselect as many features as fit in the context window - model = config.feature_selection_model + model = config.ai.feature_selection_model context_size = model_context_size(model) if context_size is None: raise UserError( "Unknown context size for feature selection model: " - f"{config.feature_selection_model}" + f"{config.ai.feature_selection_model}" ) system_prompt = read_prompt(self.feature_selection_prompt_path) system_prompt_tokens = count_tokens( - system_prompt, config.feature_selection_model, full_message=True + system_prompt, config.ai.feature_selection_model, full_message=True ) user_prompt_tokens = count_tokens(self.user_prompt, model, full_message=True) expected_edits_tokens = ( @@ -71,7 +72,7 @@ async def filter( - system_prompt_tokens - user_prompt_tokens - expected_edits_tokens - - config.token_buffer + - config.ai.token_buffer ) truncate_filter = TruncateFilter(preselect_max_tokens, model) preselected_features = await truncate_filter.filter(features) @@ -171,5 +172,5 @@ async def filter( named_features.add(parsed_feature) # Greedy again to enforce max_tokens - truncate_filter = TruncateFilter(self.max_tokens, config.model) + truncate_filter = TruncateFilter(self.max_tokens, config.ai.model) return await truncate_filter.filter(named_features) diff --git a/mentat/llm_api_handler.py b/mentat/llm_api_handler.py index b50ecaf5d..30bfa0e3a 100644 --- a/mentat/llm_api_handler.py +++ b/mentat/llm_api_handler.py @@ -38,9 +38,10 @@ from openai.types.chat.completion_create_params import ResponseFormat from PIL import Image +import mentat from mentat.errors import ContextSizeInsufficient, MentatError, UserError from mentat.session_context import SESSION_CONTEXT -from mentat.utils import mentat_dir_path +from mentat.utils import mentat_dir_path, dd TOKEN_COUNT_WARNING = 32000 @@ -189,15 +190,16 
@@ def model_price_per_1000_tokens(model: str) -> Optional[tuple[float, float]]: def get_max_tokens() -> int: - from mentat.config import config + config = mentat.user_session.get("config") session_context = SESSION_CONTEXT.get() stream = session_context.stream context_size = model_context_size(config.ai.model) maximum_context = config.ai.maximum_context + if context_size is not None and maximum_context is not None: - return min(context_size, maximum_context) + return min(int(context_size), int(maximum_context)) elif context_size is not None: return context_size elif maximum_context is not None: @@ -212,7 +214,7 @@ def get_max_tokens() -> int: def is_context_sufficient(tokens: int) -> bool: - from mentat.config import config + config = mentat.user_session.get("config") ctx = SESSION_CONTEXT.get() max_tokens = get_max_tokens() @@ -285,7 +287,7 @@ async def call_llm_api( stream: bool, response_format: ResponseFormat = ResponseFormat(type="text"), ) -> ChatCompletion | AsyncIterator[ChatCompletionChunk]: - from mentat.config import config + config = mentat.user_session.get("config") session_context = SESSION_CONTEXT.get() cost_tracker = session_context.cost_tracker diff --git a/mentat/parsers/block_parser.py b/mentat/parsers/block_parser.py index 2c068fc98..f2cdaac3f 100644 --- a/mentat/parsers/block_parser.py +++ b/mentat/parsers/block_parser.py @@ -6,6 +6,7 @@ from typing_extensions import override +import mentat from mentat.code_file_manager import CodeFileManager from mentat.errors import ModelError from mentat.parsers.change_display_helper import DisplayInformation, FileActionType @@ -69,8 +70,8 @@ def __init__(self, json_data: dict[str, Any]): class BlockParser(Parser): @override def get_system_prompt(self) -> str: - from mentat.config import config - block_parser_prompt_filename = config.ai.prompts.get("block_parser_prompt") + config = mentat.user_session.get("config") + block_parser_prompt_filename = config.ai.prompts.get("block_parser_prompt", Path("text/block_parser_prompt.txt")) return read_prompt(block_parser_prompt_filename) @override diff --git a/mentat/parsers/json_parser.py b/mentat/parsers/json_parser.py index 926a19aaa..08ce1c2ad 100644 --- a/mentat/parsers/json_parser.py +++ b/mentat/parsers/json_parser.py @@ -11,6 +11,7 @@ from termcolor import colored from typing_extensions import override +import mentat from mentat.errors import ModelError from mentat.llm_api_handler import chunk_to_lines from mentat.parsers.file_edit import FileEdit, Replacement @@ -83,8 +84,8 @@ class JsonParser(Parser): @override def get_system_prompt(self) -> str: - from mentat.config import config - json_parser_prompt_filename = config.ai.prompts.get("json_parser_prompt") + config = mentat.user_session.get("config") + json_parser_prompt_filename = config.ai.prompts.get("json_parser_prompt", Path("text/json_parser_prompt.txt")) return read_prompt(json_parser_prompt_filename) @override diff --git a/mentat/parsers/replacement_parser.py b/mentat/parsers/replacement_parser.py index 0b41ba486..7c605d1ba 100644 --- a/mentat/parsers/replacement_parser.py +++ b/mentat/parsers/replacement_parser.py @@ -2,6 +2,7 @@ from typing_extensions import override +import mentat from mentat.code_file_manager import CodeFileManager from mentat.errors import ModelError from mentat.parsers.change_display_helper import DisplayInformation, FileActionType @@ -14,8 +15,8 @@ class ReplacementParser(Parser): @override def get_system_prompt(self) -> str: - from mentat.config import config - replacement_parser_prompt_filename = 
config.ai.prompts.get("replacement_parser_prompt")
+        config = mentat.user_session.get("config")
+        replacement_parser_prompt_filename = config.ai.prompts.get("replacement_parser_prompt", Path("text/replacement_parser_prompt.txt"))
         return read_prompt(replacement_parser_prompt_filename)
 
     @override
diff --git a/mentat/parsers/unified_diff_parser.py b/mentat/parsers/unified_diff_parser.py
index b9616943b..c083c079e 100644
--- a/mentat/parsers/unified_diff_parser.py
+++ b/mentat/parsers/unified_diff_parser.py
@@ -4,6 +4,7 @@
 from termcolor import colored
 from typing_extensions import override
 
+import mentat
 from mentat.code_file_manager import CodeFileManager
 from mentat.parsers.change_display_helper import (
     DisplayInformation,
@@ -28,8 +29,8 @@ class UnifiedDiffDelimiter(Enum):
 class UnifiedDiffParser(Parser):
     @override
     def get_system_prompt(self) -> str:
-        from mentat.config import config
-        unified_diff_parser_prompt_filename = config.ai.prompts.get("unified_diff_parser_prompt")
+        config = mentat.user_session.get("config")
+        unified_diff_parser_prompt_filename = config.ai.prompts.get("unified_diff_parser_prompt", Path("text/unified_diff_parser_prompt.txt"))
         return read_prompt(unified_diff_parser_prompt_filename)
 
     @override
diff --git a/mentat/python_client/client.py b/mentat/python_client/client.py
index 15e061e5f..cad677141 100644
--- a/mentat/python_client/client.py
+++ b/mentat/python_client/client.py
@@ -4,7 +4,6 @@
 from pathlib import Path
 from typing import List
 
-from mentat.config import Config
 from mentat.errors import MentatError
 from mentat.session import Session
 from mentat.session_stream import StreamMessageSource
@@ -19,7 +18,6 @@ def __init__(
         ignore_paths: List[Path] = [],
         diff: str | None = None,
         pr_diff: str | None = None,
-        config: Config = Config(),
     ):
         self.cwd = cwd.expanduser().resolve()
         self.paths = paths
@@ -27,7 +25,6 @@
         self.ignore_paths = ignore_paths
         self.diff = diff
         self.pr_diff = pr_diff
-        self.config = config
         self._accumulated_message = ""
         self.stopped = Event()
@@ -80,8 +77,7 @@ async def startup(self):
             self.exclude_paths,
             self.ignore_paths,
             self.diff,
-            self.pr_diff,
-            self.config,
+            self.pr_diff
         )
         self.session.start()
         self.acc_task = asyncio.create_task(self._accumulate_messages())
diff --git a/mentat/resources/conf/.mentatconf.yaml b/mentat/resources/conf/.mentatconf.yaml
index 39604fe65..71ea729f0 100644
--- a/mentat/resources/conf/.mentatconf.yaml
+++ b/mentat/resources/conf/.mentatconf.yaml
@@ -6,15 +6,15 @@
 model: gpt-4-1106-preview
 # maximum_context: 16000
 
 # The type of prompts the agent should use. Options are: text and markdown.
-prompt_type: markdown
+prompt_type: text
 
 # This list contains glob patterns. Mentat uses these patterns to exclude certain files when provided with a directory argument.
 # Mentat considers all files that do not match your .gitignore file and these patterns.
 # Glob patterns are interpreted from the git root location, so if you want to exclude all .py files, use "**/*.py" instead of "*.py".
 # This example excludes all hidden files and directories:
 file_exclude_glob_list:
-  - "**/.*"
-  - "**/.*/**"
+#  - "**/.*"
+#  - "**/.*/**"
 
 # This section contains key-value pairs for defining a custom Pygments style for the Mentat prompt.
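 # For instance, to dim the default text and brighten the prompt, a mapping along
 # these lines could be used (the colors are illustrative examples, not shipped defaults):
 #   input_style:
 #     "": "#abb2bf"
 #     "prompt": "#61afef bold"
 #     "continuation": "#61afef bold"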
input_style: diff --git a/mentat/sampler/sampler.py b/mentat/sampler/sampler.py index eed7a8c2e..2ce44894b 100644 --- a/mentat/sampler/sampler.py +++ b/mentat/sampler/sampler.py @@ -14,25 +14,31 @@ from mentat.session_context import SESSION_CONTEXT from mentat.session_input import collect_user_input from mentat.utils import get_relative_path +import mentat +def init_settings(repo:str | None = None, merge_base_target:str | None = None) -> None: + mentat.user_session.set("sampler_settings",{ + "repo" : repo, + "merge_base_target" : merge_base_target, + }) def parse_message(message: ChatCompletionMessageParam) -> dict[str, str]: - ctx = SESSION_CONTEXT.get() content = message.get("content") text, code = "", "" + config = mentat.user_session.get("config") if isinstance(content, str): if message.get("role") != "assistant": text = content output = list[str]() in_special = False for line in content.splitlines(): - if ctx.config.parser._starts_special(line): # type: ignore + if config.parser.parser._starts_special(line): # type: ignore in_special = True if not in_special: output.append(line) else: pass # TODO: Convert to git diff format, replace 'code' above - if ctx.config.parser._ends_code(line): # type: ignore + if config.parser.parser._ends_code(line): # type: ignore in_special = False while output[-1] == "": output.pop() @@ -70,9 +76,10 @@ async def create_sample(self) -> Sample: session_context = SESSION_CONTEXT.get() stream = session_context.stream code_context = session_context.code_context - config = session_context.config conversation = session_context.conversation + sampler_config = mentat.user_session.get("sampler_settings") + git_root = get_git_root_for_path(session_context.cwd, raise_error=False) if not git_root: raise SampleError("No git repo found") @@ -80,8 +87,8 @@ async def create_sample(self) -> Sample: stream.send("Input sample data", color="light_blue") git_repo = Repo(git_root) merge_base = None - if config.sample_merge_base_target: - target = config.sample_merge_base_target + if sampler_config.get("merge_base_target"): + target = sampler_config.get("merge_base_target") stream.send(f"Use merge base target from config ({target})? 
(y/N)") response = (await collect_user_input()).data.strip() if response == "y": @@ -108,7 +115,7 @@ async def create_sample(self) -> Sample: except (AssertionError, GitCommandError) as e: raise SampleError(f"Error getting diff for merge base: {e}") - repo = config.sample_repo + repo = sampler_config.get("repo") if not repo: remote_url = "" try: @@ -127,7 +134,11 @@ async def create_sample(self) -> Sample: repo = remote_url else: repo = response - config.sample_repo = repo + + mentat.user_session.set("sampler_settings", { + "repo" : repo, + "merge_base_target" : sampler_config.get("merge_base_target") + }) stream.send("Sample Title:") title = (await collect_user_input()).data.strip() or "" diff --git a/mentat/session.py b/mentat/session.py index 61eddacb0..8b2982ef9 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -7,16 +7,15 @@ from typing import Any, Coroutine, List, Optional, Set from uuid import uuid4 -import attr import sentry_sdk from openai import APITimeoutError, BadRequestError, RateLimitError +import mentat from mentat.agent_handler import AgentHandler from mentat.auto_completer import AutoCompleter from mentat.code_context import CodeContext from mentat.code_edit_feedback import get_user_feedback_on_edits from mentat.code_file_manager import CodeFileManager -from mentat.config import config from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.ctags import ensure_ctags_installed @@ -59,7 +58,7 @@ def __init__( self.id = uuid4() self._tasks: Set[asyncio.Task[None]] = set() - self._errors = [] + self._errors: List[Any] = [] # Since we can't set the session_context until after all of the singletons are created, # any singletons used in the constructor of another singleton must be passed in @@ -136,6 +135,7 @@ async def _main(self): conversation = session_context.conversation code_file_manager = session_context.code_file_manager agent_handler = session_context.agent_handler + config = mentat.user_session.get("config") # check early for ctags so we can fail fast if config.run.auto_context_tokens > 0: @@ -229,12 +229,12 @@ def start(self): """ async def run_main(): - ctx = SESSION_CONTEXT.get() try: with sentry_sdk.start_transaction( op="mentat_started", name="Mentat Started" ) as transaction: - #transaction.set_tag("config", attr.asdict(config)) + #TODO: Does this need to be here? 
+ transaction.set_tag("config", "config") await self._main() except (SessionExit, CancelledError): pass @@ -288,5 +288,5 @@ def send_errors_to_stream(self): session_context = SESSION_CONTEXT.get() stream = session_context.stream for error in self._errors: - print(f"[light_yellow3]{error}[/light_yellow3]") + stream.send(error, color="yellow") self._errors = [] diff --git a/mentat/terminal/__init__.py b/mentat/terminal/__init__.py index e69de29bb..6c19e6013 100644 --- a/mentat/terminal/__init__.py +++ b/mentat/terminal/__init__.py @@ -0,0 +1,3 @@ +from mentat.config import load_config +#first thing we do is we init a default config +load_config() diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 53ce8f206..75d626bec 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -1,17 +1,17 @@ -import argparse import asyncio import logging import signal from asyncio import Event -from pathlib import Path from types import FrameType -from typing import Any, Coroutine, List, Set +from typing import Any, Coroutine, Set, Optional from prompt_toolkit import PromptSession -from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent -from prompt_toolkit.styles import Style +from prompt_toolkit.key_binding import KeyPressEvent -from mentat.config import config, update_config +import mentat +from mentat.config import update_config + +from mentat.sampler import sampler from mentat.session import Session from mentat.session_stream import StreamMessageSource from mentat.terminal.loading import LoadingHandler @@ -19,26 +19,12 @@ from mentat.terminal.prompt_completer import MentatCompleter from mentat.terminal.prompt_session import MentatPromptSession -from typing import List +from typing import List, Dict, Union from pathlib import Path import click -import anyio -import inspect - -from functools import partial, wraps - -from mentat.utils import dd -from asyncio import run as aiorun - -from prompt_toolkit.application import Application -from prompt_toolkit.application import Application -from prompt_toolkit.application.current import get_app from prompt_toolkit.key_binding import KeyBindings -from prompt_toolkit.key_binding.bindings.focus import focus_next, focus_previous -from prompt_toolkit.layout import HSplit, Layout, VSplit from prompt_toolkit.styles import Style -from prompt_toolkit.widgets import Box, Button, Frame, Label, TextArea class TerminalClient: @@ -155,6 +141,7 @@ def _init_signal_handlers(self): async def _run(self): self._init_signal_handlers() + self.session = Session( self.cwd, self.paths, @@ -165,10 +152,13 @@ async def _run(self): ) self.session.start() + config = mentat.user_session.get("config") + style = Style.from_dict(config.ui.input_style) + mentat_completer = MentatCompleter(self.session.stream) self._prompt_session = MentatPromptSession( completer=mentat_completer, - style=Style(config.ui.input_style), + style=style, enable_suspend=True, ) @@ -184,7 +174,7 @@ def _(event: KeyPressEvent): self._plain_session = PromptSession[str]( message=[("class:prompt", ">>> ")], - style=Style(config.ui.input_style), + style=style, completer=None, key_bindings=plain_bindings, enable_suspend=True, @@ -220,21 +210,43 @@ def run(self): @click.option('-d', '--diff', default=None, show_default='HEAD', help='A git tree-ish (e.g. 
commit, branch, tag) to diff against.')
@click.option('-p', '--pr-diff', default=None, help='A git tree-ish to diff against the latest common ancestor of.')
@click.option('--cwd', default=str(Path.cwd()), help='The current working directory.')
+@click.option('--model', default=None, help='The model to use.')
+@click.option('--temperature', default=None, help='The model temperature to use.')
+@click.option('--maximum-context', default=None, help='The maximum context size.')
@click.argument('paths', nargs=-1, required=True)
-def start(paths, exclude_paths, ignore_paths, diff, pr_diff, cwd) -> None:
+def start(paths: list[str], exclude_paths: list[str], ignore_paths: list[str], diff: Optional[str], pr_diff: Optional[str], cwd: Optional[str], model: Optional[str], temperature: Optional[float], maximum_context: Optional[int]) -> None:
     # Check which of these options are set and pass them on to update_config as the session config
-    session_config = {'file_exclude_glob_list': []}
+    session_config: Dict[str, Union[List[str], None, str, int, float]] = {
+        'file_exclude_glob_list': [],
+        'model': None,
+        'temperature': None,
+        'maximum_context': None
+    }
 
     if exclude_paths:
         session_config["file_exclude_glob_list"] = exclude_paths
 
+    if model:
+        session_config["model"] = model
+
+    if temperature:
+        session_config["temperature"] = temperature
+
+    if maximum_context:
+        session_config["maximum_context"] = maximum_context
+
+    sampler.init_settings()
+
     update_config(session_config)
 
-    cwd = Path(cwd).expanduser().resolve()
+    current_working_directory = Path.cwd()
+    if cwd:
+        current_working_directory = Path(cwd).expanduser().resolve()
 
     terminal_client = TerminalClient(
-        cwd,
+        current_working_directory,
         paths,
         exclude_paths,
         ignore_paths,
diff --git a/mentat/user_session.py b/mentat/user_session.py
new file mode 100644
index 000000000..6d919a6f1
--- /dev/null
+++ b/mentat/user_session.py
@@ -0,0 +1,23 @@
+from typing import Dict, Any
+
+user_session_store: Dict[str, Any] = {}
+
+class UserSession:
+    """
+    Developer-facing user session class.
+    Useful for the developer to store user-specific data between calls.
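+
+    A minimal usage sketch (illustrative):
+
+        mentat.user_session.set("config", config)  # store any object under a key
+        config = mentat.user_session.get("config", default=None)  # read it back later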
+ """ + + def get(self, key: str, default: Any=None) -> Any: + return user_session_store.get(key, default) + + def set(self, key: str, value: Any) -> None: + user_session_store[key] = value + + +user_session = UserSession() diff --git a/mentat/utils.py b/mentat/utils.py index 82ebe56ed..530f5da22 100644 --- a/mentat/utils.py +++ b/mentat/utils.py @@ -7,7 +7,7 @@ from importlib import resources from importlib.abc import Traversable from pathlib import Path -from typing import TYPE_CHECKING, AsyncIterator, List, Literal, Optional, Union +from typing import TYPE_CHECKING, AsyncIterator, List, Literal, Optional, Union, Any import packaging.version import requests @@ -15,7 +15,6 @@ from jinja2 import Environment, PackageLoader, select_autoescape from openai.types.chat import ChatCompletionChunk from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta -from rich import inspect from mentat import __version__ from mentat.session_context import SESSION_CONTEXT @@ -182,7 +181,7 @@ def get_relative_path(path: Path, target: Path) -> Path: return relative_path -def dd(args): +def dd(args: Any): """ This method dd takes an argument args and performs the following operations: @@ -200,11 +199,22 @@ def dd(args): args = [1, 2, 3] dd(args) """ - inspect(args, methods=True) - # Exit the program - sys.exit() + try: + # Throw an exception if needed + if not args: + raise ValueError("No args provided") + + # Pretty print the argument + pprint.pprint(args) + + except Exception as e: + print(f"Exception occurred: {e}") + + finally: + # Exit the program + sys.exit() -def dump(args): +def dump(args: Any): """ This method dd takes an argument args and performs the following operations: @@ -216,7 +226,7 @@ def dump(args): args = [1, 2, 3] dump(args) """ - inspect(args, methods=True) + pprint.pprint(args) CLONE_TO_DIR = Path(__file__).parent.parent / "benchmark_repos" diff --git a/poetry.lock b/poetry.lock index 7a1fd9d59..b40f0faae 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "aiomultiprocess" @@ -561,30 +561,6 @@ files = [ [package.dependencies] referencing = ">=0.31.0" -[[package]] -name = "markdown-it-py" -version = "3.0.0" -description = "Python port of markdown-it. Markdown parsing, done right!" 
-optional = false -python-versions = ">=3.8" -files = [ - {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, - {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, -] - -[package.dependencies] -mdurl = ">=0.1,<1.0" - -[package.extras] -benchmarking = ["psutil", "pytest", "pytest-benchmark"] -code-style = ["pre-commit (>=3.0,<4.0)"] -compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] -linkify = ["linkify-it-py (>=1,<3)"] -plugins = ["mdit-py-plugins"] -profiling = ["gprof2dot"] -rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] -testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] - [[package]] name = "markupsafe" version = "2.1.3" @@ -674,17 +650,6 @@ docs = ["alabaster (==0.7.13)", "autodocsumm (==0.2.11)", "sphinx (==7.0.1)", "s lint = ["flake8 (==6.0.0)", "flake8-bugbear (==23.7.10)", "mypy (==1.4.1)", "pre-commit (>=2.4,<4.0)"] tests = ["pytest", "pytz", "simplejson"] -[[package]] -name = "mdurl" -version = "0.1.2" -description = "Markdown URL utilities" -optional = false -python-versions = ">=3.7" -files = [ - {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, - {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, -] - [[package]] name = "mypy-extensions" version = "1.0.0" @@ -1451,24 +1416,6 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] -[[package]] -name = "rich" -version = "13.7.0" -description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"}, - {file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"}, -] - -[package.dependencies] -markdown-it-py = ">=2.2.0" -pygments = ">=2.13.0,<3.0.0" - -[package.extras] -jupyter = ["ipywidgets (>=7.5.1,<9)"] - [[package]] name = "rpds-py" version = "0.15.2" @@ -1894,27 +1841,6 @@ exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} trio = ">=0.11" wsproto = ">=0.14" -[[package]] -name = "typer" -version = "0.9.0" -description = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, - {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"}, -] - -[package.dependencies] -click = ">=7.1.1,<9.0.0" -typing-extensions = ">=3.7.4.3" - -[package.extras] -all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] -dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"] -doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"] -test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"] - [[package]] name = "typing-extensions" version = "4.8.0" @@ -1960,6 +1886,16 @@ brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uuid" +version = "1.30" +description = "UUID object and generation functions (Python 2.3 or higher)" +optional = false +python-versions = "*" +files = [ + {file = "uuid-1.30.tar.gz", hash = "sha256:1f87cc004ac5120466f36c5beae48b4c48cc411968eed0eaecd3da82aa96193f"}, +] + [[package]] name = "wcwidth" version = "0.2.12" @@ -2004,4 +1940,4 @@ h11 = ">=0.9.0,<1" [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "4b1e59982d7096c184b82f709a213952de378796ea74c6d96ea4cadd67e8d27c" +content-hash = "d5fa1008c5330905c0a36bf1283c5b8c53c1b3f57802ea776a9c6eb1e848dce4" diff --git a/pyproject.toml b/pyproject.toml index b18fc5abb..6191d746a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,9 +36,8 @@ tqdm = "4.66.1" webdriver_manager = "4.0.1" dataclasses-json = "^0.6.3" pyyaml = "^6.0.1" -rich = "^13.7.0" -typer = "^0.9.0" click = "^8.1.7" +uuid = "^1.30" [tool.poetry.group.dev.dependencies] aiomultiprocess = "^0.9.0" diff --git a/tests/code_context_test.py b/tests/code_context_test.py index 41877d665..2984c40f5 100644 --- a/tests/code_context_test.py +++ b/tests/code_context_test.py @@ -6,14 +6,16 @@ import pytest +import mentat from mentat.code_context import CodeContext -from mentat.config import Config +from mentat.config import RunSettings, update_config, load_config from mentat.errors import ContextSizeInsufficient from mentat.feature_filters.default_filter import DefaultFilter from mentat.git_handler import get_non_gitignored_files from mentat.include_files import is_file_text_encoded from mentat.interval import Interval from mentat.llm_api_handler import count_tokens +from mentat.utils import dd from tests.conftest import run_git_command @@ -76,9 +78,10 @@ async def test_bracket_file(temp_testbed, mock_code_context): @pytest.mark.asyncio async def test_config_glob_exclude(mocker, temp_testbed, mock_code_context): # Makes sure glob exclude config works - mocker.patch.object( - Config, "file_exclude_glob_list", new=[os.path.join("glob_test", "**", "*.py")] - ) + config = mentat.user_session.get("config") + config.run.file_exclude_glob_list = [Path("glob_test") / "**" / "*.py"] + mentat.user_session.set("config", config) + glob_exclude_path = os.path.join("glob_test", "bagel", "apple", "exclude_me.py") 
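+    # exclude_me.py matches the exclude glob configured above, while the .ts file
+    # below does not, so only the Python file should drop out of context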
glob_include_path = os.path.join("glob_test", "bagel", "apple", "include_me.ts") @@ -110,6 +113,9 @@ async def test_config_glob_exclude(mocker, temp_testbed, mock_code_context): @pytest.mark.asyncio async def test_glob_include(temp_testbed, mock_code_context): + #reset the config context + load_config() + # Make sure glob include works glob_include_path = os.path.join("glob_test", "bagel", "apple", "include_me.py") glob_include_path2 = os.path.join("glob_test", "bagel", "apple", "include_me2.py") @@ -188,6 +194,8 @@ async def test_text_encoding_checking(temp_testbed, mock_session_context): @pytest.mark.asyncio @pytest.mark.clear_testbed async def test_max_auto_tokens(mocker, temp_testbed, mock_session_context): + update_config({"maximum_context" : 8000}) + with open("file_1.py", "w") as f: f.write(dedent("""\ def func_1(x, y): @@ -214,7 +222,7 @@ def func_4(string): ) code_context.include("file_1.py") code_context.use_llm = False - mock_session_context.config.auto_context_tokens = 8000 + filter_mock = AsyncMock(side_effect=lambda features: features) mocker.patch.object(DefaultFilter, "filter", side_effect=filter_mock) @@ -222,7 +230,7 @@ async def _count_max_tokens_where(tokens_used: int) -> int: code_message = await code_context.get_code_message(tokens_used, prompt="prompt") return count_tokens(code_message, "gpt-4", full_message=True) - assert await _count_max_tokens_where(0) == 89 # Code + assert await _count_max_tokens_where(0) == 46 # Code with pytest.raises(ContextSizeInsufficient): await _count_max_tokens_where(1e6) @@ -258,8 +266,12 @@ def test_get_all_features(temp_testbed, mock_code_context): @pytest.mark.asyncio async def test_get_code_message_ignore(mocker, temp_testbed, mock_session_context): - mock_session_context.config.auto_context_tokens = 8000 - mocker.patch.object(Config, "maximum_context", new=7000) + + config = mentat.user_session.get("config") + config.ai.maximum_context = 7000 + config.run.auto_context_tokens = 8000 + mentat.user_session.set("config", config) + filter_mock = AsyncMock(side_effect=lambda features: features) mocker.patch.object(DefaultFilter, "filter", side_effect=filter_mock) code_context = CodeContext( diff --git a/tests/config_test.py b/tests/config_test.py index 85ce02d11..dcedf7ca3 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -5,92 +5,79 @@ import pytest import mentat.config -from mentat.config import Config, config_file_name +from mentat.config import update_config from mentat.parsers.replacement_parser import ReplacementParser +from pathlib import Path +import pytest +import yaml +from mentat import config +from unittest.mock import patch +from unittest.mock import MagicMock +from io import StringIO +from yaml import dump +import os + +from mentat.utils import dd @pytest.fixture -def mock_config_errors(mocker): - errors = [] - mocker.patch.object(Config, "error", lambda self, message: errors.append(message)) - return errors +def mock_open(mocker): + mock_open = mocker.patch('builtins.open', new_callable=MagicMock) + return mock_open + +@pytest.mark.asyncio +async def test_load_yaml(mock_open): + data = {'test_key': 'test_value'} + mock_open.return_value.__enter__.return_value = StringIO(yaml.dump(data)) + assert config.load_yaml('test_path') == data + mock_open.assert_called_with('test_path', 'r') + +@pytest.mark.asyncio +async def test_merge_configs(): + original = {'key1': 'value1', 'key2': 'value2'} + new = {'key2': 'new_value2', 'key3': 'new_value3'} + merged = config.merge_configs(original, new) + assert merged == 
{'key1': 'value1', 'key2': 'new_value2', 'key3': 'new_value3'}
 
 
 @pytest.mark.asyncio
-async def test_config_creation():
-    "This test verifies the Config adds the parameters to the argparse object."
-    "Those take precedence over the config files and the project config takes"
-    "precedence over the user config."
-    parser = argparse.ArgumentParser()
-    Config.add_fields_to_argparse(parser)
-    args = parser.parse_args(
-        [
-            "--model",
-            "model",
-            "--temperature",
-            "0.2",
-            "--maximum-context",
-            "1",
-            "-a",
-            "2000",
-        ]
-    )
-    assert args.model == "model"
-    assert args.temperature == 0.2
-    assert args.maximum_context == "1"
-    assert args.parser is None
-    assert args.auto_context_tokens == 2000
-
-    with open(config_file_name, "w") as project_config_file:
-        project_config_file.write(dedent("""\
-            {
-                "input_style": [[ "project", "yes" ]]
-            }"""))
-
-    mentat.config.user_config_path = Path(str(config_file_name) + "1")
-    with open(mentat.config.user_config_path, "w") as user_config_file:
-        user_config_file.write(dedent("""\
-            {
-                "model": "test",
-                "parser": "replacement",
-                "input_style": [[ "user", "yes" ]]
-            }"""))
-
-    config = Config.create(Path.cwd(), args)
-
-    assert config.model == "model"
-    assert config.temperature == 0.2
-    assert config.maximum_context == 1
-    assert type(config.parser) == ReplacementParser
-    assert config.auto_context_tokens == 2000
-    assert config.input_style == [["project", "yes"]]
+async def test_default_config():
+    "This test verifies that a config is created with default settings required for the run."
+    config = mentat.user_session.get("config")
+
+    assert config.ai.model == "gpt-4-1106-preview"
+    assert config.ai.maximum_context is None
+
+    assert config.run.auto_tokens == 8000
+    assert config.run.auto_context is False
+
+    assert config.ui.input_style == [["", "#9835bd"],
+                                     ["prompt", "#ffffff bold"],
+                                     ["continuation", "#ffffff bold"]]
+
+    assert config.parser.parser_type == 'block'
 
 
 @pytest.mark.asyncio
-async def test_invalid_config(mock_config_errors):
-    # If invalid config file is found, it should use next config
-    with open(config_file_name, "w") as project_config_file:
-        project_config_file.write(dedent("""\
-            {
-                "model": "project",
-                "format": "I have a trailing comma",
-            }"""))
-
-    mentat.config.user_config_path = Path(str(config_file_name) + "1")
-    with open(mentat.config.user_config_path, "w") as user_config_file:
-        user_config_file.write(dedent("""\
-            {
-                "model": "test",
-                "foobar": "Not a real setting"
-            }"""))
-
-    config = Config.create(cwd=Path.cwd())
-    assert (
-        mock_config_errors[0]
-        == "Warning: Config .mentat_config.json1 contains unrecognized setting: foobar"
-    )
-    assert (
-        "contains invalid json; ignoring user configuration file"
-        in mock_config_errors[1]
-    )
-    assert config.model == "test"
+async def test_update_config():
+    "This test verifies that mid-session updates are applied to the active config."
+    config = mentat.user_session.get("config")
+
+    # Assert that the default settings are in place before we change them.
+    assert config.ai.model == "gpt-4-1106-preview"
+    assert config.ai.maximum_context is None
+
+    session_config = {
+        'model': 'abc-123',
+        'maximum_context': 16000
+    }
+
+    update_config(session_config)
+
+    # Re-fetch the config: update_config stores a fresh object in the user session.
+    config = mentat.user_session.get("config")
+    assert config.ai.model == "abc-123"
+    assert config.ai.maximum_context == 16000
diff --git a/tests/conftest.py b/tests/conftest.py
index 1d4b2bc9b..a84c02563 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -16,12 +16,17 @@
 from openai.types.chat.chat_completion_chunk import Choice as AsyncChoice
 from openai.types.chat.chat_completion_chunk import ChoiceDelta
 
-from mentat import config
+
+from mentat.config import load_config
+# First, initialize a default config
+load_config()
+
+import mentat
 from mentat.agent_handler import AgentHandler
 from mentat.auto_completer import AutoCompleter
 from mentat.code_context import CodeContext
 from mentat.code_file_manager import CodeFileManager
-from mentat.config import Config, config_file_name
+from mentat.config import config_file_name, MentatConfig
 from mentat.conversation import Conversation
 from mentat.cost_tracker import CostTracker
 from mentat.git_handler import get_git_root_for_path
@@ -30,6 +35,7 @@
 from mentat.session_context import SESSION_CONTEXT, SessionContext
 from mentat.session_stream import SessionStream, StreamMessage, StreamMessageSource
 from mentat.streaming_printer import StreamingPrinter
+from mentat.utils import dd
 from mentat.vision.vision_manager import VisionManager
 
 pytest_plugins = ("pytest_reportlog",)
@@ -260,14 +266,15 @@ def mock_session_context(temp_testbed):
     set by a Session if the test creates a Session. If you create a Session or
     Client in your test, do NOT use this SessionContext!
     """
+    # Reset the config context
+    load_config()
+
     git_root = get_git_root_for_path(temp_testbed, raise_error=False)
 
     stream = SessionStream()
 
     cost_tracker = CostTracker()
 
-    config = Config()
-
     llm_api_handler = LlmApiHandler()
 
     code_context = CodeContext(stream, git_root)
@@ -288,7 +295,6 @@
         stream,
         llm_api_handler,
         cost_tracker,
-        config,
         code_context,
         code_file_manager,
         conversation,
@@ -381,7 +387,9 @@
 # it will be unset unless a specific test wants to make a config in the testbed
 @pytest.fixture(autouse=True)
 def mock_user_config(mocker):
+    config = mentat.user_session.get("config")
     config.user_config_path = Path(config_file_name)
+    mentat.user_session.set("config", config)
 
 
 @pytest.fixture(autouse=True)
diff --git a/tests/parser_tests/block_format_error_test.py b/tests/parser_tests/block_format_error_test.py
index 9be29dd9a..2e77e3cf9 100644
--- a/tests/parser_tests/block_format_error_test.py
+++ b/tests/parser_tests/block_format_error_test.py
@@ -3,14 +3,14 @@
 import pytest
 
-from mentat.config import Config
+from mentat.config import ParserSettings
 from mentat.parsers.block_parser import BlockParser
 from mentat.session import Session
 
 
 @pytest.fixture(autouse=True)
 def block_parser(mocker):
-    mocker.patch.object(Config, "parser", new=BlockParser())
+    mocker.patch.object(ParserSettings, "parser", new=BlockParser())
 
 
 temp_file_name = "temp.py"
diff --git a/tests/parser_tests/block_format_test.py b/tests/parser_tests/block_format_test.py
index 3310aab75..96f2e8cbc 100644
--- a/tests/parser_tests/block_format_test.py
+++ b/tests/parser_tests/block_format_test.py
@@ -4,7 +4,7 @@
 import pytest
 
-from mentat.config import Config
+from mentat.config import ParserSettings
 from mentat.parsers.block_parser import BlockParser
from mentat.session import Session from tests.parser_tests.inverse import verify_inverse @@ -12,7 +12,7 @@ @pytest.fixture def block_parser(mocker): - mocker.patch.object(Config, "parser", new=BlockParser()) + mocker.patch.object(ParserSettings, "parser", new=BlockParser()) @pytest.mark.asyncio diff --git a/tests/parser_tests/replacement_format_error_test.py b/tests/parser_tests/replacement_format_error_test.py index f6e0013d7..e37869367 100644 --- a/tests/parser_tests/replacement_format_error_test.py +++ b/tests/parser_tests/replacement_format_error_test.py @@ -3,14 +3,14 @@ import pytest -from mentat.config import Config +from mentat.config import ParserSettings from mentat.parsers.replacement_parser import ReplacementParser from mentat.session import Session @pytest.fixture(autouse=True) def replacement_parser(mocker): - mocker.patch.object(Config, "parser", new=ReplacementParser()) + mocker.patch.object(ParserSettings, "parser", new=ReplacementParser()) @pytest.mark.asyncio diff --git a/tests/parser_tests/replacement_format_test.py b/tests/parser_tests/replacement_format_test.py index 3d57dd6e2..855c43569 100644 --- a/tests/parser_tests/replacement_format_test.py +++ b/tests/parser_tests/replacement_format_test.py @@ -3,7 +3,7 @@ import pytest -from mentat.config import Config +from mentat.config import ParserSettings from mentat.parsers.replacement_parser import ReplacementParser from mentat.session import Session from tests.parser_tests.inverse import verify_inverse @@ -11,7 +11,7 @@ @pytest.fixture def replacement_parser(mocker): - mocker.patch.object(Config, "parser", new=ReplacementParser()) + mocker.patch.object(ParserSettings, "parser", new=ReplacementParser()) @pytest.mark.asyncio diff --git a/tests/parser_tests/unified_diff_format_error_test.py b/tests/parser_tests/unified_diff_format_error_test.py index c4a955cf1..82e02ee89 100644 --- a/tests/parser_tests/unified_diff_format_error_test.py +++ b/tests/parser_tests/unified_diff_format_error_test.py @@ -1,7 +1,7 @@ from pathlib import Path from textwrap import dedent -from mentat.config import Config +from mentat.config import ParserSettings from mentat.parsers.unified_diff_parser import UnifiedDiffParser from mentat.session import Session from tests.conftest import pytest @@ -9,7 +9,7 @@ @pytest.fixture(autouse=True) def unified_diff_parser(mocker): - mocker.patch.object(Config, "parser", new=UnifiedDiffParser()) + mocker.patch.object(ParserSettings, "parser", new=UnifiedDiffParser()) @pytest.mark.asyncio diff --git a/tests/parser_tests/unified_diff_format_test.py b/tests/parser_tests/unified_diff_format_test.py index 54feb3d0a..d62e4bad0 100644 --- a/tests/parser_tests/unified_diff_format_test.py +++ b/tests/parser_tests/unified_diff_format_test.py @@ -3,14 +3,16 @@ import pytest -from mentat.config import Config +import mentat +from mentat.config import ParserSettings from mentat.parsers.unified_diff_parser import UnifiedDiffParser from mentat.session import Session +from mentat.utils import dd @pytest.fixture(autouse=True) def unified_diff_parser(mocker): - mocker.patch.object(Config, "parser", new=UnifiedDiffParser()) + mocker.patch.object(ParserSettings, "parser", new=UnifiedDiffParser()) @pytest.mark.asyncio @@ -18,6 +20,7 @@ async def test_replacement( mock_call_llm_api, mock_collect_user_input, ): + temp_file_name = Path("temp.py").absolute() with open(temp_file_name, "w") as f: f.write(dedent("""\ @@ -55,6 +58,7 @@ async def test_replacement( # This is # your captain speaking # 4 lines""") + assert content == 
expected_content From be1b313ad64d68b0e28f2bf90fbfa60a5e9902de Mon Sep 17 00:00:00 2001 From: Greg L Date: Fri, 29 Dec 2023 16:50:58 -0500 Subject: [PATCH 19/24] Refactor config handling This commit refactors the way configuration is handled in the application. This includes implementing settings for parsers as well as AI models, and modifying how the configuration is retrieved throughout the app. Mid-session configuration changes are now more consistent and manageable, enhancing overall usability and code readability. --- mentat/code_context.py | 4 +- mentat/command/commands/config.py | 58 +-- mentat/command/commands/screenshot.py | 6 +- mentat/command/commands/search.py | 3 +- mentat/config.py | 336 +++++++++++------- mentat/feature_filters/llm_feature_filter.py | 10 +- mentat/llm_api_handler.py | 4 +- mentat/resources/conf/.mentatconf.yaml | 25 +- mentat/session.py | 13 +- mentat/session_stream.py | 1 - mentat/terminal/client.py | 30 +- mentat/terminal/output.py | 1 - tests/code_context_test.py | 4 +- tests/commands_test.py | 24 +- tests/config_test.py | 34 +- tests/conftest.py | 15 +- tests/conversation_test.py | 19 +- .../llm_feature_filter_test.py | 2 + .../replacement_format_error_test.py | 21 +- tests/parser_tests/replacement_format_test.py | 10 +- .../parser_tests/unified_diff_format_test.py | 7 +- tests/sampler_test.py | 9 +- 22 files changed, 348 insertions(+), 288 deletions(-) diff --git a/mentat/code_context.py b/mentat/code_context.py index 80c6ece96..0b416e731 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -31,7 +31,6 @@ from mentat.llm_api_handler import count_tokens, get_max_tokens, is_context_sufficient from mentat.session_context import SESSION_CONTEXT from mentat.session_stream import SessionStream -from mentat.utils import dd class CodeContext: @@ -154,6 +153,7 @@ async def get_code_message( tokens_used = ( prompt_tokens + meta_tokens + include_files_tokens + config.ai.token_buffer ) + if not is_context_sufficient(tokens_used): raise ContextSizeInsufficient() auto_tokens = min(get_max_tokens() - tokens_used, config.run.auto_context_tokens) @@ -284,6 +284,7 @@ def include( *config.run.file_exclude_glob_list, ] ) + for pattern in all_exclude_patterns: if not Path(pattern).is_absolute(): abs_exclude_patterns.add(session_context.cwd / pattern) @@ -296,6 +297,7 @@ def include( cwd=session_context.cwd, exclude_patterns=abs_exclude_patterns, ) + except PathValidationError as e: session_context.stream.send(str(e), color="light_red") return set() diff --git a/mentat/command/commands/config.py b/mentat/command/commands/config.py index dae447285..b322fd2f0 100644 --- a/mentat/command/commands/config.py +++ b/mentat/command/commands/config.py @@ -1,6 +1,5 @@ from typing import List -import attr from typing_extensions import override from mentat.command.command import Command, CommandArgument @@ -10,47 +9,26 @@ class ConfigCommand(Command, command_name="config"): @override async def apply(self, *args: str) -> None: - from mentat.config import YamlConfig, update_config + from mentat.config import mid_session_config, update_config, get_config session_context = SESSION_CONTEXT.get() stream = session_context.stream - yaml_config = YamlConfig() if len(args) == 0: stream.send("No config option specified", color="yellow") - else: + elif len(args) == 1 or len(args) == 2: + setting = args[0] - if hasattr(yaml_config, setting): - if len(args) == 1: - value = getattr(yaml_config, setting) - description = attr.fields_dict(type(yaml_config))[setting].metadata.get( - 
"description" - ) - stream.send(f"{setting}: {value}") - if description: - stream.send(f"Description: {description}") - elif len(args) == 2: + if setting in mid_session_config: + if len(args) == 2: value = args[1] - if attr.fields_dict(type(yaml_config))[setting].metadata.get( - "no_midsession_change" - ): - stream.send( - f"Cannot change {setting} mid-session. Please restart" - " Mentat to change this setting.", - color="yellow", - ) - return - try: - update_config({setting: value}) - stream.send(f"{setting} set to {value}", color="green") - except (TypeError, ValueError): - stream.send( - f"Illegal value for {setting}: {value}", color="red" - ) + update_config(setting=setting, value=value) else: - stream.send("Too many arguments", color="yellow") + get_config(setting=setting) else: stream.send(f"Unrecognized config option: {setting}", color="red") + else: + stream.send("Too many arguments", color="yellow") @override @classmethod @@ -65,17 +43,19 @@ def arguments(cls) -> List[CommandArgument]: def argument_autocompletions( cls, arguments: list[str], argument_position: int ) -> list[str]: - from mentat.config import YamlConfig if argument_position == 0: - return YamlConfig.get_fields() + return [ + "model", + "temperature", + "prompt_type", + "format", + "maximum_context", + "auto_context_tokens" + ] elif argument_position == 1: - setting = arguments[0] - fields = attr.fields_dict(YamlConfig) - if setting in fields: - return fields[setting].metadata.get("auto_completions", []) - else: - return [] + #TODO: Figure out a better way of doing this. + return [] else: return [] diff --git a/mentat/command/commands/screenshot.py b/mentat/command/commands/screenshot.py index 211d1ea1d..f937dbed7 100644 --- a/mentat/command/commands/screenshot.py +++ b/mentat/command/commands/screenshot.py @@ -2,6 +2,7 @@ from typing_extensions import override +import mentat from mentat.auto_completer import get_command_filename_completions from mentat.command.command import Command, CommandArgument from mentat.session_context import SESSION_CONTEXT @@ -11,7 +12,7 @@ class ScreenshotCommand(Command, command_name="screenshot"): @override async def apply(self, *args: str) -> None: - from mentat.config import config, update_config + config = mentat.user_session.get("config") session_context = SESSION_CONTEXT.get() vision_manager = session_context.vision_manager @@ -26,7 +27,8 @@ async def apply(self, *args: str) -> None: " gpt-4-vision-preview", color="yellow", ) - update_config({"model" : "gpt-4-vision-preview"}) + config.ai.model = "gpt-4-vision-preview" + mentat.user_session.set("config", config) else: stream.send( "Can't determine if this model supports vision. 
Attempting anyway.", diff --git a/mentat/command/commands/search.py b/mentat/command/commands/search.py index c9a961eab..0da9c24b6 100644 --- a/mentat/command/commands/search.py +++ b/mentat/command/commands/search.py @@ -3,6 +3,7 @@ from termcolor import colored from typing_extensions import override +import mentat from mentat.command.command import Command, CommandArgument from mentat.errors import UserError from mentat.session_context import SESSION_CONTEXT @@ -34,7 +35,7 @@ def _parse_include_input(user_input: str, max_num: int) -> Set[int] | None: class SearchCommand(Command, command_name="search"): @override async def apply(self, *args: str) -> None: - from mentat.config import config + config = mentat.user_session.get('config') session_context = SESSION_CONTEXT.get() stream = session_context.stream diff --git a/mentat/config.py b/mentat/config.py index 3f1dbc7d5..a32e2b0ae 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -4,17 +4,18 @@ from pathlib import Path import yaml import shutil -import attr +import mentat from mentat import user_session from mentat.git_handler import get_git_root_for_path -from mentat.parsers.parser_map import parser_map from mentat.parsers.block_parser import BlockParser -from mentat.utils import mentat_dir_path, dd -from dataclasses import dataclass, field +from mentat.parsers.replacement_parser import ReplacementParser +from mentat.parsers.unified_diff_parser import UnifiedDiffParser +from mentat.session_context import SESSION_CONTEXT +from mentat.utils import mentat_dir_path +from dataclasses import dataclass, field, fields from dataclasses_json import DataClassJsonMixin -from typing import Union from typing import Any, Dict, List, Optional config_file_name = Path(".mentat_config.yaml") @@ -25,46 +26,94 @@ USER_MENTAT_ROOT = Path.home() / ".mentat" GIT_ROOT = get_git_root_for_path(APP_ROOT, raise_error=False) -def int_or_none(s: str | None) -> int | None: - if s is not None: - return int(s) - return None - - bool_autocomplete = ["True", "False"] - -@dataclass() +@dataclass class RunSettings(DataClassJsonMixin): file_exclude_glob_list: List[Path] = field(default_factory=list) auto_context: bool = False auto_tokens: int = 8000 - #Automatically selects code files for every request to include in context. Adds this many tokens to context each request. 
auto_context_tokens: int = 0 -@dataclass() + def __init__(self, + file_exclude_glob_list: Optional[List[Path]] = None, + auto_context: Optional[bool] = None, + auto_tokens: Optional[int] = None, + auto_context_tokens: Optional[int] = None) -> None: + if file_exclude_glob_list is not None: + self.file_exclude_glob_list = file_exclude_glob_list + if auto_context is not None: + self.auto_context = auto_context + if auto_tokens is not None: + self.auto_tokens = auto_tokens + if auto_context_tokens is not None: + self.auto_context_tokens = auto_context_tokens + + +@dataclass class AIModelSettings(DataClassJsonMixin): - model: str = "gpt-4-1106-preview" - feature_selection_model: str = "gpt-4-1106-preview" - embedding_model: str = "text-embedding-ada-002" - prompts: Dict[str, Path] = field( - default_factory=lambda: { - "agent_file_selection_prompt": Path("text/agent_file_selection_prompt.txt"), - "agent_command_selection_prompt": Path("text/agent_command_selection_prompt.txt"), - "block_parser_prompt": Path("text/block_parser_prompt.txt"), - "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), - "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), - "unified_diff_parser_prompt": Path("text/unified_diff_parser_prompt.txt"), - "json_parser_prompt": Path("text/json_parser_prompt.txt") + model: str + feature_selection_model: str + embedding_model: str + prompts: Dict[str, Path] + temperature: float + maximum_context: Optional[int] + token_buffer: int + no_parser_prompt: bool + + def __init__(self, + model: Optional[str] = "gpt-4-1106-preview", + feature_selection_model: Optional[str] = "gpt-4-1106-preview", + embedding_model: Optional[str] = "text-embedding-ada-002", + prompts: Optional[str] = "text", + temperature: Optional[float] = 0.2, + maximum_context: Optional[int] = None, + token_buffer: Optional[int] = 1000, + no_parser_prompt: Optional[bool] = False): + if model is not None: + self.model = model + if feature_selection_model is not None: + self.feature_selection_model = feature_selection_model + if embedding_model is not None: + self.embedding_model = embedding_model + if prompts is not None: + self.load_prompts(prompts) + if temperature is not None: + self.temperature = temperature + if maximum_context is not None: + self.maximum_context = maximum_context + if token_buffer is not None: + self.token_buffer = token_buffer + if no_parser_prompt is not None: + self.no_parser_prompt = no_parser_prompt + + + def load_prompts(self, prompt_type: str) -> None: + prompts_type = { + "markdown": { + "agent_file_selection_prompt": Path("markdown/agent_file_selection_prompt.md"), + "agent_command_selection_prompt": Path("markdown/agent_command_selection_prompt.md"), + "block_parser_prompt": Path("markdown/block_parser_prompt.md"), + "feature_selection_prompt": Path("markdown/feature_selection_prompt.md"), + "replacement_parser_prompt": Path("markdown/replacement_parser_prompt.md"), + "unified_diff_parser_prompt": Path("markdown/unified_diff_parser_prompt.md"), + "json_parser_prompt": Path("markdown/json_parser_prompt.md") + }, + "text": { + "agent_file_selection_prompt": Path("text/agent_file_selection_prompt.txt"), + "agent_command_selection_prompt": Path("text/agent_command_selection_prompt.txt"), + "block_parser_prompt": Path("text/block_parser_prompt.txt"), + "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), + "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), + "unified_diff_parser_prompt": 
Path("text/unified_diff_parser_prompt.txt"), + "json_parser_prompt": Path("text/json_parser_prompt.txt") + } } - ) - temperature: float = 0.2 - maximum_context: Optional[int] = None - token_buffer: int = 1000 - no_parser_prompt: bool = False + self.prompts = prompts_type.get(prompt_type, {}) -@dataclass() + +@dataclass class UISettings(DataClassJsonMixin): input_style: Dict[str, str] = field( default_factory=lambda: { @@ -74,42 +123,62 @@ class UISettings(DataClassJsonMixin): } ) -@dataclass() -class ParserSettings: - # The type of parser that should be ued - parser: Any = BlockParser(), - parser_type: str = "block" + def __init__(self, input_style: Optional[Dict[str, str]] = None) -> None: + if input_style is not None: + self.input_style = input_style @dataclass -@attr.s(auto_attribs=True) -class YamlConfig: - file_exclude_glob_list: List[str] = field(default_factory=lambda:[]) - model: str = "gpt-4-1106-preview" - temperature: float = 0.2 - prompt_type: str = "text" - maximum_context: int = 16000 - auto_context_tokens: int = 0 - format: str = "block" - input_style: Dict[str, str] = field( - default_factory=lambda: { - "": "#9835bd", - "prompt": "#ffffff bold", - "continuation": "#ffffff bold", +class ParserSettings(DataClassJsonMixin): + parser: Any = BlockParser() + parser_type: str = "block" + + def __init__(self, parser_type: Optional[str] = "block"): + if parser_type is not None: + self.load_parser(parser_type) + else: + self.load_parser("block") + + + def load_parser(self, parser_type: str) -> None: + parsers = { + "block": BlockParser, + "replacement": ReplacementParser, + "unified-diff": UnifiedDiffParser } - ) - def __getitem__(self, item: str) -> Any: - return self.__dict__[item] + if parser := parsers.get(parser_type): + self.parser_type = parser_type + self.parser = parser() + else: + self.parser_type = "block" + self.parser = parsers["block"]() + + +@dataclass +class RunningSessionConfig(DataClassJsonMixin): + model: Optional[str] = "gpt-4-1106-preview" + temperature: Optional[float] = 0.2 + prompt_type: Optional[str] = "text" + file_exclude_glob_list: Optional[List[str]] = field(default_factory=list) # Use default factory for list + format: Optional[str] = "block" + input_style: Optional[Dict[str, str]] = field(default_factory=lambda: { # Use default factory for dict + "": "#9835bd", + "prompt": "#ffffff bold", + "continuation": "#ffffff bold", + }) + maximum_context: Optional[int] = None + auto_context_tokens: Optional[int] = 0 @classmethod - def get_fields(cls): - return list(cls.__annotations__.keys()) + def get_fields(cls) -> List[str]: + return [f.name for f in fields(cls)] + -@dataclass() +@dataclass class MentatConfig: # Directory where the mentat is running - root: Path = field(default_factory=lambda: APP_ROOT), + root: Path = field(default_factory=lambda: APP_ROOT), # pyright: ignore[reportGeneralTypeIssues] user_config_path: Path = field(default_factory=lambda: user_config_path) run: RunSettings = field(default_factory=RunSettings) @@ -117,38 +186,12 @@ class MentatConfig: ui: UISettings = field(default_factory=UISettings) parser: ParserSettings = field(default_factory=ParserSettings) + def load_yaml(path: str) -> dict[str, Any | None]: """Load the data from the YAML file.""" with open(path, 'r') as file: return yaml.safe_load(file) -def merge_configs(original: dict[str, Optional[Any]], new: dict[str, Optional[Any]]) -> dict[str, Optional[Any]]: - """Merge two dictionaries, with the second one overwriting the values in the first one.""" - original.update(new) # 
Update the original dict with the new one - return original # Return the merged dict - -def yaml_to_config(yaml_dict: dict[str, Any]) -> dict[str, Any | None]: - """gets the allowed config settings from a YAML""" - - config = { - "model": yaml_dict.get("model", "gpt-3"), - "prompt_type": yaml_dict.get("prompt_type", "text"), - "maximum_context": yaml_dict.get("maximum_context", 2048), - "input_style": yaml_dict.get("input_style", - [["", "#000000"], - ["prompt", "#000000 bold"], - ["continuation", "#000000 bold"]]), - "format": yaml_dict.get('format', 'block'), - "sampler_repo": yaml_dict.get('sampler', {}).get('repo', None), - "sampler_merge_base_target": yaml_dict.get('sampler', {}).get('merge_base_target', None) - } - - if yaml_dict.get("file_exclude_glob_list") is None: - config["file_exclude_glob_list"] = [] - else: - config["file_exclude_glob_list"] = yaml_dict["file_exclude_glob_list"] - - return config def init_config() -> None: """Initialize the configuration file if it doesn't exist.""" @@ -161,91 +204,67 @@ def init_config() -> None: shutil.copy(default_conf_path, current_conf_path) -def load_prompts(prompt_type: str): - - if prompt_type == "markdown": - return { - "agent_file_selection_prompt" : Path("markdown/agent_file_selection_prompt.md"), - "agent_command_selection_prompt" : Path("markdown/agent_command_selection_prompt.md"), - "block_parser_prompt" : Path("markdown/block_parser_prompt.md"), - "feature_selection_prompt" : Path("markdown/feature_selection_prompt.md"), - "replacement_parser_prompt" : Path("markdown/replacement_parser_prompt.md"), - "unified_diff_parser_prompt" : Path("markdown/unified_diff_parser_prompt.md"), - "json_parser_prompt" : Path("markdown/json_parser_prompt.md"), - } - - return { - "agent_file_selection_prompt": Path("text/agent_file_selection_prompt.txt"), - "agent_command_selection_prompt": Path("text/agent_command_selection_prompt.txt"), - "block_parser_prompt": Path("text/block_parser_prompt.txt"), - "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), - "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), - "unified_diff_parser_prompt": Path("text/unified_diff_parser_prompt.txt"), - "json_parser_prompt": Path("text/json_parser_prompt.txt"), - } - -def load_settings(config_session_dict: Optional[dict[str, Any | None]] = None): +def load_settings(config_session: Optional[RunningSessionConfig] = None): """Load the configuration from the `.mentatconf.yaml` file.""" user_conf_path = USER_MENTAT_ROOT / '.mentatconf.yaml' git_root = get_git_root_for_path(APP_ROOT, raise_error=False) - yaml_config = YamlConfig() + yaml_config = RunningSessionConfig() if user_conf_path.exists(): - yaml_dict = load_yaml(str(user_conf_path)) - user_config = yaml_to_config(yaml_dict) - yaml_config.__dict__.update(user_config) + data = load_yaml(str(user_conf_path)) + yaml_config = yaml_config.from_dict(kvs=data, infer_missing=True) # pyright: ignore[reportUnknownMemberType] if git_root is not None: git_conf_path = Path(git_root) / '.mentatconf.yaml' if git_conf_path.exists(): - yaml_dict = load_yaml(str(git_conf_path)) - git_config = yaml_to_config(yaml_dict) - yaml_config.__dict__.update(git_config) + data = load_yaml(str(git_conf_path)) + yaml_config = yaml_config.from_dict(kvs=data, infer_missing=True) # pyright: ignore[reportUnknownMemberType] + # safety checks for missing values + if yaml_config.file_exclude_glob_list is None: + yaml_config.file_exclude_glob_list = [] + if yaml_config.temperature is None: + 
+        yaml_config.temperature = 0.2
 
-    if config_session_dict is not None:
-        if 'file_exclude_glob_list' in config_session_dict and config_session_dict['file_exclude_glob_list'] is not None:
-            yaml_config.file_exclude_glob_list.extend(config_session_dict['file_exclude_glob_list'])
+    if config_session is not None:
+        if config_session.file_exclude_glob_list is not None:
+            yaml_config.file_exclude_glob_list.extend(config_session.file_exclude_glob_list)
 
-        if 'model' in config_session_dict and config_session_dict['model'] is not None:
-            yaml_config.model = str(config_session_dict['model'])
+        if config_session.model is not None:
+            yaml_config.model = str(config_session.model)
 
-        if 'temperature' in config_session_dict and config_session_dict['temperature'] is not None:
-            yaml_config.temperature = str(config_session_dict['temperature'])
+        if config_session.temperature is not None:
+            yaml_config.temperature = float(config_session.temperature)
 
-        if 'maximum_context' in config_session_dict and config_session_dict['maximum_context'] is not None:
-            yaml_config.maximum_context = str(config_session_dict['maximum_context'])
+        if config_session.maximum_context is not None:
+            yaml_config.maximum_context = int(config_session.maximum_context)
 
-    file_exclude_glob_list: List[str] = yaml_config['file_exclude_glob_list']
+    file_exclude_glob_list: List[str] = yaml_config.file_exclude_glob_list or []
 
-    #always ignore .mentatconf
+    # always ignore .mentatconf
     file_exclude_glob_list.append(".mentatconf.yaml")
 
     run_settings = RunSettings(
-        file_exclude_glob_list=[Path(p) for p in file_exclude_glob_list], # pyright: ignore[reportUnknownVariableType]
+        file_exclude_glob_list=[Path(p) for p in file_exclude_glob_list],  # pyright: ignore[reportUnknownVariableType]
         auto_context_tokens=yaml_config.auto_context_tokens
     )
 
     ui_settings = UISettings(
-        input_style=yaml_config.input_style or [] # pyright: ignore[reportGeneralTypeIssues]
+        input_style=yaml_config.input_style  # pyright: ignore[reportGeneralTypeIssues]
     )
 
     ai_model_settings = AIModelSettings(
         model=yaml_config.model,
         temperature=yaml_config.temperature,
-        prompts=load_prompts(yaml_config.prompt_type),
         feature_selection_model=yaml_config.model,
         maximum_context=yaml_config.maximum_context
     )
 
     parser_type = yaml_config.format
-    parser_settings = ParserSettings(
-        parser_type=parser_type,
-        parser=parser_map[parser_type]
-    )
+    parser_settings = ParserSettings(parser_type=parser_type)
 
     user_session.set("config", MentatConfig(
         run=run_settings,
@@ -255,9 +274,52 @@
 
-def update_config(session_config: Dict[str, Union[List[str], None, str, int, float]]) -> None:
+mid_session_config = ["model",
+                      "temperature",
+                      "format",
+                      "maximum_context",
+                      "auto_context_tokens"]
+
+
+def update_config(setting: str, value: str | float | int) -> None:
+    """Update a single config setting mid-session and report the result to the stream."""
+    config = mentat.user_session.get("config")
+    session_context = SESSION_CONTEXT.get()
+    stream = session_context.stream
+
+    try:
+        if setting == "model":
+            config.ai.model = value
+        elif setting == "temperature":
+            config.ai.temperature = float(value)
+        elif setting == "format":
+            config.parser.load_parser(value)
+        elif setting == "maximum_context":
+            config.ai.maximum_context = int(value)
+        elif setting == "auto_context_tokens":
+            config.run.auto_context_tokens = value
+
+        stream.send(f"{setting} set to {value}", color="green")
+    except (TypeError, ValueError) as e:
+        stream.send(
+            f"Illegal value for {setting}: {value}. Error: {str(e)}", color="red"
+        )
+
+
+def get_config(setting: str) -> None:
+    """Send the current value of a single config setting to the session stream."""
+    config = mentat.user_session.get("config")
+    session_context = SESSION_CONTEXT.get()
+    stream = session_context.stream
+
+    if setting == "model":
+        stream.send(f"{setting}: {config.ai.model}", color="green")
+    elif setting == "temperature":
+        stream.send(f"{setting}: {config.ai.temperature}", color="green")
+    elif setting == "format":
+        stream.send(f"{setting}: {config.parser.parser_type}", color="green")
+    elif setting == "maximum_context":
+        stream.send(f"{setting}: {config.ai.maximum_context}", color="green")
+    elif setting == "auto_context_tokens":
+        stream.send(f"{setting}: {config.run.auto_context_tokens}", color="green")
 
 
 def load_config() -> None:
diff --git a/mentat/feature_filters/llm_feature_filter.py b/mentat/feature_filters/llm_feature_filter.py
index ca8db2f66..247420652 100644
--- a/mentat/feature_filters/llm_feature_filter.py
+++ b/mentat/feature_filters/llm_feature_filter.py
@@ -50,16 +50,16 @@ async def filter(
         llm_api_handler = session_context.llm_api_handler
 
         # Preselect as many features as fit in the context window
-        model = config.ai.feature_selection_model
+        model = self.config.ai.feature_selection_model
         context_size = model_context_size(model)
         if context_size is None:
             raise UserError(
                 "Unknown context size for feature selection model: "
-                f"{config.ai.feature_selection_model}"
+                f"{self.config.ai.feature_selection_model}"
             )
         system_prompt = read_prompt(self.feature_selection_prompt_path)
         system_prompt_tokens = count_tokens(
-            system_prompt, config.ai.feature_selection_model, full_message=True
+            system_prompt, self.config.ai.feature_selection_model, full_message=True
         )
         user_prompt_tokens = count_tokens(self.user_prompt, model, full_message=True)
         expected_edits_tokens = (
@@ -72,7 +72,7 @@ async def filter(
             - system_prompt_tokens
             - user_prompt_tokens
             - expected_edits_tokens
-            - config.ai.token_buffer
+            - self.config.ai.token_buffer
         )
         truncate_filter = TruncateFilter(preselect_max_tokens, model)
         preselected_features = await truncate_filter.filter(features)
@@ -172,5 +172,5 @@ async def filter(
             named_features.add(parsed_feature)
 
         # Greedy again to enforce max_tokens
-        truncate_filter = TruncateFilter(self.max_tokens, config.ai.model)
+        truncate_filter = TruncateFilter(self.max_tokens, self.config.ai.model)
         return await truncate_filter.filter(named_features)
diff --git a/mentat/llm_api_handler.py b/mentat/llm_api_handler.py
index 30bfa0e3a..ad15ea391 100644
--- a/mentat/llm_api_handler.py
+++ b/mentat/llm_api_handler.py
@@ -41,7 +41,7 @@
 import mentat
 from mentat.errors import ContextSizeInsufficient, MentatError, UserError
 from mentat.session_context import SESSION_CONTEXT
-from mentat.utils import mentat_dir_path, dd
+from mentat.utils import mentat_dir_path
 
 TOKEN_COUNT_WARNING = 32000
@@ -197,7 +197,6 @@ def get_max_tokens() -> int:
     context_size = model_context_size(config.ai.model)
     maximum_context = config.ai.maximum_context
-
     if context_size is not None and maximum_context is not None:
         return min(int(context_size), int(maximum_context))
     elif context_size is not None:
@@ -218,6 +217,7 @@ def is_context_sufficient(tokens: int) -> bool:
     ctx = SESSION_CONTEXT.get()
 
     max_tokens = get_max_tokens()
+
     if max_tokens - tokens < config.ai.token_buffer:
         ctx.stream.send(
             f"The context size is limited to {max_tokens} tokens and your current"
diff --git a/mentat/resources/conf/.mentatconf.yaml
b/mentat/resources/conf/.mentatconf.yaml index 71ea729f0..497a78677 100644 --- a/mentat/resources/conf/.mentatconf.yaml +++ b/mentat/resources/conf/.mentatconf.yaml @@ -1,13 +1,18 @@ +#settings related to the AI are below + # This field is for specifying the model name. You can find the list of valid options at https://platform.openai.com/docs/models/overview model: gpt-4-1106-preview +temperature: 0.2 # For models other than gpt-3.5 and gpt-4, the model's context size can't be inferred. # In such cases, you need to specify the maximum context manually. -# maximum_context: 16000 +maximum_context: #the type of prompts that the agent should be using options are text and markdown prompt_type: text +#settings related to each "run" + # This list contains glob patterns. Mentat uses these patterns to exclude certain files when provided with a directory argument. # Mentat considers all files that do not match your .gitignore file and these patterns. # Glob patterns are interpreted from the git root location, so if you want to exclude all .py files, use "**/*.py" instead of "*.py". @@ -15,18 +20,20 @@ prompt_type: text file_exclude_glob_list: # - "**/.*" # - "**/.*/**" +auto_context_tokens: -# This section contains key-value pairs for defining a custom Pygment Style for the Mentat prompt. -input_style: - - - "" - - "#9835bd" - - - "prompt" - - "#ffffff bold" - - - "continuation" - - "#ffffff bold" +#settings related to the "parser" # Mentat parses files following a specific format, which you can set here. # Multiple formats are available, though the default one is expected to be the best fit for most cases. # You can experiment with different formats as per your need. # Available formats include: block, replacement, unified-diff. format: block + +#settings related to each "UI" + +# This section contains key-value pairs for defining a custom Pygment Style for the Mentat prompt. +input_style: + "" : "#9835bd" + "prompt" : "#ffffff bold" + "continuation" : "#ffffff bold" diff --git a/mentat/session.py b/mentat/session.py index 8b2982ef9..35bd9dc5d 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -38,7 +38,7 @@ class Session: To stop, send a message on the session_exit channel. A message will be sent on the client_exit channel when ready for client to quit. """ - + _errors: List[str] = [] # pyright: ignore[reportGeneralTypeIssues] def __init__( self, cwd: Path, @@ -58,8 +58,6 @@ def __init__( self.id = uuid4() self._tasks: Set[asyncio.Task[None]] = set() - self._errors: List[Any] = [] - # Since we can't set the session_context until after all of the singletons are created, # any singletons used in the constructor of another singleton must be passed in git_root = get_git_root_for_path(cwd, raise_error=False) @@ -108,6 +106,7 @@ def __init__( self.send_errors_to_stream() for path in paths: code_context.include(path, exclude_patterns=exclude_paths) + if ( code_context.diff_context is not None and len(code_context.include_files) == 0 @@ -166,11 +165,13 @@ async def _main(self): conversation.add_user_message(message.data) parsed_llm_response = await conversation.get_model_response() + file_edits = [ file_edit for file_edit in parsed_llm_response.file_edits if file_edit.is_valid() ] + if file_edits: if not agent_handler.agent_enabled: file_edits, need_user_request = ( @@ -228,12 +229,14 @@ def start(self): the main loop which runs until an Exception or session_exit signal is encountered. 
""" + self.stream.send("ABC", color="red") + async def run_main(): try: with sentry_sdk.start_transaction( op="mentat_started", name="Mentat Started" ) as transaction: - #TODO: Does this need to be here? + #transaction.set_tag("config", attr.asdict(ctx.config)) transaction.set_tag("config", "config") await self._main() except (SessionExit, CancelledError): @@ -288,5 +291,5 @@ def send_errors_to_stream(self): session_context = SESSION_CONTEXT.get() stream = session_context.stream for error in self._errors: - stream.send(error, color="yellow") + stream.send(str(error), color="yellow") self._errors = [] diff --git a/mentat/session_stream.py b/mentat/session_stream.py index 09751d3f9..7bf47a211 100644 --- a/mentat/session_stream.py +++ b/mentat/session_stream.py @@ -82,7 +82,6 @@ def send( created_at=datetime.utcnow(), extra=kwargs, ) - self.messages.append(message) self._broadcast.publish(channel=channel, message=message) diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 75d626bec..9a87cf583 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -19,7 +19,7 @@ from mentat.terminal.prompt_completer import MentatCompleter from mentat.terminal.prompt_session import MentatPromptSession -from typing import List, Dict, Union +from typing import List from pathlib import Path import click @@ -216,30 +216,14 @@ def run(self): @click.argument('paths', nargs=-1, required=True) def start(paths: list[str], exclude_paths: list[str], ignore_paths: list[str], diff: Optional[str], pr_diff: Optional[str], cwd: Optional[str], model: Optional[str], temperature: Optional[float], maximum_context: Optional[int]) -> None: - # Check if these variables are set and pass them to update_config function as kwargs - session_config: Dict[str, Union[List[str], None, str, int, float]] = { - 'file_exclude_glob_list': [], - 'model': None, - 'temperature': None, - 'maximum_context': None - } - - if exclude_paths: - session_config["file_exclude_glob_list"] = exclude_paths - - if model: - session_config["model"] = model - - if temperature: - session_config["temperature"] = temperature - - if maximum_context: - session_config["maximum_context"] = maximum_context - - sampler.init_settings() - update_config(session_config) + if model is not None: + update_config("model", model) + if temperature is not None: + update_config("temperature", temperature) + if maximum_context is not None: + update_config("maximum_context", maximum_context) current_working_directory = Path.cwd() if cwd: diff --git a/mentat/terminal/output.py b/mentat/terminal/output.py index 3d770a963..ee8d5ae76 100644 --- a/mentat/terminal/output.py +++ b/mentat/terminal/output.py @@ -36,7 +36,6 @@ def print_stream_message(message: StreamMessage): color = message.extra["color"] if isinstance(message.extra.get("flush"), bool): flush = message.extra["flush"] - _print_stream_message_string( content=message.data, end=end, diff --git a/tests/code_context_test.py b/tests/code_context_test.py index 2984c40f5..b44bdbe1d 100644 --- a/tests/code_context_test.py +++ b/tests/code_context_test.py @@ -194,7 +194,9 @@ async def test_text_encoding_checking(temp_testbed, mock_session_context): @pytest.mark.asyncio @pytest.mark.clear_testbed async def test_max_auto_tokens(mocker, temp_testbed, mock_session_context): - update_config({"maximum_context" : 8000}) + config = mentat.user_session.get("config") + config.ai.maximum_context = 8000 + mentat.user_session.set("config", config) with open("file_1.py", "w") as f: f.write(dedent("""\ diff --git 
a/tests/commands_test.py b/tests/commands_test.py index e5c4d4a61..26c82424f 100644 --- a/tests/commands_test.py +++ b/tests/commands_test.py @@ -4,12 +4,14 @@ import pytest +import mentat from mentat.code_feature import CodeFeature from mentat.command.command import Command, InvalidCommand from mentat.command.commands.context import ContextCommand from mentat.command.commands.help import HelpCommand from mentat.session import Session from mentat.session_context import SESSION_CONTEXT +from mentat.utils import dd def test_invalid_command(): @@ -290,16 +292,16 @@ async def test_context_command(temp_testbed, mock_call_llm_api): @pytest.mark.asyncio async def test_config_command(mock_call_llm_api): session_context = SESSION_CONTEXT.get() - config = session_context.config stream = session_context.stream command = Command.create_command("config") await command.apply("test") assert stream.messages[-1].data == "Unrecognized config option: test" await command.apply("model") assert stream.messages[-1].data.startswith("model: ") - await command.apply("model", "test") - assert stream.messages[-1].data == "model set to test" - assert config.model == "test" + await command.apply("model", "gpt-4-32k") + assert stream.messages[-1].data == "model set to gpt-4-32k" + config = mentat.user_session.get("config") + assert config.ai.model == "gpt-4-32k" await command.apply("model", "test", "lol") assert stream.messages[-1].data == "Too many arguments" @@ -310,11 +312,11 @@ async def test_screenshot_command(mocker): session_context = SESSION_CONTEXT.get() mock_vision_manager = mocker.MagicMock() session_context.vision_manager = mock_vision_manager - config = session_context.config + config = mentat.user_session.get("config") stream = session_context.stream conversation = session_context.conversation - assert config.model != "gpt-4-vision-preview" + assert config.ai.model != "gpt-4-vision-preview" mock_vision_manager.screenshot.return_value = "fake_image_data" @@ -322,7 +324,7 @@ async def test_screenshot_command(mocker): await screenshot_command.apply("fake_path") mock_vision_manager.screenshot.assert_called_once_with("fake_path") - assert config.model == "gpt-4-vision-preview" + assert config.ai.model == "gpt-4-vision-preview" assert stream.messages[-1].data == "Screenshot taken for: fake_path." 
assert conversation._messages[-1] == { "role": "user", @@ -333,11 +335,11 @@ async def test_screenshot_command(mocker): } # Test non-gpt models aren't changed - config.model = "test" + config.ai.model = "test" await screenshot_command.apply("fake_path") - assert config.model == "test" + assert config.ai.model == "test" # Test other models containing vision aren't changed - config.model = "gpt-vision" + config.ai.model = "gpt-vision" await screenshot_command.apply("fake_path") - assert config.model == "gpt-vision" + assert config.ai.model == "gpt-vision" diff --git a/tests/config_test.py b/tests/config_test.py index dcedf7ca3..041bb2233 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -32,52 +32,20 @@ async def test_load_yaml(mock_open): assert config.load_yaml('test_path') == data mock_open.assert_called_with('test_path', 'r') -@pytest.mark.asyncio -async def test_merge_configs(): - original = {'key1': 'value1', 'key2': 'value2'} - new = {'key2': 'new_value2', 'key3': 'new_value3'} - merged = config.merge_configs(original, new) - assert merged == {'key1': 'value1', 'key2': 'new_value2', 'key3': 'new_value3'} - - @pytest.mark.asyncio async def test_default_config(): "This test verifies that a config is created with default settings required for the run." config = mentat.user_session.get("config") assert config.ai.model == "gpt-4-1106-preview" - assert config.ai.maximum_context == None + assert config.ai.maximum_context == 16000 assert config.run.auto_tokens == 8000 assert config.run.auto_context == False - assert config.ui.input_style == [["", "#9835bd"], - ["prompt", "#ffffff bold"], - ["continuation", "#ffffff bold"]] - assert config.parser.parser_type == 'block' -@pytest.mark.asyncio -async def test_update_config(): - "This test verifies that a config is created with default settings required for the run." - config = mentat.user_session.get("config") - - #assert that default settings are in place before we change them. 
- assert config.ai.model == "gpt-4-1106-preview" - assert config.ai.maximum_context == None - - session_config = { - 'model': 'abc-123', - 'maximum_context': 16000 - } - - update_config(session_config) - - assert config.config.ai.model == "abc-123" - assert config.config.ai.maximum_context == 16000 - - diff --git a/tests/conftest.py b/tests/conftest.py index a84c02563..485f0e2fa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -18,6 +18,8 @@ from mentat.config import load_config +from mentat.parsers.block_parser import BlockParser + #first thing we do is we init a default config load_config() @@ -35,7 +37,6 @@ from mentat.session_context import SESSION_CONTEXT, SessionContext from mentat.session_stream import SessionStream, StreamMessage, StreamMessageSource from mentat.streaming_printer import StreamingPrinter -from mentat.utils import dd from mentat.vision.vision_manager import VisionManager pytest_plugins = ("pytest_reportlog",) @@ -211,6 +212,7 @@ def set_unstreamed_values(value): Choice( finish_reason="stop", index=0, + logprobs=None, message=ChatCompletionMessage( content=value, role="assistant", @@ -269,6 +271,17 @@ def mock_session_context(temp_testbed): #reset the config context load_config() + #autoset some settings to conform to tests + config = mentat.user_session.get("config") + config.root = temp_testbed + config.run.file_exclude_glob_list = [] + config.ai.maximum_context = 16000 + config.ai.load_prompts('text') + config.parser.parser_type = "block" + config.parser.parser = BlockParser() + mentat.user_session.set("config", config) + + git_root = get_git_root_for_path(temp_testbed, raise_error=False) stream = SessionStream() diff --git a/tests/conversation_test.py b/tests/conversation_test.py index 6106c44b8..d5cf18b23 100644 --- a/tests/conversation_test.py +++ b/tests/conversation_test.py @@ -1,3 +1,4 @@ +import mentat from mentat.parsers.block_parser import BlockParser from mentat.parsers.replacement_parser import ReplacementParser from mentat.session_context import SESSION_CONTEXT @@ -5,15 +6,22 @@ def test_midconveration_parser_change(mock_call_llm_api): session_context = SESSION_CONTEXT.get() - config = session_context.config conversation = session_context.conversation - config.parser = "block" + config = mentat.user_session.get("config") + + config.parser.parser_type = "block" + config.parser.parser = BlockParser() + mentat.user_session.set("config", config) + assert ( conversation.get_messages()[0]["content"] == BlockParser().get_system_prompt() ) - config.parser = "replacement" + config.parser.parser_type = "replacement" + config.parser.parser = ReplacementParser() + mentat.user_session.set("config", config) + assert ( conversation.get_messages()[0]["content"] == ReplacementParser().get_system_prompt() @@ -22,11 +30,12 @@ def test_midconveration_parser_change(mock_call_llm_api): def test_no_parser_prompt(mock_call_llm_api): session_context = SESSION_CONTEXT.get() - config = session_context.config + config = mentat.user_session.get("config") conversation = session_context.conversation assert len(conversation.get_messages()) == 1 - config.no_parser_prompt = True + config.ai.no_parser_prompt = True + mentat.user_session.set("config", config) assert len(conversation.get_messages()) == 0 diff --git a/tests/feature_filters/llm_feature_filter_test.py b/tests/feature_filters/llm_feature_filter_test.py index db6501912..5b209f790 100644 --- a/tests/feature_filters/llm_feature_filter_test.py +++ b/tests/feature_filters/llm_feature_filter_test.py @@ -1,7 +1,9 @@ import 
pytest +import mentat from mentat.code_feature import CodeFeature from mentat.feature_filters.llm_feature_filter import LLMFeatureFilter +from mentat.utils import dd @pytest.mark.asyncio diff --git a/tests/parser_tests/replacement_format_error_test.py b/tests/parser_tests/replacement_format_error_test.py index e37869367..41098f1fa 100644 --- a/tests/parser_tests/replacement_format_error_test.py +++ b/tests/parser_tests/replacement_format_error_test.py @@ -3,9 +3,11 @@ import pytest +import mentat from mentat.config import ParserSettings from mentat.parsers.replacement_parser import ReplacementParser from mentat.session import Session +from mentat.utils import dd @pytest.fixture(autouse=True) @@ -18,8 +20,14 @@ async def test_invalid_line_numbers( mock_call_llm_api, mock_collect_user_input, ): - temp_file_name = "temp.py" - with open(temp_file_name, "w") as f: + temp_file_name ="temp.py" + temp_file_location = Path.cwd() / temp_file_name + + config = mentat.user_session.get("config") + config.parser.parser = ReplacementParser() + mentat.user_session.set('config', config) + + with open(temp_file_location, "w") as f: f.write(dedent("""\ # This is a temporary file # with 2 lines""")) @@ -44,15 +52,16 @@ async def test_invalid_line_numbers( # I also will not be used @""")]) - session = Session(cwd=Path.cwd(), paths=[temp_file_name]) + session = Session(cwd=Path.cwd(), paths=[Path(temp_file_location)]) session.start() await session.stream.recv(channel="client_exit") - with open(temp_file_name, "r") as f: + with open(temp_file_location, "r") as f: content = f.read() expected_content = dedent("""\ # This is a temporary file # I inserted this comment # with 2 lines""") + assert content == expected_content @@ -61,6 +70,10 @@ async def test_invalid_special_line( mock_call_llm_api, mock_collect_user_input, ): + config = mentat.user_session.get("config") + config.parser.parser = ReplacementParser() + mentat.user_session.set('config', config) + temp_file_name = "temp.py" with open(temp_file_name, "w") as f: f.write(dedent("""\ diff --git a/tests/parser_tests/replacement_format_test.py b/tests/parser_tests/replacement_format_test.py index 855c43569..f84d2d76e 100644 --- a/tests/parser_tests/replacement_format_test.py +++ b/tests/parser_tests/replacement_format_test.py @@ -3,15 +3,19 @@ import pytest +import mentat from mentat.config import ParserSettings from mentat.parsers.replacement_parser import ReplacementParser from mentat.session import Session from tests.parser_tests.inverse import verify_inverse -@pytest.fixture -def replacement_parser(mocker): - mocker.patch.object(ParserSettings, "parser", new=ReplacementParser()) +@pytest.fixture() +def replacement_parser(): + config = mentat.user_session.get("config") + config.parser.parser = ReplacementParser() + mentat.user_session.set('config', config) + @pytest.mark.asyncio diff --git a/tests/parser_tests/unified_diff_format_test.py b/tests/parser_tests/unified_diff_format_test.py index d62e4bad0..9ea411514 100644 --- a/tests/parser_tests/unified_diff_format_test.py +++ b/tests/parser_tests/unified_diff_format_test.py @@ -11,8 +11,11 @@ @pytest.fixture(autouse=True) -def unified_diff_parser(mocker): - mocker.patch.object(ParserSettings, "parser", new=UnifiedDiffParser()) +def unified_diff_parser(): + config = mentat.user_session.get("config") + config.parser.parser = UnifiedDiffParser() + mentat.user_session.set('config', config) + @pytest.mark.asyncio diff --git a/tests/sampler_test.py b/tests/sampler_test.py index 173289b4e..d46dc2ef1 100644 --- 
a/tests/sampler_test.py
+++ b/tests/sampler_test.py
@@ -17,9 +17,10 @@
 from mentat.python_client.client import PythonClient
 from mentat.sampler import __version__
 from mentat.sampler.sample import Sample
-from mentat.sampler.sampler import Sampler
+from mentat.sampler.sampler import Sampler, init_settings
 from mentat.sampler.utils import get_active_snapshot_commit
 from mentat.session import Session
+from mentat.utils import dd
 from scripts.evaluate_samples import evaluate_sample

@@ -35,7 +36,7 @@ async def test_sample_from_context(
     mock_session_context,
     mock_collect_user_input,
 ):
-    mock_session_context.config.sample_repo = "test_sample_repo"
+    init_settings(repo="test_sample_repo", merge_base_target="")

     mocker.patch(
         "mentat.conversation.Conversation.get_messages",
@@ -99,6 +100,8 @@ def is_sha1(string: str) -> bool:

 @pytest.mark.asyncio
 async def test_sample_command(temp_testbed, mock_collect_user_input, mock_call_llm_api):
+    init_settings(repo=None)
+
     mock_collect_user_input.set_stream_messages(
         [
             "Request",
@@ -327,6 +330,7 @@ def get_updates_as_parsed_llm_message(cwd):
 async def test_sampler_integration(
     temp_testbed, mock_session_context, mock_call_llm_api
 ):
+    init_settings(repo=None)
     # Setup the environemnt
     repo = Repo(temp_testbed)
     (temp_testbed / "test_file.py").write_text("permanent commit")
@@ -403,6 +407,7 @@ async def test_sampler_integration(
     # Evaluate the sample using Mentat
     sample_files = list(temp_testbed.glob("sample_*.json"))
     assert len(sample_files) == 1
+
     sample = Sample.load(sample_files[0])
     assert sample.title == "test_title"
     assert sample.description == "test_description"

From 46a407052c913bdbdc03449a9a23e20a23188f91 Mon Sep 17 00:00:00 2001
From: Greg L
Date: Fri, 29 Dec 2023 16:53:19 -0500
Subject: [PATCH 20/24] Update code style for better readability and maintainability

Various changes improve the readability and maintainability of the code:
lists and function arguments are reformatted for better visibility, syntax
is updated to meet PEP 8 standards, and all diffs are consistently
formatted.
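
The most common mechanical change in this patch collapses a wrapped literal
into "hugged" brackets. A hypothetical before/after sketch (the variable name
and values here are illustrative only, not code from this repository):

    # before: the call and the list literal each open on their own line
    accepted_licenses = set(
        [
            "MIT License",
            "Apache Software License",
        ]
    )

    # after: the call and the list literal share their bracket lines
    accepted_licenses = set([
        "MIT License",
        "Apache Software License",
    ])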
--- .github/workflows/benchmarks.yml | 2 +- .github/workflows/lint_and_test.yml | 2 +- mentat/__init__.py | 1 + mentat/agent_handler.py | 13 +- mentat/code_context.py | 24 +-- mentat/code_feature.py | 1 - mentat/command/commands/config.py | 8 +- mentat/command/commands/search.py | 2 +- mentat/config.py | 166 +++++++++++------- mentat/conversation.py | 8 +- mentat/feature_filters/llm_feature_filter.py | 9 +- mentat/git_handler.py | 1 - mentat/include_files.py | 1 - mentat/parsers/block_parser.py | 4 +- mentat/parsers/change_display_helper.py | 54 +++--- mentat/parsers/json_parser.py | 5 +- mentat/parsers/replacement_parser.py | 4 +- mentat/parsers/unified_diff_parser.py | 5 +- mentat/python_client/client.py | 2 +- mentat/sampler/sampler.py | 30 ++-- mentat/session.py | 6 +- mentat/terminal/__init__.py | 3 +- mentat/terminal/client.py | 81 ++++++--- mentat/user_session.py | 5 +- mentat/utils.py | 8 +- scripts/git_log_to_transcripts.py | 10 +- .../benchmarks/mentat/license_update.py | 26 ++- tests/code_context_test.py | 5 +- tests/code_file_manager_test.py | 48 +++-- tests/commands_test.py | 98 +++++------ tests/config_test.py | 33 ++-- tests/conftest.py | 12 +- tests/embeddings_test.py | 14 +- tests/llm_api_handler_test.py | 10 +- tests/parser_tests/block_format_error_test.py | 12 +- tests/parser_tests/block_format_test.py | 122 ++++++------- .../replacement_format_error_test.py | 30 ++-- tests/parser_tests/replacement_format_test.py | 89 ++++------ .../unified_diff_format_error_test.py | 24 ++- .../parser_tests/unified_diff_format_test.py | 101 +++++------ tests/record_benchmark.py | 14 +- tests/sampler_test.py | 40 ++--- tests/system_test.py | 52 +++--- 43 files changed, 580 insertions(+), 605 deletions(-) diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index 6eff657de..58eddc72d 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -21,7 +21,7 @@ jobs: poetry install - name: Run and upload benchmarks - run: ./scripts/run_and_upload_benchmarks.sh + run: poetry run ./scripts/run_and_upload_benchmarks.sh env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 241a4c4ec..1371d4660 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -60,7 +60,7 @@ jobs: # Ensure that python doesn't import local mentat folder and that 'mentat' command calls mentat instead of switching folders. working-directory: ./testbed run: | - mentat + poetry run mentat license-check: runs-on: ubuntu-latest diff --git a/mentat/__init__.py b/mentat/__init__.py index c0dcc3e3a..cf1f4ccd9 100644 --- a/mentat/__init__.py +++ b/mentat/__init__.py @@ -8,5 +8,6 @@ def __dir__(): return __all__ + # Make sure to bump this on Release x.y.z PR's! 
__version__ = "1.0.7" diff --git a/mentat/agent_handler.py b/mentat/agent_handler.py index 05f19fcf5..255b100be 100644 --- a/mentat/agent_handler.py +++ b/mentat/agent_handler.py @@ -20,14 +20,21 @@ class AgentHandler: config = mentat.user_session.get("config") - agent_file_selection_prompt_path = config.ai.prompts.get("agent_file_selection_prompt", Path("text/agent_file_selection_prompt.txt")) - agent_command_prompt_path = config.ai.prompts.get("agent_command_selection_prompt", Path("text/agent_command_selection_prompt.txt")) + agent_file_selection_prompt_path = config.ai.prompts.get( + "agent_file_selection_prompt", Path("text/agent_file_selection_prompt.txt") + ) + agent_command_prompt_path = config.ai.prompts.get( + "agent_command_selection_prompt", + Path("text/agent_command_selection_prompt.txt"), + ) def __init__(self): self._agent_enabled = False self.agent_file_message = "" - self.agent_file_selection_prompt = read_prompt(self.agent_file_selection_prompt_path) + self.agent_file_selection_prompt = read_prompt( + self.agent_file_selection_prompt_path + ) self.agent_command_prompt = read_prompt(self.agent_command_prompt_path) # Make this property readonly because we have to set things when we enable agent mode diff --git a/mentat/code_context.py b/mentat/code_context.py index 0b416e731..345793dac 100644 --- a/mentat/code_context.py +++ b/mentat/code_context.py @@ -72,7 +72,9 @@ def display_context(self): if config.run.auto_context_tokens > 0: stream.send(f"{prefix}Auto-Context: Enabled") - stream.send(f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}") + stream.send( + f"{prefix}Auto-Context Tokens: {config.run.auto_context_tokens}" + ) else: stream.send(f"{prefix}Auto-Context: Disabled") @@ -156,7 +158,9 @@ async def get_code_message( if not is_context_sufficient(tokens_used): raise ContextSizeInsufficient() - auto_tokens = min(get_max_tokens() - tokens_used, config.run.auto_context_tokens) + auto_tokens = min( + get_max_tokens() - tokens_used, config.run.auto_context_tokens + ) # Get auto included features if config.run.auto_context_tokens > 0 and prompt: @@ -191,9 +195,7 @@ def get_all_features( config = mentat.user_session.get("config") abs_exclude_patterns: Set[Path] = set() - for pattern in self.ignore_patterns.union( - config.run.file_exclude_glob_list - ): + for pattern in self.ignore_patterns.union(config.run.file_exclude_glob_list): if not Path(pattern).is_absolute(): abs_exclude_patterns.add(session_context.cwd / pattern) else: @@ -277,13 +279,11 @@ def include( path = Path(path) abs_exclude_patterns: Set[Path] = set() - all_exclude_patterns: Set[Union[str, Path]] = set( - [ - *exclude_patterns, - *self.ignore_patterns, - *config.run.file_exclude_glob_list, - ] - ) + all_exclude_patterns: Set[Union[str, Path]] = set([ + *exclude_patterns, + *self.ignore_patterns, + *config.run.file_exclude_glob_list, + ]) for pattern in all_exclude_patterns: if not Path(pattern).is_absolute(): diff --git a/mentat/code_feature.py b/mentat/code_feature.py index 16c4a2d24..350246b53 100644 --- a/mentat/code_feature.py +++ b/mentat/code_feature.py @@ -18,7 +18,6 @@ from mentat.session_context import SESSION_CONTEXT from mentat.utils import get_relative_path - MIN_INTERVAL_LINES = 10 diff --git a/mentat/command/commands/config.py b/mentat/command/commands/config.py index b322fd2f0..fd6b5ec40 100644 --- a/mentat/command/commands/config.py +++ b/mentat/command/commands/config.py @@ -9,11 +9,11 @@ class ConfigCommand(Command, command_name="config"): @override async def apply(self, 
*args: str) -> None: - from mentat.config import mid_session_config, update_config, get_config + from mentat.config import get_config, mid_session_config, update_config + session_context = SESSION_CONTEXT.get() stream = session_context.stream - if len(args) == 0: stream.send("No config option specified", color="yellow") elif len(args) == 1 or len(args) == 2: @@ -51,10 +51,10 @@ def argument_autocompletions( "prompt_type", "format", "maximum_context", - "auto_context_tokens" + "auto_context_tokens", ] elif argument_position == 1: - #TODO: Figure out a better way of doing this. + # TODO: Figure out a better way of doing this. return [] else: return [] diff --git a/mentat/command/commands/search.py b/mentat/command/commands/search.py index 0da9c24b6..5efc3c1f5 100644 --- a/mentat/command/commands/search.py +++ b/mentat/command/commands/search.py @@ -35,7 +35,7 @@ def _parse_include_input(user_input: str, max_num: int) -> Set[int] | None: class SearchCommand(Command, command_name="search"): @override async def apply(self, *args: str) -> None: - config = mentat.user_session.get('config') + config = mentat.user_session.get("config") session_context = SESSION_CONTEXT.get() stream = session_context.stream diff --git a/mentat/config.py b/mentat/config.py index a32e2b0ae..4b77f8ccc 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -1,22 +1,22 @@ from __future__ import annotations import os +import shutil +from dataclasses import dataclass, field, fields from pathlib import Path +from typing import Any, Dict, List, Optional + import yaml -import shutil +from dataclasses_json import DataClassJsonMixin import mentat from mentat import user_session - from mentat.git_handler import get_git_root_for_path from mentat.parsers.block_parser import BlockParser from mentat.parsers.replacement_parser import ReplacementParser from mentat.parsers.unified_diff_parser import UnifiedDiffParser from mentat.session_context import SESSION_CONTEXT from mentat.utils import mentat_dir_path -from dataclasses import dataclass, field, fields -from dataclasses_json import DataClassJsonMixin -from typing import Any, Dict, List, Optional config_file_name = Path(".mentat_config.yaml") user_config_path = mentat_dir_path / config_file_name @@ -28,6 +28,7 @@ bool_autocomplete = ["True", "False"] + @dataclass class RunSettings(DataClassJsonMixin): file_exclude_glob_list: List[Path] = field(default_factory=list) @@ -35,11 +36,13 @@ class RunSettings(DataClassJsonMixin): auto_tokens: int = 8000 auto_context_tokens: int = 0 - def __init__(self, - file_exclude_glob_list: Optional[List[Path]] = None, - auto_context: Optional[bool] = None, - auto_tokens: Optional[int] = None, - auto_context_tokens: Optional[int] = None) -> None: + def __init__( + self, + file_exclude_glob_list: Optional[List[Path]] = None, + auto_context: Optional[bool] = None, + auto_tokens: Optional[int] = None, + auto_context_tokens: Optional[int] = None, + ) -> None: if file_exclude_glob_list is not None: self.file_exclude_glob_list = file_exclude_glob_list if auto_context is not None: @@ -61,15 +64,17 @@ class AIModelSettings(DataClassJsonMixin): token_buffer: int no_parser_prompt: bool - def __init__(self, - model: Optional[str] = "gpt-4-1106-preview", - feature_selection_model: Optional[str] = "gpt-4-1106-preview", - embedding_model: Optional[str] = "text-embedding-ada-002", - prompts: Optional[str] = "text", - temperature: Optional[float] = 0.2, - maximum_context: Optional[int] = None, - token_buffer: Optional[int] = 1000, - no_parser_prompt: Optional[bool] = 
False): + def __init__( + self, + model: Optional[str] = "gpt-4-1106-preview", + feature_selection_model: Optional[str] = "gpt-4-1106-preview", + embedding_model: Optional[str] = "text-embedding-ada-002", + prompts: Optional[str] = "text", + temperature: Optional[float] = 0.2, + maximum_context: Optional[int] = None, + token_buffer: Optional[int] = 1000, + no_parser_prompt: Optional[bool] = False, + ): if model is not None: self.model = model if feature_selection_model is not None: @@ -87,27 +92,42 @@ def __init__(self, if no_parser_prompt is not None: self.no_parser_prompt = no_parser_prompt - def load_prompts(self, prompt_type: str) -> None: prompts_type = { "markdown": { - "agent_file_selection_prompt": Path("markdown/agent_file_selection_prompt.md"), - "agent_command_selection_prompt": Path("markdown/agent_command_selection_prompt.md"), + "agent_file_selection_prompt": Path( + "markdown/agent_file_selection_prompt.md" + ), + "agent_command_selection_prompt": Path( + "markdown/agent_command_selection_prompt.md" + ), "block_parser_prompt": Path("markdown/block_parser_prompt.md"), - "feature_selection_prompt": Path("markdown/feature_selection_prompt.md"), - "replacement_parser_prompt": Path("markdown/replacement_parser_prompt.md"), - "unified_diff_parser_prompt": Path("markdown/unified_diff_parser_prompt.md"), - "json_parser_prompt": Path("markdown/json_parser_prompt.md") + "feature_selection_prompt": Path( + "markdown/feature_selection_prompt.md" + ), + "replacement_parser_prompt": Path( + "markdown/replacement_parser_prompt.md" + ), + "unified_diff_parser_prompt": Path( + "markdown/unified_diff_parser_prompt.md" + ), + "json_parser_prompt": Path("markdown/json_parser_prompt.md"), }, "text": { - "agent_file_selection_prompt": Path("text/agent_file_selection_prompt.txt"), - "agent_command_selection_prompt": Path("text/agent_command_selection_prompt.txt"), + "agent_file_selection_prompt": Path( + "text/agent_file_selection_prompt.txt" + ), + "agent_command_selection_prompt": Path( + "text/agent_command_selection_prompt.txt" + ), "block_parser_prompt": Path("text/block_parser_prompt.txt"), "feature_selection_prompt": Path("text/feature_selection_prompt.txt"), "replacement_parser_prompt": Path("text/replacement_parser_prompt.txt"), - "unified_diff_parser_prompt": Path("text/unified_diff_parser_prompt.txt"), - "json_parser_prompt": Path("text/json_parser_prompt.txt") - } + "unified_diff_parser_prompt": Path( + "text/unified_diff_parser_prompt.txt" + ), + "json_parser_prompt": Path("text/json_parser_prompt.txt"), + }, } self.prompts = prompts_type.get(prompt_type, {}) @@ -139,12 +159,11 @@ def __init__(self, parser_type: Optional[str] = "block"): else: self.load_parser("block") - def load_parser(self, parser_type: str) -> None: parsers = { "block": BlockParser, "replacement": ReplacementParser, - "unified-diff": UnifiedDiffParser + "unified-diff": UnifiedDiffParser, } if parser := parsers.get(parser_type): @@ -160,13 +179,17 @@ class RunningSessionConfig(DataClassJsonMixin): model: Optional[str] = "gpt-4-1106-preview" temperature: Optional[float] = 0.2 prompt_type: Optional[str] = "text" - file_exclude_glob_list: Optional[List[str]] = field(default_factory=list) # Use default factory for list + file_exclude_glob_list: Optional[List[str]] = field( + default_factory=list + ) # Use default factory for list format: Optional[str] = "block" - input_style: Optional[Dict[str, str]] = field(default_factory=lambda: { # Use default factory for dict - "": "#9835bd", - "prompt": "#ffffff bold", - 
"continuation": "#ffffff bold", - }) + input_style: Optional[Dict[str, str]] = field( + default_factory=lambda: { # Use default factory for dict + "": "#9835bd", + "prompt": "#ffffff bold", + "continuation": "#ffffff bold", + } + ) maximum_context: Optional[int] = None auto_context_tokens: Optional[int] = 0 @@ -178,7 +201,9 @@ def get_fields(cls) -> List[str]: @dataclass class MentatConfig: # Directory where the mentat is running - root: Path = field(default_factory=lambda: APP_ROOT), # pyright: ignore[reportGeneralTypeIssues] + root: Path = ( + field(default_factory=lambda: APP_ROOT), + ) # pyright: ignore[reportGeneralTypeIssues] user_config_path: Path = field(default_factory=lambda: user_config_path) run: RunSettings = field(default_factory=RunSettings) @@ -189,7 +214,7 @@ class MentatConfig: def load_yaml(path: str) -> dict[str, Any | None]: """Load the data from the YAML file.""" - with open(path, 'r') as file: + with open(path, "r") as file: return yaml.safe_load(file) @@ -197,8 +222,10 @@ def init_config() -> None: """Initialize the configuration file if it doesn't exist.""" git_root = get_git_root_for_path(APP_ROOT, raise_error=False) if git_root is not None: - default_conf_path = os.path.join(MENTAT_ROOT, 'resources', 'conf', '.mentatconf.yaml') - current_conf_path = os.path.join(git_root, '.mentatconf.yaml') + default_conf_path = os.path.join( + MENTAT_ROOT, "resources", "conf", ".mentatconf.yaml" + ) + current_conf_path = os.path.join(git_root, ".mentatconf.yaml") if not os.path.exists(current_conf_path): shutil.copy(default_conf_path, current_conf_path) @@ -207,20 +234,24 @@ def init_config() -> None: def load_settings(config_session: Optional[RunningSessionConfig] = None): """Load the configuration from the `.mentatconf.yaml` file.""" - user_conf_path = USER_MENTAT_ROOT / '.mentatconf.yaml' + user_conf_path = USER_MENTAT_ROOT / ".mentatconf.yaml" git_root = get_git_root_for_path(APP_ROOT, raise_error=False) yaml_config = RunningSessionConfig() if user_conf_path.exists(): data = load_yaml(str(user_conf_path)) - yaml_config = yaml_config.from_dict(kvs=data, infer_missing=True) # pyright: ignore[reportUnknownMemberType] + yaml_config = yaml_config.from_dict( + kvs=data, infer_missing=True + ) # pyright: ignore[reportUnknownMemberType] if git_root is not None: - git_conf_path = Path(git_root) / '.mentatconf.yaml' + git_conf_path = Path(git_root) / ".mentatconf.yaml" if git_conf_path.exists(): data = load_yaml(str(git_conf_path)) - yaml_config = yaml_config.from_dict(kvs=data, infer_missing=True) # pyright: ignore[reportUnknownMemberType] + yaml_config = yaml_config.from_dict( + kvs=data, infer_missing=True + ) # pyright: ignore[reportUnknownMemberType] # safety checks for missing values if yaml_config.file_exclude_glob_list is None: @@ -231,7 +262,9 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): if config_session is not None: if config_session.file_exclude_glob_list is not None: - yaml_config.file_exclude_glob_list.extend(config_session.file_exclude_glob_list) + yaml_config.file_exclude_glob_list.extend( + config_session.file_exclude_glob_list + ) if config_session.model is not None: yaml_config.model = str(config_session.model) @@ -248,8 +281,10 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): file_exclude_glob_list.append(".mentatconf.yaml") run_settings = RunSettings( - file_exclude_glob_list=[Path(p) for p in file_exclude_glob_list], # pyright: ignore[reportUnknownVariableType] - 
auto_context_tokens=yaml_config.auto_context_tokens + file_exclude_glob_list=[ + Path(p) for p in file_exclude_glob_list + ], # pyright: ignore[reportUnknownVariableType] + auto_context_tokens=yaml_config.auto_context_tokens, ) ui_settings = UISettings( @@ -260,25 +295,30 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): model=yaml_config.model, temperature=yaml_config.temperature, feature_selection_model=yaml_config.model, - maximum_context=yaml_config.maximum_context + maximum_context=yaml_config.maximum_context, ) parser_type = yaml_config.format parser_settings = ParserSettings(parser_type=parser_type) - user_session.set("config", MentatConfig( - run=run_settings, - ai=ai_model_settings, - ui=ui_settings, - parser=parser_settings - )) + user_session.set( + "config", + MentatConfig( + run=run_settings, + ai=ai_model_settings, + ui=ui_settings, + parser=parser_settings, + ), + ) -mid_session_config = ["model", - "temperature", - "format", - "maximum_context", - "auto_context_tokens"] +mid_session_config = [ + "model", + "temperature", + "format", + "maximum_context", + "auto_context_tokens", +] def update_config(setting: str, value: str | float | int) -> None: @@ -304,6 +344,8 @@ def update_config(setting: str, value: str | float | int) -> None: stream.send( f"Illegal value for {setting}: {value}. Error: {str(e)}", color="red" ) + + def get_config(setting: str) -> None: """Reload the configuration using the provided keyword arguments.""" config = mentat.user_session.get("config") diff --git a/mentat/conversation.py b/mentat/conversation.py index a336314ce..04447bf59 100644 --- a/mentat/conversation.py +++ b/mentat/conversation.py @@ -227,7 +227,9 @@ async def _stream_model_response( cost_tracker.log_api_call_stats( num_prompt_tokens, count_tokens( - parsed_llm_response.full_response, config.ai.model, full_message=False + parsed_llm_response.full_response, + config.ai.model, + full_message=False, ), config.ai.model, display=True, @@ -253,7 +255,9 @@ async def get_model_response(self) -> ParsedLLMResponse: # Get current code message loading_multiplier = 1.0 if config.run.auto_context_tokens > 0 else 0.0 - prompt = messages_snapshot[-1]["content"] # pyright: ignore[reportTypedDictNotRequiredAccess] + prompt = messages_snapshot[-1][ + "content" + ] # pyright: ignore[reportTypedDictNotRequiredAccess] if isinstance(prompt, list): text_prompts = [ p.get("text", "") for p in prompt if p.get("type") == "text" diff --git a/mentat/feature_filters/llm_feature_filter.py b/mentat/feature_filters/llm_feature_filter.py index 247420652..ba66a2ab1 100644 --- a/mentat/feature_filters/llm_feature_filter.py +++ b/mentat/feature_filters/llm_feature_filter.py @@ -10,10 +10,7 @@ ) import mentat -from mentat.code_feature import ( - CodeFeature, - get_code_message_from_features, -) +from mentat.code_feature import CodeFeature, get_code_message_from_features from mentat.errors import ModelError, UserError from mentat.feature_filters.feature_filter import FeatureFilter from mentat.feature_filters.truncate_filter import TruncateFilter @@ -25,7 +22,9 @@ class LLMFeatureFilter(FeatureFilter): config = mentat.user_session.get("config") - feature_selection_prompt_path = config.ai.prompts.get("feature_selection_prompt", Path("text/feature_selection_prompt.txt")) + feature_selection_prompt_path = config.ai.prompts.get( + "feature_selection_prompt", Path("text/feature_selection_prompt.txt") + ) def __init__( self, diff --git a/mentat/git_handler.py b/mentat/git_handler.py index 
bd919cd73..5409db784 100644 --- a/mentat/git_handler.py +++ b/mentat/git_handler.py @@ -72,7 +72,6 @@ def get_paths_with_git_diffs(git_root: Path) -> set[Path]: def get_git_root_for_path(path: Path, raise_error: bool = True) -> Optional[Path]: - if os.path.isdir(path): dir_path = path else: diff --git a/mentat/include_files.py b/mentat/include_files.py index fac1048e7..6c5232fea 100644 --- a/mentat/include_files.py +++ b/mentat/include_files.py @@ -11,7 +11,6 @@ from mentat.git_handler import get_git_root_for_path, get_non_gitignored_files from mentat.interval import parse_intervals, split_intervals_from_path from mentat.session_context import SESSION_CONTEXT - from mentat.utils import is_file_text_encoded diff --git a/mentat/parsers/block_parser.py b/mentat/parsers/block_parser.py index f2cdaac3f..a35450312 100644 --- a/mentat/parsers/block_parser.py +++ b/mentat/parsers/block_parser.py @@ -71,7 +71,9 @@ class BlockParser(Parser): @override def get_system_prompt(self) -> str: config = mentat.user_session.get("config") - block_parser_prompt_filename = config.ai.prompts.get("block_parser_prompt", Path("text/block_parser_prompt.txt")) + block_parser_prompt_filename = config.ai.prompts.get( + "block_parser_prompt", Path("text/block_parser_prompt.txt") + ) return read_prompt(block_parser_prompt_filename) @override diff --git a/mentat/parsers/change_display_helper.py b/mentat/parsers/change_display_helper.py index 10db7f44d..403aaee20 100644 --- a/mentat/parsers/change_display_helper.py +++ b/mentat/parsers/change_display_helper.py @@ -95,12 +95,10 @@ def _remove_extra_empty_lines(lines: list[str]) -> list[str]: def _prefixed_lines(line_number_buffer: int, lines: list[str], prefix: str): - return "\n".join( - [ - prefix + " " * (line_number_buffer - len(prefix)) + line.strip("\n") - for line in lines - ] - ) + return "\n".join([ + prefix + " " * (line_number_buffer - len(prefix)) + line.strip("\n") + for line in lines + ]) def _get_code_block( @@ -202,18 +200,16 @@ def get_previous_lines( ) -> str: if display_information.first_changed_line < 0: return "" - lines = _remove_extra_empty_lines( - [ - display_information.file_lines[i] - for i in range( - max(0, display_information.first_changed_line - num), - min( - display_information.first_changed_line, - len(display_information.file_lines), - ), - ) - ] - ) + lines = _remove_extra_empty_lines([ + display_information.file_lines[i] + for i in range( + max(0, display_information.first_changed_line - num), + min( + display_information.first_changed_line, + len(display_information.file_lines), + ), + ) + ]) numbered = [ (str(display_information.first_changed_line - len(lines) + i + 1) + ":").ljust( display_information.line_number_buffer @@ -232,18 +228,16 @@ def get_later_lines( ) -> str: if display_information.last_changed_line < 0: return "" - lines = _remove_extra_empty_lines( - [ - display_information.file_lines[i] - for i in range( - max(0, display_information.last_changed_line), - min( - display_information.last_changed_line + num, - len(display_information.file_lines), - ), - ) - ] - ) + lines = _remove_extra_empty_lines([ + display_information.file_lines[i] + for i in range( + max(0, display_information.last_changed_line), + min( + display_information.last_changed_line + num, + len(display_information.file_lines), + ), + ) + ]) numbered = [ (str(display_information.last_changed_line + 1 + i) + ":").ljust( display_information.line_number_buffer diff --git a/mentat/parsers/json_parser.py b/mentat/parsers/json_parser.py index 08ce1c2ad..559aeb38f 
100644 --- a/mentat/parsers/json_parser.py +++ b/mentat/parsers/json_parser.py @@ -20,7 +20,6 @@ from mentat.session_context import SESSION_CONTEXT from mentat.streaming_printer import StreamingPrinter - comment_schema = { "type": "object", "properties": {"type": {"enum": ["comment"]}, "content": {"type": "string"}}, @@ -85,7 +84,9 @@ class JsonParser(Parser): @override def get_system_prompt(self) -> str: config = mentat.user_session.get("config") - json_parser_prompt_filename = config.ai.prompts.get("json_parser_prompt", Path("text/json_parser_prompt.txt")) + json_parser_prompt_filename = config.ai.prompts.get( + "json_parser_prompt", Path("text/json_parser_prompt.txt") + ) return read_prompt(json_parser_prompt_filename) @override diff --git a/mentat/parsers/replacement_parser.py b/mentat/parsers/replacement_parser.py index 7c605d1ba..56650da32 100644 --- a/mentat/parsers/replacement_parser.py +++ b/mentat/parsers/replacement_parser.py @@ -16,7 +16,9 @@ class ReplacementParser(Parser): @override def get_system_prompt(self) -> str: config = mentat.user_session.get("config") - replacement_parser_prompt_filename = config.ai.prompts.get("replacement_parser_prompt", Path("text/replacement_parser_prompt.txt")) + replacement_parser_prompt_filename = config.ai.prompts.get( + "replacement_parser_prompt", Path("text/replacement_parser_prompt.txt") + ) return read_prompt(replacement_parser_prompt_filename) @override diff --git a/mentat/parsers/unified_diff_parser.py b/mentat/parsers/unified_diff_parser.py index c083c079e..aa3a8561e 100644 --- a/mentat/parsers/unified_diff_parser.py +++ b/mentat/parsers/unified_diff_parser.py @@ -18,7 +18,6 @@ from mentat.prompts.prompts import read_prompt - class UnifiedDiffDelimiter(Enum): SpecialStart = "---" SpecialEnd = "+++" @@ -30,7 +29,9 @@ class UnifiedDiffParser(Parser): @override def get_system_prompt(self) -> str: config = mentat.user_session.get("config") - unified_diff_parser_prompt_filename = config.ai.prompts.get("unified_diff_parser_prompt", Path("text/unified_diff_parser_prompt.txt")) + unified_diff_parser_prompt_filename = config.ai.prompts.get( + "unified_diff_parser_prompt", Path("text/unified_diff_parser_prompt.txt") + ) return read_prompt(unified_diff_parser_prompt_filename) @override diff --git a/mentat/python_client/client.py b/mentat/python_client/client.py index cad677141..34c024a32 100644 --- a/mentat/python_client/client.py +++ b/mentat/python_client/client.py @@ -77,7 +77,7 @@ async def startup(self): self.exclude_paths, self.ignore_paths, self.diff, - self.pr_diff + self.pr_diff, ) self.session.start() self.acc_task = asyncio.create_task(self._accumulate_messages()) diff --git a/mentat/sampler/sampler.py b/mentat/sampler/sampler.py index 2ce44894b..e87726f1c 100644 --- a/mentat/sampler/sampler.py +++ b/mentat/sampler/sampler.py @@ -5,6 +5,7 @@ from git import GitCommandError, Repo # type: ignore from openai.types.chat import ChatCompletionMessageParam +import mentat from mentat.code_feature import get_consolidated_feature_refs from mentat.errors import SampleError from mentat.git_handler import get_git_diff, get_git_root_for_path, get_hexsha_active @@ -14,13 +15,19 @@ from mentat.session_context import SESSION_CONTEXT from mentat.session_input import collect_user_input from mentat.utils import get_relative_path -import mentat -def init_settings(repo:str | None = None, merge_base_target:str | None = None) -> None: - mentat.user_session.set("sampler_settings",{ - "repo" : repo, - "merge_base_target" : merge_base_target, - }) + +def 
init_settings( + repo: str | None = None, merge_base_target: str | None = None +) -> None: + mentat.user_session.set( + "sampler_settings", + { + "repo": repo, + "merge_base_target": merge_base_target, + }, + ) + def parse_message(message: ChatCompletionMessageParam) -> dict[str, str]: content = message.get("content") @@ -135,10 +142,13 @@ async def create_sample(self) -> Sample: else: repo = response - mentat.user_session.set("sampler_settings", { - "repo" : repo, - "merge_base_target" : sampler_config.get("merge_base_target") - }) + mentat.user_session.set( + "sampler_settings", + { + "repo": repo, + "merge_base_target": sampler_config.get("merge_base_target"), + }, + ) stream.send("Sample Title:") title = (await collect_user_input()).data.strip() or "" diff --git a/mentat/session.py b/mentat/session.py index 35bd9dc5d..bf5f4a626 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -38,7 +38,9 @@ class Session: To stop, send a message on the session_exit channel. A message will be sent on the client_exit channel when ready for client to quit. """ - _errors: List[str] = [] # pyright: ignore[reportGeneralTypeIssues] + + _errors: List[str] = [] # pyright: ignore[reportGeneralTypeIssues] + def __init__( self, cwd: Path, @@ -236,7 +238,7 @@ async def run_main(): with sentry_sdk.start_transaction( op="mentat_started", name="Mentat Started" ) as transaction: - #transaction.set_tag("config", attr.asdict(ctx.config)) + # transaction.set_tag("config", attr.asdict(ctx.config)) transaction.set_tag("config", "config") await self._main() except (SessionExit, CancelledError): diff --git a/mentat/terminal/__init__.py b/mentat/terminal/__init__.py index 6c19e6013..5a58fafc0 100644 --- a/mentat/terminal/__init__.py +++ b/mentat/terminal/__init__.py @@ -1,3 +1,4 @@ from mentat.config import load_config -#first thing we do is we init a default config + +# first thing we do is we init a default config load_config() diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 9a87cf583..42a3e23d2 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -2,15 +2,17 @@ import logging import signal from asyncio import Event +from pathlib import Path from types import FrameType -from typing import Any, Coroutine, Set, Optional +from typing import Any, Coroutine, List, Optional, Set +import click from prompt_toolkit import PromptSession -from prompt_toolkit.key_binding import KeyPressEvent +from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent +from prompt_toolkit.styles import Style import mentat from mentat.config import update_config - from mentat.sampler import sampler from mentat.session import Session from mentat.session_stream import StreamMessageSource @@ -19,13 +21,6 @@ from mentat.terminal.prompt_completer import MentatCompleter from mentat.terminal.prompt_session import MentatPromptSession -from typing import List -from pathlib import Path -import click - -from prompt_toolkit.key_binding import KeyBindings -from prompt_toolkit.styles import Style - class TerminalClient: def __init__( @@ -148,7 +143,7 @@ async def _run(self): self.exclude_paths, self.ignore_paths, self.diff, - self.pr_diff + self.pr_diff, ) self.session.start() @@ -204,17 +199,54 @@ def run(self): # Event handlers for all the buttons. 
+ @click.command() -@click.option('-e', '--exclude-paths', multiple=True, default=[], help='List of file paths, directory paths, or glob patterns to exclude.') -@click.option('-g', '--ignore-paths', multiple=True, default=[], help='List of file paths, directory paths, or glob patterns to ignore in auto-context.') -@click.option('-d', '--diff', default=None, show_default='HEAD', help='A git tree-ish (e.g. commit, branch, tag) to diff against.') -@click.option('-p', '--pr-diff', default=None, help='A git tree-ish to diff against the latest common ancestor of.') -@click.option('--cwd', default=str(Path.cwd()), help='The current working directory.') -@click.option('--model', default=None, help='The Model to use.') -@click.option('--temperature', default=None, help='The Model Temperature to use.') -@click.option('--maximum-context', default=None, help='The Maximum Context') -@click.argument('paths', nargs=-1, required=True) -def start(paths: list[str], exclude_paths: list[str], ignore_paths: list[str], diff: Optional[str], pr_diff: Optional[str], cwd: Optional[str], model: Optional[str], temperature: Optional[float], maximum_context: Optional[int]) -> None: +@click.option( + "-e", + "--exclude-paths", + multiple=True, + default=[], + help="List of file paths, directory paths, or glob patterns to exclude.", +) +@click.option( + "-g", + "--ignore-paths", + multiple=True, + default=[], + help=( + "List of file paths, directory paths, or glob patterns to ignore in" + " auto-context." + ), +) +@click.option( + "-d", + "--diff", + default=None, + show_default="HEAD", + help="A git tree-ish (e.g. commit, branch, tag) to diff against.", +) +@click.option( + "-p", + "--pr-diff", + default=None, + help="A git tree-ish to diff against the latest common ancestor of.", +) +@click.option("--cwd", default=str(Path.cwd()), help="The current working directory.") +@click.option("--model", default=None, help="The Model to use.") +@click.option("--temperature", default=None, help="The Model Temperature to use.") +@click.option("--maximum-context", default=None, help="The Maximum Context") +@click.argument("paths", nargs=-1, required=True) +def start( + paths: list[str], + exclude_paths: list[str], + ignore_paths: list[str], + diff: Optional[str], + pr_diff: Optional[str], + cwd: Optional[str], + model: Optional[str], + temperature: Optional[float], + maximum_context: Optional[int], +) -> None: sampler.init_settings() @@ -230,12 +262,7 @@ def start(paths: list[str], exclude_paths: list[str], ignore_paths: list[str], d current_working_directory = Path(cwd).expanduser().resolve() terminal_client = TerminalClient( - current_working_directory, - paths, - exclude_paths, - ignore_paths, - diff, - pr_diff + current_working_directory, paths, exclude_paths, ignore_paths, diff, pr_diff ) terminal_client.run() diff --git a/mentat/user_session.py b/mentat/user_session.py index 6d919a6f1..97388ced8 100644 --- a/mentat/user_session.py +++ b/mentat/user_session.py @@ -1,14 +1,15 @@ -from typing import Dict, Any +from typing import Any, Dict user_session_store: Dict[str, Any] = {} + class UserSession: """ Developer facing user session class. Useful for the developer to store user specific data between calls. 
""" - def get(self, key: str, default: Any=None) -> Any: + def get(self, key: str, default: Any = None) -> Any: return user_session_store.get(key, default) def set(self, key: str, value: Any) -> None: diff --git a/mentat/utils.py b/mentat/utils.py index 530f5da22..dd3413730 100644 --- a/mentat/utils.py +++ b/mentat/utils.py @@ -3,11 +3,13 @@ import asyncio import hashlib import os +import pprint +import sys import time from importlib import resources from importlib.abc import Traversable from pathlib import Path -from typing import TYPE_CHECKING, AsyncIterator, List, Literal, Optional, Union, Any +from typing import TYPE_CHECKING, Any, AsyncIterator, List, Literal, Optional, Union import packaging.version import requests @@ -19,9 +21,6 @@ from mentat import __version__ from mentat.session_context import SESSION_CONTEXT -import pprint -import sys - if TYPE_CHECKING: from mentat.transcripts import Transcript @@ -214,6 +213,7 @@ def dd(args: Any): # Exit the program sys.exit() + def dump(args: Any): """ This method dd takes an argument args and performs the following operations: diff --git a/scripts/git_log_to_transcripts.py b/scripts/git_log_to_transcripts.py index 13f9639ca..75e592d50 100755 --- a/scripts/git_log_to_transcripts.py +++ b/scripts/git_log_to_transcripts.py @@ -159,12 +159,10 @@ async def translate_commits_to_transcripts(repo, count=10): "args": {}, "prompt": prompt, "expected_edits": llmResponse, - "edited_features": list( - { - str(f.relative_to(git_root)) - for f in bound_files(parsedLLMResponse.file_edits, padding=0) - } - ), + "edited_features": list({ + str(f.relative_to(git_root)) + for f in bound_files(parsedLLMResponse.file_edits, padding=0) + }), "selected_features": [], } try: diff --git a/tests/benchmarks/benchmarks/mentat/license_update.py b/tests/benchmarks/benchmarks/mentat/license_update.py index 7e63f2744..b356c7cd9 100644 --- a/tests/benchmarks/benchmarks/mentat/license_update.py +++ b/tests/benchmarks/benchmarks/mentat/license_update.py @@ -34,19 +34,17 @@ def verify(): import benchmark_repos.mentat.tests.license_check as license_check importlib.reload(license_check) - return set(license_check.accepted_licenses) == set( - [ - "BSD License", - "Apache Software License", - "MIT License", - "MIT", - "Mozilla Public License 2.0 (MPL 2.0)", - "Python Software Foundation License", - "Apache 2.0", - "BSD 3-Clause", - "ISC License (ISCL)", - "HPND", - ] - ) + return set(license_check.accepted_licenses) == set([ + "BSD License", + "Apache Software License", + "MIT License", + "MIT", + "Mozilla Public License 2.0 (MPL 2.0)", + "Python Software Foundation License", + "Apache 2.0", + "BSD 3-Clause", + "ISC License (ISCL)", + "HPND", + ]) except IndentationError: return False diff --git a/tests/code_context_test.py b/tests/code_context_test.py index b44bdbe1d..dcab4a942 100644 --- a/tests/code_context_test.py +++ b/tests/code_context_test.py @@ -8,7 +8,7 @@ import mentat from mentat.code_context import CodeContext -from mentat.config import RunSettings, update_config, load_config +from mentat.config import RunSettings, load_config, update_config from mentat.errors import ContextSizeInsufficient from mentat.feature_filters.default_filter import DefaultFilter from mentat.git_handler import get_non_gitignored_files @@ -82,7 +82,6 @@ async def test_config_glob_exclude(mocker, temp_testbed, mock_code_context): config.run.file_exclude_glob_list = [Path("glob_test") / "**" / "*.py"] mentat.user_session.set("config", config) - glob_exclude_path = os.path.join("glob_test", 
"bagel", "apple", "exclude_me.py") glob_include_path = os.path.join("glob_test", "bagel", "apple", "include_me.ts") directly_added_glob_excluded_path = Path( @@ -113,7 +112,7 @@ async def test_config_glob_exclude(mocker, temp_testbed, mock_code_context): @pytest.mark.asyncio async def test_glob_include(temp_testbed, mock_code_context): - #reset the config context + # reset the config context load_config() # Make sure glob include works diff --git a/tests/code_file_manager_test.py b/tests/code_file_manager_test.py index 9131b7330..b43324245 100644 --- a/tests/code_file_manager_test.py +++ b/tests/code_file_manager_test.py @@ -62,16 +62,14 @@ async def test_run_from_subdirectory( """Run mentat from a subdirectory of the git root""" # Change to the subdirectory os.chdir("multifile_calculator") - mock_collect_user_input.set_stream_messages( - [ - ( - "Insert the comment # Hello on the first line of" - " multifile_calculator/calculator.py and scripts/echo.py" - ), - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + ( + "Insert the comment # Hello on the first line of" + " multifile_calculator/calculator.py and scripts/echo.py" + ), + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment in both files. @@ -119,16 +117,14 @@ async def test_run_from_superdirectory( ): """Run mentat from outside the git root""" # Change to the subdirectory - mock_collect_user_input.set_stream_messages( - [ - ( - "Insert the comment # Hello on the first line of" - " multifile_calculator/calculator.py and scripts/echo.py" - ), - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + ( + "Insert the comment # Hello on the first line of" + " multifile_calculator/calculator.py and scripts/echo.py" + ), + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment in both files. 
@@ -175,13 +171,11 @@ async def test_change_after_creation( mock_call_llm_api, ): file_name = Path("hello_world.py") - mock_collect_user_input.set_stream_messages( - [ - "Conversation", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Conversation", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/commands_test.py b/tests/commands_test.py index 26c82424f..488b00bbc 100644 --- a/tests/commands_test.py +++ b/tests/commands_test.py @@ -31,12 +31,10 @@ async def test_commit_command(temp_testbed, mock_collect_user_input): with open(file_name, "w") as f: f.write("# Commit me!") - mock_collect_user_input.set_stream_messages( - [ - "/commit", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "/commit", + "q", + ]) session = Session(cwd=temp_testbed, paths=[]) session.start() @@ -48,12 +46,10 @@ async def test_commit_command(temp_testbed, mock_collect_user_input): # TODO: test without git @pytest.mark.asyncio async def test_include_command(temp_testbed, mock_collect_user_input): - mock_collect_user_input.set_stream_messages( - [ - "/include scripts", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "/include scripts", + "q", + ]) session = Session(cwd=temp_testbed) session.start() @@ -68,12 +64,10 @@ async def test_include_command(temp_testbed, mock_collect_user_input): # TODO: test without git @pytest.mark.asyncio async def test_exclude_command(temp_testbed, mock_collect_user_input): - mock_collect_user_input.set_stream_messages( - [ - "/exclude scripts", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "/exclude scripts", + "q", + ]) session = Session(cwd=temp_testbed, paths=["scripts"]) session.start() @@ -91,14 +85,12 @@ async def test_undo_command(temp_testbed, mock_collect_user_input, mock_call_llm # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "Edit the file", - "y", - "/undo", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Edit the file", + "y", + "/undo", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -134,15 +126,13 @@ async def test_redo_command(temp_testbed, mock_collect_user_input, mock_call_llm # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "Edit the file", - "y", - "/undo", - "/redo", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Edit the file", + "y", + "/undo", + "/redo", + "q", + ]) new_file_name = "new_temp.py" mock_call_llm_api.set_streamed_values([dedent(f"""\ @@ -197,14 +187,12 @@ async def test_undo_all_command( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "", - "y", - "/undo-all", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "", + "y", + "/undo-all", + "q", + ]) # TODO: Make a way to set multiple return values for call_llm_api and reset multiple edits at once mock_call_llm_api.set_streamed_values([dedent(f"""\ @@ -235,13 +223,11 @@ async def test_undo_all_command( @pytest.mark.asyncio async def test_clear_command(temp_testbed, mock_collect_user_input, mock_call_llm_api): - mock_collect_user_input.set_stream_messages( - [ - "Request", - "/clear", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Request", + "/clear", + "q", + ]) mock_call_llm_api.set_streamed_values(["Answer"]) session = Session(cwd=Path.cwd()) @@ -257,13 +243,11 @@ async def test_clear_command(temp_testbed, 
mock_collect_user_input, mock_call_ll async def test_search_command( mocker, temp_testbed, mock_call_llm_api, mock_collect_user_input ): - mock_collect_user_input.set_stream_messages( - [ - "Request", - "/search Query", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Request", + "/search Query", + "q", + ]) mock_call_llm_api.set_streamed_values(["Answer"]) mock_feature = CodeFeature( Path(temp_testbed) / "multifile_calculator" / "calculator.py" diff --git a/tests/config_test.py b/tests/config_test.py index 041bb2233..8f9800a1c 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,36 +1,34 @@ import argparse +import os +from io import StringIO from pathlib import Path from textwrap import dedent +from unittest.mock import MagicMock, patch import pytest +import yaml +from yaml import dump import mentat.config +from mentat import config from mentat.config import update_config from mentat.parsers.replacement_parser import ReplacementParser -from pathlib import Path -import pytest -import yaml -from mentat import config -from unittest.mock import patch -from unittest.mock import MagicMock -from io import StringIO -from yaml import dump -import os - from mentat.utils import dd @pytest.fixture def mock_open(mocker): - mock_open = mocker.patch('builtins.open', new_callable=MagicMock) + mock_open = mocker.patch("builtins.open", new_callable=MagicMock) return mock_open + @pytest.mark.asyncio async def test_load_yaml(mock_open): - data = {'test_key': 'test_value'} + data = {"test_key": "test_value"} mock_open.return_value.__enter__.return_value = StringIO(yaml.dump(data)) - assert config.load_yaml('test_path') == data - mock_open.assert_called_with('test_path', 'r') + assert config.load_yaml("test_path") == data + mock_open.assert_called_with("test_path", "r") + @pytest.mark.asyncio async def test_default_config(): @@ -43,9 +41,4 @@ async def test_default_config(): assert config.run.auto_tokens == 8000 assert config.run.auto_context == False - assert config.parser.parser_type == 'block' - - - - - + assert config.parser.parser_type == "block" diff --git a/tests/conftest.py b/tests/conftest.py index 485f0e2fa..cffcbc29b 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -16,11 +16,10 @@ from openai.types.chat.chat_completion_chunk import Choice as AsyncChoice from openai.types.chat.chat_completion_chunk import ChoiceDelta - from mentat.config import load_config from mentat.parsers.block_parser import BlockParser -#first thing we do is we init a default config +# first thing we do is we init a default config load_config() import mentat @@ -28,7 +27,7 @@ from mentat.auto_completer import AutoCompleter from mentat.code_context import CodeContext from mentat.code_file_manager import CodeFileManager -from mentat.config import config_file_name, MentatConfig +from mentat.config import MentatConfig, config_file_name from mentat.conversation import Conversation from mentat.cost_tracker import CostTracker from mentat.git_handler import get_git_root_for_path @@ -268,20 +267,19 @@ def mock_session_context(temp_testbed): set by a Session if the test creates a Session. If you create a Session or Client in your test, do NOT use this SessionContext! 
""" - #reset the config context + # reset the config context load_config() - #autoset some settings to conform to tests + # autoset some settings to conform to tests config = mentat.user_session.get("config") config.root = temp_testbed config.run.file_exclude_glob_list = [] config.ai.maximum_context = 16000 - config.ai.load_prompts('text') + config.ai.load_prompts("text") config.parser.parser_type = "block" config.parser.parser = BlockParser() mentat.user_session.set("config", config) - git_root = get_git_root_for_path(temp_testbed, raise_error=False) stream = SessionStream() diff --git a/tests/embeddings_test.py b/tests/embeddings_test.py index 12bb99180..c1401e5ff 100644 --- a/tests/embeddings_test.py +++ b/tests/embeddings_test.py @@ -19,14 +19,12 @@ async def test_get_feature_similarity_scores(mocker, mock_call_embedding_api): _make_code_feature(Path(f"file{i}.txt").resolve(), f"File {i}") for i in range(3) ] - mock_call_embedding_api.set_embedding_values( - [ - [0.7, 0.7, 0.7], # The prompt - [0.4, 0.4, 0.4], - [0.5, 0.6, 0.7], - [0.69, 0.7, 0.71], - ] - ) + mock_call_embedding_api.set_embedding_values([ + [0.7, 0.7, 0.7], # The prompt + [0.4, 0.4, 0.4], + [0.5, 0.6, 0.7], + [0.69, 0.7, 0.71], + ]) result = await get_feature_similarity_scores(prompt, features) assert len(result) == 3 assert max(result) == result[0] # The first feature is most similar diff --git a/tests/llm_api_handler_test.py b/tests/llm_api_handler_test.py index 31fd1d691..fcbe8137f 100644 --- a/tests/llm_api_handler_test.py +++ b/tests/llm_api_handler_test.py @@ -23,11 +23,9 @@ def test_prompt_tokens(): img_base64 = base64.b64encode(buffer.getvalue()).decode() image_url = f"data:image/png;base64,{img_base64}" - messages.append( - { - "role": "user", - "content": [{"type": "image_url", "image_url": {"url": image_url}}], - } - ) + messages.append({ + "role": "user", + "content": [{"type": "image_url", "image_url": {"url": image_url}}], + }) assert prompt_tokens(messages, model) == 24 + 6 * 170 + 85 + 5 diff --git a/tests/parser_tests/block_format_error_test.py b/tests/parser_tests/block_format_error_test.py index 2e77e3cf9..a45366159 100644 --- a/tests/parser_tests/block_format_error_test.py +++ b/tests/parser_tests/block_format_error_test.py @@ -54,13 +54,11 @@ async def error_test_template( with open(temp_file_name, "w") as f: f.write("") - mock_collect_user_input.set_stream_messages( - [ - "Go!", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Go!", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([changes]) session = Session(cwd=Path.cwd(), paths=[temp_file_name]) diff --git a/tests/parser_tests/block_format_test.py b/tests/parser_tests/block_format_test.py index 96f2e8cbc..cec5bc853 100644 --- a/tests/parser_tests/block_format_test.py +++ b/tests/parser_tests/block_format_test.py @@ -24,13 +24,11 @@ async def test_insert(mock_call_llm_api, mock_collect_user_input, block_parser): # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment between both lines. 
@@ -72,13 +70,11 @@ async def test_replace(mock_call_llm_api, mock_collect_user_input, block_parser) # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will replace both lines with one comment @@ -120,13 +116,11 @@ async def test_delete(mock_call_llm_api, mock_collect_user_input, block_parser): # with 4 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will delete the middle two lines @@ -160,13 +154,11 @@ async def test_delete(mock_call_llm_api, mock_collect_user_input, block_parser): async def test_create_file(mock_call_llm_api, mock_collect_user_input, block_parser): # Create a temporary file temp_file_name = "new_dir/temp.py" - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will create a new file called temp.py @@ -201,14 +193,12 @@ async def test_delete_file(mock_call_llm_api, mock_collect_user_input, block_par with open(temp_file_name, "w") as f: f.write("# I am not long for this world") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will delete the file @@ -239,13 +229,11 @@ async def test_rename_file(mock_call_llm_api, mock_collect_user_input, block_par with open(temp_file_name, "w") as f: f.write("# Move me!") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ I will rename the file @@ -280,13 +268,11 @@ async def test_change_then_rename_file( with open(temp_file_name, "w") as f: f.write("# Move me!") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ I will insert a comment then rename the file @@ -333,13 +319,11 @@ async def test_rename_file_then_change( with open(temp_file_name, "w") as f: f.write("# Move me!") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ I will rename the file then insert a comment @@ -389,13 +373,11 @@ async def test_multiple_blocks( # just for # good measure""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment between the first two lines @@ -455,13 +437,11 @@ async def test_json_strings(mock_call_llm_api, mock_collect_user_input, block_pa f.write(dedent("""\ # This is a temporary file""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) 
mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment at the start. diff --git a/tests/parser_tests/replacement_format_error_test.py b/tests/parser_tests/replacement_format_error_test.py index 41098f1fa..75c42d648 100644 --- a/tests/parser_tests/replacement_format_error_test.py +++ b/tests/parser_tests/replacement_format_error_test.py @@ -20,25 +20,23 @@ async def test_invalid_line_numbers( mock_call_llm_api, mock_collect_user_input, ): - temp_file_name ="temp.py" + temp_file_name = "temp.py" temp_file_location = Path.cwd() / temp_file_name config = mentat.user_session.get("config") config.parser.parser = ReplacementParser() - mentat.user_session.set('config', config) + mentat.user_session.set("config", config) with open(temp_file_location, "w") as f: f.write(dedent("""\ # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -72,7 +70,7 @@ async def test_invalid_special_line( ): config = mentat.user_session.get("config") config.parser.parser = ReplacementParser() - mentat.user_session.set('config', config) + mentat.user_session.set("config", config) temp_file_name = "temp.py" with open(temp_file_name, "w") as f: @@ -80,13 +78,11 @@ async def test_invalid_special_line( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/parser_tests/replacement_format_test.py b/tests/parser_tests/replacement_format_test.py index f84d2d76e..6bbb47eda 100644 --- a/tests/parser_tests/replacement_format_test.py +++ b/tests/parser_tests/replacement_format_test.py @@ -14,8 +14,7 @@ def replacement_parser(): config = mentat.user_session.get("config") config.parser.parser = ReplacementParser() - mentat.user_session.set('config', config) - + mentat.user_session.set("config", config) @pytest.mark.asyncio @@ -26,13 +25,11 @@ async def test_insert(mock_call_llm_api, mock_collect_user_input, replacement_pa # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -60,13 +57,11 @@ async def test_delete(mock_call_llm_api, mock_collect_user_input, replacement_pa # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -91,13 +86,11 @@ async def test_replace(mock_call_llm_api, mock_collect_user_input, replacement_p # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -121,13 +114,11 @@ async def test_create_file( mock_call_llm_api, mock_collect_user_input, replacement_parser ): temp_file_name = "temp.py" - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + 
mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -156,14 +147,12 @@ async def test_delete_file( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -186,13 +175,11 @@ async def test_rename_file( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -221,13 +208,11 @@ async def test_change_then_rename_then_change( # This is a temporary file # with 2 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/parser_tests/unified_diff_format_error_test.py b/tests/parser_tests/unified_diff_format_error_test.py index 82e02ee89..e9714f23f 100644 --- a/tests/parser_tests/unified_diff_format_error_test.py +++ b/tests/parser_tests/unified_diff_format_error_test.py @@ -26,13 +26,11 @@ async def test_not_matching( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -72,13 +70,11 @@ async def test_no_prefix( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/parser_tests/unified_diff_format_test.py b/tests/parser_tests/unified_diff_format_test.py index 9ea411514..29128b4e9 100644 --- a/tests/parser_tests/unified_diff_format_test.py +++ b/tests/parser_tests/unified_diff_format_test.py @@ -14,8 +14,7 @@ def unified_diff_parser(): config = mentat.user_session.get("config") config.parser.parser = UnifiedDiffParser() - mentat.user_session.set('config', config) - + mentat.user_session.set("config", config) @pytest.mark.asyncio @@ -32,13 +31,11 @@ async def test_replacement( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -82,13 +79,11 @@ async def test_multiple_replacements( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -142,13 +137,11 @@ async def test_multiple_replacement_spots( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -197,13 +190,11 @@ async def test_little_context_addition( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - 
) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -245,13 +236,11 @@ async def test_empty_file( with open(temp_file_name, "w") as f: f.write("") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -281,13 +270,11 @@ async def test_creation( ): temp_file_name = Path("temp.py") - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -325,14 +312,12 @@ async def test_deletion( # 8 # lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -359,13 +344,11 @@ async def test_no_ending_marker( # with # 4 lines""")) - mock_collect_user_input.set_stream_messages( - [ - "test", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "test", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation diff --git a/tests/record_benchmark.py b/tests/record_benchmark.py index 051d1cfc9..16f275684 100644 --- a/tests/record_benchmark.py +++ b/tests/record_benchmark.py @@ -26,14 +26,12 @@ def main(run=False, threshold=0.75, count=1): if run: for _ in range(count): os.environ["MENTAT_BENCHMARKS_RUNNING"] = "true" - pytest.main( - [ - benchmark_location, - "--benchmark", - "--report-log", - benchmark_log_location, - ] - ) + pytest.main([ + benchmark_location, + "--benchmark", + "--report-log", + benchmark_log_location, + ]) os.environ["MENTAT_BENCHMARKS_RUNNING"] = "false" print() nodes = [] diff --git a/tests/sampler_test.py b/tests/sampler_test.py index d46dc2ef1..c7aeee713 100644 --- a/tests/sampler_test.py +++ b/tests/sampler_test.py @@ -36,7 +36,7 @@ async def test_sample_from_context( mock_session_context, mock_collect_user_input, ): - init_settings(repo="test_sample_repo",merge_base_target="") + init_settings(repo="test_sample_repo", merge_base_target="") mocker.patch( "mentat.conversation.Conversation.get_messages", @@ -63,14 +63,12 @@ async def test_sample_from_context( with open("test_file.py", "w") as f: f.write("test_file_content\n") - mock_collect_user_input.set_stream_messages( - [ - "", - "test_title", - "test_description", - "test_test_command", - ] - ) + mock_collect_user_input.set_stream_messages([ + "", + "test_title", + "test_description", + "test_test_command", + ]) sampler = Sampler() sample = await sampler.create_sample() assert sample.title == "test_title" @@ -102,19 +100,17 @@ def is_sha1(string: str) -> bool: async def test_sample_command(temp_testbed, mock_collect_user_input, mock_call_llm_api): init_settings(repo=None) - mock_collect_user_input.set_stream_messages( - [ - "Request", - "y", - f"/sample {temp_testbed.as_posix()}", - "", - "test_url", - "test_title", - "test_description", - "test_test_command", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Request", + "y", + f"/sample {temp_testbed.as_posix()}", + "", + "test_url", + "test_title", + "test_description", + "test_test_command", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will insert a comment in both files. 
diff --git a/tests/system_test.py b/tests/system_test.py index 353252b7f..55f1e50c7 100644 --- a/tests/system_test.py +++ b/tests/system_test.py @@ -15,14 +15,12 @@ async def test_system(mock_call_llm_api, mock_collect_user_input): with open(temp_file_name, "w") as f: f.write("# This is a temporary file.") - mock_collect_user_input.set_stream_messages( - [ - "Add changes to the file", - "i", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Add changes to the file", + "i", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will add a print statement. @@ -70,16 +68,14 @@ async def test_interactive_change_selection(mock_call_llm_api, mock_collect_user with open(temp_file_name, "w") as f: f.write("# This is a temporary file for interactive test.") - mock_collect_user_input.set_stream_messages( - [ - "Add changes to the file", - "i", - "y", - "n", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Add changes to the file", + "i", + "y", + "n", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent("""\ I will make three changes to the file. @@ -179,13 +175,11 @@ async def test_sub_directory( with monkeypatch.context() as m: m.chdir("scripts") file_name = "calculator.py" - mock_collect_user_input.set_stream_messages( - [ - "Add changes to the file", - "y", - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "Add changes to the file", + "y", + "q", + ]) mock_call_llm_api.set_streamed_values([dedent(f"""\ Conversation @@ -237,11 +231,9 @@ async def test_recursive_git_repositories(temp_testbed, mock_collect_user_input) f.write("") files.append(temp_testbed / file_path) - mock_collect_user_input.set_stream_messages( - [ - "q", - ] - ) + mock_collect_user_input.set_stream_messages([ + "q", + ]) session = Session(cwd=temp_testbed, paths=[Path(".")]) session.start() From 352b64e7ec8847f08b0ce1185e746b4e0f2dea5d Mon Sep 17 00:00:00 2001 From: Greg L Date: Fri, 29 Dec 2023 17:52:04 -0500 Subject: [PATCH 21/24] Adjustments for Ruff --- mentat/__init__.py | 6 ++++++ mentat/config.py | 12 ++++++++---- tests/code_context_test.py | 3 +-- tests/commands_test.py | 1 - tests/config_test.py | 12 ++---------- tests/conftest.py | 9 ++------- tests/feature_filters/llm_feature_filter_test.py | 2 -- tests/parser_tests/replacement_format_error_test.py | 1 - tests/parser_tests/replacement_format_test.py | 1 - tests/parser_tests/unified_diff_format_test.py | 2 -- tests/sampler_test.py | 1 - 11 files changed, 19 insertions(+), 31 deletions(-) diff --git a/mentat/__init__.py b/mentat/__init__.py index cf1f4ccd9..025f957d4 100644 --- a/mentat/__init__.py +++ b/mentat/__init__.py @@ -11,3 +11,9 @@ def __dir__(): # Make sure to bump this on Release x.y.z PR's! __version__ = "1.0.7" + + +# the very first thing we need to do is load_config so we don't have an empty object while booting. 
+from mentat.config import load_config # noqa: E402 + +load_config() diff --git a/mentat/config.py b/mentat/config.py index 4b77f8ccc..dbc599b4a 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -241,17 +241,21 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): if user_conf_path.exists(): data = load_yaml(str(user_conf_path)) - yaml_config = yaml_config.from_dict( + # fmt: off + yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] kvs=data, infer_missing=True - ) # pyright: ignore[reportUnknownMemberType] + ) + # fmt: on if git_root is not None: git_conf_path = Path(git_root) / ".mentatconf.yaml" if git_conf_path.exists(): data = load_yaml(str(git_conf_path)) - yaml_config = yaml_config.from_dict( + # fmt: off + yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] kvs=data, infer_missing=True - ) # pyright: ignore[reportUnknownMemberType] + ) + # fmt: on # safety checks for missing values if yaml_config.file_exclude_glob_list is None: diff --git a/tests/code_context_test.py b/tests/code_context_test.py index dcab4a942..dc939e0b9 100644 --- a/tests/code_context_test.py +++ b/tests/code_context_test.py @@ -8,14 +8,13 @@ import mentat from mentat.code_context import CodeContext -from mentat.config import RunSettings, load_config, update_config +from mentat.config import load_config from mentat.errors import ContextSizeInsufficient from mentat.feature_filters.default_filter import DefaultFilter from mentat.git_handler import get_non_gitignored_files from mentat.include_files import is_file_text_encoded from mentat.interval import Interval from mentat.llm_api_handler import count_tokens -from mentat.utils import dd from tests.conftest import run_git_command diff --git a/tests/commands_test.py b/tests/commands_test.py index 488b00bbc..46d1ca652 100644 --- a/tests/commands_test.py +++ b/tests/commands_test.py @@ -11,7 +11,6 @@ from mentat.command.commands.help import HelpCommand from mentat.session import Session from mentat.session_context import SESSION_CONTEXT -from mentat.utils import dd def test_invalid_command(): diff --git a/tests/config_test.py b/tests/config_test.py index 8f9800a1c..982be3ec6 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,19 +1,11 @@ -import argparse -import os from io import StringIO -from pathlib import Path -from textwrap import dedent -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock import pytest import yaml -from yaml import dump import mentat.config from mentat import config -from mentat.config import update_config -from mentat.parsers.replacement_parser import ReplacementParser -from mentat.utils import dd @pytest.fixture @@ -39,6 +31,6 @@ async def test_default_config(): assert config.ai.maximum_context == 16000 assert config.run.auto_tokens == 8000 - assert config.run.auto_context == False + assert config.run.auto_context is False assert config.parser.parser_type == "block" diff --git a/tests/conftest.py b/tests/conftest.py index cffcbc29b..c8a036128 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -16,22 +16,17 @@ from openai.types.chat.chat_completion_chunk import Choice as AsyncChoice from openai.types.chat.chat_completion_chunk import ChoiceDelta -from mentat.config import load_config -from mentat.parsers.block_parser import BlockParser - -# first thing we do is we init a default config -load_config() - import mentat from mentat.agent_handler import AgentHandler from mentat.auto_completer import AutoCompleter from 
mentat.code_context import CodeContext
 from mentat.code_file_manager import CodeFileManager
-from mentat.config import MentatConfig, config_file_name
+from mentat.config import config_file_name, load_config
 from mentat.conversation import Conversation
 from mentat.cost_tracker import CostTracker
 from mentat.git_handler import get_git_root_for_path
 from mentat.llm_api_handler import LlmApiHandler
+from mentat.parsers.block_parser import BlockParser
 from mentat.sampler.sampler import Sampler
 from mentat.session_context import SESSION_CONTEXT, SessionContext
 from mentat.session_stream import SessionStream, StreamMessage, StreamMessageSource
diff --git a/tests/feature_filters/llm_feature_filter_test.py b/tests/feature_filters/llm_feature_filter_test.py
index 5b209f790..db6501912 100644
--- a/tests/feature_filters/llm_feature_filter_test.py
+++ b/tests/feature_filters/llm_feature_filter_test.py
@@ -1,9 +1,7 @@
 import pytest

-import mentat
 from mentat.code_feature import CodeFeature
 from mentat.feature_filters.llm_feature_filter import LLMFeatureFilter
-from mentat.utils import dd


 @pytest.mark.asyncio
diff --git a/tests/parser_tests/replacement_format_error_test.py b/tests/parser_tests/replacement_format_error_test.py
index 75c42d648..105e70ca1 100644
--- a/tests/parser_tests/replacement_format_error_test.py
+++ b/tests/parser_tests/replacement_format_error_test.py
@@ -7,7 +7,6 @@
 from mentat.config import ParserSettings
 from mentat.parsers.replacement_parser import ReplacementParser
 from mentat.session import Session
-from mentat.utils import dd


 @pytest.fixture(autouse=True)
diff --git a/tests/parser_tests/replacement_format_test.py b/tests/parser_tests/replacement_format_test.py
index 6bbb47eda..3fccc692b 100644
--- a/tests/parser_tests/replacement_format_test.py
+++ b/tests/parser_tests/replacement_format_test.py
@@ -4,7 +4,6 @@
 import pytest

 import mentat
-from mentat.config import ParserSettings
 from mentat.parsers.replacement_parser import ReplacementParser
 from mentat.session import Session
 from tests.parser_tests.inverse import verify_inverse
diff --git a/tests/parser_tests/unified_diff_format_test.py b/tests/parser_tests/unified_diff_format_test.py
index 29128b4e9..58de902b5 100644
--- a/tests/parser_tests/unified_diff_format_test.py
+++ b/tests/parser_tests/unified_diff_format_test.py
@@ -4,10 +4,8 @@
 import pytest

 import mentat
-from mentat.config import ParserSettings
 from mentat.parsers.unified_diff_parser import UnifiedDiffParser
 from mentat.session import Session
-from mentat.utils import dd


 @pytest.fixture(autouse=True)
diff --git a/tests/sampler_test.py b/tests/sampler_test.py
index c7aeee713..f2930f174 100644
--- a/tests/sampler_test.py
+++ b/tests/sampler_test.py
@@ -20,7 +20,6 @@
 from mentat.sampler.sampler import Sampler, init_settings
 from mentat.sampler.utils import get_active_snapshot_commit
 from mentat.session import Session
-from mentat.utils import dd
 from scripts.evaluate_samples import evaluate_sample

From a288ba7946ff03894f106132ae26bb086c47d05f Mon Sep 17 00:00:00 2001
From: Greg L
Date: Fri, 29 Dec 2023 18:10:41 -0500
Subject: [PATCH 22/24] Update the message in session stream and refactor config

The main update to 'session.py' changes the message content sent via
self.stream and alters its color. In 'config.py', the significant
addition is a new 'load_model' function that checks whether a model is
known and, if so, sets the maximum context accordingly. Code formatting
has also been improved in a few places to keep it readable.
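
For reference, a condensed sketch of the new lookup. The class name and
defaults below are illustrative placeholders, not the real config class;
known_models is the mapping imported from mentat.llm_api_handler in the
diff that follows:

    from mentat.llm_api_handler import known_models


    class AISettings:
        # Illustrative stand-in for the real AI settings class; 16000
        # matches the default maximum_context asserted in config_test.py.
        model: str = ""
        maximum_context: int = 16000

        def load_model(self, model: str) -> None:
            # Record the requested model, then adopt its known context
            # size; unknown models keep the configured maximum_context.
            self.model = model
            known_model = known_models.get(model)
            if known_model is not None and hasattr(known_model, "context_size"):
                self.maximum_context = int(known_model.context_size)

update_config("model", ...) is routed through the same method, so
changing the model at runtime also resizes the maximum context.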
--- mentat/config.py | 16 ++++++++++++---- mentat/session.py | 14 +++++++++++++- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/mentat/config.py b/mentat/config.py index dbc599b4a..148344c9f 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -12,6 +12,7 @@ import mentat from mentat import user_session from mentat.git_handler import get_git_root_for_path +from mentat.llm_api_handler import known_models from mentat.parsers.block_parser import BlockParser from mentat.parsers.replacement_parser import ReplacementParser from mentat.parsers.unified_diff_parser import UnifiedDiffParser @@ -76,7 +77,7 @@ def __init__( no_parser_prompt: Optional[bool] = False, ): if model is not None: - self.model = model + self.load_model(model) if feature_selection_model is not None: self.feature_selection_model = feature_selection_model if embedding_model is not None: @@ -92,6 +93,13 @@ def __init__( if no_parser_prompt is not None: self.no_parser_prompt = no_parser_prompt + def load_model(self, model: str) -> None: + self.model = model + known_model = known_models.get(model) + if known_model is not None: + if hasattr(known_model, "context_size"): + self.maximum_context = int(known_model.context_size) + def load_prompts(self, prompt_type: str) -> None: prompts_type = { "markdown": { @@ -242,7 +250,7 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): if user_conf_path.exists(): data = load_yaml(str(user_conf_path)) # fmt: off - yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] + yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] kvs=data, infer_missing=True ) # fmt: on @@ -252,7 +260,7 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): if git_conf_path.exists(): data = load_yaml(str(git_conf_path)) # fmt: off - yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] + yaml_config = yaml_config.from_dict( # pyright: ignore[reportUnknownMemberType] kvs=data, infer_missing=True ) # fmt: on @@ -333,7 +341,7 @@ def update_config(setting: str, value: str | float | int) -> None: try: if setting == "model": - config.ai.model = value + config.ai.load_model(value) elif setting == "temperature": config.ai.temperature = float(value) elif setting == "format": diff --git a/mentat/session.py b/mentat/session.py index bf5f4a626..02126239a 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -231,7 +231,19 @@ def start(self): the main loop which runs until an Exception or session_exit signal is encountered. """ - self.stream.send("ABC", color="red") + self.stream.send( + """ +███╗ ███╗███████╗███╗ ██╗████████╗ █████╗ ████████╗ +████╗ ████║██╔════╝████╗ ██║╚══██╔══╝██╔══██╗╚══██╔══╝ +██╔████╔██║█████╗ ██╔██╗ ██║ ██║ ███████║ ██║ +██║╚██╔╝██║██╔══╝ ██║╚██╗██║ ██║ ██╔══██║ ██║ +██║ ╚═╝ ██║███████╗██║ ╚████║ ██║ ██║ ██║ ██║ +╚═╝ ╚═╝╚══════╝╚═╝ ╚═══╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ +------------------------------------------------------- + It is by will alone I set my mind in motion + """, + color="purple", + ) async def run_main(): try: From 19d3bf40a15fd9bb3cec38d5ef3967fcf89dc81e Mon Sep 17 00:00:00 2001 From: Greg L Date: Fri, 29 Dec 2023 18:13:49 -0500 Subject: [PATCH 23/24] Update command in lint and test workflow The change ensures that the mentat command in the lint and test workflow runs in the correct directory. The previous command was not specifying the directory which may have caused some unexpected behaviors. 
--- .github/workflows/lint_and_test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/lint_and_test.yml b/.github/workflows/lint_and_test.yml index 1371d4660..d4c8dfb8f 100644 --- a/.github/workflows/lint_and_test.yml +++ b/.github/workflows/lint_and_test.yml @@ -60,7 +60,7 @@ jobs: # Ensure that python doesn't import local mentat folder and that 'mentat' command calls mentat instead of switching folders. working-directory: ./testbed run: | - poetry run mentat + poetry run mentat . license-check: runs-on: ubuntu-latest From efdb45b704cb9e4b1261a47aa22a7e5ec4fcb25b Mon Sep 17 00:00:00 2001 From: Greg L Date: Sat, 30 Dec 2023 18:40:01 -0500 Subject: [PATCH 24/24] Added ability to disable and enable plugins / Commands based on YAML. --- mentat/config.py | 23 ++++++++++++++++++++++ mentat/resources/conf/.mentatconf.yaml | 4 ++++ mentat/sampler/sampler.py | 27 ++++++++++++++------------ mentat/session.py | 2 +- mentat/terminal/client.py | 3 --- tests/sampler_test.py | 24 ++++++++++++++++++----- 6 files changed, 62 insertions(+), 21 deletions(-) diff --git a/mentat/config.py b/mentat/config.py index 148344c9f..7e84c0f06 100644 --- a/mentat/config.py +++ b/mentat/config.py @@ -36,16 +36,20 @@ class RunSettings(DataClassJsonMixin): auto_context: bool = False auto_tokens: int = 8000 auto_context_tokens: int = 0 + active_plugins: List[str] = field(default_factory=list) def __init__( self, file_exclude_glob_list: Optional[List[Path]] = None, + active_plugins: Optional[List[str]] = None, auto_context: Optional[bool] = None, auto_tokens: Optional[int] = None, auto_context_tokens: Optional[int] = None, ) -> None: if file_exclude_glob_list is not None: self.file_exclude_glob_list = file_exclude_glob_list + if active_plugins is not None: + self.active_plugins = active_plugins if auto_context is not None: self.auto_context = auto_context if auto_tokens is not None: @@ -200,6 +204,7 @@ class RunningSessionConfig(DataClassJsonMixin): ) maximum_context: Optional[int] = None auto_context_tokens: Optional[int] = 0 + active_plugins: Optional[List[str]] = None @classmethod def get_fields(cls) -> List[str]: @@ -269,6 +274,9 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): if yaml_config.file_exclude_glob_list is None: yaml_config.file_exclude_glob_list = [] + if yaml_config.active_plugins is None: + yaml_config.active_plugins = [] + if yaml_config.temperature is None: yaml_config.temperature = 0.2 @@ -296,6 +304,7 @@ def load_settings(config_session: Optional[RunningSessionConfig] = None): file_exclude_glob_list=[ Path(p) for p in file_exclude_glob_list ], # pyright: ignore[reportUnknownVariableType] + active_plugins=yaml_config.active_plugins, auto_context_tokens=yaml_config.auto_context_tokens, ) @@ -379,3 +388,17 @@ def get_config(setting: str) -> None: def load_config() -> None: init_config() load_settings() + + +def is_active_plugin(plugin: str | None = None) -> bool: + config = mentat.user_session.get("config") + if ( + plugin is not None + and config is not None + and config.run is not None + and config.run.active_plugins is not None + and plugin in config.run.active_plugins + ): + return True + + return False diff --git a/mentat/resources/conf/.mentatconf.yaml b/mentat/resources/conf/.mentatconf.yaml index 497a78677..5c532c080 100644 --- a/mentat/resources/conf/.mentatconf.yaml +++ b/mentat/resources/conf/.mentatconf.yaml @@ -22,6 +22,10 @@ file_exclude_glob_list: # - "**/.*/**" auto_context_tokens: +# a list of plugins that should be 
active. Current options include sampler +active_plugins: + - sampler + #settings related to the "parser" # Mentat parses files following a specific format, which you can set here. diff --git a/mentat/sampler/sampler.py b/mentat/sampler/sampler.py index e87726f1c..6c3b844a4 100644 --- a/mentat/sampler/sampler.py +++ b/mentat/sampler/sampler.py @@ -7,6 +7,7 @@ import mentat from mentat.code_feature import get_consolidated_feature_refs +from mentat.config import is_active_plugin from mentat.errors import SampleError from mentat.git_handler import get_git_diff, get_git_root_for_path, get_hexsha_active from mentat.parsers.git_parser import GitParser @@ -17,18 +18,6 @@ from mentat.utils import get_relative_path -def init_settings( - repo: str | None = None, merge_base_target: str | None = None -) -> None: - mentat.user_session.set( - "sampler_settings", - { - "repo": repo, - "merge_base_target": merge_base_target, - }, - ) - - def parse_message(message: ChatCompletionMessageParam) -> dict[str, str]: content = message.get("content") text, code = "", "" @@ -58,15 +47,29 @@ def parse_message(message: ChatCompletionMessageParam) -> dict[str, str]: class Sampler: + is_active: bool = False diff_active: str | None = None commit_active: str | None = None last_sample_id: str | None = None last_sample_hexsha: str | None = None + # set up the base config settings that sampler will use. + def __init__(self): + self.is_active = is_active_plugin("sampler") + if not mentat.user_session.get("sampler_settings"): + mentat.user_session.set( + "sampler_settings", + { + "repo": None, + "merge_base_target": None, + }, + ) + def set_active_diff(self): # Create a temporary commit with the active changes ctx = SESSION_CONTEXT.get() git_root = get_git_root_for_path(ctx.cwd, raise_error=False) + if not git_root: return repo = Repo(git_root) diff --git a/mentat/session.py b/mentat/session.py index 02126239a..bb7a3e247 100644 --- a/mentat/session.py +++ b/mentat/session.py @@ -182,7 +182,7 @@ async def _main(self): for file_edit in file_edits: file_edit.resolve_conflicts() - if session_context.sampler: + if session_context.sampler and session_context.sampler.is_active: session_context.sampler.set_active_diff() applied_edits = await code_file_manager.write_changes_to_files( diff --git a/mentat/terminal/client.py b/mentat/terminal/client.py index 42a3e23d2..6cae1b9fa 100644 --- a/mentat/terminal/client.py +++ b/mentat/terminal/client.py @@ -13,7 +13,6 @@ import mentat from mentat.config import update_config -from mentat.sampler import sampler from mentat.session import Session from mentat.session_stream import StreamMessageSource from mentat.terminal.loading import LoadingHandler @@ -248,8 +247,6 @@ def start( maximum_context: Optional[int], ) -> None: - sampler.init_settings() - if model is not None: update_config("model", model) if temperature is not None: diff --git a/tests/sampler_test.py b/tests/sampler_test.py index f2930f174..804f694d3 100644 --- a/tests/sampler_test.py +++ b/tests/sampler_test.py @@ -10,6 +10,7 @@ ChatCompletionUserMessageParam, ) +import mentat from mentat.errors import SampleError from mentat.git_handler import get_git_diff from mentat.parsers.block_parser import BlockParser @@ -17,7 +18,7 @@ from mentat.python_client.client import PythonClient from mentat.sampler import __version__ from mentat.sampler.sample import Sample -from mentat.sampler.sampler import Sampler, init_settings +from mentat.sampler.sampler import Sampler from mentat.sampler.utils import get_active_snapshot_commit from 
mentat.session import Session from scripts.evaluate_samples import evaluate_sample @@ -35,7 +36,13 @@ async def test_sample_from_context( mock_session_context, mock_collect_user_input, ): - init_settings(repo="test_sample_repo", merge_base_target="") + mentat.user_session.set( + "sampler_settings", + { + "repo": "test_sample_repo", + "merge_base_target": "", + }, + ) mocker.patch( "mentat.conversation.Conversation.get_messages", @@ -97,7 +104,13 @@ def is_sha1(string: str) -> bool: @pytest.mark.asyncio async def test_sample_command(temp_testbed, mock_collect_user_input, mock_call_llm_api): - init_settings(repo=None) + mentat.user_session.set( + "sampler_settings", + { + "repo": None, + "merge_base_target": None, + }, + ) mock_collect_user_input.set_stream_messages([ "Request", @@ -325,8 +338,9 @@ def get_updates_as_parsed_llm_message(cwd): async def test_sampler_integration( temp_testbed, mock_session_context, mock_call_llm_api ): - init_settings(repo=None) - # Setup the environemnt + mentat.user_session.set("sampler_settings", {"repo": None}) + + # Setup the environment repo = Repo(temp_testbed) (temp_testbed / "test_file.py").write_text("permanent commit") repo.git.add("test_file.py")
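
To close out the series, a minimal usage sketch of the plugin gating this
patch introduces. It assumes load_config() has populated
config.run.active_plugins from .mentatconf.yaml (the package __init__ now
does this on import, per PATCH 21), and uses the is_active_plugin helper
added to mentat/config.py above:

    from mentat.config import is_active_plugin, load_config

    # load_config() reads .mentatconf.yaml; listing `- sampler` under
    # active_plugins (as in the shipped default config above) enables it.
    load_config()

    if is_active_plugin("sampler"):
        # Mirrors the gate in Session._main: set_active_diff() only runs
        # when session_context.sampler.is_active is True.
        print("sampler plugin is active")
    else:
        print("sampler plugin is disabled")

Sampler computes self.is_active the same way in its __init__, which is
the flag Session._main consults before calling set_active_diff().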