diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..671e986
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,135 @@
+#-------------------------------------------------------------------------------
+# LLM APIs settings
+#-------------------------------------------------------------------------------
+# NOTE:
+# - OLAW can use both OpenAI and Ollama at the same time, but needs at least one of the two.
+# - Ollama is one of the simplest ways to get started running models locally: https://ollama.ai/
+OLLAMA_API_URL="http://localhost:11434"
+
+#OPENAI_API_KEY=""
+#OPENAI_ORG_ID=""
+
+# NOTE: OPENAI_BASE_URL can be used to interact with OpenAI-compatible providers.
+# For example:
+# - https://huggingface.co/blog/tgi-messages-api
+# - https://docs.vllm.ai/en/latest/getting_started/quickstart.html#using-openai-completions-api-with-vllm
+# Make sure to specify both OPENAI_BASE_URL and OPENAI_COMPATIBLE_MODEL when doing so.
+#OPENAI_BASE_URL=""
+#OPENAI_COMPATIBLE_MODEL=""
+
+#-------------------------------------------------------------------------------
+# Basic Rate Limiting
+#-------------------------------------------------------------------------------
+# NOTE:
+# - This set of variables allows for applying rate-limiting to individual API routes.
+# - See https://flask-limiter.readthedocs.io/en/stable/ for details and syntax.
+RATE_LIMIT_STORAGE_URI="memory://"
+API_MODELS_RATE_LIMIT="1/second"
+API_EXTRACT_SEARCH_STATEMENT_RATE_LIMIT="60 per 1 hour"
+API_SEARCH_RATE_LIMIT="120 per 1 hour"
+API_COMPLETE_RATE_LIMIT="60 per 1 hour"
+
+#-------------------------------------------------------------------------------
+# Court Listener API settings
+#-------------------------------------------------------------------------------
+# NOTE: The chatbot can make calls to the Court Listener API to pull relevant court opinions.
+COURT_LISTENER_MAX_RESULTS=4 # NOTE: To be adjusted based on the context length of the model used for inference.
+COURT_LISTENER_API_URL="https://www.courtlistener.com/api/rest/v3/"
+COURT_LISTENER_BASE_URL="https://www.courtlistener.com"
+
+#-------------------------------------------------------------------------------
+# Extract Search Statement Prompt
+#-------------------------------------------------------------------------------
+# NOTE: This prompt is used to identify a legal question and make it into a search statement.
+EXTRACT_SEARCH_STATEMENT_PROMPT="
+Identify whether there is a legal question in the following message and, if so, transform it into a search statement.
+
+If the legal question can be answered by searching case law:
+- Follow the COURTLISTENER instructions to generate a search statement
+
+If there are multiple questions, only consider the last one.
+
+---
+
+COURTLISTENER instructions:
+Here are instructions on how to generate an effective search statement for that platform.
+
+## Keywords
+Identify and extract keywords from the question. If a term can be both singular or plural, use both (i.e: \"pony\" and \"ponies\").
+Use quotation marks around proper nouns and terms that should not be broken up.
+
+## Logical connectors
+Separate the different keywords and parts of the search statement with logical connectors such as AND, OR, NOT.
+
+## Dates and date ranges
+If a date (or element of a date) is present in the question, you can add it to the search statement as such to define a range:
+dateFiled:[YYYY-MM-DD TO YYYY-MM-DD]
+
+If only the year is present, set MM and DD to 01 and 01.
+If only the start year is present, assume the end date is the last day of that year.
+Do not wrap dateFiled statements in parentheses.
+
+## Name of cases
+If the question features the name of a case, you can add it to the search statement as such:
+caseName:(\"name of a case\")
+
+Tip to recognize case names: they often feature v. or vs. As in: \"Roe v. Wade\".
+
+## Name of court, state or jurisdiction
+If the question features the name of a court or of a state, you can add it to the search statement as such:
+court:(\"name of a court, state or jurisdiction\")
+
+## Excluded terms
+The following terms do not help make good search statements and MUST NOT be present in the search statement: law, laws, case, cases, precedent, precedents, adjudicated.
+
+## Other fields available
+dateFiled, caseName and court are the only fields you should use. Do not invent other fields. Everything else is a search term.
+
+---
+
+Return your response as a JSON object containing the following keys:
+- search_statement: String representing the generated search statement. Is empty if the text does not contain a legal question.
+- search_target: String representing the target API for that search statement. Can be \"courtlistener\" or empty.
+
+Here is the message you need to analyze:
+"
+
+#-------------------------------------------------------------------------------
+# Text Completion Prompts
+#-------------------------------------------------------------------------------
+# NOTE: {history} {rag} and {request} are reserved keywords.
+TEXT_COMPLETION_BASE_PROMPT = "
+{history}
+
+You are a helpful and friendly AI legal assistant.
+Your explanation of legal concepts should be easy to understand while still being accurate and detailed. Explain any legal jargon, and do not assume knowledge of any related concepts.
+
+{rag}
+
+Request: {request}
+
+Helpful response (plain text, no markdown):
+"
+
+# NOTE: Injected into BASE prompt when relevant.
+# Inspired by LangChain's default RAG prompt.
+# {context} is a reserved keyword.
+TEXT_COMPLETION_RAG_PROMPT = "
+Here is context to help you fulfill the user's request:
+{context}
+----------------
+When possible, use context to answer the request from the user.
+Ignore context if it is empty or irrelevant.
+If you don't know the answer, just say that you don't know, don't try to make up an answer.
+Cite and quote your sources whenever possible. Use their number (for example: [1]) to reference them.
+
+"
+
+# NOTE: Injected into BASE prompt when relevant.
+# NOTE: {history} is a reserved keyword
+TEXT_COMPLETION_HISTORY_PROMPT = "
+Here is a summary of the conversation thus far:
+{history}
+----------------
+
+"
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..983f87d
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+.env.example linguist-language=Shell
+*.env.example linguist-language=Shell
\ No newline at end of file
diff --git a/.github/screenshots/idle-01.png b/.github/screenshots/idle-01.png
new file mode 100644
index 0000000..35d6e76
Binary files /dev/null and b/.github/screenshots/idle-01.png differ
diff --git a/.github/screenshots/inspect-01.png b/.github/screenshots/inspect-01.png
new file mode 100644
index 0000000..9712358
Binary files /dev/null and b/.github/screenshots/inspect-01.png differ
diff --git a/.github/screenshots/inspect-02.png b/.github/screenshots/inspect-02.png
new file mode 100644
index 0000000..ba726ca
Binary files /dev/null and b/.github/screenshots/inspect-02.png differ
diff --git a/.github/screenshots/question-01.png b/.github/screenshots/question-01.png
new file mode 100644
index 0000000..2325e43
Binary files /dev/null and b/.github/screenshots/question-01.png differ
diff --git a/.github/screenshots/question-02.png b/.github/screenshots/question-02.png
new file mode 100644
index 0000000..e6df48c
Binary files /dev/null and b/.github/screenshots/question-02.png differ
diff --git a/.github/screenshots/question-03.png b/.github/screenshots/question-03.png
new file mode 100644
index 0000000..1cfe8ae
Binary files /dev/null and b/.github/screenshots/question-03.png differ
diff --git a/.github/screenshots/question-04.png b/.github/screenshots/question-04.png
new file mode 100644
index 0000000..229ed58
Binary files /dev/null and b/.github/screenshots/question-04.png differ
diff --git a/.github/screenshots/question-05.png b/.github/screenshots/question-05.png
new file mode 100644
index 0000000..4940b2a
Binary files /dev/null and b/.github/screenshots/question-05.png differ
diff --git a/.github/screenshots/question-06.png b/.github/screenshots/question-06.png
new file mode 100644
index 0000000..d0a178f
Binary files /dev/null and b/.github/screenshots/question-06.png differ
diff --git a/.github/screenshots/settings-01.png b/.github/screenshots/settings-01.png
new file mode 100644
index 0000000..f46f706
Binary files /dev/null and b/.github/screenshots/settings-01.png differ
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..cdb65f7
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+.env
+*.pyc
+.DS_Store
+chromadb/
+test.py
+TODO.md
+runs/
+_*/
+*.zip
\ No newline at end of file
diff --git a/.vscode/extensions.json b/.vscode/extensions.json
new file mode 100644
index 0000000..972674f
--- /dev/null
+++ b/.vscode/extensions.json
@@ -0,0 +1,8 @@
+{
+ "recommendations": [
+ "ms-python.black-formatter",
+ "ms-python.flake8",
+ "tobermory.es6-string-html",
+ "standard.vscode-standard"
+ ]
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..de9d78b
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,6 @@
+{
+ "editor.formatOnSave": true,
+ "python.formatting.provider": "black",
+ "standard.enable": false,
+ "standard.autoFixOnSave": true
+}
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 0000000..bef38ae
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,10 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+authors:
+ - family-names: Cargnelutti
+ given-names: Matteo
+ - family-names: Cushman
+ given-names: Jack
+title: "Open Legal AI Workbench (OLAW)"
+version: 0.0.1
+date-released: 2024-03-06
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..90473db
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Harvard Library Innovation Laboratory
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..b50f8bf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,397 @@
+# Open Legal AI Workbench (OLAW)
+
+**AI + Legal APIs**: A Tool-Based Retrieval Augmented Generation Workbench for Legal AI UX Research.
+
+https://github.com/harvard-lil/olaw/assets/625889/65dd61db-42f8-490b-a737-0612d97c5c81
+
+More info:
+> TODO: Link to blog post
+
+---
+
+## Summary
+- [Concept](#concept)
+- [Installation](#installation)
+- [Configuring the application](#configuring-the-application)
+- [Starting the server](#starting-the-server)
+- [Recommended models](#recommended-models)
+- [Interacting with the Web UI](#interacting-with-the-web-ui)
+- [Interacting with the API](#interacting-with-the-api)
+- [Adding new tools](#adding-new-tools)
+- [Getting Involved](#getting-involved)
+- [Cite this repository](#cite-this-repository)
+- [Disclaimer](#disclaimer)
+
+---
+
+## Concept
+
+**OLAW** is a tool-based [Retrieval Augmented Generation](https://www.promptingguide.ai/techniques/rag) (RAG) workbench for legal AI UX research.
+It consists of a customizable chatbot that can use legal APIs to augment its responses.
+
+**The goal of this project is to simplify and streamline experimentation with APIs-based RAG in legal contexts by:**
+- **Keeping it simple**: The tool should be easy to operate, modify and interpret.
+- **Being highly customizable and modular**: [Adding a tool](#adding-new-tools) to this workbench should be as simple as possible.
+- **Being open and collaborative**: A lot of this work generally happens behind the scenes. This project aims at amplifying collaborative research on the uses of AI in legal contexts.
+
+The focus here is on ease of access and experimentation, as opposed to overall performance or production-readiness.
+
+### Tool-based RAG?
+There are as many "flavors" of RAG as there are implementations of it.
+This workbench focuses on a tool-based approach, in which **the LLM is indirectly given access to APIs** as a way to augment its responses.
+
+**This process takes place in three steps:**
+1. _Upon receiving a message from the user, the pipeline asks the LLM to analyze the message to:_
+ - Detect if it contains a legal question
+ - [Use a prompt](/.env.example#L37) to determine where to look for additional information _(search target)_
+ - Use that same prompt to generate a search statement to use against the search target
+2. _Upon identifying a search suggestion._
+ - The UI presents the search suggestion to the user and asks for confirmation.
+3. _Upon confirmation from the user:_
+ - The pipeline performs the suggested search against the search target ...
+ - ... and uses the results as additional context when asking the LLM to answer the user's question
+
+[☝️ Summary](#summary)
+
+---
+
+## Installation
+
+OLAW requires the following machine-level dependencies to be installed.
+
+- [Python 3.11+](https://python.org)
+- [Python Poetry](https://python-poetry.org/)
+
+Use the following commands to clone the project and install its dependencies:
+
+```bash
+# MacOS / Linux / WSL
+git clone https://github.com/harvard-lil/olaw.git
+cd olaw
+poetry install
+```
+
+The workbench itself doesn't have specific hardware requirements. If you would like to use [Ollama](https://ollama.ai) for local inference with open-source language models, be sure to check their [system requirements](https://github.com/ollama/ollama?tab=readme-ov-file#model-library).
+
+[☝️ Summary](#summary)
+
+---
+
+## Configuring the application
+
+This program uses environment variables to handle settings.
+Copy `.env.example` into a new `.env` file and edit it as needed.
+
+```bash
+cp .env.example .env
+```
+
+See details for individual settings in [.env.example](.env.example).
+
+**A few notes:**
+- OLAW can interact with both the [OpenAI API](https://platform.openai.com/docs/introduction) and [Ollama](https://ollama.ai) for local inference.
+ - Both can be used at the same time, but at least one is needed.
+ - By default, the program will try to communicate with Ollama's API at `http://localhost:11434`.
+ - It is also possible to use OpenAI's client to interact with compatible providers, such as [HuggingFace's Message API](https://huggingface.co/blog/tgi-messages-api) or [vLLM](https://docs.vllm.ai/en/latest/getting_started/quickstart.html#using-openai-completions-api-with-vllm). To do so, set values for both `OPENAI_BASE_URL` and `OPENAI_COMPATIBLE_MODEL` environment variables.
+- Prompts can be edited directly in the configuration file.
+
+[☝️ Summary](#summary)
+
+---
+
+## Starting the server
+
+The following command will start the OLAW (development) server on port `5000`.
+
+```bash
+poetry run flask run
+# Note: Use --port to use a different port
+```
+
+[☝️ Summary](#summary)
+
+---
+
+## Recommended models
+
+While this pipeline can in theory be run against a wide variety of text generation models, there are two key constraints to keep in mind when picking an LLM:
+- The size of the **context window**. The target model needs to be able to handle long input, as the pipeline may pull additional context from APIs it has access to.
+- Ability to **reliably return JSON data**. This feature is used by the `/api/extract-search-statement` route.
+
+**We have tested this software with the following models:**
+- OpenAI: `openai/gpt-4-turbo-preview` _(128K tokens context)_
+- Ollama: Any version of `ollama/mixtral` _(32K tokens context + sliding window)_
+
+We have observed the best performance with `openai/gpt-4-turbo-preview` during our initial tests, using the default prompts.
+
+[☝️ Summary](#summary)
+
+---
+
+## Interacting with the Web UI
+
+Once the server is started, the application's web UI should be available at `http://localhost:5000`.
+
+The interface automatically handles a basic chat history, allowing for few-shots / chain-of-thoughts prompting.
+
+[☝️ Summary](#summary)
+
+---
+
+## Interacting with the API
+
+OLAW comes with a REST API that can be used to interact programmatically with the workbench.
+
+> New to REST APIs? See this [tutorial](https://www.smashingmagazine.com/2018/01/understanding-using-rest-api/).
+
+### [GET] /api/models
+Returns a list of available models as JSON.
+
+
+Sample output
+
+```json
+[
+ "openai/gpt-4-vision-preview",
+ "openai/gpt-4-0613",
+ "openai/gpt-4-0125-preview",
+ "openai/gpt-4-turbo-preview",
+ "openai/gpt-4",
+ "openai/gpt-4-1106-preview",
+ "ollama/llama2:13b",
+ "ollama/llama2:13b-chat-fp16",
+ "ollama/llama2:70b",
+ "ollama/llama2:70b-chat-fp16",
+ "ollama/llama2:7b",
+ "ollama/llama2:latest",
+ "ollama/mistral:7b",
+ "ollama/mistral:7b-instruct-fp16",
+ "ollama/mistral:7b-instruct-v0.2-fp16",
+ "ollama/mixtral:8x7b-instruct-v0.1-fp16",
+ "ollama/mixtral:8x7b-instruct-v0.1-q6_K",
+ "ollama/mixtral:latest",
+ "ollama/phi:2.7b-chat-v2-fp16"
+]
+```
+
+
+
+### [POST] /api/extract-search-statement
+Uses the [search statement extraction prompt](/olaw/blob/main/.env.example#L37) to:
+- Detect if the user asked a question that requires pulling information from a legal database
+- If so, transform said question into a search statement that can be run against a known search target (See [`SEARCH_TARGETS`](/olaw/const/__init__.py)).
+
+
+Returns a JSON object containing `search_statement` and `search_target`. These properties can be empty.
+
+
+Sample input
+
+```json
+{
+ "model": "ollama/mixtral",
+ "temperature": 0.0,
+ "message": "Tell me everything you know about Miranda v. Arizona (1966)"
+}
+```
+
+**Notes:**
+- `temperature` is optional.
+
+
+
+
+Sample output
+
+
+```json
+{
+ "search_statement": "caseName:(\"Miranda v. Arizona\") AND dateFiled:[1966-01-01 TO 1966-12-31]",
+ "search_target": "courtlistener"
+}
+```
+
+
+
+
+### [POST] /api/search
+Performs search using what `/api/extract-search-statement` returned.
+
+Returns a JSON object with search results indexed by `SEARCH_TARGET`.
+
+
+Sample input
+
+```json
+{
+ "search_statement": "caseName:(\"Miranda v. Arizona\") AND dateFiled:[1966-01-01 TO 1966-12-31]",
+ "search_target": "courtlistener"
+}
+```
+
+
+
+
+Sample output
+
+```json
+{
+ "courtlistener": [
+ {
+ "absolute_url": "https://www.courtlistener.com/opinion/107252/miranda-v-arizona/",
+ "case_name": "Miranda v. Arizona",
+ "court": "Supreme Court of the United States",
+ "date_filed": "1966-06-13T00:00:00-07:00",
+ "id": 107252,
+ "ref_tag": 1,
+ "status": "Precedential",
+ "text": "..."
+ },
+ {
+ "absolute_url": "https://www.courtlistener.com/opinion/8976604/miranda-v-arizona/",
+ "case_name": "Miranda v. Arizona",
+ "court": "Supreme Court of the United States",
+ "date_filed": "1969-10-13T00:00:00-07:00",
+ "id": 8968349,
+ "ref_tag": 2,
+ "status": "Precedential",
+ "text": "..."
+ },
+ {
+ "absolute_url": "https://www.courtlistener.com/opinion/8962758/miranda-v-arizona/",
+ "case_name": "Miranda v. Arizona",
+ "court": "Supreme Court of the United States",
+ "date_filed": "1965-11-22T00:00:00-08:00",
+ "id": 8953989,
+ "ref_tag": 3,
+ "status": "Precedential",
+ "text": "..."
+ }
+ ]
+}
+```
+
+
+
+### [POST] /api/complete
+Passes messages and context to target LLM and starts **streaming** text completion. Returns raw text, streamed.
+
+
+Sample input
+
+```json
+{
+ "message": "Tell me everything you know about Miranda v. Arizona (1966)",
+ "model": "openai/gpt-4-turbo-preview",
+ "temperature": 0.0,
+ "max_tokens": 4000,
+ "search_results": {
+ "courtlistener": [...]
+ },
+ "history": [
+ {"role": "user", "content": "Hi there!"},
+ {"role": "assistant", "content": "How may I help you?"}
+ ]
+}
+```
+
+**Notes:**
+- `temperature` is optional.
+- `max_tokens` is optional.
+- `history` must be an array of objects containing `role` and `content` keys. `role` can be either `user` or `assistant`.
+
+
+
+[☝️ Summary](#summary)
+
+---
+
+## Adding new tools
+
+This section of the documentation describes the process of making OLAW understand and use additional _"search targets"_ beyond the Court Listener API.
+
+
+1. Declare a new search target
+
+Edit the [`SEARCH_TARGETS`](/olaw/search_targets/__init__.py) list under [`olaw/search_targets/__init__.py`](/olaw/search_targets/__init__.py) to declare a new search target.
+
+Let's call this new target `casedotlaw`.
+
+```python
+SEARCH_TARGETS = ["courtlistener", "casedotlaw"]
+```
+
+
+
+
+2. Edit search statement extraction prompt
+
+Edit `EXTRACT_SEARCH_STATEMENT_PROMPT` in your `.env` file to let the LLM know how to write search statements for this new tool.
+
+This prompt is used by `/api/extract-search-statement`, which is then able to output objects as follows:
+
+```json
+{
+ "search_statement": "(Platform-specific search statement based on user question)",
+ "search_target": "casedotlaw"
+}
+```
+
+The process of designing a performant prompt for that task generally requires a few iterations.
+
+
+
+
+3. Add handling logic
+
+Add a file under the `olaw/search_targets/` folder, named after your search target. In that case: `casedotlaw.py`.
+
+This file must contain a class inheriting from `SearchTarget`, which defines 1 property and 1 static method:
+- `RESULTS_DATA_FORMAT` determining how search results data is structured
+- `search()` containing logic for returning search results
+
+You may refer to [`courtlistener.py` as an example](/olaw/search_targets/courtlistener.py).
+
+You will also need to edit [`olaw/search_targets/__init__.py`](/olaw/search_targets/__init__.py) as follows:
+- Import `casedotlaw.py`
+- Edit `route_search()` to account for that new target
+
+
+
+[☝️ Summary](#summary)
+
+---
+
+## Getting Involved
+
+This project is collaborative at its core and we warmly welcome feedback and contributions.
+
+- [The issues tab](/issues) is a good place to start to report bugs, suggest features or volunteer to contribute to the codebase on a specific issue.
+- Don't hesitate to use [the discussions tab](/discussions) to ask more general questions about this project.
+
+
+[☝️ Summary](#summary)
+
+---
+
+## Cite this repository
+
+> Cargnelutti, M., & Cushman, J. (2024). Open Legal AI Workbench (OLAW) (Version 0.0.1) [Computer software]
+
+See also:
+- [Our citation file](https://github.com/harvard-lil/olaw/blob/main/CITATION.cff)
+- The _"Cite this repository"_ button in the About section of this repository.
+
+[☝️ Summary](#summary)
+
+---
+
+## Disclaimer
+
+The Library Innovation Lab is an organization based at the Harvard Law School Library. We are a cross-functional group of software developers, librarians, lawyers, and researchers doing work at the edges of technology and digital information.
+
+Our work is rooted in library principles including longevity, authenticity, reliability, and privacy. Any work that we produce takes these principles as a primary lens. However due to the nature of exploration and a desire to prototype our work with real users, we do not guarantee service or performance at the level of a production-grade software for all of our releases. This includes this project, which is an experimental boilerplate released under [MIT License](LICENSE).
+
+Open Legal AI Workbench is an experimental tool for evaluating legal retrieval software and should not be used for legal advice.
+
+[☝️ Summary](#summary)
diff --git a/olaw/__init__.py b/olaw/__init__.py
new file mode 100644
index 0000000..3efbeae
--- /dev/null
+++ b/olaw/__init__.py
@@ -0,0 +1,25 @@
+from dotenv import load_dotenv
+from flask import Flask, make_response, jsonify
+
+from olaw import utils
+
+load_dotenv()
+
+
+def create_app():
+ """
+ App factory (https://flask.palletsprojects.com/en/2.3.x/patterns/appfactories/)
+ """
+ app = Flask(__name__)
+
+ # Note: Every module in this app assumes the app context is available and initialized.
+ with app.app_context():
+ utils.check_env()
+
+ from olaw import views
+
+ @app.errorhandler(429)
+ def ratelimit_handler(e):
+ return make_response(jsonify(error=f"Rate limit exceeded ({e.description})"), 429)
+
+ return app
diff --git a/olaw/search_targets/__init__.py b/olaw/search_targets/__init__.py
new file mode 100644
index 0000000..41aa7e9
--- /dev/null
+++ b/olaw/search_targets/__init__.py
@@ -0,0 +1,41 @@
+SEARCH_TARGETS = ["courtlistener"]
+"""
+ List of "tools" this RAG pipeline can use to pull information from.
+ See details in `README.md` under "Adding new tools".
+"""
+
+
+class SearchTarget:
+ """
+ Base class for all search targets.
+ Inherit this class to let OLAW use a new tool.
+ """
+
+ RESULTS_DATA_FORMAT = {
+ "text": "", # Full text
+ "prompt_text": "", # Line of text used as part of the RAG prompt to introduce this source.
+ "ui_text": "", # Line of text used as part of the UI to introduce this source.
+ "ui_url": "", # URL used to let users explore this source.
+ }
+
+ @staticmethod
+ def search(search_statement: str) -> list:
+ raise NotImplementedError
+
+
+from .courtlistener import CourtListener # noqa
+
+
+def route_search(search_target: str, search_statement: str):
+ """
+ Routes a search to the right handler.
+ """
+ if search_target not in SEARCH_TARGETS:
+ raise Exception("Invalid search target")
+
+ search_results = []
+
+ if search_target == "courtlistener":
+ search_results = CourtListener.search(search_statement)
+
+ return search_results
diff --git a/olaw/search_targets/courtlistener.py b/olaw/search_targets/courtlistener.py
new file mode 100644
index 0000000..8fa928a
--- /dev/null
+++ b/olaw/search_targets/courtlistener.py
@@ -0,0 +1,131 @@
+import os
+import re
+import traceback
+import requests
+import html2text
+
+from . import SearchTarget
+
+
+class CourtListener(SearchTarget):
+
+ RESULTS_DATA_FORMAT = {
+ "id": "",
+ "case_name": "",
+ "court": "",
+ "absolute_url": "",
+ "status": "",
+ "date_filed": "",
+ "text": "", # Full opinion text
+ "prompt_text": "", # Line of text used as part of the RAG prompt to introduce sources.
+ "ui_text": "", # Line of text used as part of the UI to introduce this source.
+ "ui_url": "", # URL used to let users explore this source.
+ }
+ """
+ Shape of the data for each individual entry of search_results.
+ """
+
+ @staticmethod
+ def search(search_statement: str):
+ """
+ Runs search_statement against the CourtListener search API.
+ - Returns up to COURT_LISTENER_MAX_RESULTS results.
+ - Objects in list use the CourtListener.RESULTS_DATA_FORMAT template.
+ """
+ api_url = os.environ["COURT_LISTENER_API_URL"]
+ base_url = os.environ["COURT_LISTENER_BASE_URL"]
+ max_results = int(os.environ["COURT_LISTENER_MAX_RESULTS"])
+
+ raw_results = None
+ prepared_results = []
+
+ filed_before = None
+ filed_after = None
+
+ # Extract date range from "search_statement":
+ # This is to account for dateFiled:[X TO Y] working inconsistently
+ if "dateFiled" in search_statement:
+ pattern_filed_after = r"dateFiled\:\[([0-9]{4}-[0-9]{2}-[0-9]{2}) TO"
+
+ pattern_filed_before = (
+ r"dateFiled\:\[[0-9]{4}-[0-9]{2}-[0-9]{2} TO ([0-9]{4}-[0-9]{2}-[0-9]{2})\]"
+ )
+ try:
+ filed_after = re.findall(pattern_filed_after, search_statement)[0]
+ filed_after = filed_after.replace("-", "/")
+
+ filed_before = re.findall(pattern_filed_before, search_statement)[0]
+ filed_before = filed_before.replace("-", "/")
+ except Exception:
+ pass
+
+ print(filed_after)
+ print(filed_before)
+
+ #
+ # Pull search results
+ #
+ raw_results = requests.get(
+ f"{api_url}search/",
+ timeout=10,
+ params={
+ "type": "o",
+ "order": "score desc",
+ "q": search_statement,
+ "filed_after": filed_after,
+ "filed_before": filed_before,
+ },
+ ).json()
+
+ #
+ # Pull opinion text for the first X results
+ #
+ for i in range(0, max_results):
+ if i > len(raw_results["results"]) - 1:
+ break
+
+ opinion = dict(CourtListener.RESULTS_DATA_FORMAT)
+
+ opinion_metadata = raw_results["results"][i]
+
+ # Case-specific data
+ opinion["id"] = opinion_metadata["id"]
+ opinion["case_name"] = opinion_metadata["caseName"]
+ opinion["court"] = opinion_metadata["court"]
+ opinion["absolute_url"] = base_url + opinion_metadata["absolute_url"]
+ opinion["status"] = opinion_metadata["status"]
+ opinion["date_filed"] = opinion_metadata["dateFiled"]
+
+ # Request and format opinion text
+ try:
+ opinion_data = requests.get(
+ f"{api_url}opinions/",
+ timeout=10,
+ params={"id": opinion["id"]},
+ ).json()
+
+ opinion_data = opinion_data["results"][0]
+ opinion["text"] = html2text.html2text(opinion_data["html"])
+
+ except Exception:
+ continue
+
+ # Text for LLM (context intro)
+ # [1] Foo v. Bar (1996) Court Name, as sourced from http://url:
+ opinion["prompt_text"] = f"[{i+1}] " # [1]
+ opinion["prompt_text"] += f"{opinion['case_name']} " # Foo v. Bar
+ opinion["prompt_text"] += f"({opinion['date_filed'][0:4]}) " # (1996)
+ opinion["prompt_text"] += f"{opinion['court']}, " # US Supreme Court
+ opinion["prompt_text"] += f"as sourced from {opinion['absolute_url']}:"
+
+ # Text for UI
+ # [1] Foo v. Bar (1996), Court Name
+ opinion["ui_text"] = f"[{i+1}] " # [1]
+ opinion["ui_text"] += f"{opinion['case_name']} " # Foo v. Bar
+ opinion["ui_text"] += f"({opinion['date_filed'][0:4]}), " # (1996)
+ opinion["ui_text"] += f"{opinion['court']} " # US Supreme Court
+ opinion["ui_url"] = opinion["absolute_url"]
+
+ prepared_results.append(opinion)
+
+ return prepared_results
diff --git a/olaw/static/components/ChatBubble.css b/olaw/static/components/ChatBubble.css
new file mode 100644
index 0000000..fc2f50d
--- /dev/null
+++ b/olaw/static/components/ChatBubble.css
@@ -0,0 +1,99 @@
+chat-bubble {
+ display: block;
+ width: fit-content;
+ max-width: 80%;
+ margin-left: auto;
+ background-color: var(--background-);
+ padding: 1rem;
+ border-radius: 0.25rem;
+ margin-bottom: 1.5rem;
+ margin-right: 1rem;
+ line-height: 1.5rem;
+ position: relative;
+ line-break: loose;
+}
+
+chat-bubble::before {
+ content: "";
+ width: 0px;
+ height: 0px;
+ position: absolute;
+ top: 0rem;
+ right: -1rem;
+ border-left: 0.75rem solid var(--background-);
+ border-right: 0.75rem solid transparent;
+ border-top: 0.75rem solid var(--background-);
+ border-bottom: 0.75rem solid transparent;
+}
+
+chat-bubble .actor {
+ font-weight: normal;
+ font-style: italic;
+ margin-bottom: 0.5rem;
+ color: var(--color--);
+}
+
+chat-bubble code {
+ display: block;
+ padding: 1rem;
+ background-color: var(--background);
+ border-radius: 0.25rem;
+ margin-top: 0.5rem;
+ margin-bottom: 1rem;
+ font-size: 0.9rem;
+}
+
+chat-bubble .actions button {
+ background-color: transparent;
+ color: var(--color);
+ text-decoration: underline;
+ font-weight: normal;
+ padding-top: 0rem;
+ padding-bottom: 0rem;
+ padding-left: 0.5rem;
+ padding-right: 0.5rem;
+}
+
+chat-bubble .actions button:hover {
+ color: var(--color-);
+ text-decoration: none;
+}
+
+chat-bubble .actions button:disabled {
+ background-color: transparent;
+ color: var(--color--);
+ text-decoration: none;
+}
+
+chat-bubble[type="user"] {
+ margin-left: unset;
+ margin-right: auto;
+ background-color: var(--color);
+ color: var(--background);
+ margin-right: unset;
+ margin-left: 1rem;
+}
+
+chat-bubble[type="user"]::before {
+ right: unset;
+ left: -1rem;
+ border-top-color: var(--color);
+ border-left-color: var(--color);
+ rotate: 90deg;
+}
+
+chat-bubble[type="ai"] .text {
+ white-space: pre-line;
+}
+
+chat-bubble[type="sources"] .text {
+ margin-bottom: 1rem;
+}
+
+chat-bubble[type="sources"] .actor span {
+ text-transform: capitalize;
+}
+
+chat-bubble[type="sources"] .text * {
+ display: block;
+}
diff --git a/olaw/static/components/ChatBubble.js b/olaw/static/components/ChatBubble.js
new file mode 100644
index 0000000..aa0e8e4
--- /dev/null
+++ b/olaw/static/components/ChatBubble.js
@@ -0,0 +1,194 @@
+import { state } from "../state.js";
+
+/**
+ * UI Element representing a chat bubble.
+ *
+ * Available values for "type" attribute:
+ * - "user": User message
+ * - "ai": Message from AI.
+ * - "error": Standard error message.
+ * - "analyzing-request": System message letting the user know that the system is analyzing the request.
+ * - "confirm-search": Interactive message asking the user to confirm before performing a RAG search.
+ * - "sources": Message listing sources (state.searchResults)
+ *
+ * Uses app state + type to determine what contents to render.
+ */
+export class ChatBubble extends HTMLElement {
+ connectedCallback() {
+ const type = this.getAttribute("type");
+
+ switch (type) {
+ case "user":
+ this.renderUserBubble();
+ break;
+
+ case "ai":
+ this.renderAIBubble();
+ break;
+
+ case "analyzing-request":
+ this.renderAnalyzingRequestBubble();
+ break;
+
+ case "confirm-search":
+ this.renderConfirmSearchBubble();
+
+ const confirmButton = this.querySelector(`[data-action="confirm"]`);
+ const rejectButton = this.querySelector(`[data-action="reject"]`);
+
+ // Event listener for the "confirm" button
+ confirmButton.addEventListener("click", (e) => {
+ state.log(
+ "User accepted suggested search.",
+ "/api/extract-search-statement"
+ );
+
+ confirmButton.setAttribute("disabled", "disabled");
+ rejectButton.setAttribute("disabled", "disabled");
+
+ document.querySelector("chat-flow").search();
+ });
+
+ // Event listener for the "reject" button:
+ rejectButton.addEventListener("click", (e) => {
+ state.log(
+ "User rejected suggested search.",
+ "/api/extract-search-statement"
+ );
+
+ confirmButton.setAttribute("disabled", "disabled");
+ rejectButton.setAttribute("disabled", "disabled");
+
+ document.querySelector("chat-flow").streamCompletion();
+ });
+
+ break;
+
+ case "sources":
+ this.renderSourcesBubble();
+ break;
+
+ case "error":
+ default:
+ this.renderErrorBubble();
+ break;
+ }
+ }
+
+ /**
+ * Renders a "user" bubble.
+ * Uses the current value of `state.message`.
+ * @returns {void}
+ */
+ renderUserBubble = () => {
+ this.innerHTML = /*html*/ `
+
${this.sanitizeString(state.message)}
+ `;
+
+ this.inspectDialogRef = document.querySelector("inspect-dialog");
+ };
+
+ /**
+ * Renders an "ai" bubble.
+ * Text starts empty and is later streamed into it by `<chat-flow>`.
+ * @returns {void}
+ */
+ renderAIBubble = () => {
+ this.innerHTML = /*html*/ `
+ ${this.sanitizeString(state.model)}
+
+ `;
+ };
+
+ /**
+ * Renders an "analyzing-request" bubble.
+ * @returns {void}
+ */
+ renderAnalyzingRequestBubble = () => {
+ this.innerHTML = /*html*/ `
+ System
+ The chatbot is looking for a tool to help answer your question.
+ `;
+ };
+
+ /**
+ * Renders a "confirm-search" bubble.
+ * @returns {void}
+ */
+ renderConfirmSearchBubble = () => {
+ const searchTargetName = this.sanitizeString(state.searchTarget);
+ const searchStatement = this.sanitizeString(state.searchStatement);
+
+ this.innerHTML = /*html*/ `
+ ${this.sanitizeString(state.model)}
+
+ Run the following query against ${searchTargetName}?
+ ${searchStatement}
+
+
+
+ Yes, perform search.
+ Skip.
+
+ `;
+ };
+
+ /**
+ * Renders a "sources" bubble listing everything under state.searchResults.
+ * @returns {void}
+ */
+ renderSourcesBubble = () => {
+ let sourcesText = "";
+
+ for (const searchTarget of state.availableSearchTargets) {
+ if (!state.searchResults[searchTarget]) {
+ continue;
+ }
+
+ for (const source of state.searchResults[searchTarget]) {
+ const text = this.sanitizeString(`${source.ui_text}`, false);
+ const url = this.sanitizeString(source.ui_url, false);
+
+ sourcesText += /*html*/ `
+
+
+ ${text}
+
+
+ `;
+ }
+
+ this.innerHTML = /*html*/ `
+ Source: ${searchTarget}
+ ${sourcesText ? sourcesText : "No results"}
+ `;
+ }
+ };
+
+ /**
+ * Renders an "error" bubble.
+ * @returns {void}
+ */
+ renderErrorBubble = () => {
+ this.innerHTML = /*html*/ `
+ An error occurred (see console for details), please try again.
+ `;
+ };
+
+ /**
+ * Escapes `<` and `>` and, optionally, converts line breaks into `<br>` tags.
+ * @param {string} string - Text to sanitize.
+ * @param {boolean} convertLineBreaks - If `true`, line breaks are converted.
+ * @returns {string}
+ */
+ sanitizeString = (string, convertLineBreaks = true) => {
+ string = string.trim().replaceAll("<", "<").replaceAll(">", ">");
+
+ if (convertLineBreaks === true) {
+ string = string.replaceAll("\n", " ");
+ }
+
+ return string;
+ };
+}
+customElements.define("chat-bubble", ChatBubble);
diff --git a/olaw/static/components/ChatFlow.css b/olaw/static/components/ChatFlow.css
new file mode 100644
index 0000000..136d55f
--- /dev/null
+++ b/olaw/static/components/ChatFlow.css
@@ -0,0 +1,35 @@
+chat-flow {
+ display: block;
+ overflow: auto;
+ display: flex;
+ flex-direction: column;
+ min-height: 52.5dvh;
+ max-height: 65vh;
+}
+
+@media (max-width: 759px) {
+ chat-flow {
+ min-height: 60dvh;
+ max-height: 70dvh;
+ }
+}
+
+/* Placeholder for when chat is empty */
+chat-flow img.placeholder {
+ display: block;
+ margin: auto;
+ max-width: 100%;
+ max-height: 100%;
+ min-width: 15ch;
+ min-height: 15ch;
+ width: 66%;
+ object-fit: contain;
+}
+
+@media (max-width: 759px) {
+ chat-flow img.placeholder {
+ width: 30ch;
+ min-width: 10ch;
+ min-height: 10ch;
+ }
+}
diff --git a/olaw/static/components/ChatFlow.js b/olaw/static/components/ChatFlow.js
new file mode 100644
index 0000000..ef1e363
--- /dev/null
+++ b/olaw/static/components/ChatFlow.js
@@ -0,0 +1,326 @@
+import { state } from "../state.js";
+
+/**
+ * UI Element containing:
+ * - List of chat "bubbles" from the user, AI and system.
+ *
+ * Handles the processing of requests via its `ask()` and `search()` methods.
+ *
+ * Automatically populates:
+ * - `state.processing`
+ * - `state.history`
+ * - `state.searchStatement`
+ * - `state.searchTarget`
+ * - `state.searchResults`
+ *
+ * Automatically enables / disables relevant inputs based on app state.
+ */
+export class ChatFlow extends HTMLElement {
+ /** Reference to the paragraph with the last "ai" bubble in which text should be streamed. */
+ currentAICursorRef = null;
+
+ connectedCallback() {
+ // Enforce singleton
+ for (const node of [...document.querySelectorAll("chat-flow")].slice(1)) {
+ node.remove();
+ }
+
+ this.renderInnerHTML();
+ }
+
+ /**
+ * Processes a request from user (main entry point)
+ * @returns {Promise}
+ */
+ ask = async () => {
+ // Remove placeholder if still present
+ this.querySelector(".placeholder")?.remove();
+
+ // Compile payload
+ const message = state.message;
+ const model = state.model;
+ const temperature = state.temperature;
+
+ if (!message || !model || temperature === null) {
+ this.addBubble("error");
+ this.end();
+ return;
+ }
+
+ // Block UI
+ state.processing = true;
+
+ // Inject user message
+ this.addBubble("user");
+ state.log(state.message, "User sent a message");
+
+ // Inject "analyzing-request" message
+ await new Promise((resolve) => setTimeout(resolve, 500));
+ this.addBubble("analyzing-request");
+
+ // Analyze user request to identify potential legal question
+ try {
+ const response = await fetch("/api/extract-search-statement", {
+ method: "POST",
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({ message, model, temperature }),
+ });
+
+ const data = await response.json();
+
+ if (data?.search_statement && data?.search_target) {
+ state.searchStatement = data.search_statement;
+ state.searchTarget = data.search_target;
+ }
+ } catch (err) {
+ console.error(err);
+ this.addBubble("error");
+ this.end();
+ return;
+ }
+
+ // If legal question was asked:
+ // - Inject "confirm-search" bubble
+ // - Interaction with "confirm-search" will determine next step (search() / streamCompletion())
+ if (state.searchStatement && state.searchTarget) {
+ state.log(
+ "Found a legal question. Awaiting for user confirmation before performing search.",
+ "/api/extract-search-statement"
+ );
+
+ this.addBubble("confirm-search");
+ }
+ // If no legal question was asked:
+ // - Inject "ai" bubble
+ // - Start streaming completion
+ else {
+ state.log(
+ "Did not find legal question. Starting text completion.",
+ "/api/extract-search-statement"
+ );
+
+ this.streamCompletion();
+ }
+ };
+
+ /**
+ * Stops streaming.
+ * @returns {void}
+ */
+ stopStreaming = () => {
+ state.log("Streaming interrupted by user.");
+ state.streaming = false;
+ };
+
+ /**
+ * Ends System/AI turn, goes back to user for input.
+ * @returns {void}
+ */
+ end = () => {
+ state.processing = false;
+ document.querySelector("chat-input textarea").value = "";
+ };
+
+ /**
+ * Inserts a chat bubble of a given type at the end of `chat-flow`.
+ * @param {string} type
+ * @returns {void}
+ */
+ addBubble = (type) => {
+ const bubble = document.createElement("chat-bubble");
+ bubble.setAttribute("type", type);
+
+ this.insertAdjacentElement("beforeend", bubble);
+
+ if (type === "ai") {
+ this.currentAICursorRef = bubble.querySelector(".text");
+ }
+
+ this.scrollIntoConversation();
+ };
+
+ /**
+ * Performs a search against /api/search using search statement and target returned by /api/extract-search-statement.
+ * Populates state.searchResults if successful.
+ * Adds sources bubbles when search is complete
+ * @returns {Promise}
+ */
+ search = async () => {
+ // Compile payload
+ const searchStatement = state.searchStatement;
+ const searchTarget = state.searchTarget;
+
+ if (!searchStatement || !searchTarget) {
+ this.addBubble("error");
+ return;
+ }
+
+ // Run query and:
+ // - Store results
+ // - Inject "sources" bubble if necessary
+ // - In any case: start streaming response
+ try {
+ let totalResults = 0;
+
+ const response = await fetch("/api/search", {
+ method: "POST",
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({
+ search_statement: searchStatement,
+ search_target: searchTarget,
+ }),
+ });
+
+ if (response.status !== 200) {
+ throw new Error((await response.json())?.error);
+ }
+
+ state.searchResults = await response.json();
+ state.log(JSON.stringify(state.searchResults, null, 2), "/api/search");
+
+ for (const key of state.availableSearchTargets) {
+ totalResults += state.searchResults[key]?.length;
+ }
+
+ if (totalResults >= 0) {
+ this.addBubble("sources");
+ }
+ } catch (err) {
+ this.addBubble("error");
+ console.error(err);
+ } finally {
+ this.streamCompletion();
+ }
+ };
+
+ /**
+ * Sends completion request to API and streams results into the last "ai" chat bubble of the list.
+ * Payload is determined by app state.
+ * @returns {Promise}
+ */
+ streamCompletion = async () => {
+ let output = "";
+ let response = null;
+ let responseStream = null;
+ const decoder = new TextDecoder();
+
+ //
+ // Compile payload
+ //
+ const message = state.message;
+ const model = state.model;
+ const temperature = state.temperature;
+ const maxTokens = state.maxTokens;
+ const searchResults = state.searchResults;
+ const history = state.history;
+
+ if (!message || !model || temperature === null) {
+ this.addBubble("error");
+ return;
+ }
+
+ //
+ // Start completion request
+ //
+ try {
+ response = await fetch("/api/complete", {
+ method: "POST",
+ headers: { "content-type": "application/json" },
+ body: JSON.stringify({
+ message,
+ model,
+ temperature,
+ max_tokens: maxTokens,
+ history,
+ search_results: searchResults,
+ }),
+ });
+
+ if (response.status != 200) {
+ throw new Error((await response.json())?.error);
+ }
+ } catch (err) {
+ this.addBubble("error");
+ console.error(err);
+ this.end();
+ return;
+ }
+
+ //
+ // Stream text into "ai" bubble as it comes
+ //
+ try {
+ state.streaming = true;
+ responseStream = response.body.getReader();
+
+ // Inject "ai" bubble to stream into
+ this.addBubble("ai");
+
+ // Stream
+ while (true) {
+ const { done, value } = await responseStream.read();
+
+ const textChunk = decoder.decode(value, { stream: true });
+ this.pushAITextChunk(textChunk);
+ output += textChunk;
+
+ if (done || !state.streaming) {
+ break;
+ }
+ }
+
+ // Log and add interaction to history
+ state.log(output, "/api/complete");
+ state.history.push({ role: "user", content: state.message });
+ state.history.push({ role: "assistant", content: output });
+ } finally {
+ // Clear state of that interaction
+ state.searchStatement = "";
+ state.searchTarget = "";
+ state.searchResults = {};
+ state.message = "";
+
+ state.streaming = false;
+ this.end();
+ }
+ };
+
+ /**
+ * Pushes a chunk of text into the last "ai" chat bubble of the list.
+ * @param {string} chunk
+ * @returns {void}
+ */
+ pushAITextChunk = (chunk) => {
+ // Strip common markdown markers
+ // [!] Temporary - should be replaced by proper markdown strip or interpreter.
+ chunk = chunk.replace("**", "");
+ chunk = chunk.replace("##", "");
+ chunk = chunk.replace("###", "");
+
+ const cursor = this.currentAICursorRef;
+ cursor.textContent = cursor.textContent + chunk;
+
+ this.scrollIntoConversation();
+ };
+
+ /**
+ * Automatically scroll to the bottom of the conversation.
+ */
+ scrollIntoConversation = () => {
+ this.scroll({
+ top: this.scrollHeight,
+ left: 0,
+ behavior: "smooth",
+ });
+ };
+
+ renderInnerHTML = () => {
+ this.innerHTML = /*html*/ `
+
+ `;
+ };
+}
+customElements.define("chat-flow", ChatFlow);
diff --git a/olaw/static/components/ChatInput.css b/olaw/static/components/ChatInput.css
new file mode 100644
index 0000000..5007578
--- /dev/null
+++ b/olaw/static/components/ChatInput.css
@@ -0,0 +1,26 @@
+chat-input {
+ display: block;
+ border-bottom: 1px solid var(--background--);
+ padding: 2rem;
+ padding-left: 0rem;
+ padding-right: 0rem;
+}
+
+chat-input textarea {
+ height: 5rem;
+ border-bottom-right-radius: 0px;
+ font-family: var(--font-family-alt);
+}
+
+chat-input button.ask {
+ padding-left: 5ch;
+ padding-right: 5ch;
+ border-top-right-radius: 0px;
+ border-top-left-radius: 0px;
+}
+
+chat-input .actions {
+ text-align: right;
+ position: relative;
+ top: -0.65rem;
+}
\ No newline at end of file
diff --git a/olaw/static/components/ChatInput.js b/olaw/static/components/ChatInput.js
new file mode 100644
index 0000000..6e674cd
--- /dev/null
+++ b/olaw/static/components/ChatInput.js
@@ -0,0 +1,137 @@
+import { state } from "../state.js";
+
+/**
+ * UI Element containing:
+ * - Main text input (message)
+ * - "Ask" button
+ * - "Stop" button
+ * - "Settings" button
+ * - "Inspect" button
+ *
+ * Automatically populates:
+ * - `state.message` (on key up)
+ *
+ * Automatically enables / disables relevant inputs based on app state.
+ */
+export class ChatInput extends HTMLElement {
+ /** Holds reference to interval function calling `this.stateCheck` */
+ stateCheckInterval = null;
+
+ /** Reference to `form > textarea` */
+ inputTextAreaRef = null;
+
+ /** Reference to `form > .actions > button[data-action="stop"]` */
+ stopButtonRef = null;
+
+ /** Reference to `form > .actions > button[data-action="ask"]` */
+ askButtonRef = null;
+
+ connectedCallback() {
+ // Enforce singleton
+ for (const node of [...document.querySelectorAll("chat-flow")].slice(1)) {
+ node.remove();
+ }
+
+ this.renderInnerHTML();
+
+ // Grab shared element references
+ this.inputTextAreaRef = this.querySelector("textarea");
+ this.stopButtonRef = this.querySelector(`button[data-action="stop"]`);
+ this.askButtonRef = this.querySelector(`button[data-action="ask"]`);
+
+ // Event listeners for Settings / Inspect dialogs
+ for (const dialogName of ["settings", "inspect"]) {
+ const button = this.querySelector(
+ `button[data-action="open-${dialogName}"]`
+ );
+
+ button.addEventListener("click", (e) => {
+ e.preventDefault();
+ document.querySelector(`${dialogName}-dialog`).open();
+ });
+ }
+
+ // Event listener for submit ("Ask")
+ this.querySelector("form").addEventListener("submit", (e) => {
+ e.preventDefault();
+ document.querySelector("chat-flow").ask();
+ });
+
+ // Event listener for "Stop"
+ this.stopButtonRef.addEventListener("click", (e) => {
+ e.preventDefault();
+ document.querySelector("chat-flow").stopStreaming();
+ });
+
+ // Event listener to capture text input (message)
+ this.inputTextAreaRef.addEventListener("keyup", (e) => {
+ e.preventDefault();
+ state.message = this.inputTextAreaRef.value.trim();
+ });
+
+ // Check every 100ms what parts of this component need to be disabled
+ this.stateCheckInterval = setInterval(this.stateCheck, 100);
+ }
+
+ disconnectedCallback() {
+ clearInterval(this.stateCheckInterval);
+ }
+
+ /**
+ * Determines what parts of this component need to be disabled based on app state.
+ * To be called periodically.
+ * @returns {void}
+ */
+ stateCheck = () => {
+ // Input textarea: disabled while processing
+ if (state.processing) {
+ this.inputTextAreaRef.setAttribute("disabled", "disabled");
+ this.inputTextAreaRef.value = "Please wait ...";
+ } else {
+ this.inputTextAreaRef.removeAttribute("disabled");
+ }
+
+ // "Ask" button is enabled when:
+ // - A message was provided
+ // - A model was picked
+ // - A temperature was picked
+ // - App is not processing / streaming
+ if (
+ !state.processing &&
+ !state.streaming &&
+ state.model &&
+ state.temperature != null &&
+ state.message
+ ) {
+ this.askButtonRef.removeAttribute("disabled");
+ } else {
+ this.askButtonRef.setAttribute("disabled", "disabled");
+ }
+
+ // "Stop" button: enabled while streaming
+ if (state.streaming) {
+ this.stopButtonRef.removeAttribute("disabled");
+ } else {
+ this.stopButtonRef.setAttribute("disabled", "disabled");
+ }
+ };
+
+ renderInnerHTML = () => {
+ this.innerHTML = /*html*/ `
+
+ `;
+ };
+}
+customElements.define("chat-input", ChatInput);
diff --git a/olaw/static/components/InspectDialog.css b/olaw/static/components/InspectDialog.css
new file mode 100644
index 0000000..be3d49b
--- /dev/null
+++ b/olaw/static/components/InspectDialog.css
@@ -0,0 +1,31 @@
+inspect-dialog {
+ display: block;
+}
+
+inspect-dialog dialog {
+ width: 100ch;
+ height: 90dvh;
+}
+
+inspect-dialog dialog[open] {
+ display: flex;
+ flex-direction: column;
+}
+
+inspect-dialog dialog > p {
+ margin-bottom: 1rem;
+}
+
+inspect-dialog dialog #logs {
+ display: block;
+ height: auto;
+ overflow: scroll;
+ white-space: pre-line;
+ padding: 1rem;
+ line-height: 1.5rem;
+ color: var(--color);
+ background-color: var(--background);
+ font-family: 'Courier New', Courier, monospace;
+ border: 1px solid var(--color--);
+ border-radius: 0.25rem;
+}
diff --git a/olaw/static/components/InspectDialog.js b/olaw/static/components/InspectDialog.js
new file mode 100644
index 0000000..6f7f19b
--- /dev/null
+++ b/olaw/static/components/InspectDialog.js
@@ -0,0 +1,105 @@
+import { state } from "../state.js";
+
+/**
+ * UI Element containing:
+ * - Journal of interactions between user and API
+ *
+ * Hoists `log()` method to state via `state.log()`.
+ */
+export class InspectDialog extends HTMLElement {
+ connectedCallback() {
+ // Enforce singleton
+ for (const node of [...document.querySelectorAll("chat-flow")].slice(1)) {
+ node.remove();
+ }
+
+ this.renderInnerHTML();
+ this.logTextCompletionPrompt();
+ this.logExtractSearchStatementPrompt();
+
+ // Event listener for "close"
+ this.querySelector(".close").addEventListener("click", this.close);
+
+ // Hoist log function to state.log
+ state.log = this.log;
+ }
+
+ /**
+ * Add given text to logs.
+ * Automatically adds system info if state.processing is `true`.
+ * @param {String} text - Text to be logged.
+ * @param {?String} title - Title for log section.
+ * @returns {void}
+ */
+ log = (text, title = "") => {
+ let output = "";
+
+ output += "----------------------------------------\n";
+ output += title ? `${title}\n` : ``;
+ output += `${new Date()}\n`;
+ output += "----------------------------------------\n";
+
+ if (state.processing) {
+ output += `Model: ${state.model}\n`;
+ output += `Temperature: ${state.temperature}\n`;
+ output += `Search Statement: ${state.searchStatement}\n`;
+ output += `Search Target: ${state.searchTarget}\n\n`;
+ }
+
+ output += `${text}\n\n`;
+
+ this.querySelector("#logs").textContent += output;
+ console.log(output);
+ };
+
+ /**
+ * Adds text completion prompt transcript to logs.
+ * @return {void}
+ */
+ logTextCompletionPrompt = () => {
+ let prompt = state.basePrompt.trim();
+ prompt = prompt.replace("{history}", state.historyPrompt.trim());
+ prompt = prompt.replace("{rag}", state.ragPrompt.trim());
+ prompt = prompt.trim();
+ this.log(prompt, "Transcript of the text completion prompt");
+ };
+
+ /**
+ * Adds search statement extraction prompt transcript to logs.
+ * @returns {void}
+ */
+ logExtractSearchStatementPrompt = () => {
+ let prompt = state.extractSearchStatementPrompt.trim();
+ this.log(prompt, "Transcript of the search statement extraction prompt");
+ };
+
+ /**
+ * Opens underlying `<dialog>`.
+ * @returns {void}
+ */
+ open = () => {
+ this.querySelector("dialog").showModal();
+ };
+
+ /**
+ * Closes underlying `<dialog>`.
+ * @returns {void}
+ */
+ close = () => {
+ this.querySelector("dialog").close();
+ };
+
+ renderInnerHTML = () => {
+ this.innerHTML = /*html*/ `
+
+ Close
+ Inspect Session
+
+ This information is also available in the browser's JavaScript console.
+
+
+
+ `;
+ };
+}
+customElements.define("inspect-dialog", InspectDialog);
diff --git a/olaw/static/components/SettingsDialog.css b/olaw/static/components/SettingsDialog.css
new file mode 100644
index 0000000..81252a0
--- /dev/null
+++ b/olaw/static/components/SettingsDialog.css
@@ -0,0 +1,7 @@
+settings-dialog {
+ display: block;
+}
+
+settings-dialog dialog {
+ width: 60ch;
+}
diff --git a/olaw/static/components/SettingsDialog.js b/olaw/static/components/SettingsDialog.js
new file mode 100644
index 0000000..63af09d
--- /dev/null
+++ b/olaw/static/components/SettingsDialog.js
@@ -0,0 +1,108 @@
+import { state } from "../state.js";
+
+/**
+ * UI Element containing:
+ * - "model" setting
+ * - "temperature" setting
+ * - "max_tokens" setting
+ *
+ * Automatically populates:
+ * - `state.model` (on change)
+ * - `state.temperature` (on change)
+ * - `state.maxTokens` (on key up)
+ */
+export class SettingsDialog extends HTMLElement {
+ connectedCallback() {
+ // Enforce singleton
+ for (const node of [...document.querySelectorAll("chat-flow")].slice(1)) {
+ node.remove();
+ }
+
+ this.renderInnerHTML();
+
+ // Event listener for "model" select
+ this.querySelector("#model").addEventListener("change", (e) => {
+ if (state.availableModels.includes(e.target.value)) {
+ state.model = e.target.value;
+ }
+ });
+
+ // Event listener for "temperature" select
+ this.querySelector("#temperature").addEventListener("change", (e) => {
+ const temperature = parseFloat(e.target.value) * 10;
+
+ if (temperature < 0 || temperature > 20) {
+ return;
+ }
+
+ state.temperature = (temperature / 10).toFixed(1);
+ });
+
+ // Event listener for "max_tokens"
+ this.querySelector("#max_tokens").addEventListener("keyup", (e) => {
+ state.maxTokens = parseInt(e.target.value);
+ });
+
+ // Event listener for "close"
+ this.querySelector(".close").addEventListener("click", this.close);
+ }
+
+ /**
+ * Opens underlying `<dialog>`.
+ * @returns {void}
+ */
+ open = () => {
+ this.querySelector("dialog").showModal();
+ };
+
+ /**
+ * Closes underlying `<dialog>`.
+ * @returns {void}
+ */
+ close = () => {
+ this.querySelector("dialog").close();
+ };
+
+ renderInnerHTML = () => {
+ let modelSelectOptions = /*html*/ ``;
+ let temperatureSelectOptions = /*html*/ ``;
+
+ for (const model of state.availableModels) {
+ modelSelectOptions += /*html*/ `
+
+ ${model}
+
+ `;
+ }
+
+ for (let i = 0; i < 21; i++) {
+ const temperature = (i / 10).toFixed(1);
+ temperatureSelectOptions += /*html*/ `${temperature} `;
+ }
+
+ this.innerHTML = /*html*/ `
+
+ Close
+ Settings
+
+
+ Model
+
+ ${modelSelectOptions}
+
+
+ Temperature
+
+ ${temperatureSelectOptions}
+
+
+ Max tokens
+
+
+
+ `;
+ };
+}
+customElements.define("settings-dialog", SettingsDialog);
diff --git a/olaw/static/images/lil.svg b/olaw/static/images/lil.svg
new file mode 100644
index 0000000..17a2466
--- /dev/null
+++ b/olaw/static/images/lil.svg
@@ -0,0 +1,171 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/olaw/static/images/logo.svg b/olaw/static/images/logo.svg
new file mode 100644
index 0000000..ccc4d02
--- /dev/null
+++ b/olaw/static/images/logo.svg
@@ -0,0 +1,27 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/olaw/static/index.css b/olaw/static/index.css
new file mode 100644
index 0000000..90689f5
--- /dev/null
+++ b/olaw/static/index.css
@@ -0,0 +1,230 @@
+:root {
+ --font-family: 'Helvetica Neue', Helvetica, Arial, Sans-serif;
+ --font-family-alt: Georgia, Times, "Times New Roman", serif;
+
+ --background: #F7F6F2;
+ --background-: #e2e2e2;
+ --background--: #cfcfcf;
+
+ --color: #121212;
+ --color-: #2a2a2a;
+ --color--: #4c4c4c;
+
+ --primary: #F4D24B;
+ --primary-: #D6B42D;
+ --primary--: #bb9c20;
+}
+
+* {
+ padding: 0px;
+ margin: 0px;
+ box-sizing: border-box;
+ outline-color: var(--primary-);
+}
+
+html {
+ color: var(--color);
+ font-size: 16px;
+ font-family: var(--font-family);
+ font-weight: normal;
+ background-color: var(--background);
+}
+
+@media (max-width: 381px) {
+ html {
+ font-size: 14px;
+ }
+}
+
+body {
+}
+
+a {
+ color: var(--color--);
+}
+
+a:hover {
+ color: var(--color--);
+ text-decoration: none;
+}
+
+.hidden {
+ display: none !important;
+}
+
+/*------------------------------------------------------------------------------
+ * Forms (generic)
+ -----------------------------------------------------------------------------*/
+label {
+ display: block;
+ margin-bottom: 0.35rem;
+ font-weight: normal;
+ margin-top: 1rem;
+}
+
+select, input[type="text"], input[type="number"], textarea {
+ display: block;
+ width: 100%;
+ padding: 0.45rem;
+ font-size: 0.85rem;
+ border: 1px solid var(--color--);
+ border-radius: 0.25rem;
+ margin-bottom: 0.65rem;
+ -moz-appearance: none;
+ -webkit-appearance: none;
+ appearance: none;
+ font-family: var(--font-family);
+}
+
+textarea {
+ font-size: 1rem;
+ padding: 0.75rem;
+ resize: none;
+ line-height: 1.5rem;
+}
+
+select {
+ background-image: url('data:image/svg+xml;charset=US-ASCII,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20width%3D%22292.4%22%20height%3D%22292.4%22%3E%3Cpath%20fill%3D%22%23121212%22%20d%3D%22M287%2069.4a17.6%2017.6%200%200%200-13-5.4H18.4c-5%200-9.3%201.8-12.9%205.4A17.6%2017.6%200%200%200%200%2082.2c0%205%201.8%209.3%205.4%2012.9l128%20127.9c3.6%203.6%207.8%205.4%2012.8%205.4s9.2-1.8%2012.8-5.4L287%2095c3.5-3.5%205.4-7.8%205.4-12.8%200-5-1.9-9.2-5.5-12.8z%22%2F%3E%3C%2Fsvg%3E');
+ background-repeat: no-repeat, repeat;
+ background-position: right .7em top 50%, 0 0;
+ background-size: .65em auto, 100%;
+}
+
+button {
+ background-color: var(--color);
+ color: var(--background);
+ font-family: var(--font-family);
+ font-weight: bold;
+ padding: 0.75rem;
+ padding-left: 1rem;
+ padding-right: 1rem;
+ font-size: 1rem;
+ border-radius: 0.5rem;
+ border: 0px;
+ transition: all 0.25s ease-in-out;
+ cursor: pointer;
+}
+
+button.hollow {
+ background-color: transparent;
+ font-weight: normal;
+ text-decoration: underline;
+ color: var(--color-);
+}
+
+button span {
+ font-weight: bold;
+}
+
+button:not(:disabled):hover {
+ text-decoration: underline;
+}
+
+button.hollow:not(:disabled):hover {
+ text-decoration: none;
+}
+
+button:disabled {
+ background-color: var(--color-);
+ cursor: not-allowed;
+}
+
+button.hollow:disabled {
+ background-color: transparent;
+ font-weight: normal;
+ color: var(--color--);
+ text-decoration: none;
+}
+
+/*------------------------------------------------------------------------------
+ * Dialog (generic)
+ -----------------------------------------------------------------------------*/
+ dialog {
+ width: 85ch;
+ max-width: 90%;
+ max-height: 80dvh;
+ margin: auto;
+ border: 0px;
+ padding: 2rem;
+ padding-top: 1rem;
+ border-radius: 0.25rem;
+ background-color: var(--color);
+ color: var(--background);
+ border: 1px solid var(--color--);
+}
+
+dialog::backdrop {
+ background-color: rgba(0, 0, 0, 0.65);
+}
+
+/* "Close" button */
+dialog button.close {
+ position: absolute;
+ top: 1.65rem;
+ right: 1.5rem;
+ z-index: 2;
+ background-color: transparent;
+ color: var(--background);
+ text-decoration: underline;
+}
+
+dialog button.close:hover {
+ text-decoration: none;
+}
+
+dialog h2 {
+ position: relative;
+ border-bottom: 1px solid var(--color-);
+ padding-top: 1rem;
+ padding-bottom: 1rem;
+ margin-bottom: 1.5rem;
+ font-weight: normal;
+}
+
+
+/*------------------------------------------------------------------------------
+ * Layout
+ -----------------------------------------------------------------------------*/
+body > main {
+ width: 100%;
+
+ height: 100vh;
+ height: 100dvh;
+ max-width: 80ch;
+
+ margin: auto;
+ padding: 2rem;
+ display: flex;
+ flex-direction: column;
+ justify-content: space-between;
+}
+
+@media (max-width: 759px) {
+ body > main {
+ padding: 1rem;
+ }
+}
+
+body > main #chat {
+ display: flex;
+ height: 100%;
+ flex-direction: column;
+ justify-content: space-between;
+}
+
+body > main footer {
+ padding-top: 1rem;
+ padding-bottom: 1rem;
+ text-align: center;
+}
+
+body > main footer p {
+ font-size: 0.85rem;
+ opacity: 0.8;
+ line-height: 1.5rem;
+}
+
+body > main footer img {
+ max-width: 35ch;
+ padding-top: 0.5rem;
+}
\ No newline at end of file
diff --git a/olaw/static/state.js b/olaw/static/state.js
new file mode 100644
index 0000000..5dd8ec5
--- /dev/null
+++ b/olaw/static/state.js
@@ -0,0 +1,49 @@
+const constants = window.OPEN_LEGAL_RAG_CONST;
+
+/**
+ * @typedef {object} OpenLegalRagState - App-wide "state". All components are assumed to be able to read and write from this object.
+ * @property {boolean} processing - If `true`, the app is considered "busy". Used to control UI state.
+ * @property {boolean} streaming - If `true`, the app is currently streaming content. Used to control UI state.
+ * @property {?string} searchStatement - Latest `search_statement` returned by the API (`/api/extract-search-statement`).
+ * @property {?string} searchTarget - Latest `search_target` returned by the API (`/api/extract-search-statement`).
+ * @property {object} searchResults - Latest output from `/api/search`.
+ * @property {?string} message - Latest message typed by the user.
+ * @property {?string} model - Latest model picked by the user.
+ * @property {?Number} maxTokens - Latest value picked by the user for "max tokens".
+ * @property {{role: string, content: string}[]} history - Keeps track of "basic" chat history. To be fed back to the API with each exchange.
+ * @property {?function} log - Shortcut for InspectDialog.log(text, title).
+ * @property {string} basePrompt - Transcript of the base prompt.
+ * @property {string} historyPrompt - Transcript of the history part of the prompt.
+ * @property {string} ragPrompt - Transcript of the RAG (context) part of the prompt.
+ * @property {string} extractSearchStatementPrompt - Transcript of the prompt used to extract search statement.
+ * @property {string[]} availableModels - List of models that can be used.
+ * @property {string} defaultModel - Model to be used by default.
+ * @property {string[]} availableSearchTargets - List of valid search targets.
+ */
+
+/**
+ * Basic "state" object used across the app to share data.
+ * @type {OpenLegalRagState}
+ */
+export const state = {
+ processing: false,
+ streaming: false,
+ searchStatement: "",
+ searchTarget: "",
+ searchResults: {},
+ message: null,
+ model: constants.default_model,
+ temperature: 0.0,
+ maxTokens: null,
+ history: [],
+
+ log: () => {},
+
+ basePrompt: constants.text_completion_base_prompt,
+ historyPrompt: constants.text_completion_history_prompt,
+ ragPrompt: constants.text_completion_rag_prompt,
+ extractSearchStatementPrompt: constants.extract_search_statement_prompt,
+ availableModels: constants.available_models,
+ defaultModel: constants.default_model,
+ availableSearchTargets: constants.available_search_targets,
+};
diff --git a/olaw/templates/index.html b/olaw/templates/index.html
new file mode 100644
index 0000000..24285c8
--- /dev/null
+++ b/olaw/templates/index.html
@@ -0,0 +1,68 @@
+
+
+
+ Open Legal AI Workbench (OLAW)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ OLAW
+ is an open-source experiment by the
+ Harvard Library Innovation Lab .
+
+
+
+ This is an experimental tool for evaluating legal retrieval software
+ and should not be used for legal advice.
+
+
+
+
+
+
+
+
diff --git a/olaw/utils/__init__.py b/olaw/utils/__init__.py
new file mode 100644
index 0000000..cd29cf4
--- /dev/null
+++ b/olaw/utils/__init__.py
@@ -0,0 +1,3 @@
+from .check_env import check_env
+from .list_available_models import list_available_models
+from .get_limiter import get_limiter
diff --git a/olaw/utils/check_env.py b/olaw/utils/check_env.py
new file mode 100644
index 0000000..1255de5
--- /dev/null
+++ b/olaw/utils/check_env.py
@@ -0,0 +1,28 @@
+import os
+
+
def check_env() -> bool:
    """
    Checks that required env variables are available.
    Throws if properties are missing or unusable.

    Returns:
        True when every required variable is present and non-empty.

    Raises:
        RuntimeError: If a required variable is missing or empty.
            (Subclass of Exception, so existing `except Exception` callers
            keep working.)
    """
    required = (
        "RATE_LIMIT_STORAGE_URI",
        "API_MODELS_RATE_LIMIT",
        "API_EXTRACT_SEARCH_STATEMENT_RATE_LIMIT",
        "API_SEARCH_RATE_LIMIT",
        "API_COMPLETE_RATE_LIMIT",
        "COURT_LISTENER_MAX_RESULTS",
        "EXTRACT_SEARCH_STATEMENT_PROMPT",
        "COURT_LISTENER_API_URL",
        "COURT_LISTENER_BASE_URL",
        "TEXT_COMPLETION_BASE_PROMPT",
        "TEXT_COMPLETION_RAG_PROMPT",
        "TEXT_COMPLETION_HISTORY_PROMPT",
    )

    for prop in required:
        # Empty strings count as "unusable": every required setting needs a
        # real value (a URL, a prompt template, a rate-limit spec, ...).
        if not os.environ.get(prop):
            raise RuntimeError(f"env var {prop} must be defined.")

    return True
diff --git a/olaw/utils/get_limiter.py b/olaw/utils/get_limiter.py
new file mode 100644
index 0000000..f52e6a4
--- /dev/null
+++ b/olaw/utils/get_limiter.py
@@ -0,0 +1,19 @@
+import os
+
+from flask import current_app, jsonify, make_response
+
+from flask_limiter import Limiter
+from flask_limiter.util import get_remote_address
+
+
def get_limiter():
    """
    Builds and returns the rate limiter, bound to the current Flask app.

    Requests are keyed by remote address, counted with a moving window, and
    capped at 120/hour unless a route declares its own limit. Counters live in
    the store pointed at by the RATE_LIMIT_STORAGE_URI env var.
    """
    limiter_settings = {
        "app": current_app,
        "default_limits": ["120 per hour"],
        "storage_uri": os.environ["RATE_LIMIT_STORAGE_URI"],
        "strategy": "moving-window",
    }

    return Limiter(get_remote_address, **limiter_settings)
diff --git a/olaw/utils/list_available_models.py b/olaw/utils/list_available_models.py
new file mode 100644
index 0000000..b33116e
--- /dev/null
+++ b/olaw/utils/list_available_models.py
@@ -0,0 +1,48 @@
+import os
+import traceback
+
+from flask import current_app
+from openai import OpenAI
+import ollama
+
+
def list_available_models() -> list:
    """
    Returns a list of the models the pipeline can talk to based on current environment.

    Aggregates, in order:
    - The model named by OPENAI_COMPATIBLE_MODEL, when OPENAI_BASE_URL points
      at an OpenAI-compatible provider.
    - OpenAI "gpt-4*" models, when OPENAI_API_KEY is set and no custom base
      URL is in use.
    - Every model served by the Ollama instance at OLLAMA_API_URL.

    Providers that cannot be reached are logged and skipped.
    """
    env = os.environ
    models = []

    # OpenAI-compatible provider: the single usable model name comes from the
    # environment rather than from a listing endpoint.
    if env.get("OPENAI_BASE_URL") and env.get("OPENAI_COMPATIBLE_MODEL"):
        models.append(env.get("OPENAI_COMPATIBLE_MODEL"))

    # OpenAI proper (skipped when a custom base URL overrides it).
    if env.get("OPENAI_API_KEY") and not env.get("OPENAI_BASE_URL"):
        try:
            listing = OpenAI().models.list().data
            models += [f"openai/{m.id}" for m in listing if m.id.startswith("gpt-4")]
        except Exception:
            current_app.logger.error("Could not list Open AI models.")
            current_app.logger.error(traceback.format_exc())

    # Ollama: list whatever the local/remote instance currently serves.
    if env.get("OLLAMA_API_URL"):
        try:
            client = ollama.Client(host=env["OLLAMA_API_URL"], timeout=5)
            models += [f"ollama/{entry['name']}" for entry in client.list()["models"]]
        except Exception:
            current_app.logger.error("Could not list Ollama models.")
            current_app.logger.error(traceback.format_exc())

    return models
diff --git a/olaw/views/__init__.py b/olaw/views/__init__.py
new file mode 100644
index 0000000..46ded3b
--- /dev/null
+++ b/olaw/views/__init__.py
@@ -0,0 +1,2 @@
+import olaw.views.ui
+import olaw.views.api
diff --git a/olaw/views/api/__init__.py b/olaw/views/api/__init__.py
new file mode 100644
index 0000000..4dfa4dc
--- /dev/null
+++ b/olaw/views/api/__init__.py
@@ -0,0 +1,4 @@
+from olaw.views.api.complete import post_complete
+from olaw.views.api.extract_search_statement import post_extract_search_statement
+from olaw.views.api.models import get_models
+from olaw.views.api.search import post_search
diff --git a/olaw/views/api/complete.py b/olaw/views/api/complete.py
new file mode 100644
index 0000000..4daeb46
--- /dev/null
+++ b/olaw/views/api/complete.py
@@ -0,0 +1,213 @@
+import os
+import traceback
+
+from flask import current_app, jsonify, request, Response
+from openai import OpenAI
+import ollama
+
+from olaw.utils import list_available_models, get_limiter
+from olaw.search_targets import SEARCH_TARGETS, SearchTarget, CourtListener
+
+
API_COMPLETE_RATE_LIMIT = os.environ["API_COMPLETE_RATE_LIMIT"]


@current_app.route("/api/complete", methods=["POST"])
@get_limiter().limit(API_COMPLETE_RATE_LIMIT)
def post_complete():
    """
    [POST] /api/complete

    Runs text completion against the requested model and streams the output
    back as "text/plain".

    Accepts JSON body with the following properties:
    - "message": User prompt (required)
    - "model": One of the models /api/models lists (required)
    - "temperature": Defaults to 0.0
    - "search_results": Output from /api/search.
    - "max_tokens": If provided, caps number of tokens that will be generated in response.
    - "history": A list of chat completion objects representing the chat history. Each object must contain "role" and "content".

    Example of a "history" list:
    ```
    [
        {"role": "user", "content": "Foo bar"},
        {"role": "assistant", "content": "Bar baz"}
    ]
    ```
    """
    available_models = list_available_models()

    # NOTE: renamed from "input" so the builtin of the same name isn't shadowed.
    payload = request.get_json()

    model = None
    message = None
    search_results = {}
    temperature = 0.0
    max_tokens = None

    prompt = os.environ["TEXT_COMPLETION_BASE_PROMPT"]  # Contains {history} and {rag}
    rag_prompt = os.environ["TEXT_COMPLETION_RAG_PROMPT"]  # Template for {rag}
    history_prompt = os.environ["TEXT_COMPLETION_HISTORY_PROMPT"]  # Template for {history}

    history = []  # Chat completion objects keeping track of exchanges

    #
    # Check that "model" was provided and is available
    #
    if "model" not in payload:
        return jsonify({"error": "No model provided."}), 400

    if payload["model"] not in available_models:
        return jsonify({"error": "Requested model is invalid or not available."}), 400

    model = payload["model"]

    #
    # Check that "message" was provided
    #
    if "message" not in payload:
        return jsonify({"error": "No message provided."}), 400

    message = str(payload["message"]).strip()

    if not message:
        return jsonify({"error": "Message cannot be empty."}), 400

    #
    # Validate "search_results" if provided
    #
    # BUGFIX: the per-entry check now runs inside the per-target loop. The
    # previous version only validated entries under the loop's *last* key, and
    # raised NameError (-> spurious 400) when an empty dict was provided.
    if "search_results" in payload:
        try:
            base_keys = set(SearchTarget.RESULTS_DATA_FORMAT.keys())

            for top_level_key, results in payload["search_results"].items():
                # Top-level keys must be part of SEARCH_TARGETS
                if top_level_key not in SEARCH_TARGETS:
                    raise ValueError(f"Invalid search target: {top_level_key}")

                # Each entry must contain at least the base result keys.
                for result in results:
                    if not base_keys.issubset(result.keys()):
                        raise ValueError("Search result entry is missing base keys.")

            search_results = payload["search_results"]
        except Exception:
            return (
                jsonify({"error": "search_results must be the output of /api/search."}),
                400,
            )

    #
    # Validate "temperature" if provided
    #
    if "temperature" in payload:
        try:
            temperature = float(payload["temperature"])

            # Explicit check rather than assert: asserts vanish under "python -O".
            if temperature < 0.0:
                raise ValueError("temperature must be >= 0.0")
        except Exception:
            return (
                jsonify({"error": "temperature must be a float superior or equal to 0.0."}),
                400,
            )

    #
    # Validate "max_tokens" if provided
    #
    if "max_tokens" in payload and payload["max_tokens"] is not None:
        try:
            max_tokens = int(payload["max_tokens"])

            if max_tokens <= 0:
                raise ValueError("max_tokens must be > 0")
        except Exception:
            return (jsonify({"error": "max_tokens must be an int superior to 0."}), 400)

    #
    # Validate "history" if provided
    #
    if "history" in payload:
        try:
            for past_message in payload["history"]:
                # KeyError (missing key) or ValueError (empty value) both land
                # in the except below, matching the original assert behavior.
                if not past_message["role"] or not past_message["content"]:
                    raise ValueError("History entries need non-empty role and content.")

                history.append(past_message)
        except Exception:
            return (
                jsonify({"error": "past_messages must be an array of chat completion objects."}),
                400,
            )

    #
    # Assemble shell prompt
    #
    history_txt = ""
    search_results_txt = ""

    # History: flatten past exchanges into "role: content" lines.
    for past_message in history:
        history_txt += f"{past_message['role']}: {past_message['content']}\n"

    if history_txt:
        history_prompt = history_prompt.replace("{history}", history_txt)
        prompt = prompt.replace("{history}", history_prompt)
    else:
        prompt = prompt.replace("{history}", "")

    #
    # Assemble context: concatenate the text of every search result, per target.
    #
    for search_target in SEARCH_TARGETS:
        if not search_results.get(search_target):
            continue

        for result in search_results[search_target]:
            search_results_txt += result["prompt_text"] + "\n"
            search_results_txt += result["text"]
            search_results_txt += "\n\n"

    if search_results_txt:
        rag_prompt = rag_prompt.replace("{context}", search_results_txt)
        prompt = prompt.replace("{rag}", rag_prompt)
    else:
        prompt = prompt.replace("{rag}", "")

    # Message
    prompt = prompt.replace("{request}", message)
    prompt = prompt.strip()

    #
    # Run completion and stream chunks back as plain text.
    #
    try:
        # Ollama
        if model.startswith("ollama"):
            ollama_client = ollama.Client(host=os.environ["OLLAMA_API_URL"])

            options = {"temperature": temperature}

            # BUGFIX: honor "max_tokens" for Ollama too ("num_predict" caps the
            # number of generated tokens); it was previously OpenAI-only.
            if max_tokens:
                options["num_predict"] = max_tokens

            stream = ollama_client.chat(
                model=model.replace("ollama/", ""),
                options=options,
                messages=[{"role": "user", "content": prompt}],
                stream=True,
            )

            def generate_ollama():
                for chunk in stream:
                    yield chunk["message"]["content"] or ""

            return Response(generate_ollama(), mimetype="text/plain")
        # OpenAI / OpenAI-compatible
        else:
            openai_client = OpenAI()

            stream = openai_client.chat.completions.create(
                model=model.replace("openai/", ""),
                temperature=temperature,
                max_tokens=max_tokens if max_tokens else None,
                messages=[{"role": "user", "content": prompt}],
                stream=True,
            )

            def generate_openai():
                for chunk in stream:
                    yield chunk.choices[0].delta.content or ""

            return Response(generate_openai(), mimetype="text/plain")
    except Exception:
        current_app.logger.error(traceback.format_exc())
        return jsonify({"error": f"Could not run completion against {model}."}), 500
diff --git a/olaw/views/api/extract_search_statement.py b/olaw/views/api/extract_search_statement.py
new file mode 100644
index 0000000..0427ea3
--- /dev/null
+++ b/olaw/views/api/extract_search_statement.py
@@ -0,0 +1,130 @@
+import os
+import traceback
+import json
+
+from flask import current_app, jsonify, request
+from openai import OpenAI
+import ollama
+
+from olaw.utils import list_available_models, get_limiter
+
API_EXTRACT_SEARCH_STATEMENT_RATE_LIMIT = os.environ["API_EXTRACT_SEARCH_STATEMENT_RATE_LIMIT"]


@current_app.route("/api/extract-search-statement", methods=["POST"])
@get_limiter().limit(API_EXTRACT_SEARCH_STATEMENT_RATE_LIMIT)
def post_extract_search_statement():
    """
    [POST] /api/extract-search-statement

    Uses an LLM to analyze a message and, if a legal question is detected:
    - Indicate what API is best suited for that query
    - Returns a search statement for said API.

    Edit EXTRACT_SEARCH_STATEMENT_PROMPT to alter behavior.

    Accepts JSON body with the following properties:
    - "model": One of the models /api/models lists (required)
    - "message": User prompt (required)
    - "temperature": Defaults to 0.0

    Returns JSON:
    - {"search_target": str, "search_statement": str}
    """
    available_models = list_available_models()

    # NOTE: renamed from "input" so the builtin of the same name isn't shadowed.
    payload = request.get_json()

    model = ""
    message = ""
    temperature = 0.0
    prompt = os.environ["EXTRACT_SEARCH_STATEMENT_PROMPT"]
    output = ""
    timeout = 30  # Seconds before giving up on the inference API.

    #
    # Check that "model" was provided and is available
    #
    if "model" not in payload:
        return jsonify({"error": "No model provided."}), 400

    if payload["model"] not in available_models:
        return jsonify({"error": "Requested model is invalid or not available."}), 400

    model = payload["model"]

    #
    # Check that "message" was provided
    #
    if "message" not in payload:
        return jsonify({"error": "No message provided."}), 400

    message = str(payload["message"]).strip()

    if not message:
        return jsonify({"error": "Message cannot be empty."}), 400

    #
    # Validate "temperature" if provided
    #
    if "temperature" in payload:
        try:
            temperature = float(payload["temperature"])

            # Explicit check rather than assert: asserts vanish under "python -O".
            if temperature < 0.0:
                raise ValueError("temperature must be >= 0.0")
        except Exception:
            return (
                jsonify({"error": "temperature must be a float superior or equal to 0.0."}),
                400,
            )

    #
    # Ask model to filter out and extract search query
    #
    prompt = f"{prompt}\n{message}"

    try:
        # Ollama
        if model.startswith("ollama"):
            ollama_client = ollama.Client(
                host=os.environ["OLLAMA_API_URL"],
                timeout=timeout,
            )

            response = ollama_client.chat(
                model=model.replace("ollama/", ""),
                options={"temperature": temperature},
                format="json",  # Ask Ollama to constrain output to valid JSON
                messages=[{"role": "user", "content": prompt}],
            )

            output = response["message"]["content"]
        # OpenAI / OpenAI-compatible
        else:
            openai_client = OpenAI()

            response = openai_client.chat.completions.create(
                model=model.replace("openai/", ""),
                temperature=temperature,
                messages=[{"role": "user", "content": prompt}],
                response_format={"type": "json_object"},
                timeout=timeout,
            )

            # Read the completion text directly off the response object rather
            # than round-tripping the whole response through JSON.
            output = response.choices[0].message.content

    except Exception:
        current_app.logger.error(traceback.format_exc())
        return jsonify({"error": f"Could not run completion against {model}."}), 500

    #
    # Check output format: exactly the two expected keys, each a str or None.
    # Explicit checks rather than asserts, which are stripped under "python -O".
    #
    try:
        output = json.loads(output)

        if set(output.keys()) != {"search_statement", "search_target"}:
            raise ValueError("Output must contain exactly search_statement and search_target.")

        for key in ("search_statement", "search_target"):
            if not (output[key] is None or isinstance(output[key], str)):
                raise ValueError(f"{key} must be a string or null.")
    except Exception:
        current_app.logger.error(traceback.format_exc())
        return jsonify({"error": f"{model} returned invalid JSON."}), 500

    return jsonify(output), 200
diff --git a/olaw/views/api/models.py b/olaw/views/api/models.py
new file mode 100644
index 0000000..0b60362
--- /dev/null
+++ b/olaw/views/api/models.py
@@ -0,0 +1,19 @@
+import os
+
+from flask import current_app, jsonify
+from openai import OpenAI
+
+from olaw.utils import list_available_models, get_limiter
+
API_MODELS_RATE_LIMIT = os.environ["API_MODELS_RATE_LIMIT"]


@current_app.route("/api/models")
@get_limiter().limit(API_MODELS_RATE_LIMIT)
def get_models():
    """
    [GET] /api/models

    Lists the text completion models currently available / suitable for the
    pipeline, as a JSON array of model identifiers.
    """
    available_models = list_available_models()

    return jsonify(available_models), 200
diff --git a/olaw/views/api/search.py b/olaw/views/api/search.py
new file mode 100644
index 0000000..361c314
--- /dev/null
+++ b/olaw/views/api/search.py
@@ -0,0 +1,72 @@
+import os
+import traceback
+from flask import current_app, jsonify, request
+
+from olaw.utils import get_limiter
+from olaw.search_targets import SEARCH_TARGETS, route_search
+
API_SEARCH_RATE_LIMIT = os.environ["API_SEARCH_RATE_LIMIT"]


@current_app.route("/api/search", methods=["POST"])
@get_limiter().limit(API_SEARCH_RATE_LIMIT)
def post_search():
    """
    [POST] /api/search

    Runs a search statement against a legal database and returns up to X results.
    Target legal API is determined by "search_target".
    See SEARCH_TARGETS for a list of available targets.

    Accepts JSON body with the following properties, coming from `/api/extract-search-statement`:
    - "search_statement": Search statement to be used against the search target
    - "search_target": Determines the search "tool" to be used.

    Returns JSON object in the following format:
    {
        "{search_target}": [... results]
    }
    """
    # NOTE: renamed from "input" so the builtin of the same name isn't shadowed.
    payload = request.get_json()

    # One (initially empty) result list per known search target.
    output = {target: [] for target in SEARCH_TARGETS}

    #
    # Check that "search_statement" was provided
    #
    if "search_statement" not in payload:
        return jsonify({"error": "No search statement provided."}), 400

    search_statement = str(payload["search_statement"]).strip()

    if not search_statement:
        return jsonify({"error": "Search statement cannot be empty."}), 400

    #
    # Check that "search_target" was provided and valid
    #
    if "search_target" not in payload:
        return jsonify({"error": "No search target provided."}), 400

    search_target = str(payload["search_target"]).strip()

    if not search_target:
        return jsonify({"error": "Search target cannot be empty."}), 400

    if search_target not in SEARCH_TARGETS:
        return jsonify({"error": f"Search target can only be: {','.join(SEARCH_TARGETS)}."}), 400

    #
    # "search_target" routing
    #
    try:
        output[search_target] = route_search(search_target, search_statement)
    except Exception:
        current_app.logger.error(traceback.format_exc())
        return jsonify({"error": f"Could not search for court opinions on {search_target}."}), 500

    return jsonify(output), 200
diff --git a/olaw/views/ui.py b/olaw/views/ui.py
new file mode 100644
index 0000000..d4fb9e1
--- /dev/null
+++ b/olaw/views/ui.py
@@ -0,0 +1,46 @@
+import os
+import json
+
+from flask import current_app, render_template
+
+from olaw.search_targets import SEARCH_TARGETS
+from olaw.utils import list_available_models
+
+
@current_app.route("/")
def get_root():
    """
    [GET] /
    Renders main page, passing app-wide constants to the template as JSON.
    """
    available_models = list_available_models()

    #
    # Select default model:
    # prefer OpenAI's GPT-4 Turbo preview, otherwise fall back on an
    # Ollama-served Mixtral model.
    #
    default_model = ""

    if "openai/gpt-4-turbo-preview" in available_models:
        default_model = "openai/gpt-4-turbo-preview"
    else:
        mixtral_models = [
            model for model in available_models if model.startswith("ollama/mixtral")
        ]

        if mixtral_models:
            # Keep the last match — same pick as the original scan loop.
            default_model = mixtral_models[-1]

    #
    # Compile consts to be passed to app
    #
    app_consts = {
        "available_search_targets": SEARCH_TARGETS,
        "available_models": available_models,
        "default_model": default_model,
        "extract_search_statement_prompt": os.environ["EXTRACT_SEARCH_STATEMENT_PROMPT"],
        "text_completion_base_prompt": os.environ["TEXT_COMPLETION_BASE_PROMPT"],
        "text_completion_rag_prompt": os.environ["TEXT_COMPLETION_RAG_PROMPT"],
        "text_completion_history_prompt": os.environ["TEXT_COMPLETION_HISTORY_PROMPT"],
    }

    return (
        render_template("index.html", app_consts=json.dumps(app_consts)),
        200,
    )
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..a569f0b
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,1035 @@
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
+
+[[package]]
+name = "annotated-types"
+version = "0.6.0"
+description = "Reusable constraint types to use with typing.Annotated"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"},
+ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"},
+]
+
+[[package]]
+name = "anyio"
+version = "4.2.0"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"},
+ {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"},
+]
+
+[package.dependencies]
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.23)"]
+
+[[package]]
+name = "black"
+version = "23.12.1"
+description = "The uncompromising code formatter."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "black-23.12.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0aaf6041986767a5e0ce663c7a2f0e9eaf21e6ff87a5f95cbf3675bfd4c41d2"},
+ {file = "black-23.12.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c88b3711d12905b74206227109272673edce0cb29f27e1385f33b0163c414bba"},
+ {file = "black-23.12.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a920b569dc6b3472513ba6ddea21f440d4b4c699494d2e972a1753cdc25df7b0"},
+ {file = "black-23.12.1-cp310-cp310-win_amd64.whl", hash = "sha256:3fa4be75ef2a6b96ea8d92b1587dd8cb3a35c7e3d51f0738ced0781c3aa3a5a3"},
+ {file = "black-23.12.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8d4df77958a622f9b5a4c96edb4b8c0034f8434032ab11077ec6c56ae9f384ba"},
+ {file = "black-23.12.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:602cfb1196dc692424c70b6507593a2b29aac0547c1be9a1d1365f0d964c353b"},
+ {file = "black-23.12.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c4352800f14be5b4864016882cdba10755bd50805c95f728011bcb47a4afd59"},
+ {file = "black-23.12.1-cp311-cp311-win_amd64.whl", hash = "sha256:0808494f2b2df923ffc5723ed3c7b096bd76341f6213989759287611e9837d50"},
+ {file = "black-23.12.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:25e57fd232a6d6ff3f4478a6fd0580838e47c93c83eaf1ccc92d4faf27112c4e"},
+ {file = "black-23.12.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2d9e13db441c509a3763a7a3d9a49ccc1b4e974a47be4e08ade2a228876500ec"},
+ {file = "black-23.12.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d1bd9c210f8b109b1762ec9fd36592fdd528485aadb3f5849b2740ef17e674e"},
+ {file = "black-23.12.1-cp312-cp312-win_amd64.whl", hash = "sha256:ae76c22bde5cbb6bfd211ec343ded2163bba7883c7bc77f6b756a1049436fbb9"},
+ {file = "black-23.12.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1fa88a0f74e50e4487477bc0bb900c6781dbddfdfa32691e780bf854c3b4a47f"},
+ {file = "black-23.12.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a4d6a9668e45ad99d2f8ec70d5c8c04ef4f32f648ef39048d010b0689832ec6d"},
+ {file = "black-23.12.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b18fb2ae6c4bb63eebe5be6bd869ba2f14fd0259bda7d18a46b764d8fb86298a"},
+ {file = "black-23.12.1-cp38-cp38-win_amd64.whl", hash = "sha256:c04b6d9d20e9c13f43eee8ea87d44156b8505ca8a3c878773f68b4e4812a421e"},
+ {file = "black-23.12.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e1b38b3135fd4c025c28c55ddfc236b05af657828a8a6abe5deec419a0b7055"},
+ {file = "black-23.12.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4f0031eaa7b921db76decd73636ef3a12c942ed367d8c3841a0739412b260a54"},
+ {file = "black-23.12.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97e56155c6b737854e60a9ab1c598ff2533d57e7506d97af5481141671abf3ea"},
+ {file = "black-23.12.1-cp39-cp39-win_amd64.whl", hash = "sha256:dd15245c8b68fe2b6bd0f32c1556509d11bb33aec9b5d0866dd8e2ed3dba09c2"},
+ {file = "black-23.12.1-py3-none-any.whl", hash = "sha256:78baad24af0f033958cad29731e27363183e140962595def56423e626f4bee3e"},
+ {file = "black-23.12.1.tar.gz", hash = "sha256:4ce3ef14ebe8d9509188014d96af1c456a910d5b5cbf434a09fef7e024b3d0d5"},
+]
+
+[package.dependencies]
+click = ">=8.0.0"
+mypy-extensions = ">=0.4.3"
+packaging = ">=22.0"
+pathspec = ">=0.9.0"
+platformdirs = ">=2"
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"]
+jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
+[[package]]
+name = "blinker"
+version = "1.7.0"
+description = "Fast, simple object-to-object and broadcast signaling"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "blinker-1.7.0-py3-none-any.whl", hash = "sha256:c3f865d4d54db7abc53758a01601cf343fe55b84c1de4e3fa910e420b438d5b9"},
+ {file = "blinker-1.7.0.tar.gz", hash = "sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182"},
+]
+
+[[package]]
+name = "certifi"
+version = "2024.2.2"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"},
+ {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
+ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.7"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "deprecated"
+version = "1.2.14"
+description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"},
+ {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"},
+]
+
+[package.dependencies]
+wrapt = ">=1.10,<2"
+
+[package.extras]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"]
+
+[[package]]
+name = "distro"
+version = "1.9.0"
+description = "Distro - an OS platform information API"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
+ {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
+]
+
+[[package]]
+name = "flake8"
+version = "6.1.0"
+description = "the modular source code checker: pep8 pyflakes and co"
+optional = false
+python-versions = ">=3.8.1"
+files = [
+ {file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"},
+ {file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"},
+]
+
+[package.dependencies]
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.11.0,<2.12.0"
+pyflakes = ">=3.1.0,<3.2.0"
+
+[[package]]
+name = "flask"
+version = "3.0.2"
+description = "A simple framework for building complex web applications."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "flask-3.0.2-py3-none-any.whl", hash = "sha256:3232e0e9c850d781933cf0207523d1ece087eb8d87b23777ae38456e2fbe7c6e"},
+ {file = "flask-3.0.2.tar.gz", hash = "sha256:822c03f4b799204250a7ee84b1eddc40665395333973dfb9deebfe425fefcb7d"},
+]
+
+[package.dependencies]
+blinker = ">=1.6.2"
+click = ">=8.1.3"
+itsdangerous = ">=2.1.2"
+Jinja2 = ">=3.1.2"
+Werkzeug = ">=3.0.0"
+
+[package.extras]
+async = ["asgiref (>=3.2)"]
+dotenv = ["python-dotenv"]
+
+[[package]]
+name = "flask-limiter"
+version = "3.5.1"
+description = "Rate limiting for flask applications"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "Flask-Limiter-3.5.1.tar.gz", hash = "sha256:8117e1040e5d5c31bf667d3b649fcba325f979d814a3d76a3a2331c3eab63c5e"},
+ {file = "Flask_Limiter-3.5.1-py3-none-any.whl", hash = "sha256:d40526719994197da180caa870ba01e722ed6a70a75790021638dbf29aae82ee"},
+]
+
+[package.dependencies]
+Flask = ">=2"
+limits = ">=2.8"
+ordered-set = ">4,<5"
+rich = ">=12,<14"
+typing-extensions = ">=4"
+
+[package.extras]
+memcached = ["limits[memcached]"]
+mongodb = ["limits[mongodb]"]
+redis = ["limits[redis]"]
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]
+
+[[package]]
+name = "html2text"
+version = "2020.1.16"
+description = "Turn HTML into equivalent Markdown-structured text."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "html2text-2020.1.16-py3-none-any.whl", hash = "sha256:c7c629882da0cf377d66f073329ccf34a12ed2adf0169b9285ae4e63ef54c82b"},
+ {file = "html2text-2020.1.16.tar.gz", hash = "sha256:e296318e16b059ddb97f7a8a1d6a5c1d7af4544049a01e261731d2d5cc277bbb"},
+]
+
+[[package]]
+name = "httpcore"
+version = "1.0.3"
+description = "A minimal low-level HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpcore-1.0.3-py3-none-any.whl", hash = "sha256:9a6a501c3099307d9fd76ac244e08503427679b1e81ceb1d922485e2f2462ad2"},
+ {file = "httpcore-1.0.3.tar.gz", hash = "sha256:5c0f9546ad17dac4d0772b0808856eb616eb8b48ce94f49ed819fd6982a8a544"},
+]
+
+[package.dependencies]
+certifi = "*"
+h11 = ">=0.13,<0.15"
+
+[package.extras]
+asyncio = ["anyio (>=4.0,<5.0)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+trio = ["trio (>=0.22.0,<0.24.0)"]
+
+[[package]]
+name = "httpx"
+version = "0.25.2"
+description = "The next generation HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"},
+ {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"},
+]
+
+[package.dependencies]
+anyio = "*"
+certifi = "*"
+httpcore = "==1.*"
+idna = "*"
+sniffio = "*"
+
+[package.extras]
+brotli = ["brotli", "brotlicffi"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "idna"
+version = "3.6"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"},
+ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
+]
+
+[[package]]
+name = "importlib-resources"
+version = "6.1.1"
+description = "Read resources from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_resources-6.1.1-py3-none-any.whl", hash = "sha256:e8bf90d8213b486f428c9c39714b920041cb02c184686a3dee24905aaa8105d6"},
+ {file = "importlib_resources-6.1.1.tar.gz", hash = "sha256:3893a00122eafde6894c59914446a512f728a0c1a45f9bb9b63721b6bacf0b4a"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"]
+
+[[package]]
+name = "itsdangerous"
+version = "2.1.2"
+description = "Safely pass data to untrusted environments and back."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"},
+ {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"},
+]
+
+[[package]]
+name = "jinja2"
+version = "3.1.3"
+description = "A very fast and expressive template engine."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"},
+ {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "limits"
+version = "3.9.0"
+description = "Rate limiting utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "limits-3.9.0-py3-none-any.whl", hash = "sha256:6dce07d1a4d7bd3361d36f59f3f43c4f39675001daeeae2617c3be42d718daa8"},
+ {file = "limits-3.9.0.tar.gz", hash = "sha256:7b44aa4d05c539276928372681190136914958cccbb99c30ecc5df72a179661a"},
+]
+
+[package.dependencies]
+deprecated = ">=1.2"
+importlib-resources = ">=1.3"
+packaging = ">=21,<24"
+typing-extensions = "*"
+
+[package.extras]
+all = ["aetcd", "coredis (>=3.4.0,<5)", "emcache (>=0.6.1)", "emcache (>=1)", "etcd3", "motor (>=3,<4)", "pymemcache (>3,<5.0.0)", "pymongo (>4.1,<5)", "redis (>3,!=4.5.2,!=4.5.3,<6.0.0)", "redis (>=4.2.0,!=4.5.2,!=4.5.3)"]
+async-etcd = ["aetcd"]
+async-memcached = ["emcache (>=0.6.1)", "emcache (>=1)"]
+async-mongodb = ["motor (>=3,<4)"]
+async-redis = ["coredis (>=3.4.0,<5)"]
+etcd = ["etcd3"]
+memcached = ["pymemcache (>3,<5.0.0)"]
+mongodb = ["pymongo (>4.1,<5)"]
+redis = ["redis (>3,!=4.5.2,!=4.5.3,<6.0.0)"]
+rediscluster = ["redis (>=4.2.0,!=4.5.2,!=4.5.3)"]
+
+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
+ {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
+]
+
+[package.dependencies]
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+benchmarking = ["psutil", "pytest", "pytest-benchmark"]
+code-style = ["pre-commit (>=3.0,<4.0)"]
+compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
+linkify = ["linkify-it-py (>=1,<3)"]
+plugins = ["mdit-py-plugins"]
+profiling = ["gprof2dot"]
+rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
+
+[[package]]
+name = "markupsafe"
+version = "2.1.5"
+description = "Safely add untrusted strings to HTML/XML markup."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"},
+ {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"},
+ {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"},
+ {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"},
+ {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"},
+ {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"},
+ {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"},
+ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"},
+]
+
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+description = "McCabe checker, plugin for flake8"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+
+[[package]]
+name = "mypy-extensions"
+version = "1.0.0"
+description = "Type system extensions for programs checked with the mypy type checker."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
+ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
+]
+
+[[package]]
+name = "ollama"
+version = "0.1.6"
+description = "The official Python client for Ollama."
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "ollama-0.1.6-py3-none-any.whl", hash = "sha256:e37f0455025ed5846879551ca2030ec93a71a823395d3517c14d71479ccbdd11"},
+ {file = "ollama-0.1.6.tar.gz", hash = "sha256:6636ff75ae54ac076522dcdc40678b141208325d1cc5d85785559f197b1107de"},
+]
+
+[package.dependencies]
+httpx = ">=0.25.2,<0.26.0"
+
+[[package]]
+name = "openai"
+version = "1.12.0"
+description = "The official Python library for the openai API"
+optional = false
+python-versions = ">=3.7.1"
+files = [
+ {file = "openai-1.12.0-py3-none-any.whl", hash = "sha256:a54002c814e05222e413664f651b5916714e4700d041d5cf5724d3ae1a3e3481"},
+ {file = "openai-1.12.0.tar.gz", hash = "sha256:99c5d257d09ea6533d689d1cc77caa0ac679fa21efef8893d8b0832a86877f1b"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+tqdm = ">4"
+typing-extensions = ">=4.7,<5"
+
+[package.extras]
+datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
+
+[[package]]
+name = "ordered-set"
+version = "4.1.0"
+description = "An OrderedSet is a custom MutableSet that remembers its order, so that every"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ordered-set-4.1.0.tar.gz", hash = "sha256:694a8e44c87657c59292ede72891eb91d34131f6531463aab3009191c77364a8"},
+ {file = "ordered_set-4.1.0-py3-none-any.whl", hash = "sha256:046e1132c71fcf3330438a539928932caf51ddbc582496833e23de611de14562"},
+]
+
+[package.extras]
+dev = ["black", "mypy", "pytest"]
+
+[[package]]
+name = "packaging"
+version = "23.2"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
+]
+
+[[package]]
+name = "pathspec"
+version = "0.12.1"
+description = "Utility library for gitignore style pattern matching of file paths."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
+ {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
+]
+
+[[package]]
+name = "platformdirs"
+version = "4.2.0"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"},
+ {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"]
+
+[[package]]
+name = "pycodestyle"
+version = "2.11.1"
+description = "Python style guide checker"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"},
+ {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"},
+]
+
+[[package]]
+name = "pydantic"
+version = "2.6.1"
+description = "Data validation using Python type hints"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic-2.6.1-py3-none-any.whl", hash = "sha256:0b6a909df3192245cb736509a92ff69e4fef76116feffec68e93a567347bae6f"},
+ {file = "pydantic-2.6.1.tar.gz", hash = "sha256:4fd5c182a2488dc63e6d32737ff19937888001e2a6d86e94b3f233104a5d1fa9"},
+]
+
+[package.dependencies]
+annotated-types = ">=0.4.0"
+pydantic-core = "2.16.2"
+typing-extensions = ">=4.6.1"
+
+[package.extras]
+email = ["email-validator (>=2.0.0)"]
+
+[[package]]
+name = "pydantic-core"
+version = "2.16.2"
+description = ""
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pydantic_core-2.16.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3fab4e75b8c525a4776e7630b9ee48aea50107fea6ca9f593c98da3f4d11bf7c"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8bde5b48c65b8e807409e6f20baee5d2cd880e0fad00b1a811ebc43e39a00ab2"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2924b89b16420712e9bb8192396026a8fbd6d8726224f918353ac19c4c043d2a"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:16aa02e7a0f539098e215fc193c8926c897175d64c7926d00a36188917717a05"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:936a787f83db1f2115ee829dd615c4f684ee48ac4de5779ab4300994d8af325b"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:459d6be6134ce3b38e0ef76f8a672924460c455d45f1ad8fdade36796df1ddc8"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9ee4febb249c591d07b2d4dd36ebcad0ccd128962aaa1801508320896575ef"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40a0bd0bed96dae5712dab2aba7d334a6c67cbcac2ddfca7dbcc4a8176445990"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:870dbfa94de9b8866b37b867a2cb37a60c401d9deb4a9ea392abf11a1f98037b"},
+ {file = "pydantic_core-2.16.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:308974fdf98046db28440eb3377abba274808bf66262e042c412eb2adf852731"},
+ {file = "pydantic_core-2.16.2-cp310-none-win32.whl", hash = "sha256:a477932664d9611d7a0816cc3c0eb1f8856f8a42435488280dfbf4395e141485"},
+ {file = "pydantic_core-2.16.2-cp310-none-win_amd64.whl", hash = "sha256:8f9142a6ed83d90c94a3efd7af8873bf7cefed2d3d44387bf848888482e2d25f"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:406fac1d09edc613020ce9cf3f2ccf1a1b2f57ab00552b4c18e3d5276c67eb11"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ce232a6170dd6532096cadbf6185271e4e8c70fc9217ebe105923ac105da9978"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a90fec23b4b05a09ad988e7a4f4e081711a90eb2a55b9c984d8b74597599180f"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8aafeedb6597a163a9c9727d8a8bd363a93277701b7bfd2749fbefee2396469e"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9957433c3a1b67bdd4c63717eaf174ebb749510d5ea612cd4e83f2d9142f3fc8"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0d7a9165167269758145756db43a133608a531b1e5bb6a626b9ee24bc38a8f7"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dffaf740fe2e147fedcb6b561353a16243e654f7fe8e701b1b9db148242e1272"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f8ed79883b4328b7f0bd142733d99c8e6b22703e908ec63d930b06be3a0e7113"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:cf903310a34e14651c9de056fcc12ce090560864d5a2bb0174b971685684e1d8"},
+ {file = "pydantic_core-2.16.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46b0d5520dbcafea9a8645a8164658777686c5c524d381d983317d29687cce97"},
+ {file = "pydantic_core-2.16.2-cp311-none-win32.whl", hash = "sha256:70651ff6e663428cea902dac297066d5c6e5423fda345a4ca62430575364d62b"},
+ {file = "pydantic_core-2.16.2-cp311-none-win_amd64.whl", hash = "sha256:98dc6f4f2095fc7ad277782a7c2c88296badcad92316b5a6e530930b1d475ebc"},
+ {file = "pydantic_core-2.16.2-cp311-none-win_arm64.whl", hash = "sha256:ef6113cd31411eaf9b39fc5a8848e71c72656fd418882488598758b2c8c6dfa0"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:88646cae28eb1dd5cd1e09605680c2b043b64d7481cdad7f5003ebef401a3039"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7b883af50eaa6bb3299780651e5be921e88050ccf00e3e583b1e92020333304b"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bf26c2e2ea59d32807081ad51968133af3025c4ba5753e6a794683d2c91bf6e"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99af961d72ac731aae2a1b55ccbdae0733d816f8bfb97b41909e143de735f522"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02906e7306cb8c5901a1feb61f9ab5e5c690dbbeaa04d84c1b9ae2a01ebe9379"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5362d099c244a2d2f9659fb3c9db7c735f0004765bbe06b99be69fbd87c3f15"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ac426704840877a285d03a445e162eb258924f014e2f074e209d9b4ff7bf380"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b94cbda27267423411c928208e89adddf2ea5dd5f74b9528513f0358bba019cb"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6db58c22ac6c81aeac33912fb1af0e930bc9774166cdd56eade913d5f2fff35e"},
+ {file = "pydantic_core-2.16.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:396fdf88b1b503c9c59c84a08b6833ec0c3b5ad1a83230252a9e17b7dfb4cffc"},
+ {file = "pydantic_core-2.16.2-cp312-none-win32.whl", hash = "sha256:7c31669e0c8cc68400ef0c730c3a1e11317ba76b892deeefaf52dcb41d56ed5d"},
+ {file = "pydantic_core-2.16.2-cp312-none-win_amd64.whl", hash = "sha256:a3b7352b48fbc8b446b75f3069124e87f599d25afb8baa96a550256c031bb890"},
+ {file = "pydantic_core-2.16.2-cp312-none-win_arm64.whl", hash = "sha256:a9e523474998fb33f7c1a4d55f5504c908d57add624599e095c20fa575b8d943"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ae34418b6b389d601b31153b84dce480351a352e0bb763684a1b993d6be30f17"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:732bd062c9e5d9582a30e8751461c1917dd1ccbdd6cafb032f02c86b20d2e7ec"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b52776a2e3230f4854907a1e0946eec04d41b1fc64069ee774876bbe0eab55"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ef551c053692b1e39e3f7950ce2296536728871110e7d75c4e7753fb30ca87f4"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ebb892ed8599b23fa8f1799e13a12c87a97a6c9d0f497525ce9858564c4575a4"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa6c8c582036275997a733427b88031a32ffa5dfc3124dc25a730658c47a572f"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4ba0884a91f1aecce75202473ab138724aa4fb26d7707f2e1fa6c3e68c84fbf"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7924e54f7ce5d253d6160090ddc6df25ed2feea25bfb3339b424a9dd591688bc"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69a7b96b59322a81c2203be537957313b07dd333105b73db0b69212c7d867b4b"},
+ {file = "pydantic_core-2.16.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7e6231aa5bdacda78e96ad7b07d0c312f34ba35d717115f4b4bff6cb87224f0f"},
+ {file = "pydantic_core-2.16.2-cp38-none-win32.whl", hash = "sha256:41dac3b9fce187a25c6253ec79a3f9e2a7e761eb08690e90415069ea4a68ff7a"},
+ {file = "pydantic_core-2.16.2-cp38-none-win_amd64.whl", hash = "sha256:f685dbc1fdadb1dcd5b5e51e0a378d4685a891b2ddaf8e2bba89bd3a7144e44a"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:55749f745ebf154c0d63d46c8c58594d8894b161928aa41adbb0709c1fe78b77"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b30b0dd58a4509c3bd7eefddf6338565c4905406aee0c6e4a5293841411a1286"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18de31781cdc7e7b28678df7c2d7882f9692ad060bc6ee3c94eb15a5d733f8f7"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5864b0242f74b9dd0b78fd39db1768bc3f00d1ffc14e596fd3e3f2ce43436a33"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8f9186ca45aee030dc8234118b9c0784ad91a0bb27fc4e7d9d6608a5e3d386c"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc6f6c9be0ab6da37bc77c2dda5f14b1d532d5dbef00311ee6e13357a418e646"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa057095f621dad24a1e906747179a69780ef45cc8f69e97463692adbcdae878"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6ad84731a26bcfb299f9eab56c7932d46f9cad51c52768cace09e92a19e4cf55"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3b052c753c4babf2d1edc034c97851f867c87d6f3ea63a12e2700f159f5c41c3"},
+ {file = "pydantic_core-2.16.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e0f686549e32ccdb02ae6f25eee40cc33900910085de6aa3790effd391ae10c2"},
+ {file = "pydantic_core-2.16.2-cp39-none-win32.whl", hash = "sha256:7afb844041e707ac9ad9acad2188a90bffce2c770e6dc2318be0c9916aef1469"},
+ {file = "pydantic_core-2.16.2-cp39-none-win_amd64.whl", hash = "sha256:9da90d393a8227d717c19f5397688a38635afec89f2e2d7af0df037f3249c39a"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f60f920691a620b03082692c378661947d09415743e437a7478c309eb0e4f82"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:47924039e785a04d4a4fa49455e51b4eb3422d6eaacfde9fc9abf8fdef164e8a"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6294e76b0380bb7a61eb8a39273c40b20beb35e8c87ee101062834ced19c545"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe56851c3f1d6f5384b3051c536cc81b3a93a73faf931f404fef95217cf1e10d"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9d776d30cde7e541b8180103c3f294ef7c1862fd45d81738d156d00551005784"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:72f7919af5de5ecfaf1eba47bf9a5d8aa089a3340277276e5636d16ee97614d7"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:4bfcbde6e06c56b30668a0c872d75a7ef3025dc3c1823a13cf29a0e9b33f67e8"},
+ {file = "pydantic_core-2.16.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ff7c97eb7a29aba230389a2661edf2e9e06ce616c7e35aa764879b6894a44b25"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9b5f13857da99325dcabe1cc4e9e6a3d7b2e2c726248ba5dd4be3e8e4a0b6d0e"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a7e41e3ada4cca5f22b478c08e973c930e5e6c7ba3588fb8e35f2398cdcc1545"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60eb8ceaa40a41540b9acae6ae7c1f0a67d233c40dc4359c256ad2ad85bdf5e5"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7beec26729d496a12fd23cf8da9944ee338c8b8a17035a560b585c36fe81af20"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22c5f022799f3cd6741e24f0443ead92ef42be93ffda0d29b2597208c94c3753"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:eca58e319f4fd6df004762419612122b2c7e7d95ffafc37e890252f869f3fb2a"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed957db4c33bc99895f3a1672eca7e80e8cda8bd1e29a80536b4ec2153fa9804"},
+ {file = "pydantic_core-2.16.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:459c0d338cc55d099798618f714b21b7ece17eb1a87879f2da20a3ff4c7628e2"},
+ {file = "pydantic_core-2.16.2.tar.gz", hash = "sha256:0ba503850d8b8dcc18391f10de896ae51d37fe5fe43dbfb6a35c5c5cad271a06"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
+
+[[package]]
+name = "pyflakes"
+version = "3.1.0"
+description = "passive checker of Python programs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"},
+ {file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"},
+]
+
+[[package]]
+name = "pygments"
+version = "2.17.2"
+description = "Pygments is a syntax highlighting package written in Python."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"},
+ {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"},
+]
+
+[package.extras]
+plugins = ["importlib-metadata"]
+windows-terminal = ["colorama (>=0.4.6)"]
+
+[[package]]
+name = "python-dotenv"
+version = "1.0.1"
+description = "Read key-value pairs from a .env file and set them as environment variables"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
+ {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
+]
+
+[package.extras]
+cli = ["click (>=5.0)"]
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "rich"
+version = "13.7.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"},
+ {file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"},
+]
+
+[package.dependencies]
+markdown-it-py = ">=2.2.0"
+pygments = ">=2.13.0,<3.0.0"
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<9)"]
+
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
+[[package]]
+name = "tqdm"
+version = "4.66.2"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9"},
+ {file = "tqdm-4.66.2.tar.gz", hash = "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
+[[package]]
+name = "typing-extensions"
+version = "4.9.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"},
+ {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"},
+]
+
+[[package]]
+name = "urllib3"
+version = "2.2.0"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "urllib3-2.2.0-py3-none-any.whl", hash = "sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224"},
+ {file = "urllib3-2.2.0.tar.gz", hash = "sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+h2 = ["h2 (>=4,<5)"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]
+
+[[package]]
+name = "werkzeug"
+version = "3.0.1"
+description = "The comprehensive WSGI web application library."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"},
+ {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.1.1"
+
+[package.extras]
+watchdog = ["watchdog (>=2.3)"]
+
+[[package]]
+name = "wrapt"
+version = "1.16.0"
+description = "Module for decorators, wrappers and monkey patching."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"},
+ {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"},
+ {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"},
+ {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"},
+ {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"},
+ {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"},
+ {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"},
+ {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"},
+ {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"},
+ {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"},
+ {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"},
+ {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"},
+ {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"},
+ {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"},
+ {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"},
+ {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"},
+ {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"},
+ {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"},
+ {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"},
+ {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"},
+ {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"},
+ {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"},
+ {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"},
+ {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"},
+ {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"},
+ {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"},
+ {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"},
+ {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"},
+ {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"},
+ {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"},
+ {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"},
+ {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"},
+ {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"},
+ {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"},
+ {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"},
+ {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"},
+ {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"},
+ {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"},
+ {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"},
+ {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"},
+ {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"},
+ {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"},
+ {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"},
+ {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"},
+ {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"},
+ {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"},
+ {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"},
+ {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"},
+ {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"},
+ {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"},
+ {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"},
+ {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"},
+ {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"},
+ {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"},
+ {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"},
+ {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"},
+ {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"},
+ {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"},
+ {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"},
+ {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"},
+ {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"},
+ {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"},
+ {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"},
+ {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"},
+ {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"},
+ {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"},
+ {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"},
+ {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"},
+ {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"},
+ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"},
+]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.11"
+content-hash = "2da582b7e838d5e83e447ace514580ef3ee875d9346b45c98b276a171cc59f37"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..07c54e0
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,30 @@
+[tool.poetry]
+authors = ["Harvard Library Innovation Lab"]
+description = ""
+license = "MIT"
+name = "olaw"
+package-mode = false
+readme = "README.md"
+version = "0.1.0"
+
+[tool.poetry.dependencies]
+click = "^8.1.7"
+flask = "^3.0.0"
+flask-limiter = "^3.5.1"
+html2text = "^2020.1.16"
+ollama = "^0.1.6"
+openai = "^1.11.1"
+python = "^3.11"
+python-dotenv = "^1.0.0"
+requests = "^2.31.0"
+
+[tool.poetry.group.dev.dependencies]
+black = "^23.10.1"
+flake8 = "^6.1.0"
+
+[build-system]
+build-backend = "poetry.core.masonry.api"
+requires = ["poetry-core"]
+
+[tool.black]
+line-length = 100
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..5e49371
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,3 @@
+[flake8]
+max-line-length = 100
+extend-ignore = F401
diff --git a/wsgi.py b/wsgi.py
new file mode 100644
index 0000000..158b975
--- /dev/null
+++ b/wsgi.py
@@ -0,0 +1,6 @@
+""" WSGI hook """
+
+from olaw import create_app
+
+if __name__ == "__main__":
+ create_app().run()