diff --git a/.env.template b/.env.template
new file mode 100644
index 0000000..79ddaf9
--- /dev/null
+++ b/.env.template
@@ -0,0 +1,2 @@
+OPENAI_API_KEY=your-api-key
+OPENAI_MODEL=gpt-4
diff --git a/.gitignore b/.gitignore
index 0d20b64..63a1eb3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
 *.pyc
+.env
+.pytest_cache
+__pycache__
diff --git a/README.md b/README.md
index 686db4b..8ea0a9a 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@ An easy-to-use implementation of AI functions using OpenAI's GPT-4 (or any other
 1. Clone the repository:

 ```bash
-git clone https://github.com/YourUsername/SuperSimpleAIFunctions.git
+git clone https://github.com/Torantulino/AI-Functions.git
 ```

 2. Install the required dependencies:
@@ -34,7 +34,7 @@ git clone https://github.com/YourUsername/SuperSimpleAIFunctions.git
 pip install -r requirements.txt
 ```

-3. Obtain an [OpenAI API key](https://beta.openai.com/signup/) and store it in a `keys.py` file in the same directory as the scripts or set it as an environment variable.
+3. Rename `.env.template` to `.env` and set the `OPENAI_API_KEY` variable to your OpenAI API key. You can obtain your OpenAI API key at: https://platform.openai.com/signup/.

 ## Usage

@@ -43,10 +43,11 @@ pip install -r requirements.txt
 `ai_functions.py` contains the following function:

 ```python
-def ai_function(function, args, description, model="gpt-4"):
+def ai_function(function, args, description):
 ```

 The `ai_function` takes the following parameters:
+
 - `function`: A string describing the function signature.
 - `args`: A list of arguments for the function.
 - `description`: A string describing the purpose of the function.
@@ -69,21 +70,19 @@ print(result) # Output: 12

 The table below shows the success rate of the AI functions with different GPT models:

-| Description | GPT-4 Result | GPT-3.5-turbo Result | Reason |
-|---------------------------|--------------|----------------------|--------|
-| Generate fake people | PASSED | FAILED | Incorrect response format |
-| Generate Random Password | PASSED | PASSED | N/A |
-| Calculate area of triangle| FAILED | FAILED | Incorrect float value (GPT-4), Incorrect response format (GPT-3.5-turbo) |
-| Calculate the nth prime number | PASSED | PASSED | N/A |
-| Encrypt text | PASSED | PASSED | N/A |
-| Find missing numbers | PASSED | PASSED | N/A |
+| Description                    | GPT-4 Result | GPT-3.5-turbo Result | Reason                                                                   |
+| ------------------------------ | ------------ | -------------------- | ------------------------------------------------------------------------ |
+| Generate fake people           | PASSED       | FAILED               | Incorrect response format                                                |
+| Generate Random Password       | PASSED       | PASSED               | N/A                                                                      |
+| Calculate area of triangle     | FAILED       | FAILED               | Incorrect float value (GPT-4), Incorrect response format (GPT-3.5-turbo) |
+| Calculate the nth prime number | PASSED       | PASSED               | N/A                                                                      |
+| Encrypt text                   | PASSED       | PASSED               | N/A                                                                      |
+| Find missing numbers           | PASSED       | PASSED               | N/A                                                                      |

 It's important to note that AI Functions are not suited for certain tasks, particularly those involving mathematical calculations and precision. As observed in the case of calculating the area of a triangle and finding the nth prime number, GPT models can struggle with providing accurate results. The limitations of GPT models in such cases are mainly due to their inherent inability to perform precise arithmetic and the ambiguity in understanding user inputs.

 In conclusion, while AI Functions can be helpful in various scenarios, they may not be the optimal choice for tasks requiring mathematical accuracy or specific domain knowledge. For such use-cases, utilizing traditional algorithms and libraries would yield better results.

-
-
 ### test_ai_functions.py

 `test_ai_functions.py` contains test cases for the `ai_function`. To run the tests, execute the script with Python:
diff --git a/ai_functions.py b/ai_functions.py
index a0d53a3..ae12cf6 100644
--- a/ai_functions.py
+++ b/ai_functions.py
@@ -1,14 +1,43 @@
+import os
+from typing import Any
+
 import openai
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+
+
+def ai_function(
+    function: str,
+    args: list[str],
+    description: str,
+    model: str = os.getenv("OPENAI_MODEL", "gpt-4"),
+    temperature: float = 0,
+) -> str:
+    """
+    Executes a function, given the function string, arguments and description.
+
+    Args:
+        function (str): The Python function string to be executed.
+        args (list[str]): A list of arguments to be passed to the function.
+        description (str): A description of the function to be executed.
+        model (str, optional): The model to be used. Defaults to "gpt-4".
+        temperature (float, optional): The temperature to be used. Defaults to 0.
+    """
+    args_str: str = ", ".join(args)
+    content: tuple[str, ...] = (
+        f"You are now the following python function: ```# {description}\n{function}```",
+        "\n\nOnly respond with your `return` value. Do not include any other explanatory text in your response.",
+    )

-def ai_function(function, args, description, model = "gpt-4"):
-    # parse args to comma separated string
-    args = ", ".join(args)
-    messages = [{"role": "system", "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value. Do not include any other explanatory text in your response."},{"role": "user", "content": args}]
+    messages: list[dict[str, str]] = [
+        {"role": "system", "content": "".join(content)},
+        {"role": "user", "content": args_str},
+    ]

-    response = openai.ChatCompletion.create(
-        model=model,
-        messages=messages,
-        temperature=0
+    response: Any = openai.ChatCompletion.create(
+        model=model, messages=messages, temperature=temperature
     )

     return response.choices[0].message["content"]
diff --git a/keys.py b/keys.py
deleted file mode 100644
index 30ce07f..0000000
--- a/keys.py
+++ /dev/null
@@ -1 +0,0 @@
-OPENAI_API_KEY="" # Get yours from: https://beta.openai.com/account/api-keys
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index ff11a64..dc53806 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
 openai==0.27.0
 pytest==7.2.2
+python-dotenv==1.0.0
diff --git a/test_ai_function.py b/test_ai_function.py
index bb2e19d..80391cb 100644
--- a/test_ai_function.py
+++ b/test_ai_function.py
@@ -1,13 +1,23 @@
 import ast
 import json
+import os
 import time
-import ai_functions
-import pytest
+
 import openai
-import keys
+import pytest
+from dotenv import load_dotenv
+
+import ai_functions
+
+# Load environment variables
+load_dotenv()

 # Initialize the OpenAI API client
-openai.api_key = keys.OPENAI_API_KEY
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+@pytest.fixture
+def model():
+    return os.getenv("OPENAI_MODEL", "gpt-4")

 # Run all tests, print the results, and return the number of failed tests
 def run_tests(model):
@@ -19,7 +29,7 @@ def run_tests(model):
         "Calculate the nth prime number",
         "Encrypt text",
         "Find missing numbers"
-]
+    ]
     failed_tests = []
     i = 0