Skip to content

Commit

Permalink
chore: Update dependencies and bump version to 0.3.0
Browse files Browse the repository at this point in the history
  • Loading branch information
Rishang committed Sep 3, 2024
1 parent 18e4e9d commit dced8dd
Show file tree
Hide file tree
Showing 8 changed files with 1,532 additions and 955 deletions.
58 changes: 33 additions & 25 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,9 @@ GPT_PROMPT_URL=<url-to-your-prompt-file>
# openai
OPENAI_API_KEY=<your-openai-api-key>
OPENAI_ORG=<org-*****> # optional openai organization id
OPENAI_MODEL=gpt-4 # optional openai model name
MODEL=gpt-4o # optional openai model name (default: gpt-3.5-turbo)


# palm (optional)
# ref: https://github.com/dsdanielpark/palm-API
PALM_API_KEY=<your-palm-api-key>
```

In order to configure them you can use `heygpt config` command:
Expand All @@ -56,48 +54,58 @@ In order to configure them you can use `heygpt config` command:

Configure heygpt.

╭─ Options -------------------------------------------------+
│ --prompt-file TEXT Prompt file path. |
│ --prompt-url TEXT Prompt file url. |
│ --openai-key TEXT OpenAI API key. |
│ --openai-org TEXT OpenAI organization id. |
│ --openai-model TEXT OpenAI model name. |
│ --palm-key TEXT palm API key. |
| |
│ --help Show this message and exit. |
------------------------------------------------------------+
╭─ Options -----------------------------------------------------+
│ --prompt-file TEXT Prompt file path. |
│ --prompt-url TEXT Prompt file url. |
│ --openai-key TEXT OpenAI API key. |
│ --openai-org TEXT OpenAI organization id. |
| --model TEXT Default model name [OpenAI/Gemini]|
│ --help Show this message and exit. |
----------------------------------------------------------------+
```

Default model name is `gpt-3.5-turbo` for this tool. You can change it to `gpt-4o` or any other model name you want to use.

```bash
heygpt config --openai-key <your-openai-api-key>
```

### Using local/remote prompts

Prompt csv formate
Prompt YAML format

```yaml
# ~/path/to/prompts.yaml
- Title: Fix Grammar
Command:
- role: user
content: |
Review the provided text and correct any grammatical errors. Ensure that the text is clear, concise, and free of any spelling mistakes.
```
To use your saved prompts run:
```csv
Title,Command
<Your title for prompt>,<your command for prompt>
```
heygpt config --prompt-file ~/path/to/prompts.yaml
```

Here, `--prompt-url` and `--prompt-file` are optional; use them if you want to supply your own custom
prompts.

For providing a URL of `csv` file containing your prompts.
For providing a URL of a `yaml` file containing your prompts.

```bash
# remote csv file
heygpt config --prompt-url <url-to-your-prompt-file.csv>
# remote yaml file
heygpt config --prompt-url <url-to-your-prompt-file.yaml>
```

Note: This is the default csv used for prompts: [default-prompts.csv](./prompts.csv), for using your own prompts, you need to follow the same format as in this file.
Note: This is the default yaml used for prompts: [default-prompts.yaml](./prompts.yaml), for using your own prompts, you need to follow the same format as in this file.

For your own prompts by providing a URL to a `csv` file containing your prompts. You can also use local `csv` file by providing a relative path to it.
You can supply your own prompts by providing a URL to a `yaml` file containing them. You can also use a local `yaml` file by providing a relative path to it.

```bash
# local csv file
heygpt config --prompt-file ~/path/to/prompts.csv
# local yaml file
heygpt config --prompt-file ~/path/to/prompts.yaml
```

## Usage Examples
Expand Down
4 changes: 2 additions & 2 deletions src/heygpt/api.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from heygpt.core import completion_openai_gpt, openai_model
from heygpt.core import completion_openai_gpt, model
from heygpt.prompts import make_prompt, PromptInput

from heygpt.serve_prompts import prompts
Expand All @@ -18,7 +18,7 @@


@app.post("/gpt")
async def gpt(msg: PromptInput, model: str = openai_model):
async def gpt(msg: PromptInput, model: str = model):
return completion_openai_gpt(
command=msg.prompt.Command, text=f"""{msg.text}""", model=model
)
Expand Down
48 changes: 18 additions & 30 deletions src/heygpt/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,28 +14,24 @@
from heygpt.constant import prompt_items_url
from heygpt.prompts import load_prompts, make_prompt
from heygpt.core import (
openai_model,
model as _model,
sh,
completion_openai_gpt,
completion_palm_text,
wisper,
print_md,
ask_prompt_input,
)

app = typer.Typer(
help="""
HeyGPT CLI\n\nA simple command line tool to generate text using OpenAI GPT or Google Palm based on ready-made templated prompts.
HeyGPT CLI\n\nA simple command line tool to generate text using OpenAI GPT or Google Gemini based on ready-made templated prompts.
\n\n\nFor debug logs use: `export LOG_LEVEL=DEBUG` or `set LOG_LEVEL=DEBUG` on windows.""",
pretty_exceptions_enable=False,
)


@app.command(help="Ask query or task to GPT using prompt templates")
def ask(
palm: bool = typer.Option(
False, "--palm", "-b", help="Use google palm instead of openai."
),
no_prompt: bool = typer.Option(
False, "--no-prompt", "-n", help="Ask without any prompt templates."
),
Expand All @@ -45,12 +41,11 @@ def ask(
tag: Annotated[Optional[List[str]], typer.Option()] = [],
save: str = typer.Option("", "--output", "-o", help="Save output to file."),
model: str = typer.Option(
openai_model,
_model,
"--model",
"-m",
help="OpenAI model name. info: https://platform.openai.com/docs/models/",
help=f"default {_model} | OpenAI model name. info: https://platform.openai.com/docs/models/",
),
system: str = typer.Option("", "--system", "-s", help="System name for prompt."),
temperature: float = typer.Option(
0.5,
"--temperature",
Expand All @@ -65,7 +60,7 @@ def ask(
),
):
tags: str = " #".join(tag)
command: str = ""
command: list = []

# print(tags)
# return
Expand All @@ -89,7 +84,6 @@ def ask(
for i in prompts:
if i.Title == act:
command = i.Command
system = i.System
log.debug(command)
_found_prompt = True
break
Expand All @@ -98,7 +92,7 @@ def ask(
return

if tags.strip() != "":
command += f"\nFor: #{tags}"
text += f"\nFor: #{tags}"

if not sys.stdin.isatty():
text = sys.stdin.read()
Expand All @@ -108,18 +102,15 @@ def ask(
text = Prompt.ask("[blue]Enter text")

# log.debug(text)
if palm:
content = completion_palm_text(command=command, text=text, _print=raw)
else:
completion = completion_openai_gpt(
command=command,
system=system,
text=text,
model=model,
_print=raw,
temperature=temperature,
)
content = completion

completion = completion_openai_gpt(
command=command,
text=text,
model=model,
_print=raw,
temperature=temperature,
)
content = completion

if not raw:
print_md(content)
Expand Down Expand Up @@ -170,8 +161,7 @@ def config(
prompt_url: str = typer.Option("", help="Prompt file url."),
openai_key: str = typer.Option("", help="OpenAI API key."),
openai_org: str = typer.Option("", help="OpenAI organization id."),
openai_model: str = typer.Option("", help="OpenAI model name."),
palm_key: str = typer.Option("", help="palm API key."),
model: str = typer.Option("", help="LLM model name."),
):
from heygpt.constant import config_path

Expand All @@ -193,12 +183,10 @@ def config(
configs["prompt_url"] = prompt_url
if openai_key != "":
configs["openai_key"] = openai_key
if palm_key != "":
configs["palm_key"] = palm_key
if openai_org != "":
configs["openai_org"] = openai_org
if openai_model != "":
configs["openai_model"] = openai_model
if model != "":
configs["model"] = model

new_configs = configs
print(json.dumps(new_configs))
Expand Down
117 changes: 28 additions & 89 deletions src/heygpt/core.py
Original file line number Diff line number Diff line change
@@ -1,26 +1,25 @@
import os

import requests
import openai
import google.generativeai as palm
from rich.console import Console
from rich.markdown import Markdown
from prompt_toolkit import prompt
from prompt_toolkit.completion import FuzzyWordCompleter

from litellm import completion
from heygpt.constant import configs
from heygpt.prompts import openai_fmt_prompt, Message
from heygpt.utils import log, notNone

console = Console()

openai.api_key = os.getenv("OPENAI_API_KEY", configs.get("openai_key"))
openai.organization = os.getenv("OPENAI_ORG", configs.get("openai_org"))

__openai_model__ = os.getenv("OPENAI_MODEL", configs.get("openai_model"))
if notNone(__openai_model__, str):
openai_model = __openai_model__
__model__ = os.getenv("MODEL", configs.get("model"))
if notNone(__model__, str):
model = __model__
else:
openai_model = "gpt-3.5-turbo"
model = "gpt-3.5-turbo"


def sh(command):
Expand All @@ -39,10 +38,10 @@ def ask_prompt_input(items: list, title="Select item"):


def completion_openai_gpt(
text: str = None,
command: str = "",
text: str = "",
command: list[Message] = [],
system: str = "",
model=openai_model,
model=model,
_print=False,
temperature=0.7,
):
Expand All @@ -60,90 +59,30 @@ def completion_openai_gpt(
if not text:
raise Exception("No text found")

if command != "":
_command = command + "\n\n" + text
else:
_command = text

if "gpt-" in model:
completion = openai.chat.completions.create(
model=model,
stream=True,
temperature=temperature,
messages=[
{
"role": "system",
"content": f"{system}",
},
{"role": "user", "content": command},
{"role": "user", "content": text},
],
# stop="",
)

for chunk in completion:
# Process each chunk as needed
c = chunk.choices[0].delta.content or ""
out += c
if _print:
console.print(c, end="", markup=True)
messages = openai_fmt_prompt(command)

if messages != []:
messages[-1]["content"] += "\n\n" + text
else:
raise Exception("Invalid model GPT allowed for now")
completion = openai.completions.create(
model=model,
prompt=_command,
temperature=temperature,
max_tokens=1000,
stream=True,
top_p=1,
)
for chunk in completion:
c = chunk["choices"][0]["text"]
out += c
if _print:
console.print(c, end="", markup=True)
messages = [{"role": "user", "content": text}]

chat_completion = completion(
model=model,
messages=messages,
temperature=temperature,
stream=True,
)

for chunk in chat_completion:
# Process each chunk as needed
c = chunk.choices[0].delta.content or ""
out += c
if _print:
console.print(c, end="", markup=True)

return out


def completion_palm_text(text: str, command: str = "", _print=False):
"""
ref: https://developers.generativeai.google/api/python/google/generativeai/generate_text
"""
if os.environ.get("PALM_API_KEY") == None:
if configs.get("palm_key"):
os.environ["PALM_API_KEY"] = configs.get("palm_key")

palm.configure(api_key=os.environ.get("PALM_API_KEY"))

defaults = {
"model": "models/text-bison-001",
"temperature": 0.7,
"candidate_count": 1,
"top_k": 40,
"top_p": 0.95,
"max_output_tokens": 1024,
"stop_sequences": [],
"safety_settings": [
# {"category": "HARM_CATEGORY_DEROGATORY", "threshold": 1},
# {"category": "HARM_CATEGORY_TOXICITY", "threshold": 1},
# {"category": "HARM_CATEGORY_VIOLENCE", "threshold": 2},
# {"category": "HARM_CATEGORY_SEXUAL", "threshold": 2},
# {"category": "HARM_CATEGORY_MEDICAL", "threshold": 2},
# {"category": "HARM_CATEGORY_DANGEROUS", "threshold": 2},
],
}
prompt = f"""{command}
{text}"""
response = palm.generate_text(**defaults, prompt=prompt)

completer = response.result
if _print:
print(completer)
return completer


def wisper(audio_file):
with open(f"{audio_file}", "rb") as file:
transcript = openai.Audio.transcribe("whisper-1", file)
Expand Down
Loading

0 comments on commit dced8dd

Please sign in to comment.