feat: Groq as LLM provider
thompsonson authored Sep 27, 2024
2 parents 2bad3e7 + 9cf6d27 commit 69ef2ec
Showing 6 changed files with 149 additions and 11 deletions.
14 changes: 13 additions & 1 deletion README.md
@@ -9,7 +9,10 @@ Welcome to a LLM Response Analysis Framework! This tool is designed to dive deep
## Features

- **Dynamic LLM Integration**
Seamlessly connect with various LLM providers and models to fetch responses using a flexible architecture.
Seamlessly connect with various LLM providers and models to fetch responses using a flexible architecture. The following integrations are available:
- OpenAI
- Groq
- Ollama

- **LangChain Structured Output Chain Analysis**
Seamlessly connect with a LangChain Structured Output and check for the consistency of responses. [See this documentation for further information.](docs/check_chain/README.md)
@@ -92,6 +95,11 @@ Before using `det`, configure your LLM and embeddings provider API keys

`export OPENAI_API_KEY=sk-makeSureThisIsaRealKey`

For Groq, configure the Groq client API key:

`export GROQ_API_KEY=gsk-DUMMYKEYISTHIS`


### Basic Usage

To get a list of all the arguments and their descriptions, use:
@@ -109,6 +117,10 @@ det check-responses \
--embeddings-model text-embedding-ada-002
```

For `Groq`, set `--llm-provider Groq`; for `Ollama`, set `--llm-provider Ollama` (see the sketch below).
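A minimal sketch of a Groq run, assuming the remaining flags mirror the OpenAI example above (the `--llm-model` value and the embeddings flags are illustrative; adjust them to your setup):

```
det check-responses \
    --llm-provider Groq \
    --llm-model llama3-8b-8192 \
    --embeddings-provider OpenAI \
    --embeddings-model text-embedding-ada-002
```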


### LangChain Structured Output Chains

A LangChain Structured Output example:
14 changes: 13 additions & 1 deletion det/embeddings/generator.py
@@ -5,7 +5,7 @@
import logging

from openai import OpenAI

import openai

logger = logging.getLogger(__name__)

@@ -65,6 +65,18 @@ def _instantiate_openai_client(self, api_key: str):
            else:
                self.client = OpenAI()
                logger.info("OpenAI client instantiated successfully without api_key.")
            self.client.models.list()
        except openai.APIConnectionError as e:
            logger.error("The server could not be reached")
            logger.error(e.__cause__)  # The original exception
        except openai.RateLimitError as e:
            logger.error(
                f"A 429 status code was received; we should back off a bit: {e}"
            )
        except openai.APIStatusError as e:
            logger.error(f"Another non-200-range status code was received: {e}")
            logger.error(e.status_code)
            logger.error(e.response)
        except Exception as e:
            logger.error(f"Error instantiating OpenAI client: {str(e)}")
            self.client = None
Expand Down
72 changes: 72 additions & 0 deletions det/llm/llm_groq.py
@@ -0,0 +1,72 @@
"""
# llm/llm_groq.py
This module defines the GroqClient class, an implementation of the BaseLLMClient for generating
text responses using Groq's API. It abstracts the details of API interaction, allowing for easy
generation of text completions with various configurations.
The client utilizes environment variables for API authentication, ensuring secure access without
hard-coding sensitive information. It supports specifying a model at initialization for all
subsequent requests, with the option to override default parameters per request.
Example:
--------
llm_client = GroqClient(model="llama3-8b-8192")
prompt = "Explain the significance of abstract classes in object-oriented programming."
response = llm_client.generate_response(prompt, temperature=0.5, max_tokens=100)
print(response)
This setup facilitates seamless integration with different Large Language Models by adhering to the
BaseLLMClient interface, promoting a plug-and-play architecture for text generation tasks.
"""

from groq import Groq
import groq
import logging

from det.llm.base import LLMGeneratorInterface

logger = logging.getLogger(__name__)


class GroqClient(LLMGeneratorInterface):
    """
    Example:
    --------
    llm_client = GroqClient(model="llama3-8b-8192", api_key="your_api_key_here")
    prompt = "Explain the significance of abstract classes in object-oriented programming."
    response = llm_client.generate_response(prompt, temperature=0.5, max_tokens=100)
    print(response)
    """

    def __init__(self, model: str = "llama3-8b-8192", api_key: str = None):
        try:
            if api_key:
                self.client = Groq(api_key=api_key)
            else:
                self.client = Groq()
            self.model = model
            self.client.models.list()
        except groq.APIConnectionError as e:
            logger.error(f"The server could not be reached: {e}")
        except groq.RateLimitError as e:
            logger.error(
                f"A 429 status code was received; we should back off a bit: {e}"
            )
        except groq.APIStatusError as e:
            logger.error(f"Another non-200-range status code was received: {e}")
        except Exception as e:
            logger.error(f"Error instantiating Groq client: {str(e)}")
            self.client = None

    def generate_response(self, prompt: str, **kwargs):
        try:
            response = self.client.chat.completions.create(
                model=self.model,  # Use the model specified during initialization
                messages=[{"role": "user", "content": prompt}],
                **kwargs,
            )
            return response.choices[0].message.content
        except Exception as e:
            logger.error(f"An error occurred: {e}")
            return None
32 changes: 27 additions & 5 deletions det/llm/llm_openai.py
@@ -21,9 +21,13 @@
"""

from openai import OpenAI
import openai
import logging

from det.llm.base import LLMGeneratorInterface

logger = logging.getLogger(__name__)


class OpenAIClient(LLMGeneratorInterface):
"""
Expand All @@ -36,11 +40,29 @@ class OpenAIClient(LLMGeneratorInterface):
"""

    def __init__(self, model: str = "gpt-3.5-turbo", api_key: str = None):
        if api_key:
            self.client = OpenAI(api_key=api_key)
        else:
            self.client = OpenAI()
        self.model = model
        try:
            if api_key:
                self.client = OpenAI(api_key=api_key)
                logger.info("OpenAI client instantiated successfully using api_key.")
            else:
                self.client = OpenAI()
                logger.info("OpenAI client instantiated successfully without api_key.")
            self.model = model
            self.client.models.list()
        except openai.APIConnectionError as e:
            logger.error("The server could not be reached")
            logger.error(e.__cause__)  # The original exception
        except openai.RateLimitError as e:
            logger.error(
                f"A 429 status code was received; we should back off a bit: {e}"
            )
        except openai.APIStatusError as e:
            logger.error("Another non-200-range status code was received")
            logger.error(e.status_code)
            logger.error(e.response)
        except Exception as e:
            logger.error(f"Error instantiating OpenAI client: {str(e)}")
            self.client = None

    def generate_response(self, prompt: str, **kwargs):
        try:
27 changes: 23 additions & 4 deletions poetry.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions pyproject.toml
@@ -19,6 +19,7 @@ langchain = "^0.1.12"
langchain-community = "^0.0.29"
langchain-openai = "^0.1.1"
deepdiff = "^6.7.1"
groq = "^0.11.0"

[tool.poetry.group.dev.dependencies]
pre-commit = "^3.6.2"
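To pull the new dependency into a local environment, a quick sketch with Poetry (assuming a Poetry-managed checkout; `poetry install` resolves against the committed lock file):

```
poetry install
```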
