Claude API (#1)
* chatgpt -> claude

* Remove json config as deprecated

* Rename package folder

* Rename llm file

* Added anthropic requirement

* More chatgpt -> claude

* Added model and openai objects

* Updated call_model

* Updated role names

* Use claude-2 model with system prompt

* Use claude prompt structure

* Fix openai key

* Fix prompt

* context_depth only even

* Update dependencies for chatbotsforum compat.
Update license tests

---------

Co-authored-by: Daniel McKnight <daniel@neon.ai>
NeonBohdan and NeonDaniel authored Jan 17, 2024
1 parent 461dd8d commit 633277b
Showing 11 changed files with 68 additions and 65 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/license_tests.yml
@@ -9,4 +9,4 @@ jobs:
   license_tests:
     uses: neongeckocom/.github/.github/workflows/license_tests.yml@master
     with:
-      packages-exclude: '^(neon-llm-chatgpt|tqdm).*'
+      packages-exclude: '^(neon-llm|tqdm|klat-connector|neon-chatbot|dnspython).*'
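For reference, the broadened pattern now excludes every package whose name starts with one of the listed prefixes from license checking. A quick sanity check of the regex (illustrative only, not part of this commit):

```python
import re

# The workflow's new exclusion pattern, tested against a few package names
pattern = re.compile(r"^(neon-llm|tqdm|klat-connector|neon-chatbot|dnspython).*")

assert pattern.match("neon-llm-claude")     # excluded: matches the neon-llm prefix
assert pattern.match("klat-connector")      # excluded: new chatbot dependency
assert pattern.match("anthropic") is None   # still subject to license checks
```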
4 changes: 2 additions & 2 deletions Dockerfile
@@ -1,7 +1,7 @@
 FROM python:3.9-slim

 LABEL vendor=neon.ai \
-    ai.neon.name="neon-llm-chatgpt"
+    ai.neon.name="neon-llm-claude"

 ENV OVOS_CONFIG_BASE_FOLDER neon
 ENV OVOS_CONFIG_FILENAME diana.yaml
@@ -12,4 +12,4 @@ WORKDIR /app
 COPY . /app
 RUN pip install /app

-CMD [ "neon-llm-chatgpt" ]
+CMD [ "neon-llm-claude" ]
17 changes: 9 additions & 8 deletions README.md
@@ -1,5 +1,5 @@
-# NeonAI LLM ChatGPT
-Proxies API calls to ChatGPT.
+# NeonAI LLM Claude
+Proxies API calls to Anthropic Claude.

 ## Request Format
 API requests should include `history`, a list of tuples of strings, and the current
@@ -25,12 +25,13 @@ MQ:
   port: <MQ Port>
   server: <MQ Hostname or IP>
   users:
-    neon_llm_chat_gpt:
-      password: <neon_chatgpt user's password>
-      user: neon_chatgpt
-LLM_CHAT_GPT:
+    neon_llm_claude:
+      password: <neon_claude user's password>
+      user: neon_claude
+LLM_CLAUDE:
   key: ""
-  model: "gpt-3.5-turbo"
+  openai_key: ""
+  model: "claude-2"
   role: "You are trying to give a short answer in less than 40 words."
   context_depth: 3
   max_tokens: 100
@@ -40,6 +41,6 @@ LLM_CHAT_GPT:
 For example, if your configuration resides in `~/.config`:
 ```shell
 export CONFIG_PATH="/home/${USER}/.config"
-docker run -v ${CONFIG_PATH}:/config neon_llm_chatgpt
+docker run -v ${CONFIG_PATH}:/config neon_llm_claude
 ```
 > Note: If connecting to a local MQ server, you may need to specify `--network host`
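For context, the Request Format section above pairs a chat history with the current message. A minimal sketch of such a payload, assuming a `query` field for the current message (only `history` as a list of role/text pairs is confirmed by the visible README text; the other field name is an assumption):

```python
# Hypothetical MQ request payload based on the README's Request Format section.
# "history" (a list of [role, text] pairs) is described above; the "query"
# key for the current message is an assumed name, used here for illustration.
request = {
    "history": [
        ["user", "Hi, who am I talking to?"],
        ["llm", "An LLM proxy service run by Neon."],
    ],
    "query": "Summarize that in one sentence.",
}
```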
4 changes: 2 additions & 2 deletions docker_overlay/etc/neon/diana.yaml
@@ -14,8 +14,8 @@ MQ:
     mq_handler:
       user: neon_api_utils
       password: Klatchat2021
-LLM_CHAT_GPT:
-  model: "gpt-3.5-turbo"
+LLM_CLAUDE:
+  model: "claude-2"
   role: "You are trying to give a short answer in less than 40 words."
   context_depth: 3
   max_tokens: 100
19 changes: 0 additions & 19 deletions neon_llm_chatgpt/default_config.json

This file was deleted.

File renamed without changes.
8 changes: 4 additions & 4 deletions neon_llm_chatgpt/__main__.py → neon_llm_claude/__main__.py
@@ -24,15 +24,15 @@
 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-from neon_llm_chatgpt.rmq import ChatgptMQ
+from neon_llm_claude.rmq import ClaudeMQ


 def main():
     # Run RabbitMQ
-    chatgptMQ = ChatgptMQ()
-    chatgptMQ.run(run_sync=False, run_consumers=True,
+    claudeMQ = ClaudeMQ()
+    claudeMQ.run(run_sync=False, run_consumers=True,
                   daemonize_consumers=True)
-    chatgptMQ.observer_thread.join()
+    claudeMQ.observer_thread.join()


 if __name__ == "__main__":
58 changes: 39 additions & 19 deletions neon_llm_chatgpt/chatgpt.py → neon_llm_claude/claude.py
@@ -24,29 +24,43 @@
 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
+
 import openai
 from openai.embeddings_utils import get_embeddings, distances_from_embeddings

 from typing import List, Dict
 from neon_llm_core.llm import NeonLLM


-class ChatGPT(NeonLLM):
+class Claude(NeonLLM):

     mq_to_llm_role = {
-        "user": "user",
-        "llm": "assistant"
+        "user": HUMAN_PROMPT,
+        "llm": AI_PROMPT
     }

     def __init__(self, config):
         super().__init__(config)
+        self._openai = None
+        self._context_depth = 0
+
         self.model_name = config["model"]
         self.role = config["role"]
         self.context_depth = config["context_depth"]
         self.max_tokens = config["max_tokens"]
         self.api_key = config["key"]
+        self.openai_key = config["openai_key"]
         self.warmup()

+    @property
+    def context_depth(self):
+        return self._context_depth
+
+    @context_depth.setter
+    def context_depth(self, value):
+        self._context_depth = value + value % 2
+
     @property
     def tokenizer(self) -> None:
         return self._tokenizer
@@ -56,12 +70,19 @@ def tokenizer_model_name(self) -> str:
         return ""

     @property
-    def model(self) -> openai:
+    def model(self) -> Anthropic:
         if self._model is None:
-            openai.api_key = self.api_key
-            self._model = openai
+            anthropic = Anthropic(api_key=self.api_key)
+            self._model = anthropic
         return self._model

+    @property
+    def openai(self) -> openai:
+        if self._openai is None:
+            openai.api_key = self.openai_key
+            self._openai = openai
+        return self._openai
+
     @property
     def llm_model_name(self) -> str:
         return self.model_name
@@ -72,6 +93,7 @@ def _system_prompt(self) -> str:

     def warmup(self):
         self.model
+        self.openai

     def get_sorted_answer_indexes(self, question: str, answers: List[str], persona: dict) -> List[int]:
         """
@@ -90,41 +112,39 @@ def get_sorted_answer_indexes(self, question: str, answers: List[str], persona: dict) -> List[int]:

     def _call_model(self, prompt: List[Dict[str, str]]) -> str:
         """
-        Wrapper for ChatGPT Model generation logic
+        Wrapper for Claude Model generation logic
         :param prompt: Input messages sequence
         :returns: Output text sequence generated by model
         """

-        response = openai.ChatCompletion.create(
+        response = self.model.completions.create(
             model=self.llm_model_name,
-            messages=prompt,
+            prompt=prompt,
             temperature=0,
-            max_tokens=self.max_tokens,
+            max_tokens_to_sample=self.max_tokens,
         )
-        text = response.choices[0].message['content']
+        text = response.completion

         return text

     def _assemble_prompt(self, message: str, chat_history: List[List[str]], persona: dict) -> List[Dict[str, str]]:
         """
         Assembles prompt engineering logic
         Setup Guidance:
-        https://platform.openai.com/docs/guides/gpt/chat-completions-api
+        https://docs.anthropic.com/claude/docs/introduction-to-prompt-design
         :param message: Incoming prompt
         :param chat_history: History of preceding conversation
         :returns: assembled prompt
         """
         system_prompt = persona.get("description", self._system_prompt)
-        messages = [
-            {"role": "system", "content": system_prompt},
-        ]
+        prompt = system_prompt
         # Context N messages
         for role, content in chat_history[-self.context_depth:]:
-            role_chatgpt = self.convert_role(role)
-            messages.append({"role": role_chatgpt, "content": content})
-        messages.append({"role": "user", "content": message})
-        return messages
+            role_claude = self.convert_role(role)
+            prompt += f"{role_claude} {content}"
+        prompt += f"{self.convert_role('user')} {message}{self.convert_role('llm')}"
+        return prompt

     def _score(self, prompt: str, targets: List[str], persona: dict) -> List[float]:
         """
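Taken together, the changes above swap ChatGPT's role-tagged message list for Claude's single-string prompt: conversation turns are joined with the SDK's `HUMAN_PROMPT`/`AI_PROMPT` markers, and the new `context_depth` setter rounds odd values up to even (`value + value % 2`) so the history window always holds complete user/assistant pairs. A standalone sketch of the same flow against the legacy `anthropic` completions API (illustrative; any names not in the diff are assumptions):

```python
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT  # "\n\nHuman:" / "\n\nAssistant:"


def assemble_claude_prompt(system_prompt: str, chat_history: list,
                           message: str, context_depth: int = 3) -> str:
    # Mirrors Claude._assemble_prompt: a single flat string, not a message list
    context_depth += context_depth % 2  # e.g. 3 -> 4, keeping Human/Assistant pairs intact
    markers = {"user": HUMAN_PROMPT, "llm": AI_PROMPT}
    prompt = system_prompt
    for role, content in chat_history[-context_depth:]:
        prompt += f"{markers[role]} {content}"
    # The trailing AI_PROMPT cues Claude to generate the assistant's next turn
    return f"{prompt}{HUMAN_PROMPT} {message}{AI_PROMPT}"


# Usage sketch, mirroring Claude._call_model (requires a real API key):
client = Anthropic(api_key="<anthropic key>")
response = client.completions.create(
    model="claude-2",
    prompt=assemble_claude_prompt(
        "You are trying to give a short answer in less than 40 words.",
        [["user", "Hello!"], ["llm", "Hello! How can I help?"]],
        "What does this service do?",
    ),
    temperature=0,
    max_tokens_to_sample=100,
)
print(response.completion)
```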
10 changes: 5 additions & 5 deletions neon_llm_chatgpt/rmq.py → neon_llm_claude/rmq.py
@@ -25,12 +25,12 @@
 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 from neon_llm_core.rmq import NeonLLMMQConnector

-from neon_llm_chatgpt.chatgpt import ChatGPT
+from neon_llm_claude.claude import Claude


-class ChatgptMQ(NeonLLMMQConnector):
+class ClaudeMQ(NeonLLMMQConnector):
     """
-    Module for processing MQ requests to ChatGPT
+    Module for processing MQ requests to Claude
     """

     def __init__(self):
@@ -39,12 +39,12 @@ def __init__(self):

     @property
     def name(self):
-        return "chat_gpt"
+        return "claude"

     @property
     def model(self):
         if self._model is None:
-            self._model = ChatGPT(self.model_config)
+            self._model = Claude(self.model_config)
         return self._model

     def warmup(self):
3 changes: 2 additions & 1 deletion requirements/requirements.txt
@@ -1,4 +1,5 @@
 # model
+anthropic
 openai[embeddings]~=0.27
 # networking
-neon_llm_core~=0.1.0
+neon_llm_core[chatbots]~=0.1.0,>=0.1.1a1
8 changes: 4 additions & 4 deletions setup.py
@@ -67,12 +67,12 @@ def get_requirements(requirements_filename: str):
             version = line.split("'")[1]

 setup(
-    name='neon-llm-chatgpt',
+    name='neon-llm-claude',
     version=version,
-    description='LLM service for Chat GPT',
+    description='LLM service for Anthropic Claude',
     long_description=long_description,
     long_description_content_type="text/markdown",
-    url='https://github.com/NeonGeckoCom/neon-llm-chatgpt',
+    url='https://github.com/NeonGeckoCom/neon-llm-claude',
     author='Neongecko',
     author_email='developers@neon.ai',
     license='BSD-3.0',
@@ -85,7 +85,7 @@ def get_requirements(requirements_filename: str):
     ],
     entry_points={
         'console_scripts': [
-            'neon-llm-chatgpt=neon_llm_chatgpt.__main__:main'
+            'neon-llm-claude=neon_llm_claude.__main__:main'
         ]
     }
 )
