Merge pull request #40 from dexhunter/feat/add_bardapi
feat: Add bard api (#36)
yuxiang-wu committed Jun 5, 2023
2 parents a823b10 + 04d7d07 commit 348a4ce
Showing 2 changed files with 82 additions and 2 deletions.
76 changes: 76 additions & 0 deletions chatarena/backends/bard.py
@@ -0,0 +1,76 @@
from typing import List
import os
import re
import logging
from tenacity import retry, stop_after_attempt, wait_random_exponential

from .base import IntelligenceBackend
from ..message import Message, SYSTEM_NAME as SYSTEM

try:
    import bardapi
except ImportError:
    is_bard_available = False
    logging.warning("bard package is not installed")
else:
    bard_api_key = os.environ.get('_BARD_API_KEY')
    if bard_api_key is None:
        logging.warning(
            "Bard API key is not set. Please set the environment variable _BARD_API_KEY")
        is_bard_available = False
    else:
        is_bard_available = True

DEFAULT_MAX_TOKENS = 4096


class Bard(IntelligenceBackend):
    """
    Interface to the Bard offered by Google.
    """
    stateful = False
    type_name = "bard"

    def __init__(self, max_tokens: int = DEFAULT_MAX_TOKENS, **kwargs):
        assert is_bard_available, "bard package is not installed or the API key is not set"
        super().__init__(max_tokens=max_tokens, **kwargs)

        self.max_tokens = max_tokens

        self.client = bardapi.core.Bard()

    @retry(stop=stop_after_attempt(6), wait=wait_random_exponential(min=1, max=60))
    def _get_response(self, prompt: str):
        response = self.client.get_answer(
            input_text=prompt,
        )

        response = response['content'].strip()
        return response

    def query(self, agent_name: str, role_desc: str, history_messages: List[Message], global_prompt: str = None,
              request_msg: Message = None, *args, **kwargs) -> str:
        """
        Format the input and call the Bard API.
        args:
            agent_name: the name of the agent
            role_desc: the description of the role of the agent
            history_messages: the history of the conversation, or the observation for the agent
            global_prompt: the global prompt describing the environment
            request_msg: the request from the system to guide the agent's next response
        """
        all_messages = [(SYSTEM, global_prompt), (SYSTEM, role_desc)] if global_prompt \
            else [(SYSTEM, role_desc)]

        for message in history_messages:
            all_messages.append((message.agent_name, message.content))
        if request_msg:
            all_messages.append((SYSTEM, request_msg.content))

        # The current Bard API does not support a system role, so the raw messages are dumped as the prompt
        response = self._get_response(str(all_messages), *args, **kwargs)

        # Remove the agent name if the response starts with it
        response = re.sub(rf"^\s*\[{agent_name}]:?", "", response).strip()

        return response
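
For orientation (not part of the diff), here is a minimal usage sketch of the new backend. It assumes chatarena is installed with the bardapi dependency, that _BARD_API_KEY is exported, and that Message accepts the agent_name, content, and turn fields shown below; the agent names and prompts are purely illustrative.

# Usage sketch (assumptions: bardapi installed, _BARD_API_KEY set,
# Message fields as documented in chatarena.message).
from chatarena.backends.bard import Bard
from chatarena.message import Message

backend = Bard()

history = [Message(agent_name="Moderator", content="Please introduce yourself.", turn=1)]

reply = backend.query(
    agent_name="Player 1",
    role_desc="You are a debater arguing for renewable energy.",
    history_messages=history,
    global_prompt="Two players debate energy policy in short turns.",
)
print(reply)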
8 changes: 6 additions & 2 deletions setup.py
@@ -8,17 +8,20 @@
"tenacity==8.2.2",
"rich==13.3.3",
"prompt_toolkit==3.0.38",

]
anthropic_requirements = ["anthropic>=0.2.8"]
cohere_requirements = ["cohere>=4.3.1"]
hf_requirements = ["transformers>=4.27.4"]
bard_requirements = ["bardapi==0.1.11"]
gradio_requirements = ["gradio==3.20.0"]
pettingzoo_requirements = ["pettingzoo==1.23.0", "chess==1.9.4"]

all_backends = anthropic_requirements + cohere_requirements + hf_requirements

all_backends = anthropic_requirements + cohere_requirements + hf_requirements + bard_requirements
all_envs = pettingzoo_requirements
all_requirements = anthropic_requirements + cohere_requirements + hf_requirements + \
    gradio_requirements + pettingzoo_requirements
    gradio_requirements + pettingzoo_requirements + bard_requirements

setup(
    name="chatarena",
@@ -41,6 +44,7 @@
        "anthropic": anthropic_requirements,
        "cohere": cohere_requirements,
        "huggingface": hf_requirements,
        "bard": bard_requirements,
        "pettingzoo": pettingzoo_requirements,
        "gradio": gradio_requirements,
        "all_backends": all_backends,
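
Because the setup.py change registers a "bard" key in extras_require, the new dependency should be installable alongside the package (assuming installation from PyPI) with pip install "chatarena[bard]", or via the all_backends extra, which now also pulls in bardapi.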
