Skip to content

Commit

Permalink
~ | Merge pull request #1081 from hlohaus/son
Browse files Browse the repository at this point in the history
Add Llama2 and NoowAi Provider
  • Loading branch information
xtekky authored Oct 15, 2023
2 parents 9ae3bc4 + c1adfbe commit 1168d66
Show file tree
Hide file tree
Showing 8 changed files with 158 additions and 10 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -325,12 +325,12 @@ asyncio.run(run_all())

##### Proxy Support:

All providers support specifying a proxy in the create function.
All providers support specifying a proxy in the create functions.

```py
import g4f

response = await g4f.ChatCompletion.create(
response = g4f.ChatCompletion.create(
model=g4f.models.default,
messages=[{"role": "user", "content": "Hello"}],
proxy="http://host:port",
Expand Down
2 changes: 1 addition & 1 deletion g4f/Provider/ChatForAi.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ async def create_async_generator(
**kwargs
},
"botSettings": {},
"prompt": prompt,
"prompt": prompt,
"messages": messages,
"timestamp": timestamp,
"sign": generate_signature(timestamp, prompt, conversation_id)
Expand Down
76 changes: 76 additions & 0 deletions g4f/Provider/Llama2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider

models = {
"7B": {"name": "Llama 2 7B", "version": "d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381", "shortened":"7B"},
"13B": {"name": "Llama 2 13B", "version": "9dff94b1bed5af738655d4a7cbcdcde2bd503aa85c94334fe1f42af7f3dd5ee3", "shortened":"13B"},
"70B": {"name": "Llama 2 70B", "version": "2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf", "shortened":"70B"},
"Llava": {"name": "Llava 13B", "version": "6bc1c7bb0d2a34e413301fee8f7cc728d2d4e75bfab186aa995f63292bda92fc", "shortened":"Llava"}
}

class Llama2(AsyncGeneratorProvider):
    """Async streaming provider backed by the www.llama2.ai chat API."""
    url = "https://www.llama2.ai"
    # NOTE(review): this provider serves Llama 2 models, not GPT-3.5 —
    # confirm this capability flag is intentional.
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text chunks from llama2.ai.

        Args:
            model: A key of the module-level ``models`` dict
                ("7B", "13B", "70B", "Llava"); falsy values default to "70B".
            messages: Chat history; rendered via ``format_prompt``.
            proxy: Optional proxy URL forwarded to aiohttp.
            **kwargs: Optional generation settings: ``system_message``,
                ``temperature``, ``top_p``, ``max_tokens``.

        Raises:
            ValueError: If ``model`` is not a supported key.
            aiohttp.ClientResponseError: On a non-2xx API response.
        """
        if not model:
            model = "70B"
        if model not in models:
            # Fixed grammar: "Model are not supported" -> "Model is not supported".
            raise ValueError(f"Model is not supported: {model}")
        version = models[model]["version"]
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "text/plain;charset=UTF-8",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "version": version,
                "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
                "temperature": kwargs.get("temperature", 0.75),
                "topP": kwargs.get("top_p", 0.9),
                "maxTokens": kwargs.get("max_tokens", 1024),
                "image": None
            }
            started = False
            async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # NOTE(review): decoding per network chunk may split a
                # multibyte UTF-8 sequence across chunks — confirm the API
                # only emits ASCII, or buffer with an incremental decoder.
                async for chunk in response.content.iter_any():
                    if not started:
                        # Drop leading whitespace the API prepends to the
                        # first chunk only.
                        chunk = chunk.lstrip()
                        started = True
                    yield chunk.decode()

def format_prompt(messages: Messages):
    """Render a chat history as a Llama-style prompt string.

    User turns are wrapped in ``[INST]...[/INST]`` tags; all other turns
    (system/assistant) are passed through verbatim. Turns are joined with
    newlines.
    """
    rendered = []
    for message in messages:
        content = message["content"]
        if message["role"] == "user":
            rendered.append(f"[INST]{content}[/INST]")
        else:
            rendered.append(content)
    return "\n".join(rendered)
66 changes: 66 additions & 0 deletions g4f/Provider/NoowAi.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
from __future__ import annotations

import random, string, json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider


class NoowAi(AsyncGeneratorProvider):
    """Async streaming provider backed by the noowai.com mwai chat endpoint."""
    url = "https://noowai.com"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text chunks from the noowai.com SSE stream.

        Args:
            model: Ignored by this provider (the server picks the model).
            messages: Chat history sent verbatim; the last message's content
                is also sent as ``newMessage``.
            proxy: Optional proxy URL forwarded to aiohttp.
            **kwargs: Unused.

        Raises:
            RuntimeError: If a ``data:`` line is not valid JSON or lacks a
                ``type`` field.
            aiohttp.ClientResponseError: On a non-2xx API response.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "noowai.com",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "botId": "default",
                "customId": "d49bc3670c3d858458576d75c8ea0f5d",
                "session": "N/A",
                "chatId": random_string(),
                "contextId": 25,
                "messages": messages,
                "newMessage": messages[-1]["content"],
                "stream": True
            }
            async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    # Server-sent events: payload lines start with "data: ".
                    if not line.startswith(b"data: "):
                        continue
                    # Narrow handling instead of a bare `except:` (which also
                    # swallowed CancelledError) and `assert` (stripped under -O);
                    # the RuntimeError contract for broken lines is preserved.
                    try:
                        event = json.loads(line[6:])
                    except json.JSONDecodeError as err:
                        raise RuntimeError(f"Broken line: {line.decode()}") from err
                    if "type" not in event:
                        raise RuntimeError(f"Broken line: {line.decode()}")
                    if event["type"] == "live":
                        yield event["data"]
                    elif event["type"] == "end":
                        break

def random_string(length: int = 10):
    """Return a random identifier of *length* lowercase letters and digits."""
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choices(alphabet, k=length))
7 changes: 6 additions & 1 deletion g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,10 @@
from .GptForLove import GptForLove
from .GptGo import GptGo
from .GptGod import GptGod
from .H2o import H2o
from .Liaobots import Liaobots
from .Llama2 import Llama2
from .Myshell import Myshell
from .NoowAi import NoowAi
from .Opchatgpts import Opchatgpts
from .Phind import Phind
from .Vercel import Vercel
Expand Down Expand Up @@ -82,9 +83,11 @@ class ProviderUtils:
'HuggingChat': HuggingChat,
'Komo': Komo,
'Liaobots': Liaobots,
'Llama2': Llama2,
'Lockchat': Lockchat,
'MikuChat': MikuChat,
'Myshell': Myshell,
'NoowAi': NoowAi,
'Opchatgpts': Opchatgpts,
'OpenAssistant': OpenAssistant,
'OpenaiChat': OpenaiChat,
Expand Down Expand Up @@ -148,8 +151,10 @@ class ProviderUtils:
'H2o',
'HuggingChat',
'Liaobots',
'Llama2',
'Lockchat',
'Myshell',
'NoowAi',
'Opchatgpts',
'Raycast',
'OpenaiChat',
Expand Down
6 changes: 2 additions & 4 deletions g4f/Provider/H2o.py → g4f/Provider/deprecated/H2o.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,12 @@

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, format_prompt


class H2o(AsyncGeneratorProvider):
url = "https://gpt-gm.h2o.ai"
working = False
model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"

@classmethod
Expand Down Expand Up @@ -86,7 +85,6 @@ async def create_async_generator(
async with session.delete(
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
json=data
) as response:
response.raise_for_status()

Expand Down
3 changes: 2 additions & 1 deletion g4f/Provider/deprecated/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,5 @@
from .V50 import V50
from .FastGpt import FastGpt
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
from .Vitalentum import Vitalentum
from .H2o import H2o
4 changes: 3 additions & 1 deletion g4f/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
Yqcloud,
Myshell,
FreeGpt,
NoowAi,
Vercel,
Aichat,
GPTalk,
Expand Down Expand Up @@ -51,8 +52,9 @@ class Model:
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
AiAsk, Aichat, ChatgptDemo, FreeGpt, GptGo, Liaobots, You,
AiAsk, Aichat, ChatgptDemo, FreeGpt, Liaobots, You,
GPTalk, ChatgptLogin, GptChatly, GptForLove, Opchatgpts,
NoowAi,
])
)

Expand Down

0 comments on commit 1168d66

Please sign in to comment.