Merge pull request #2246 from kqlio67/main
Update provider ecosystem and enhance functionality
xtekky authored Sep 29, 2024
2 parents f13b214 + 58db9e0 commit 0deb0f6
Showing 61 changed files with 2,007 additions and 1,788 deletions.
139 changes: 1 addition & 138 deletions README.md

Large diffs are not rendered by default.

171 changes: 171 additions & 0 deletions docs/providers-and-models.md

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions etc/unittest/async_client.py
@@ -32,13 +32,13 @@ async def test_max_tokens(self):
async def test_max_stream(self):
client = AsyncClient(provider=YieldProviderMock)
messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
response = client.chat.completions.create(messages, "Hello", stream=True)
response = await client.chat.completions.create(messages, "Hello", stream=True)
async for chunk in response:
self.assertIsInstance(chunk, ChatCompletionChunk)
if chunk.choices[0].delta.content is not None:
self.assertIsInstance(chunk.choices[0].delta.content, str)
messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]]
response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
response = await client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
response = [chunk async for chunk in response]
self.assertEqual(len(response), 3)
for chunk in response:
@@ -53,4 +53,4 @@ async def test_stop(self):
self.assertEqual("How are you?", response.choices[0].message.content)

if __name__ == '__main__':
unittest.main()
unittest.main()
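
The test change above reflects the async client contract: `create(...)` must now be awaited even when `stream=True`, and the awaited result is an async iterator of `ChatCompletionChunk` objects. A minimal sketch of that calling pattern follows; the import path and the positional `(messages, model)` argument order are taken from the test, while the model name and provider selection are illustrative assumptions rather than part of this diff.

```python
import asyncio
from g4f.client import AsyncClient  # same client class the unit test exercises

async def main() -> None:
    client = AsyncClient()  # the test injects a mock provider; a real provider is assumed here
    # create() is awaited first; with stream=True the awaited result is an async iterator of chunks
    response = await client.chat.completions.create(
        [{"role": "user", "content": "How are you?"}],  # messages (positional, as in the test)
        "gpt-3.5-turbo",                                 # model name is illustrative
        stream=True,
    )
    async for chunk in response:
        if chunk.choices[0].delta.content is not None:
            print(chunk.choices[0].delta.content, end="")

if __name__ == "__main__":
    asyncio.run(main())
```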
4 changes: 2 additions & 2 deletions g4f/Provider/AI365VIP.py
@@ -16,11 +16,11 @@ class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
'gpt-3.5-turbo-16k',
'gpt-4o',
'claude-3-haiku-20240307',
]
model_aliases = {
"claude-3-haiku": "claude-3-haiku-20240307",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k",
}

@classmethod
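
For context on how entries like the `model_aliases` mapping above are used: a requested short name is translated to the provider's concrete model id before the request is built. The resolution itself lives in `ProviderModelMixin` and is not part of this diff, so the helper below (`resolve_model`) is a hypothetical approximation of that behaviour, shown only to illustrate the updated AI365VIP data.

```python
# Hypothetical sketch of alias resolution; the real logic lives in ProviderModelMixin.
def resolve_model(model: str, default_model: str, models: list[str], aliases: dict[str, str]) -> str:
    if not model:
        return default_model          # no model requested -> provider default
    if model in aliases:
        return aliases[model]         # short alias -> concrete model id
    if model in models:
        return model                  # already a concrete model id
    raise ValueError(f"Model is not supported: {model}")

# Using the post-change AI365VIP values from the diff above:
print(resolve_model(
    "claude-3-haiku",
    "gpt-3.5-turbo",
    ["gpt-3.5-turbo", "gpt-4o", "claude-3-haiku-20240307"],
    {"claude-3-haiku": "claude-3-haiku-20240307"},
))  # -> claude-3-haiku-20240307
```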
76 changes: 76 additions & 0 deletions g4f/Provider/AIChatFree.py
@@ -0,0 +1,76 @@
from __future__ import annotations

import time
from hashlib import sha256

from aiohttp import BaseConnector, ClientSession

from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info/"
working = True
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'

@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs,
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Connection": "keep-alive",
"TE": "trailers",
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages": [
{
"role": "model" if message["role"] == "assistant" else "user",
"parts": [{"text": message["content"]}],
}
for message in messages
],
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(
f"{cls.url}/api/generate", json=data, proxy=proxy
) as response:
if response.status == 500:
if "Quota exceeded" in await response.text():
raise RateLimitError(
f"Response {response.status}: Rate limit reached"
)
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")


def generate_signature(time: int, text: str, secret: str = ""):
message = f"{time}:{text}:{secret}"
return sha256(message.encode()).hexdigest()
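
The new provider signs each request: `sign` is the SHA-256 hex digest of `"<timestamp>:<last user message>:<secret>"` with an empty secret, and `time` is a millisecond Unix timestamp. The sketch below reproduces that payload construction in isolation with a hard-coded message; the field names match the code above, and nothing beyond it is assumed.

```python
import time
from hashlib import sha256

def generate_signature(timestamp: int, text: str, secret: str = "") -> str:
    # sign = sha256("<time>:<text>:<secret>"), exactly as in AIChatFree above
    return sha256(f"{timestamp}:{text}:{secret}".encode()).hexdigest()

timestamp = int(time.time() * 1e3)  # milliseconds, matching the provider
last_user_message = "Hello"         # hard-coded example message
payload = {
    "messages": [{"role": "user", "parts": [{"text": last_user_message}]}],
    "time": timestamp,
    "pass": None,
    "sign": generate_signature(timestamp, last_user_message),
}
print(payload["sign"])
```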
121 changes: 54 additions & 67 deletions g4f/Provider/Airforce.py
@@ -1,20 +1,17 @@
from __future__ import annotations

from aiohttp import ClientSession, ClientResponseError
from urllib.parse import urlencode
import json
import io
import asyncio

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, is_accepted_format
from ..image import ImageResponse
from .helper import format_prompt
from ..errors import ResponseStatusError

class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
text_api_endpoint = "https://api.airforce/chat/completions"
image_api_endpoint = "https://api.airforce/v1/imagine2"
image_api_endpoint = "https://api.airforce/imagine2"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
@@ -25,53 +22,38 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
text_models = [
# Open source models
'llama-2-13b-chat',

'llama-3-70b-chat',
'llama-3-70b-chat-turbo',
'llama-3-70b-chat-lite',

'llama-3-8b-chat',
'llama-3-8b-chat-turbo',
'llama-3-8b-chat-lite',

'llama-3.1-405b-turbo',
'llama-3.1-70b-turbo',
'llama-3.1-8b-turbo',

'LlamaGuard-2-8b',
'Llama-Guard-7b',
'Meta-Llama-Guard-3-8B',

'Mixtral-8x7B-Instruct-v0.1',
'Mixtral-8x22B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.1',
'Mistral-7B-Instruct-v0.2',
'Mistral-7B-Instruct-v0.3',

'Qwen1.5-72B-Chat',
'Qwen1.5-110B-Chat',
'Qwen2-72B-Instruct',

'gemma-2b-it',
'gemma-2-9b-it',
'gemma-2-27b-it',

'dbrx-instruct',

'deepseek-llm-67b-chat',

'Nous-Hermes-2-Mixtral-8x7B-DPO',
'Nous-Hermes-2-Yi-34B',

'WizardLM-2-8x22B',

'SOLAR-10.7B-Instruct-v1.0',

'StripedHyena-Nous-7B',

'sparkdesk',


# Other models
'chatgpt-4o-latest',
'gpt-4',
@@ -85,18 +67,20 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
'gpt-3.5-turbo-16k',
'gpt-3.5-turbo-0613',
'gpt-3.5-turbo-16k-0613',

'gemini-1.5-flash',
'gemini-1.5-pro',
]

image_models = [
'flux',
'flux-realism',
'flux-anime',
'flux-3d',
'flux-disney',
'flux-pixel',
'flux-4o',
'any-dark',
'dall-e-3',
]

models = [
@@ -106,61 +90,47 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = {
# Open source models
"llama-2-13b": "llama-2-13b-chat",

"llama-3-70b": "llama-3-70b-chat",
"llama-3-70b": "llama-3-70b-chat-turbo",
"llama-3-70b": "llama-3-70b-chat-lite",

"llama-3-8b": "llama-3-8b-chat",
"llama-3-8b": "llama-3-8b-chat-turbo",
"llama-3-8b": "llama-3-8b-chat-lite",

"llama-3.1-405b": "llama-3.1-405b-turbo",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",

"mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
"mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.2",
"mistral-7b": "Mistral-7B-Instruct-v0.3",

"mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",

"qwen-1-5-72b": "Qwen1.5-72B-Chat",
"qwen-1_5-110b": "Qwen1.5-110B-Chat",
"qwen-1.5-72b": "Qwen1.5-72B-Chat",
"qwen-1.5-110b": "Qwen1.5-110B-Chat",
"qwen-2-72b": "Qwen2-72B-Instruct",

"gemma-2b": "gemma-2b-it",
"gemma-2b-9b": "gemma-2-9b-it",
"gemma-2b-27b": "gemma-2-27b-it",

"deepseek": "deepseek-llm-67b-chat",

"yi-34b": "Nous-Hermes-2-Yi-34B",

"wizardlm-2-8x22b": "WizardLM-2-8x22B",

"solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",

"sh-n-7b": "StripedHyena-Nous-7B",

"sparkdesk-v1.1": "sparkdesk",


# Other models
"gpt-4o": "chatgpt-4o-latest",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",

"gpt-3.5-turbo": "gpt-3.5-turbo-0125",
"gpt-3.5-turbo": "gpt-3.5-turbo-1106",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k",
"gpt-3.5-turbo": "gpt-3.5-turbo-0613",
"gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",


"gemini-flash": "gemini-1.5-flash",
"gemini-pro": "gemini-1.5-pro",

# Image models
"dalle-3": "dall-e-3",
}

@classmethod
@@ -178,16 +148,20 @@ async def create_async_generator(
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": "https://api.airforce",
"sec-ch-ua": '"Chromium";v="128", "Not(A:Brand";v="24"',
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
"authorization": "Bearer null",
"cache-control": "no-cache",
"pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://llmplayground.net/",
"sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
}


if model in cls.image_models:
async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
yield item
@@ -197,31 +171,44 @@ async def create_async_generator(

@classmethod
async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
async with ClientSession(headers=headers) as session:
async with ClientSession() as session:
data = {
"messages": [{"role": "user", "content": format_prompt(messages)}],
"messages": [{"role": "user", "content": message['content']} for message in messages],
"model": model,
"max_tokens": kwargs.get('max_tokens', 4096),
"temperature": kwargs.get('temperature', 1),
"top_p": kwargs.get('top_p', 1),
"stream": True
}

async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: "):
try:
data = json.loads(line[6:])
if 'choices' in data and len(data['choices']) > 0:
delta = data['choices'][0].get('delta', {})
if 'content' in delta:
yield delta['content']
except json.JSONDecodeError:
continue
elif line == "data: [DONE]":
break

try:
async with session.post(cls.text_api_endpoint, json=data, headers=headers, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: "):
if line == "data: [DONE]":
break
try:
data = json.loads(line[6:])
if 'choices' in data and len(data['choices']) > 0:
delta = data['choices'][0].get('delta', {})
if 'content' in delta:
content = delta['content']
if "One message exceeds the 1000chars per message limit" in content:
raise ResponseStatusError(
"Message too long",
400,
"Please try a shorter message."
)
yield content
except json.JSONDecodeError:
continue
except ResponseStatusError as e:
raise e
except Exception as e:
raise ResponseStatusError(str(e), 500, "An unexpected error occurred")

@classmethod
async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
@@ -246,10 +233,10 @@ async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
else:
try:
text = content.decode('utf-8', errors='ignore')
yield f"Error: {text}"
raise ResponseStatusError("Image generation failed", response.status, text)
except Exception as decode_error:
yield f"Error: Unable to decode response - {str(decode_error)}"
raise ResponseStatusError("Decoding error", 500, str(decode_error))
except ClientResponseError as e:
yield f"Error: HTTP {e.status}: {e.message}"
raise ResponseStatusError(f"HTTP {e.status}", e.status, e.message)
except Exception as e:
yield f"Unexpected error: {str(e)}"
raise ResponseStatusError("Unexpected error", 500, str(e))
4 changes: 2 additions & 2 deletions g4f/Provider/Aura.py
@@ -9,7 +9,7 @@

class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"
working = True
working = False

@classmethod
async def create_async_generator(
@@ -46,4 +46,4 @@ async def create_async_generator(
async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
yield chunk.decode(error="ignore")
yield chunk.decode(error="ignore")