Merge pull request #2186 from zukixa/gg
8 providers improved
xtekky authored Aug 30, 2024
2 parents 3b570bf + bda2d67 commit c702f54
Showing 26 changed files with 411 additions and 393 deletions.
38 changes: 21 additions & 17 deletions g4f/Provider/AI365VIP.py
@@ -35,31 +35,35 @@ async def create_async_generator(
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": "https://chat.ai365vip.com",
"priority": "u=1, i",
"referer": "https://chat.ai365vip.com/en",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"origin": cls.url,
"referer": f"{cls.url}/en",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-arch": '"x86"',
"sec-ch-ua-bitness": '"64"',
"sec-ch-ua-full-version": '"127.0.6533.119"',
"sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-model": '""',
"sec-ch-ua-platform": '"Linux"',
"sec-ch-ua-platform-version": '"4.19.276"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
}
async with ClientSession(headers=headers) as session:
data = {
"model": {
"id": model,
"name": {
"gpt-3.5-turbo": "GPT-3.5",
"claude-3-haiku-20240307": "claude-3-haiku",
"gpt-4o": "GPT-4O"
}.get(model, model),
},
"messages": [{"role": "user", "content": format_prompt(messages)}],
"prompt": "You are a helpful assistant.",
}
"model": {
"id": model,
"name": "GPT-3.5",
"maxLength": 3000,
"tokenLimit": 2048
},
"messages": [{"role": "user", "content": format_prompt(messages)}],
"key": "",
"prompt": "You are a helpful assistant.",
"temperature": 1
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
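For reference, a provider like this is driven through its `create_async_generator` classmethod; a minimal usage sketch, assuming the usual g4f provider re-export (the message content is illustrative, and chunks are yielded as the streaming response body is read):

import asyncio

from g4f.Provider import AI365VIP  # assumes the usual g4f provider re-export

async def main():
    # Chunks stream back as the provider reads the POST response body.
    async for chunk in AI365VIP.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello."}],
    ):
        print(chunk, end="")

asyncio.run(main())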
64 changes: 64 additions & 0 deletions g4f/Provider/AiChatOnline.py
@@ -0,0 +1,64 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_random_string, format_prompt

class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
site_url = "https://aichatonline.org"
url = "https://aichatonlineorg.erweima.ai"
api_endpoint = "/aichatonline/api/chat/gpt"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
default_model = 'gpt-4o-mini'
supports_message_history = False

@classmethod
async def grab_token(
cls,
session: ClientSession,
proxy: str
):
async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
response.raise_for_status()
return (await response.json())['data']

@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/chatgpt/chat/",
"Content-Type": "application/json",
"Origin": cls.url,
"Alt-Used": "aichatonline.org",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"TE": "trailers"
}
async with ClientSession(headers=headers) as session:
data = {
"conversationId": get_random_string(),
"prompt": format_prompt(messages),
}
headers['UniqueId'] = await cls.grab_token(session, proxy)
async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
try:
yield json.loads(chunk)['data']['message']
except:
continue
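The request flow in this new provider is a two-step handshake: `grab_token` fetches a per-session UniqueId, which is then sent as a `UniqueId` header on the chat POST. A condensed sketch of that flow using the same endpoints as above (the canvas and conversationId values are illustrative; the provider generates random strings for both):

import json
from aiohttp import ClientSession

BASE = "https://aichatonlineorg.erweima.ai"

async def chat(prompt: str, proxy: str = None) -> str:
    async with ClientSession() as session:
        # Step 1: fetch the per-session token.
        async with session.get(
            f"{BASE}/api/v1/user/getUniqueId?canvas=-abc123", proxy=proxy
        ) as response:
            response.raise_for_status()
            unique_id = (await response.json())["data"]

        # Step 2: POST the chat request with the token attached as a header.
        headers = {"UniqueId": unique_id, "Content-Type": "application/json"}
        data = {"conversationId": "conv-1", "prompt": prompt}
        parts = []
        async with session.post(
            f"{BASE}/aichatonline/api/chat/gpt",
            headers=headers, json=data, proxy=proxy,
        ) as response:
            response.raise_for_status()
            # Each streamed chunk is expected to be a standalone JSON object.
            async for chunk in response.content:
                try:
                    parts.append(json.loads(chunk)["data"]["message"])
                except (json.JSONDecodeError, KeyError):
                    continue
        return "".join(parts)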
4 changes: 2 additions & 2 deletions g4f/Provider/Aura.py
@@ -33,8 +33,8 @@ async def create_async_generator(
new_messages.append(message)
data = {
"model": {
"id": "openchat_v3.2_mistral",
"name": "OpenChat Aura",
"id": "openchat_3.6",
"name": "OpenChat 3.6 (latest)",
"maxLength": 24576,
"tokenLimit": max_tokens
},
22 changes: 18 additions & 4 deletions g4f/Provider/Blackbox.py
@@ -14,7 +14,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.blackbox.ai"
working = True
default_model = 'blackbox'

models = [
default_model,
"gemini-1.5-flash",
"llama-3.1-8b",
'llama-3.1-70b',
'llama-3.1-405b',
]
@classmethod
async def create_async_generator(
cls,
@@ -28,7 +34,8 @@ async def create_async_generator(
if image is not None:
messages[-1]["data"] = {
"fileText": image_name,
"imageBase64": to_data_uri(image)
"imageBase64": to_data_uri(image),
"title": str(uuid.uuid4())
}

headers = {
@@ -48,7 +55,13 @@ async def create_async_generator(
async with ClientSession(headers=headers) as session:
random_id = secrets.token_hex(16)
random_user_id = str(uuid.uuid4())

model_id_map = {
"blackbox": {},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
}
data = {
"messages": messages,
"id": random_id,
@@ -62,12 +75,13 @@ async def create_async_generator(
"webSearchMode": False,
"userSystemPrompt": "",
"githubToken": None,
"trendingAgentModel": model_id_map[model], # if you actually test this on the site, just ask each model "yo", weird behavior imo
"maxTokens": None
}

async with session.post(
f"{cls.url}/api/chat", json=data, proxy=proxy
) as response: # type: ClientResponse
) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
if chunk:
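Model selection here is table-driven: each supported name maps to the agent descriptor sent as `trendingAgentModel`. Note that the diff indexes the map directly, so an unlisted model name would raise `KeyError`; a small sketch of the lookup with an explicit fallback (the fallback is an assumption, not in the diff):

model_id_map = {
    "blackbox": {},
    "gemini-1.5-flash": {"mode": True, "id": "Gemini"},
    "llama-3.1-8b": {"mode": True, "id": "llama-3.1-8b"},
    "llama-3.1-70b": {"mode": True, "id": "llama-3.1-70b"},
    "llama-3.1-405b": {"mode": True, "id": "llama-3.1-405b"},
}

def trending_agent_model(model: str) -> dict:
    # Unknown names fall back to the default ("blackbox") descriptor.
    return model_id_map.get(model, model_id_map["blackbox"])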
7 changes: 3 additions & 4 deletions g4f/Provider/Chatgpt4Online.py
@@ -14,8 +14,8 @@ class Chatgpt4Online(AsyncGeneratorProvider):
working = True
supports_gpt_4 = True

async def get_nonce():
async with ClientSession() as session:
async def get_nonce(headers: dict) -> str:
async with ClientSession(headers=headers) as session:
async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
return (await response.json())["restNonce"]

@@ -42,9 +42,8 @@ async def create_async_generator(
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
"x-wp-nonce": await cls.get_nonce(),
}

headers['x-wp-nonce'] = await cls.get_nonce(headers)
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
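The nonce handshake now sends the same browser-like header set on both requests: `get_nonce` POSTs to the WordPress `start_session` endpoint with those headers, and the returned `restNonce` is added as `x-wp-nonce` before the chat session opens. A sketch of how the two steps combine (the `prepare_headers` helper is illustrative, not part of the diff):

from aiohttp import ClientSession

async def get_nonce(headers: dict) -> str:
    # The nonce request carries the same headers as the chat request.
    async with ClientSession(headers=headers) as session:
        async with session.post(
            "https://chatgpt4online.org/wp-json/mwai/v1/start_session"
        ) as response:
            return (await response.json())["restNonce"]

async def prepare_headers(headers: dict) -> dict:
    headers = dict(headers)  # avoid mutating the caller's dict
    headers["x-wp-nonce"] = await get_nonce(headers)
    return headers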
54 changes: 37 additions & 17 deletions g4f/Provider/ChatgptFree.py
@@ -1,29 +1,34 @@
from __future__ import annotations

import re

import json
import asyncio
from ..requests import StreamSession, raise_for_status
from ..typing import Messages
from .base_provider import AsyncProvider
from ..typing import Messages, AsyncGenerator
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

class ChatgptFree(AsyncProvider):
class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgptfree.ai"
supports_gpt_35_turbo = True
supports_gpt_4 = True
working = True
_post_id = None
_nonce = None
default_model = 'gpt-4o-mini-2024-07-18'
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
}

@classmethod
async def create_async(
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
cookies: dict = None,
**kwargs
) -> str:
) -> AsyncGenerator[str, None]:
headers = {
'authority': 'chatgptfree.ai',
'accept': '*/*',
@@ -38,7 +43,6 @@ async def create_async(
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}

async with StreamSession(
headers=headers,
cookies=cookies,
@@ -49,19 +53,11 @@

if not cls._nonce:
async with session.get(f"{cls.url}/") as response:

await raise_for_status(response)
response = await response.text()

result = re.search(r'data-post-id="([0-9]+)"', response)
if not result:
raise RuntimeError("No post id found")
cls._post_id = result.group(1)

result = re.search(r'data-nonce="(.*?)"', response)
if result:
cls._nonce = result.group(1)

else:
raise RuntimeError("No nonce found")

@@ -74,6 +70,30 @@ async def create_async(
"message": prompt,
"bot_id": "0"
}

async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
await raise_for_status(response)
return (await response.json())["data"]
buffer = ""
async for line in response.iter_lines():
line = line.decode('utf-8').strip()
if line.startswith('data: '):
data = line[6:]
if data == '[DONE]':
break
try:
json_data = json.loads(data)
content = json_data['choices'][0]['delta'].get('content', '')
if content:
yield content
except json.JSONDecodeError:
continue
elif line:
buffer += line

if buffer:
try:
json_response = json.loads(buffer)
if 'data' in json_response:
yield json_response['data']
except json.JSONDecodeError:
print(f"Failed to decode final JSON. Buffer content: {buffer}")
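The rewritten response handling covers two server behaviors: an SSE stream of `data:` lines carrying OpenAI-style deltas, and a plain JSON body accumulated into `buffer`. The per-line logic, extracted as a standalone sketch under those assumptions:

import json
from typing import Optional

def extract_delta(line: bytes) -> Optional[str]:
    """Pull streamed content out of one SSE line, or return None."""
    text = line.decode("utf-8").strip()
    if not text.startswith("data: "):
        return None
    payload = text[len("data: "):]
    if payload == "[DONE]":
        return None
    try:
        content = json.loads(payload)["choices"][0]["delta"].get("content", "")
    except (json.JSONDecodeError, KeyError, IndexError):
        return None
    return content or None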
2 changes: 1 addition & 1 deletion g4f/Provider/DDG.py
@@ -25,7 +25,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-4o": "gpt-4o-mini",
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
"mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
}

# Obfuscated URLs and headers
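This one-character fix matters because `model_aliases` is a plain dict and lookups are case-sensitive: a request for `mixtral-8x7b` would never have matched the old `mixtral-8x7B` key. A minimal illustration of the resolution step (the helper name is illustrative; `ProviderModelMixin` supplies the real lookup):

model_aliases = {
    "gpt-4o": "gpt-4o-mini",
    "claude-3-haiku": "claude-3-haiku-20240307",
    "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
}

def resolve_model(model: str) -> str:
    # Case-sensitive exact match; unknown names pass through unchanged.
    return model_aliases.get(model, model)

assert resolve_model("mixtral-8x7b") == "mistralai/Mixtral-8x7B-Instruct-v0.1"
assert resolve_model("mixtral-8x7B") == "mixtral-8x7B"  # old key no longer matches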
6 changes: 1 addition & 5 deletions g4f/Provider/DeepInfra.py
@@ -11,11 +11,7 @@ class DeepInfra(Openai):
needs_auth = True
supports_stream = True
supports_message_history = True
default_model = "meta-llama/Meta-Llama-3-70B-Instruct"
default_vision_model = "llava-hf/llava-1.5-7b-hf"
model_aliases = {
'dbrx-instruct': 'databricks/dbrx-instruct',
}
default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"

@classmethod
def get_models(cls):
