Commit

fix for 500 Internal Server Error xtekky#2199; [Request] Blackbox provider now supports Gemini and LLaMa 3.1 models xtekky#2198, with some stuff from xtekky#2196
zukixa committed Aug 29, 2024
1 parent a338ed5 commit bda2d67
Showing 21 changed files with 366 additions and 297 deletions.
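For orientation, here is a minimal usage sketch (not part of the commit) of the headline change: calling the Blackbox provider with one of its newly supported models. It assumes g4f's top-level `ChatCompletion` API as it existed around this release; the model name and prompt are illustrative.

```python
# Hedged sketch, not from the commit: exercising the Blackbox models
# added in xtekky#2198; model ids are taken from the diff below.
import g4f

response = g4f.ChatCompletion.create(
    model="llama-3.1-70b",  # also new: "gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-405b"
    provider=g4f.Provider.Blackbox,
    messages=[{"role": "user", "content": "yo"}],
)
print(response)
```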
38 changes: 21 additions & 17 deletions g4f/Provider/AI365VIP.py
@@ -35,31 +35,35 @@ async def create_async_generator(
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
             "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://chat.ai365vip.com",
-            "priority": "u=1, i",
-            "referer": "https://chat.ai365vip.com/en",
-            "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+            "origin": cls.url,
+            "referer": f"{cls.url}/en",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-arch": '"x86"',
+            "sec-ch-ua-bitness": '"64"',
+            "sec-ch-ua-full-version": '"127.0.6533.119"',
+            "sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
             "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-model": '""',
             "sec-ch-ua-platform": '"Linux"',
+            "sec-ch-ua-platform-version": '"4.19.276"',
             "sec-fetch-dest": "empty",
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
         }
         async with ClientSession(headers=headers) as session:
             data = {
-                "model": {
-                    "id": model,
-                    "name": {
-                        "gpt-3.5-turbo": "GPT-3.5",
-                        "claude-3-haiku-20240307": "claude-3-haiku",
-                        "gpt-4o": "GPT-4O"
-                    }.get(model, model),
-                },
-                "messages": [{"role": "user", "content": format_prompt(messages)}],
-                "prompt": "You are a helpful assistant.",
-            }
+                "model": {
+                    "id": model,
+                    "name": "GPT-3.5",
+                    "maxLength": 3000,
+                    "tokenLimit": 2048
+                },
+                "messages": [{"role": "user", "content": format_prompt(messages)}],
+                "key": "",
+                "prompt": "You are a helpful assistant.",
+                "temperature": 1
+            }
             async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 async for chunk in response.content:
64 changes: 64 additions & 0 deletions g4f/Provider/AiChatOnline.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_random_string, format_prompt
+
+class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
+    site_url = "https://aichatonline.org"
+    url = "https://aichatonlineorg.erweima.ai"
+    api_endpoint = "/aichatonline/api/chat/gpt"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    default_model = 'gpt-4o-mini'
+    supports_message_history = False
+
+    @classmethod
+    async def grab_token(
+        cls,
+        session: ClientSession,
+        proxy: str
+    ):
+        async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
+            response.raise_for_status()
+            return (await response.json())['data']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/chatgpt/chat/",
+            "Content-Type": "application/json",
+            "Origin": cls.url,
+            "Alt-Used": "aichatonline.org",
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "TE": "trailers"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "conversationId": get_random_string(),
+                "prompt": format_prompt(messages),
+            }
+            headers['UniqueId'] = await cls.grab_token(session, proxy)
+            async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    try:
+                        yield json.loads(chunk)['data']['message']
+                    except:
+                        continue
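The token handshake this new provider performs is compact enough to show standalone. Below is a hedged sketch of the two-step flow implied by the code above; the endpoints come from the diff, while the `canvas` and payload values are illustrative and the live site may additionally require the full browser-style headers shown above.

```python
import asyncio
from aiohttp import ClientSession

async def demo() -> None:
    base = "https://aichatonlineorg.erweima.ai"
    async with ClientSession() as session:
        # Step 1: fetch a per-session token (the canvas value is an arbitrary string).
        async with session.get(f"{base}/api/v1/user/getUniqueId?canvas=-demo123") as resp:
            token = (await resp.json())["data"]
        # Step 2: send the chat request, echoing the token back in the "UniqueId" header.
        payload = {"conversationId": "demo123", "prompt": "Hello"}
        async with session.post(f"{base}/aichatonline/api/chat/gpt",
                                headers={"UniqueId": token}, json=payload) as resp:
            async for chunk in resp.content:
                print(chunk.decode(errors="ignore"), end="")

# asyncio.run(demo())  # live network call; shown for illustration only
```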
20 changes: 17 additions & 3 deletions g4f/Provider/Blackbox.py
@@ -14,7 +14,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
     working = True
     default_model = 'blackbox'
-
+    models = [
+        default_model,
+        "gemini-1.5-flash",
+        "llama-3.1-8b",
+        'llama-3.1-70b',
+        'llama-3.1-405b',
+    ]
     @classmethod
     async def create_async_generator(
         cls,
@@ -28,7 +34,8 @@ async def create_async_generator(
         if image is not None:
             messages[-1]["data"] = {
                 "fileText": image_name,
-                "imageBase64": to_data_uri(image)
+                "imageBase64": to_data_uri(image),
+                "title": str(uuid.uuid4())
             }

         headers = {
@@ -48,7 +55,13 @@ async def create_async_generator(
         async with ClientSession(headers=headers) as session:
             random_id = secrets.token_hex(16)
             random_user_id = str(uuid.uuid4())
-
+            model_id_map = {
+                "blackbox": {},
+                "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+                "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+                'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+                'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}
+            }
             data = {
                 "messages": messages,
                 "id": random_id,
@@ -62,6 +75,7 @@ async def create_async_generator(
                 "webSearchMode": False,
                 "userSystemPrompt": "",
                 "githubToken": None,
+                "trendingAgentModel": model_id_map[model], # if you actually test this on the site, just ask each model "yo", weird behavior imo
                 "maxTokens": None
             }
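Picking up the committer's inline note: the new mapping decides what lands in the request body's trendingAgentModel field. A standalone illustration follows; the dict literal is copied from the hunk, while the loop around it is only for demonstration.

```python
# Illustration of the new Blackbox model routing; map values copied from the hunk.
model_id_map = {
    "blackbox": {},  # empty dict selects Blackbox's own model
    "gemini-1.5-flash": {"mode": True, "id": "Gemini"},
    "llama-3.1-8b": {"mode": True, "id": "llama-3.1-8b"},
    "llama-3.1-70b": {"mode": True, "id": "llama-3.1-70b"},
    "llama-3.1-405b": {"mode": True, "id": "llama-3.1-405b"},
}

for model, routing in model_id_map.items():
    print(f"{model!r} -> trendingAgentModel={routing}")
```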
7 changes: 3 additions & 4 deletions g4f/Provider/Chatgpt4Online.py
@@ -14,8 +14,8 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     working = True
     supports_gpt_4 = True

-    async def get_nonce():
-        async with ClientSession() as session:
+    async def get_nonce(headers: dict) -> str:
+        async with ClientSession(headers=headers) as session:
             async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
                 return (await response.json())["restNonce"]

@@ -42,9 +42,8 @@ async def create_async_generator(
             "sec-fetch-mode": "cors",
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
-            "x-wp-nonce": await cls.get_nonce(),
         }
-
+        headers['x-wp-nonce'] = await cls.get_nonce(headers)
         async with ClientSession(headers=headers) as session:
             prompt = format_prompt(messages)
             data = {
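The fix here is partly an ordering one: the WordPress nonce is now requested with the same browser-like headers, then attached to them before the chat call. A hedged sketch of the resulting sequence, with the header set abbreviated (the full set is in the diff) and no actual chat request wired up:

```python
import asyncio
from aiohttp import ClientSession

async def get_nonce(headers: dict) -> str:
    # Same endpoint as the provider; returns the REST nonce for later requests.
    async with ClientSession(headers=headers) as session:
        async with session.post("https://chatgpt4online.org/wp-json/mwai/v1/start_session") as resp:
            return (await resp.json())["restNonce"]

async def demo() -> None:
    headers = {"user-agent": "Mozilla/5.0 ..."}  # abbreviated for the sketch
    headers["x-wp-nonce"] = await get_nonce(headers)
    # ...the chat POST would follow, carrying both headers...

# asyncio.run(demo())  # live network call; shown for illustration only
```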
59 changes: 37 additions & 22 deletions g4f/Provider/ChatgptFree.py
@@ -2,29 +2,33 @@

 import re
 import json
-
+import asyncio
 from ..requests import StreamSession, raise_for_status
-from ..typing import Messages
-from .base_provider import AsyncProvider
+from ..typing import Messages, AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt

-class ChatgptFree(AsyncProvider):
+class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatgptfree.ai"
-    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
     working = True
     _post_id = None
     _nonce = None
+    default_model = 'gpt-4o-mini-2024-07-18'
+    model_aliases = {
+        "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+    }

     @classmethod
-    async def create_async(
+    async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         proxy: str = None,
         timeout: int = 120,
         cookies: dict = None,
         **kwargs
-    ) -> str:
+    ) -> AsyncGenerator[str, None]:
         headers = {
             'authority': 'chatgptfree.ai',
             'accept': '*/*',
@@ -39,7 +43,6 @@ async def create_async(
             'sec-fetch-site': 'same-origin',
             'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
         }
-
         async with StreamSession(
             headers=headers,
             cookies=cookies,
@@ -50,19 +53,11 @@

             if not cls._nonce:
                 async with session.get(f"{cls.url}/") as response:
-
                     await raise_for_status(response)
                     response = await response.text()
-
                     result = re.search(r'data-post-id="([0-9]+)"', response)
                     if not result:
                         raise RuntimeError("No post id found")
                     cls._post_id = result.group(1)
-
                     result = re.search(r'data-nonce="(.*?)"', response)
                     if result:
                         cls._nonce = result.group(1)
-
                     else:
                         raise RuntimeError("No nonce found")
@@ -75,10 +70,30 @@
                 "message": prompt,
                 "bot_id": "0"
             }
-            async with session.get(f"{cls.url}/wp-admin/admin-ajax.php", params=data, cookies=cookies) as response:
+            async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
                 await raise_for_status(response)
-                full_answer = ""
-                for line in ((await response.text()).splitlines())[:-1]:
-                    if line.startswith("data:") and "[DONE]" not in line:
-                        full_answer += json.loads(line[5:])['choices'][0]['delta'].get('content', "")
-                return full_answer
+                buffer = ""
+                async for line in response.iter_lines():
+                    line = line.decode('utf-8').strip()
+                    if line.startswith('data: '):
+                        data = line[6:]
+                        if data == '[DONE]':
+                            break
+                        try:
+                            json_data = json.loads(data)
+                            content = json_data['choices'][0]['delta'].get('content', '')
+                            if content:
+                                yield content
+                        except json.JSONDecodeError:
+                            continue
+                    elif line:
+                        buffer += line
+
+                if buffer:
+                    try:
+                        json_response = json.loads(buffer)
+                        if 'data' in json_response:
+                            yield json_response['data']
+                    except json.JSONDecodeError:
+                        print(f"Failed to decode final JSON. Buffer content: {buffer}")
2 changes: 1 addition & 1 deletion g4f/Provider/DDG.py
@@ -25,7 +25,7 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
         "gpt-4o": "gpt-4o-mini",
         "claude-3-haiku": "claude-3-haiku-20240307",
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
     }

     # Obfuscated URLs and headers
6 changes: 1 addition & 5 deletions g4f/Provider/DeepInfra.py
@@ -11,11 +11,7 @@ class DeepInfra(Openai):
     needs_auth = True
     supports_stream = True
     supports_message_history = True
-    default_model = "meta-llama/Meta-Llama-3-70B-Instruct"
-    default_vision_model = "llava-hf/llava-1.5-7b-hf"
-    model_aliases = {
-        'dbrx-instruct': 'databricks/dbrx-instruct',
-    }
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"

     @classmethod
     def get_models(cls):
20 changes: 20 additions & 0 deletions g4f/Provider/FreeChatgpt.py
@@ -16,13 +16,31 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
         'gpt-3.5-turbo',
         'SparkDesk-v1.1',
         'deepseek-coder',
+        '@cf/qwen/qwen1.5-14b-chat-awq',
         'deepseek-chat',
         'Qwen2-7B-Instruct',
         'glm4-9B-chat',
         'chatglm3-6B',
         'Yi-1.5-9B-Chat',
     ]
+    model_aliases = {
+        "qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq",
+        "sparkdesk-v1.1": "SparkDesk-v1.1",
+        "qwen2-7b": "Qwen2-7B-Instruct",
+        "glm4-9b": "glm4-9B-chat",
+        "chatglm3-6b": "chatglm3-6B",
+        "yi-1.5-9b": "Yi-1.5-9B-Chat",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model.lower() in cls.model_aliases:
+            return cls.model_aliases[model.lower()]
+        else:
+            return cls.default_model
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -46,6 +64,7 @@ async def create_async_generator(
             "sec-fetch-site": "same-origin",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
         }
+        model = cls.get_model(model)
         async with ClientSession(headers=headers) as session:
             prompt = format_prompt(messages)
             data = {
@@ -74,5 +93,6 @@ async def create_async_generator(
                         chunk = json.loads(line_str[6:])
                         delta_content = chunk.get("choices", [{}])[0].get("delta", {}).get("content", "")
                         accumulated_text += delta_content
+                        yield delta_content
                     except json.JSONDecodeError:
                         pass
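One behavioral note on the new get_model: unknown names fall back silently to the class default instead of raising. A standalone rendition of the lookup order, using aliases from the hunk; the default value here is an assumption, since default_model is defined outside this hunk.

```python
# Standalone rendition of FreeChatgpt.get_model's resolution order:
# exact model id, then lowercased alias, then silent fallback to the default.
MODELS = ["@cf/qwen/qwen1.5-14b-chat-awq", "Qwen2-7B-Instruct"]
ALIASES = {"qwen-1.5-14b": "@cf/qwen/qwen1.5-14b-chat-awq", "qwen2-7b": "Qwen2-7B-Instruct"}
DEFAULT = "gpt-3.5-turbo"  # assumed; the real default_model is set elsewhere in the class

def get_model(model: str) -> str:
    if model in MODELS:
        return model
    return ALIASES.get(model.lower(), DEFAULT)

print(get_model("Qwen2-7B-Instruct"))  # exact id passes through
print(get_model("QWEN2-7B"))           # alias match is case-insensitive
print(get_model("gpt-99"))             # unknown -> default, no error
```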
18 changes: 10 additions & 8 deletions g4f/Provider/FreeGpt.py
@@ -6,23 +6,25 @@
 from typing import AsyncGenerator, Optional, Dict, Any
 from ..typing import Messages
 from ..requests import StreamSession, raise_for_status
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..errors import RateLimitError

 # Constants
 DOMAINS = [
     "https://s.aifree.site",
-    "https://v.aifree.site/"
+    "https://v.aifree.site/",
+    "https://al.aifree.site/",
+    "https://u4.aifree.site/"
 ]
 RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"


-class FreeGpt(AsyncGeneratorProvider):
-    url: str = "https://freegptsnav.aifree.site"
-    working: bool = True
-    supports_message_history: bool = True
-    supports_system_message: bool = True
-    supports_gpt_35_turbo: bool = True
+class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://freegptsnav.aifree.site"
+    working = True
+    supports_message_history = True
+    supports_system_message = True
+    default_model = 'llama-3.1-70b'

     @classmethod
     async def create_async_generator(
(Only 9 of the 21 changed files are shown above; the remaining diffs were not loaded.)

0 comments on commit bda2d67
