Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

AItianhuSpace Provider with GPT 4 added #941

Merged
merged 2 commits into from
Sep 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 73 additions & 0 deletions g4f/Provider/AItianhuSpace.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
from __future__ import annotations

import json
import random
from typing import AsyncGenerator

from g4f.requests import AsyncSession, StreamRequest
from .base_provider import AsyncGeneratorProvider, format_prompt

# Maps each supported model name to the base domain that serves it; a random
# subdomain is prepended per request (see AItianhuSpace.create_async_generator).
domains = {
    "gpt-3.5-turbo": ".aitianhu.space",
    "gpt-4": ".aitianhu.website",
}

class AItianhuSpace(AsyncGeneratorProvider):
    """Provider that streams completions from randomized ``*.aitianhu.space`` /
    ``*.aitianhu.website`` subdomains (one fresh subdomain per request).
    """
    url = "https://chat3.aiyunos.top/"
    working = True
    supports_gpt_35_turbo = True
    # `domains` maps "gpt-4" as well, so advertise GPT-4 support for model routing.
    supports_gpt_4 = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        **kwargs
    ) -> AsyncGenerator[str, None]:
        """Yield response text chunks for `messages` from the given `model`.

        Raises:
            ValueError: if `model` is not listed in `domains`.
            RuntimeError: on GPT-4 rate limiting or an unrecognized response line.
        """
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in domains:
            raise ValueError(f"Model are not supported: {model}")

        # Each request goes to a fresh random 6-character subdomain,
        # presumably to dodge per-host rate limiting — TODO confirm.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
        rand = ''.join(random.choice(chars) for _ in range(6))
        domain = domains[model]
        url = f'https://{rand}{domain}/api/chat-process'

        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        }
        # verify=False: the wildcard subdomains do not present valid certificates.
        async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
            data = {
                "prompt": format_prompt(messages),
                "options": {},
                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
                "temperature": 0.8,
                "top_p": 1,
                **kwargs
            }
            async with StreamRequest(session, "POST", url, json=data) as response:
                response.raise_for_status()
                # The endpoint streams one JSON object per line.
                async for line in response.content:
                    line = json.loads(line.rstrip())
                    if "detail" in line:
                        content = line["detail"]["choices"][0]["delta"].get("content")
                        if content:
                            yield content
                    elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
                        raise RuntimeError("Rate limit for GPT 4 reached")
                    else:
                        # BUG FIX: was a plain string literal "Response: {line}",
                        # which never interpolated the offending line.
                        raise RuntimeError(f"Response: {line}")


    @classmethod
    @property
    def params(cls):
        """Human-readable description of the supported keyword parameters."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
5 changes: 3 additions & 2 deletions g4f/Provider/Aivvm.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,9 +60,10 @@ def create_completion(cls,

response = requests.post(
"https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
response.raise_for_status()

for line in response.iter_content(chunk_size=1048):
yield line.decode('utf-8')
for chunk in response.iter_content(chunk_size=None):
yield chunk.decode('utf-8')

@classmethod
@property
Expand Down
30 changes: 17 additions & 13 deletions g4f/Provider/Vercel.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,21 @@ class Vercel(BaseProvider):
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs ) -> CreateResult:
stream: bool,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
elif model not in model_info:
raise ValueError(f"Model are not supported: {model}")

headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
'custom-encoding' : AntiBotToken(),
'custom-encoding' : get_anti_bot_token(),
'origin' : 'https://sdk.vercel.ai',
'pragma' : 'no-cache',
'referer' : 'https://sdk.vercel.ai/',
Expand All @@ -48,22 +54,20 @@ def create_completion(
'playgroundId': str(uuid.uuid4()),
'chatIndex' : 0} | model_info[model]['default_params']

server_error = True
retries = 0
max_retries = kwargs.get('max_retries', 20)

while server_error and not retries > max_retries:
for i in range(max_retries):
response = requests.post('https://sdk.vercel.ai/api/generate',
headers=headers, json=json_data, stream=True)
try:
response.raise_for_status()
except:
continue
for token in response.iter_content(chunk_size=8):
yield token.decode()
break

for token in response.iter_content(chunk_size=2046):
if token != b'Internal Server Error':
server_error = False
yield (token.decode())

retries += 1

def AntiBotToken() -> str:
def get_anti_bot_token() -> str:
headers = {
'authority' : 'sdk.vercel.ai',
'accept' : '*/*',
Expand Down
2 changes: 2 additions & 0 deletions g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from .Ails import Ails
from .AiService import AiService
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
from .Aivvm import Aivvm
from .Bard import Bard
from .Bing import Bing
Expand Down Expand Up @@ -52,6 +53,7 @@
'Ails',
'AiService',
'AItianhu',
'AItianhuSpace',
'Aivvm',
'Bard',
'Bing',
Expand Down
31 changes: 29 additions & 2 deletions g4f/Provider/base_provider.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from __future__ import annotations

import asyncio
from asyncio import SelectorEventLoop
import functools
from asyncio import SelectorEventLoop, AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod

import browser_cookie3
Expand All @@ -27,6 +29,31 @@ def create_completion(
) -> CreateResult:
raise NotImplementedError()

@classmethod
async def create_async(
    cls,
    model: str,
    messages: list[dict[str, str]],
    *,
    loop: AbstractEventLoop = None,
    executor: ThreadPoolExecutor = None,
    **kwargs
) -> str:
    """Run the blocking `create_completion` in an executor and return the
    joined, non-streamed response text.

    Args:
        model: model identifier forwarded to `create_completion`.
        messages: chat history as a list of role/content dicts.
        loop: event loop to schedule on; defaults to the running loop.
        executor: optional thread pool; `None` uses the loop's default.
    """
    if not loop:
        # BUG FIX: get_event_loop() is deprecated inside coroutines and may
        # create a new loop; a loop is guaranteed to be running here.
        loop = asyncio.get_running_loop()

    # Bind arguments now; run_in_executor only accepts a plain callable.
    partial_func = functools.partial(
        cls.create_completion,
        model,
        messages,
        False,  # stream=False: collect the full response
        **kwargs
    )
    response = await loop.run_in_executor(
        executor,
        partial_func
    )
    return "".join(response)

@classmethod
@property
Expand Down Expand Up @@ -127,7 +154,7 @@ def create_event_loop() -> SelectorEventLoop:
except RuntimeError:
return SelectorEventLoop()
raise RuntimeError(
'Use "create_async" instead of "create" function in a async loop.')
'Use "create_async" instead of "create" function in a running event loop.')


_cookies = {}
Expand Down
7 changes: 4 additions & 3 deletions g4f/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
Wewordle,
Yqcloud,
AItianhu,
AItianhuSpace,
Aichat,
Myshell,
)
Expand All @@ -38,7 +39,7 @@ class Model:
Wewordle, # Responds with markdown
Yqcloud, # Answers short questions in chinese
ChatBase, # Don't want to answer creatively
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell,
])
)

Expand All @@ -47,15 +48,15 @@ class Model:
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, Myshell,
DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell,
])
)

gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
Aivvm, Myshell
Aivvm, Myshell, AItianhuSpace,
])
)

Expand Down
78 changes: 78 additions & 0 deletions g4f/requests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
from __future__ import annotations

import json, sys
from aiohttp import StreamReader
from aiohttp.base_protocol import BaseProtocol

from curl_cffi.requests import AsyncSession
from curl_cffi.requests.cookies import Request
from curl_cffi.requests.cookies import Response


class StreamResponse:
    """Read-only wrapper pairing a curl_cffi `Response` with an aiohttp
    `StreamReader` holding the streamed body."""

    def __init__(self, inner: Response, content: StreamReader, request: Request):
        self.inner = inner
        self.content = content
        self.request = request
        # Mirror the most frequently used response attributes.
        self.status_code = inner.status_code
        self.reason = inner.reason
        self.ok = inner.ok

    async def text(self) -> str:
        """Read the full body and decode it as text."""
        raw = await self.content.read()
        return raw.decode()

    def raise_for_status(self):
        """Raise `RuntimeError` when the status signals an error."""
        if self.ok:
            return
        raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}")

    async def json(self, **kwargs):
        """Read the full body and parse it as JSON."""
        raw = await self.content.read()
        return json.loads(raw, **kwargs)


class StreamRequest:
    """Async context manager that performs a curl_cffi request and exposes the
    body as an aiohttp `StreamReader` for incremental consumption.

    NOTE(review): this leans on curl_cffi *private* APIs (`pop_curl`,
    `_set_curl_options`, `_parse_response`, `push_curl`) — likely to break
    across curl_cffi versions; verify against the pinned version.
    """

    def __init__(self, session: AsyncSession, method: str, url: str, **kwargs):
        self.session = session
        self.loop = session.loop
        # Effectively unbounded reader (sys.maxsize high-water mark), fed
        # from curl's content callback on the session's loop.
        self.content = StreamReader(
            BaseProtocol(session.loop),
            sys.maxsize,
            loop=session.loop
        )
        self.method = method
        self.url = url
        self.options = kwargs

    def on_content(self, data):
        # First chunk resolves the `enter` future so __aenter__ can return a
        # response as soon as data starts arriving.
        if not self.enter.done():
            self.enter.set_result(None)
        self.content.feed_data(data)

    def on_done(self, task):
        # Transfer task finished (success or failure): signal EOF to readers.
        self.content.feed_eof()

    async def __aenter__(self) -> StreamResponse:
        # Borrow a curl handle from the session pool and configure it to
        # stream the body into on_content instead of buffering.
        self.curl = await self.session.pop_curl()
        self.enter = self.session.loop.create_future()
        request, _, header_buffer = self.session._set_curl_options(
            self.curl,
            self.method,
            self.url,
            content_callback=self.on_content,
            **self.options
        )
        handle = self.session.acurl.add_handle(self.curl)
        self.handle = self.session.loop.create_task(handle)
        self.handle.add_done_callback(self.on_done)
        # Wait until the first content callback fires before building the
        # response (headers are available by then).
        await self.enter
        return StreamResponse(
            self.session._parse_response(self.curl, request, _, header_buffer),
            self.content,
            request
        )

    async def __aexit__(self, exc_type, exc, tb):
        # Await the transfer (re-raising any curl error), then recycle the
        # handle back into the session pool.
        await self.handle
        self.curl.clean_after_perform()
        self.curl.reset()
        self.session.push_curl(self.curl)