Skip to content

Commit

Permalink
Merge pull request #1789 from hlohaus/free
Browse files Browse the repository at this point in the history
Add authless OpenaiChat
  • Loading branch information
hlohaus authored Apr 5, 2024
2 parents b401b6d + 1e2cf48 commit c791012
Show file tree
Hide file tree
Showing 5 changed files with 78 additions and 51 deletions.
85 changes: 56 additions & 29 deletions g4f/Provider/needs_auth/OpenaiChat.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,17 +27,16 @@
from ...requests import get_args_from_browser, raise_for_status
from ...requests.aiohttp import StreamSession
from ...image import to_image, to_bytes, ImageResponse, ImageRequest
from ...errors import MissingAuthError
from ...errors import MissingAuthError, ResponseError
from ...providers.conversation import BaseConversation
from ..openai.har_file import getArkoseAndAccessToken
from ..openai.har_file import getArkoseAndAccessToken, NoValidHarFileError
from ... import debug

class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"""A class for creating and managing conversations with OpenAI chat service"""

url = "https://chat.openai.com"
working = True
needs_auth = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_message_history = True
Expand All @@ -56,6 +55,7 @@ async def create(
prompt: str = None,
model: str = "",
messages: Messages = [],
action: str = "next",
**kwargs
) -> Response:
"""
Expand Down Expand Up @@ -169,14 +169,17 @@ async def get_default_model(cls, session: StreamSession, headers: dict):
The default model name as a string
"""
if not cls.default_model:
async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
url = f"{cls.url}/backend-anon/models" if cls._api_key is None else f"{cls.url}/backend-api/models"
async with session.get(url, headers=headers) as response:
cls._update_request_args(session)
if response.status == 401:
raise MissingAuthError('Add a "api_key" or a .har file' if cls._api_key is None else "Invalid api key")
await raise_for_status(response)
data = await response.json()
if "categories" in data:
cls.default_model = data["categories"][-1]["default_model"]
return cls.default_model
raise RuntimeError(f"Response: {data}")
raise ResponseError(data)
return cls.default_model

@classmethod
Expand Down Expand Up @@ -330,39 +333,42 @@ async def create_async_generator(
Raises:
RuntimeError: If an error occurs during processing.
"""
if parent_id is None:
parent_id = str(uuid.uuid4())

async with StreamSession(
proxies={"https": proxy},
impersonate="chrome",
timeout=timeout
) as session:
if cls._headers is None or cookies is not None:
cls._create_request_args(cookies)
api_key = kwargs["access_token"] if "access_token" in kwargs else api_key

if api_key is not None:
cls._create_request_args(cookies)
cls._set_api_key(api_key)

if cls.default_model is None and cls._headers is not None:
if cls.default_model is None and cls._api_key is not None:
try:
if not model:
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
else:
cls.default_model = cls.get_model(model)
except Exception as e:
api_key = cls._api_key = None
cls._create_request_args()
if debug.logging:
print("OpenaiChat: Load default_model failed")
print(f"{e.__class__.__name__}: {e}")

arkose_token = None
if cls.default_model is None:
arkose_token, api_key, cookies = await getArkoseAndAccessToken(proxy)
cls._create_request_args(cookies)
cls._set_api_key(api_key)
try:
arkose_token, api_key, cookies = await getArkoseAndAccessToken(proxy)
cls._create_request_args(cookies)
cls._set_api_key(api_key)
except NoValidHarFileError:
...
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))

async with session.post(
f"{cls.url}/backend-anon/sentinel/chat-requirements" if not cls._api_key else
f"{cls.url}/backend-api/sentinel/chat-requirements",
json={"conversation_mode_kind": "primary_assistant"},
headers=cls._headers
Expand All @@ -389,17 +395,22 @@ async def create_async_generator(
print(f"{e.__class__.__name__}: {e}")

model = cls.get_model(model).replace("gpt-3.5-turbo", "text-davinci-002-render-sha")
fields = Conversation(conversation_id, parent_id) if conversation is None else copy(conversation)
fields.finish_reason = None
while fields.finish_reason is None:
if conversation is None:
conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
else:
conversation = copy(conversation)
if cls._api_key is None:
auto_continue = False
conversation.finish_reason = None
while conversation.finish_reason is None:
websocket_request_id = str(uuid.uuid4())
data = {
"action": action,
"conversation_mode": {"kind": "primary_assistant"},
"force_paragen": False,
"force_rate_limit": False,
"conversation_id": fields.conversation_id,
"parent_message_id": fields.message_id,
"conversation_id": conversation.conversation_id,
"parent_message_id": conversation.message_id,
"model": model,
"history_and_training_disabled": history_disabled and not auto_continue and not return_conversation,
"websocket_request_id": websocket_request_id
Expand All @@ -415,24 +426,27 @@ async def create_async_generator(
if need_arkose:
headers["OpenAI-Sentinel-Arkose-Token"] = arkose_token
async with session.post(
f"{cls.url}/backend-anon/conversation" if cls._api_key is None else
f"{cls.url}/backend-api/conversation",
json=data,
headers=headers
) as response:
cls._update_request_args(session)
await raise_for_status(response)
async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, conversation):
if return_conversation:
history_disabled = False
return_conversation = False
yield fields
yield conversation
yield chunk
if not auto_continue:
if auto_continue and conversation.finish_reason == "max_tokens":
conversation.finish_reason = None
action = "continue"
await asyncio.sleep(5)
else:
break
action = "continue"
await asyncio.sleep(5)
if history_disabled and auto_continue:
await cls.delete_conversation(session, cls._headers, fields.conversation_id)
await cls.delete_conversation(session, cls._headers, conversation.conversation_id)

@staticmethod
async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str, is_curl: bool) -> AsyncIterator:
Expand Down Expand Up @@ -594,15 +608,28 @@ async def fetch_access_token(cls, session: StreamSession, headers: dict):
if "accessToken" in data:
return data["accessToken"]

@staticmethod
def get_default_headers() -> dict:
    """Build the baseline Chrome-like request headers used for every call.

    Returns:
        A fresh dict of headers; ``oai-device-id`` is a newly generated
        random UUID on each invocation, so two calls differ in that key.
    """
    device_id = str(uuid.uuid4())
    headers = {
        "accept-language": "en-US",
        "content-type": "application/json",
        "oai-device-id": device_id,
        "oai-language": "en-US",
    }
    # Client-hint / fetch-metadata headers mimicking Chrome 122 on Linux.
    headers.update({
        "sec-ch-ua": '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Linux"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
    })
    return headers

@staticmethod
def _format_cookies(cookies: Cookies):
    """Serialize *cookies* into a ``Cookie`` header value.

    The ``access_token`` entry is deliberately excluded so the token is
    never leaked through the cookie header.
    """
    pairs = [f"{name}={value}" for name, value in cookies.items() if name != "access_token"]
    return "; ".join(pairs)

@classmethod
def _create_request_args(cls, cookies: Union[Cookies, None]):
cls._headers = {
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}
def _create_request_args(cls, cookies: Cookies = None):
cls._headers = cls.get_default_headers()
cls._cookies = {} if cookies is None else cookies
cls._update_cookie_header()

Expand Down
10 changes: 5 additions & 5 deletions g4f/Provider/openai/har_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@
from .crypt import decrypt, encrypt
from ...requests import StreamSession

class NoValidHarFileError(Exception):
    """Raised when no usable .har file is found (missing file or no access token in it)."""
    ...

class arkReq:
def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):
self.arkURL = arkURL
Expand Down Expand Up @@ -39,7 +42,7 @@ def readHAR():
if harPath:
break
if not harPath:
raise RuntimeError("No .har file found")
raise NoValidHarFileError("No .har file found")
for path in harPath:
with open(path, 'rb') as file:
try:
Expand All @@ -54,7 +57,7 @@ def readHAR():
accessToken = json.loads(v["response"]["content"]["text"]).get("accessToken")
cookies = {c['name']: c['value'] for c in v['request']['cookies']}
if not accessToken:
RuntimeError("No accessToken found in .har files")
raise NoValidHarFileError("No accessToken found in .har files")
if not chatArks:
return None, accessToken, cookies
return chatArks.pop(), accessToken, cookies
Expand All @@ -75,9 +78,6 @@ def parseHAREntry(entry) -> arkReq:
return tmpArk

def genArkReq(chatArk: arkReq) -> arkReq:
if not chatArk:
raise RuntimeError("No .har file with arkose found")

tmpArk: arkReq = deepcopy(chatArk)
if tmpArk is None or not tmpArk.arkBody or not tmpArk.arkHeader:
raise RuntimeError("The .har file is not valid")
Expand Down
3 changes: 3 additions & 0 deletions g4f/errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,5 +40,8 @@ class NoImageResponseError(Exception):
class RateLimitError(Exception):
...

class ResponseError(Exception):
    """Raised when a provider returns an unexpected/error payload instead of a usable result."""
    ...

class ResponseStatusError(Exception):
...
26 changes: 9 additions & 17 deletions g4f/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,19 +10,15 @@
ChatgptNext,
HuggingChat,
HuggingFace,
ChatgptDemo,
GptForLove,
OpenaiChat,
ChatgptAi,
DeepInfra,
ChatBase,
GigaChat,
Liaobots,
FreeGpt,
Llama2,
Vercel,
Gemini,
GptGo,
Gpt6,
Bing,
You,
Pi,
Expand All @@ -41,7 +37,7 @@ class Model:
name: str
base_provider: str
best_provider: ProviderType = None

@staticmethod
def __all__() -> list[str]:
"""Returns a list of all model names."""
Expand All @@ -52,9 +48,10 @@ def __all__() -> list[str]:
base_provider = "",
best_provider = RetryProvider([
Bing,
ChatgptAi, GptGo,
ChatgptAi,
You,
Chatgpt4Online
Chatgpt4Online,
OpenaiChat
])
)

Expand All @@ -63,23 +60,18 @@ def __all__() -> list[str]:
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
FreeGpt, You,
Chatgpt4Online,
FreeGpt,
You,
ChatgptNext,
ChatgptDemo,
Gpt6,
OpenaiChat,
])
)

# GPT-3.5 / GPT-4
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
GptGo, You,
GptForLove, ChatBase,
Chatgpt4Online,
])
best_provider = OpenaiChat
)

gpt_4 = Model(
Expand Down
5 changes: 5 additions & 0 deletions g4f/requests/raise_for_status.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,17 @@ class CloudflareError(ResponseStatusError):
def is_cloudflare(text: str) -> bool:
    """Return True if *text* looks like a Cloudflare challenge/interstitial page."""
    markers = (
        '<div id="cf-please-wait">',
        "<title>Just a moment...</title>",
    )
    return any(marker in text for marker in markers)

def is_openai(text: str) -> bool:
    """Return True if *text* looks like OpenAI's bot-block / unable-to-load page."""
    return text.find("<p>Unable to load site</p>") != -1

async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
    """Raise a descriptive error when *response* indicates failure.

    Args:
        response: The HTTP response to inspect (its ``status`` and ``ok``
            attributes are read; the body is fetched only on failure).
        message: Optional pre-fetched body text; when ``None`` and the
            response failed, the body is read via ``response.text()``.

    Raises:
        RateLimitError: On status 429 or 402.
        CloudflareError: On status 403 when the body is a Cloudflare page.
        ResponseStatusError: On status 403 when the body is OpenAI's
            bot-block page, or on any other non-OK status.
    """
    if response.status in (429, 402):
        raise RateLimitError(f"Response {response.status}: Rate limit reached")
    # Read the body only when needed: caller didn't supply it and request failed.
    message = await response.text() if not response.ok and message is None else message
    if response.status == 403 and is_cloudflare(message):
        raise CloudflareError(f"Response {response.status}: Cloudflare detected")
    # Fix: error message was ungrammatical ("Bot are detected").
    elif response.status == 403 and is_openai(message):
        raise ResponseStatusError(f"Response {response.status}: Bot detected")
    elif not response.ok:
        raise ResponseStatusError(f"Response {response.status}: {message}")

Expand Down

0 comments on commit c791012

Please sign in to comment.