~ | improve compatibility
xtekky committed Nov 20, 2023
1 parent e8d88c9 commit 9140541
Showing 17 changed files with 69 additions and 31 deletions.
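Most of the hunks below apply the same refactor: assignment expressions (the := walrus operator, which requires Python 3.8 or newer) are rewritten as a separate assignment followed by a plain if, so the providers also run on older interpreters; the remaining changes comment out a debug print in GptGod.py, mark the Vercel provider as not working, and bump the package version. A minimal sketch of the pattern, modelled on the Chatgpt4Online.py hunk (the sample response string and the nonce variable here are illustrative, not taken from the repository):

import re

response = '<form data-nonce="abc123">'  # illustrative input, not from the repo

# Before: needs Python 3.8+ because of the assignment expression.
# if result := re.search(r'data-nonce="(.*?)"', response):
#     nonce = result.group(1)
# else:
#     raise RuntimeError("No nonce found")

# After: the same logic without the walrus operator, so it also runs on Python 3.7 and earlier.
result = re.search(r'data-nonce="(.*?)"', response)
if result:
    nonce = result.group(1)
else:
    raise RuntimeError("No nonce found")
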
4 changes: 3 additions & 1 deletion etc/tool/vercel.py
@@ -24,7 +24,9 @@ def get_model_info() -> dict[str, Any]:

models_regex = r'let .="\\n\\nHuman:\",r=(.+?),.='
for script in scripts:
if matches := re.findall(models_regex, script):

matches = re.findall(models_regex, script)
if matches:
models_str = matches[0]
stop_sequences_regex = r"(?<=stopSequences:{value:\[)\D(?<!\])"
models_str = re.sub(
4 changes: 3 additions & 1 deletion g4f/Provider/Chatgpt4Online.py
@@ -27,7 +27,9 @@ async def create_async(
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
if result := re.search(r'data-nonce="(.*?)"', response):
result = re.search(r'data-nonce="(.*?)"', response)

if result:
cls._wpnonce = result.group(1)
else:
raise RuntimeError("No nonce found")
4 changes: 3 additions & 1 deletion g4f/Provider/ChatgptAi.py
@@ -45,7 +45,9 @@ async def create_async_generator(
async with session.get(cls.url, proxy=proxy) as response:
response.raise_for_status()
text = await response.text()
if result := re.search(r"data-system='(.*?)'", text):

result = re.search(r"data-system='(.*?)'", text)
if result :
cls._system = json.loads(html.unescape(result.group(1)))
if not cls._system:
raise RuntimeError("System args not found")
11 changes: 8 additions & 3 deletions g4f/Provider/ChatgptDemo.py
@@ -37,10 +37,13 @@ async def create_async_generator(
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
if result := re.search(

result = re.search(
r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
):
)

if result:
user_id = result.group(1)
else:
raise RuntimeError("No user id found")
@@ -59,5 +62,7 @@ async def create_async_generator(
async for line in response.content:
if line.startswith(b"data: "):
line = json.loads(line[6:-1])
if chunk := line["choices"][0]["delta"].get("content"):

chunk = line["choices"][0]["delta"].get("content")
if chunk:
yield chunk
3 changes: 2 additions & 1 deletion g4f/Provider/ChatgptFree.py
@@ -65,7 +65,8 @@ async def create_async(
raise RuntimeError("No post id found")
cls._post_id = result.group(1)

if result := re.search(r'data-nonce="(.*?)"', response):
result = re.search(r'data-nonce="(.*?)"', response)
if result:
cls._nonce = result.group(1)

else:
13 changes: 8 additions & 5 deletions g4f/Provider/ChatgptLogin.py
@@ -45,10 +45,12 @@ async def create_async_generator(
async with session.get(f"{cls.url}/chat/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
if result := re.search(
result = re.search(
r'<div id="USERID" style="display: none">(.*?)<\/div>',
response,
):
)

if result:
cls._user_id = result.group(1)
else:
raise RuntimeError("No user id found")
@@ -67,9 +69,10 @@ async def create_async_generator(
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
if content := json.loads(line[6:])["choices"][0][
"delta"
].get("content"):

content = json.loads(line[6:])["choices"][0]["delta"].get("content")
if content:
yield content

async with session.post(f"{cls.url}/chat/delete_chat", json={"chat_id": chat_id}, proxy=proxy) as response:
response.raise_for_status()
16 changes: 11 additions & 5 deletions g4f/Provider/ChatgptX.py
@@ -35,15 +35,21 @@ async def create_async_generator(
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response = await response.text()
if result := re.search(

result = re.search(
r'<meta name="csrf-token" content="(.*?)"', response
):
)
if result:
csrf_token = result.group(1)
if result := re.search(r"openconversions\('(.*?)'\)", response):

result = re.search(r"openconversions\('(.*?)'\)", response)
if result:
chat_id = result.group(1)
if result := re.search(

result = re.search(
r'<input type="hidden" id="user_id" value="(.*?)"', response
):
)
if result:
user_id = result.group(1)

if not csrf_token or not chat_id or not user_id:
7 changes: 5 additions & 2 deletions g4f/Provider/GptGod.py
@@ -47,12 +47,15 @@ async def create_async_generator(
response.raise_for_status()
event = None
async for line in response.content:
print(line)
# print(line)

if line.startswith(b'event: '):
event = line[7:-1]

elif event == b"data" and line.startswith(b"data: "):
if data := json.loads(line[6:-1]):
data = json.loads(line[6:-1])
if data:
yield data

elif event == b"done":
break
3 changes: 1 addition & 2 deletions g4f/Provider/Vercel.py
@@ -6,10 +6,9 @@
from .base_provider import BaseProvider
from ..debug import logging


class Vercel(BaseProvider):
url = 'https://sdk.vercel.ai'
working = True
working = False
supports_message_history = True
supports_gpt_35_turbo = True
supports_stream = True
4 changes: 3 additions & 1 deletion g4f/Provider/deprecated/CodeLinkAva.py
@@ -46,5 +46,7 @@ async def create_async_generator(
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
if content := line["choices"][0]["delta"].get("content"):

content = line["choices"][0]["delta"].get("content")
if content:
yield content
4 changes: 3 additions & 1 deletion g4f/Provider/deprecated/Equing.py
@@ -65,5 +65,7 @@ def create_completion(
if line:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
if token := line_json['choices'][0]['delta'].get('content'):

token = line_json['choices'][0]['delta'].get('content')
if token:
yield token
6 changes: 4 additions & 2 deletions g4f/Provider/deprecated/FastGpt.py
@@ -69,9 +69,11 @@ def create_completion(
try:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
if token := line_json['choices'][0]['delta'].get(
token = line_json['choices'][0]['delta'].get(
'content'
):
)

if token:
yield token
except:
continue
5 changes: 4 additions & 1 deletion g4f/Provider/deprecated/Lockchat.py
@@ -38,6 +38,7 @@ def create_completion(
for token in response.iter_lines():
if b"The model: `gpt-4` does not exist" in token:
print("error, retrying...")

Lockchat.create_completion(
model = model,
messages = messages,
@@ -47,5 +48,7 @@ def create_completion(

if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
if token := token["choices"][0]["delta"].get("content"):
token = token["choices"][0]["delta"].get("content")

if token:
yield (token)
4 changes: 3 additions & 1 deletion g4f/Provider/deprecated/Vitalentum.py
@@ -49,5 +49,7 @@ async def create_async_generator(
if line.startswith("data: [DONE]"):
break
line = json.loads(line[6:-1])
if content := line["choices"][0]["delta"].get("content"):
content = line["choices"][0]["delta"].get("content")

if content:
yield content
7 changes: 5 additions & 2 deletions g4f/Provider/unfinished/ChatAiGpt.py
@@ -43,9 +43,12 @@ async def create_async_generator(
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
response = await response.text()
if result := re.search(

result = re.search(
r'data-nonce=(.*?) data-post-id=([0-9]+)', response
):
)

if result:
cls._nonce, cls._post_id = result.group(1), result.group(2)
else:
raise RuntimeError("No nonce found")
3 changes: 2 additions & 1 deletion g4f/Provider/unfinished/MikuChat.py
@@ -48,7 +48,8 @@ async def create_async_generator(
async for line in response.iter_lines():
if line.startswith(b"data: "):
line = json.loads(line[6:])
if chunk := line["choices"][0]["delta"].get("content"):
chunk = line["choices"][0]["delta"].get("content")
if chunk:
yield chunk

def k(e: str, t: int):
2 changes: 1 addition & 1 deletion setup.py
@@ -11,7 +11,7 @@
with open('requirements.txt') as f:
required = f.read().splitlines()

VERSION = '0.1.8.7'
VERSION = '0.1.8.8'
DESCRIPTION = (
'The official gpt4free repository | various collection of powerful language models'
)