Skip to content

Commit

Permalink
Adding a new provider with support for models
Browse files Browse the repository at this point in the history
  • Loading branch information
kqlio67 committed Sep 6, 2024
1 parent 34e1dc7 commit 0fb46d4
Show file tree
Hide file tree
Showing 3 changed files with 101 additions and 2 deletions.
89 changes: 89 additions & 0 deletions g4f/Provider/Bixin123.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,89 @@
from __future__ import annotations

from aiohttp import ClientSession
import json
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from .helper import format_prompt

class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
    """Async provider for chat.bixin123.com.

    Streams are not parsed incrementally: the endpoint returns newline-separated
    JSON fragments and only the last parseable line (the cumulative result) is
    yielded, so this generator produces at most one chunk.
    """
    url = "https://chat.bixin123.com"
    api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    default_model = 'gpt-3.5-turbo-0125'
    models = ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']

    # BUG FIX: the original dict listed the "gpt-3.5-turbo" key twice
    # ("-0125" then "-16k-0613"); Python keeps only the last duplicate, so the
    # alias silently resolved to the 16k model instead of the declared
    # default_model. Keep a single entry mapping the alias to the default.
    model_aliases = {
        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a concrete model name.

        Exact names pass through, known aliases are translated, and anything
        unrecognised falls back to ``default_model``.
        """
        if model in cls.models:
            return model
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Send the formatted conversation to the API and yield the reply text.

        :param model: requested model name (resolved via :meth:`get_model`)
        :param messages: conversation history in g4f ``Messages`` format
        :param proxy: optional proxy URL passed straight to aiohttp
        :raises aiohttp.ClientResponseError: on non-2xx HTTP status
        """
        model = cls.get_model(model)

        headers = {
            "accept": "application/json, text/plain, */*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            # NOTE(review): "fingerprint" looks like a client-identification
            # value captured from the site — presumably any fixed value works;
            # confirm if requests start failing.
            "content-type": "application/json",
            "fingerprint": "988148794",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": f"{cls.url}/chat",
            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
            "x-website-domain": "chat.bixin123.com",
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "options": {
                    "usingNetwork": False,
                    "file": ""
                }
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                response_text = await response.text()

                # The body is a stream of newline-separated JSON objects where
                # each line contains the cumulative text so far; scan from the
                # end for the last well-formed line.
                last_json = None
                for line in reversed(response_text.strip().split("\n")):
                    try:
                        last_json = json.loads(line)
                        break
                    except json.JSONDecodeError:
                        continue

                # Yield an empty string rather than nothing so callers always
                # receive at least one chunk.
                yield last_json.get("text", "") if last_json else ""
1 change: 1 addition & 0 deletions g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Bixin123 import Bixin123
from .Blackbox import Blackbox
from .ChatGot import ChatGot
from .Chatgpt4Online import Chatgpt4Online
Expand Down
13 changes: 11 additions & 2 deletions g4f/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
AiChatOnline,
Allyfy,
Bing,
Bixin123,
Blackbox,
ChatGot,
Chatgpt4Online,
Expand Down Expand Up @@ -81,6 +82,7 @@ def __all__() -> list[str]:
ReplicateHome,
Upstage,
Blackbox,
Bixin123,
])
)

Expand All @@ -103,7 +105,7 @@ def __all__() -> list[str]:
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Allyfy, TwitterBio, Nexra,
Allyfy, TwitterBio, Nexra, Bixin123,
])
)

Expand All @@ -128,7 +130,7 @@ def __all__() -> list[str]:
name = 'gpt-4-turbo',
base_provider = 'OpenAI',
best_provider = IterListProvider([
Nexra, Liaobots, Bing
Nexra, Bixin123, Liaobots, Bing
])
)

Expand Down Expand Up @@ -332,6 +334,12 @@ def __all__() -> list[str]:
best_provider = IterListProvider([FreeChatgpt])
)

qwen_turbo = Model(
name = 'qwen-turbo',
base_provider = 'Qwen',
best_provider = IterListProvider([Bixin123])
)


### Zhipu AI ###
glm4_9b = Model(
Expand Down Expand Up @@ -584,6 +592,7 @@ class ModelUtils:

### Qwen ###
'qwen-1.5-14b': qwen_1_5_14b,
'qwen-turbo': qwen_turbo,


### Zhipu AI ###
Expand Down

0 comments on commit 0fb46d4

Please sign in to comment.