diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py
index 07e9fe7f1ae..d13d7a58fa1 100644
--- a/g4f/Provider/CablyAI.py
+++ b/g4f/Provider/CablyAI.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
 
 class CablyAI(OpenaiTemplate):
     url = "https://cablyai.com"
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index a9c4b5ebce7..8e961fb59d7 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -22,7 +22,7 @@
 from .helper import format_prompt_max_length
 from .openai.har_file import get_headers, get_har_files
 from ..typing import CreateResult, Messages, ImagesType
-from ..errors import MissingRequirementsError, NoValidHarFileError
+from ..errors import MissingRequirementsError, NoValidHarFileError, MissingAuthError
 from ..requests.raise_for_status import raise_for_status
 from ..providers.response import BaseConversation, JsonConversation, RequestLogin, Parameters
 from ..providers.asyncio import get_running_loop
@@ -119,6 +119,8 @@ def create_completion(
         #     else:
         #         clarity_token = None
         response = session.get("https://copilot.microsoft.com/c/api/user")
+        if response.status_code == 401:
+            raise MissingAuthError("Status 401: Invalid access token")
         raise_for_status(response)
         user = response.json().get('firstName')
         if user is None:
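The new 401 branch means callers can catch an expired Copilot token explicitly. A minimal sketch, assuming the synchronous `create_completion` entry point named in the hunk above and no valid HAR file on disk:

```python
from g4f.Provider import Copilot
from g4f.errors import MissingAuthError

messages = [{"role": "user", "content": "Hello"}]
try:
    for chunk in Copilot.create_completion(Copilot.default_model, messages, stream=True):
        print(chunk, end="")
except MissingAuthError as e:
    # An invalid or expired access token now surfaces as MissingAuthError
    # ("Status 401: Invalid access token") instead of a generic HTTP error.
    print(f"Copilot needs a fresh login: {e}")
```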
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index f384a3ffe33..9b968e9f9af 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
 
 class DeepInfraChat(OpenaiTemplate):
     url = "https://deepinfra.com/chat"
diff --git a/g4f/Provider/Jmuz.py b/g4f/Provider/Jmuz.py
index 27c10233a1a..5b4c6632b70 100644
--- a/g4f/Provider/Jmuz.py
+++ b/g4f/Provider/Jmuz.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
 
 class Jmuz(OpenaiTemplate):
     url = "https://discord.gg/Ew6JzjA2NR"
diff --git a/g4f/Provider/Mhystical.py b/g4f/Provider/Mhystical.py
index 1257a80ffc7..3a60e9584b9 100644
--- a/g4f/Provider/Mhystical.py
+++ b/g4f/Provider/Mhystical.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from ..typing import AsyncResult, Messages
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
 
 class Mhystical(OpenaiTemplate):
     url = "https://mhystical.cc"
diff --git a/g4f/Provider/OIVSCode.py b/g4f/Provider/OIVSCode.py
index 6a91886907d..ef88c5edee6 100644
--- a/g4f/Provider/OIVSCode.py
+++ b/g4f/Provider/OIVSCode.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .needs_auth.OpenaiTemplate import OpenaiTemplate
+from .template import OpenaiTemplate
 
 class OIVSCode(OpenaiTemplate):
     label = "OI VSCode Server"
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index af0b8d5b763..a1050575e4e 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -5,6 +5,7 @@
 
 from ..typing import AsyncResult, Messages
 from ..requests import StreamSession, raise_for_status
+from ..providers.response import Reasoning, FinishReason
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 API_URL = "https://www.perplexity.ai/socket.io/"
@@ -13,25 +14,16 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
 
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "llama-3.1-70b-instruct"
+
+    default_model = "sonar-pro"
     models = [
-        "llama-3.1-sonar-large-128k-online",
-        "llama-3.1-sonar-small-128k-online",
-        "llama-3.1-sonar-large-128k-chat",
-        "llama-3.1-sonar-small-128k-chat",
-        "llama-3.1-8b-instruct",
-        "llama-3.1-70b-instruct",
-        "llama-3.3-70b-instruct",
-        "/models/LiquidCloud",
+        default_model,
+        "sonar",
+        "sonar-reasoning",
     ]
-
     model_aliases = {
-        "sonar-online": "llama-3.1-sonar-large-128k-online",
-        "sonar-chat": "llama-3.1-sonar-large-128k-chat",
-        "llama-3.3-70b": "llama-3.3-70b-instruct",
-        "llama-3.1-8b": "llama-3.1-8b-instruct",
-        "llama-3.1-70b": "llama-3.1-70b-instruct",
-        "lfm-40b": "/models/LiquidCloud",
+        "sonar-online": default_model,
+        "sonar-chat": default_model,
     }
 
     @classmethod
@@ -78,13 +70,14 @@ async def create_async_generator(
         assert(await ws.receive_str())
         assert(await ws.receive_str() == "6")
         message_data = {
-            "version": "2.13",
+            "version": "2.16",
             "source": "default",
             "model": model,
             "messages": messages
         }
         await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
         last_message = 0
+        is_thinking = False
         while True:
             message = await ws.receive_str()
             if message == "2":
@@ -94,9 +87,25 @@
                 continue
             try:
                 data = json.loads(message[2:])[1]
-                yield data["output"][last_message:]
+                new_content = data["output"][last_message:]
+
+                if "<think>" in new_content:
+                    yield Reasoning(None, "thinking")
+                    is_thinking = True
+                if "</think>" in new_content:
+                    new_content = new_content.split("</think>", 1)
+                    yield Reasoning(f"{new_content[0]}</think>")
+                    yield Reasoning(None, "finished")
+                    yield new_content[1]
+                    is_thinking = False
+                elif is_thinking:
+                    yield Reasoning(new_content)
+                else:
+                    yield new_content
+
                 last_message = len(data["output"])
                 if data["final"]:
+                    yield FinishReason("stop")
                     break
             except:
                 raise RuntimeError(f"Message: {message}")
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 5db86513714..0e0b2f4d7a3 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -11,6 +11,7 @@
 from .local import *
 from .hf_space import HuggingSpace
 from .mini_max import HailuoAI, MiniMax
+from .template import OpenaiTemplate, BackendApi
 
 from .AIChatFree import AIChatFree
 from .AIUncensored import AIUncensored
@@ -55,6 +56,7 @@
     if isinstance(provider, type)
     and issubclass(provider, BaseProvider)
 ]
+__providers__ = __providers__ + HuggingSpace.providers
 __all__: list[str] = [
     provider.__name__ for provider in __providers__
 ]
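Since PerplexityLabs now yields `Reasoning` and `FinishReason` objects alongside plain text, consumers should branch on the chunk type. A minimal consumer sketch for the new `sonar-reasoning` model, relying only on the types used in the hunks above:

```python
import asyncio
from g4f.Provider import PerplexityLabs
from g4f.providers.response import Reasoning, FinishReason

async def main():
    messages = [{"role": "user", "content": "Why is the sky blue?"}]
    async for chunk in PerplexityLabs.create_async_generator("sonar-reasoning", messages):
        if isinstance(chunk, Reasoning):
            print(f"[reasoning] {chunk}")  # <think>-delimited tokens, wrapped by the provider
        elif isinstance(chunk, FinishReason):
            break  # "stop" is now signalled explicitly
        else:
            print(chunk, end="")  # regular answer tokens

asyncio.run(main())
```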
diff --git a/g4f/Provider/hf_space/Janus_Pro_7B.py b/g4f/Provider/hf_space/Janus_Pro_7B.py
new file mode 100644
index 00000000000..43e5f518874
--- /dev/null
+++ b/g4f/Provider/hf_space/Janus_Pro_7B.py
@@ -0,0 +1,144 @@
+from __future__ import annotations
+
+import json
+import uuid
+import re
+from datetime import datetime, timezone, timedelta
+import urllib.parse
+
+from ...typing import AsyncResult, Messages, Cookies
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...providers.response import JsonConversation, ImageResponse
+from ...requests.aiohttp import StreamSession, StreamResponse
+from ...requests.raise_for_status import raise_for_status
+from ...cookies import get_cookies
+from ...errors import ResponseError
+from ... import debug
+
+class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://huggingface.co/spaces/deepseek-ai/Janus-Pro-7B"
+    api_url = "https://deepseek-ai-janus-pro-7b.hf.space"
+    referer = f"{api_url}?__theme=light"
+
+    working = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = "janus-pro-7b"
+    default_image_model = "janus-pro-7b-image"
+    models = [default_model, default_image_model]
+    image_models = [default_image_model]
+
+    @classmethod
+    def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation):
+        if method == "post":
+            return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
+                "headers": {
+                    "content-type": "application/json",
+                    "x-zerogpu-token": conversation.zerogpu_token,
+                    "x-zerogpu-uuid": conversation.uuid,
+                    "referer": cls.referer,
+                },
+                "json": {"data":[None,prompt,42,0.95,0.1],"event_data":None,"fn_index":2,"trigger_id":10,"session_hash":conversation.session_hash},
+            })
+        elif method == "image":
+            return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
+                "headers": {
+                    "content-type": "application/json",
+                    "x-zerogpu-token": conversation.zerogpu_token,
+                    "x-zerogpu-uuid": conversation.uuid,
+                    "referer": cls.referer,
+                },
+                "json": {"data":[prompt,1234,5,1],"event_data":None,"fn_index":3,"trigger_id":20,"session_hash":conversation.session_hash},
+            })
+        return session.get(f"{cls.api_url}/gradio_api/queue/data?session_hash={conversation.session_hash}", **{
+            "headers": {
+                "accept": "text/event-stream",
+                "content-type": "application/json",
+                "referer": cls.referer,
+            }
+        })
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        prompt: str = None,
+        proxy: str = None,
+        cookies: Cookies = None,
+        return_conversation: bool = False,
+        conversation: JsonConversation = None,
+        **kwargs
+    ) -> AsyncResult:
+        def generate_session_hash():
+            """Generate a unique session hash."""
+            return str(uuid.uuid4()).replace('-', '')[:12]
+
+        method = "post"
+        if model == cls.default_image_model or prompt is not None:
+            method = "image"
+
+        prompt = format_prompt(messages) if prompt is None and conversation is None else prompt
+        prompt = messages[-1]["content"] if prompt is None else prompt
+
+        session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
+        async with StreamSession(proxy=proxy, impersonate="chrome") as session:
+            session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
+            user_uuid = None if conversation is None else getattr(conversation, "user_uuid", None)
+            zerogpu_token = "[object Object]"
+
+            cookies = get_cookies("huggingface.co", raise_requirements_error=False) if cookies is None else cookies
+            if cookies:
+                # Get current UTC time + 10 minutes
+                dt = (datetime.now(timezone.utc) + timedelta(minutes=10)).isoformat(timespec='milliseconds')
+                encoded_dt = urllib.parse.quote(dt)
+                async with session.get(f"https://huggingface.co/api/spaces/deepseek-ai/Janus-Pro-7B/jwt?expiration={encoded_dt}&include_pro_status=true", cookies=cookies) as response:
+                    zerogpu_token = (await response.json())
+                    zerogpu_token = zerogpu_token["token"]
+            if user_uuid is None:
+                async with session.get(cls.url, cookies=cookies) as response:
+                    match = re.search(r"&quot;token&quot;:&quot;([^&]+?)&quot;", await response.text())
+                    if match:
+                        zerogpu_token = match.group(1)
+                    match = re.search(r"&quot;sessionUuid&quot;:&quot;([^&]+?)&quot;", await response.text())
+                    if match:
+                        user_uuid = match.group(1)
+
+            if conversation is None or not hasattr(conversation, "session_hash"):
+                conversation = JsonConversation(session_hash=session_hash, zerogpu_token=zerogpu_token, uuid=user_uuid)
+            conversation.zerogpu_token = zerogpu_token
+            if return_conversation:
+                yield conversation
+
+            async with cls.run(method, session, prompt, conversation) as response:
+                await raise_for_status(response)
+
+            async with cls.run("get", session, prompt, conversation) as response:
+                response: StreamResponse = response
+                async for line in response.iter_lines():
+                    decoded_line = line.decode(errors="replace")
+                    if decoded_line.startswith('data: '):
+                        try:
+                            json_data = json.loads(decoded_line[6:])
+                            if json_data.get('msg') == 'log':
+                                debug.log(json_data["log"])
+
+                            if json_data.get('msg') == 'process_generating':
+                                if 'output' in json_data and 'data' in json_data['output']:
+                                    yield f"data: {json.dumps(json_data['output']['data'])}"
+
+                            if json_data.get('msg') == 'process_completed':
+                                if 'output' in json_data and 'error' in json_data['output']:
+                                    raise ResponseError("Text model is not working. Try out image model" if "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
+                                if 'output' in json_data and 'data' in json_data['output']:
+                                    if "image" in json_data['output']['data'][0][0]:
+                                        yield ImageResponse([image["image"]["url"] for image in json_data['output']['data'][0]], prompt)
+                                    else:
+                                        yield f"data: {json.dumps(json_data['output']['data'])}"
+                                break
+
+                        except json.JSONDecodeError:
+                            debug.log("Could not parse JSON:", decoded_line)
\ No newline at end of file
diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py
index accd00d0d21..3e40a37b09a 100644
--- a/g4f/Provider/hf_space/__init__.py
+++ b/g4f/Provider/hf_space/__init__.py
@@ -10,6 +10,7 @@
 from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
 from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
 from .CohereForAI import CohereForAI
+from .Janus_Pro_7B import Janus_Pro_7B
 from .Qwen_QVQ_72B import Qwen_QVQ_72B
 from .Qwen_Qwen_2_5M_Demo import Qwen_Qwen_2_5M_Demo
 from .Qwen_Qwen_2_72B_Instruct import Qwen_Qwen_2_72B_Instruct
@@ -25,8 +26,11 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     default_image_model = BlackForestLabsFlux1Dev.default_model
     default_vision_model = Qwen_QVQ_72B.default_model
     providers = [
-        BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell,
-        CohereForAI, Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct, StableDiffusion35Large
+        BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell,
+        VoodoohopFlux1Schnell,
+        CohereForAI, Janus_Pro_7B,
+        Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct,
+        StableDiffusion35Large
     ]
 
     @classmethod
diff --git a/g4f/Provider/mini_max/MiniMax.py b/g4f/Provider/mini_max/MiniMax.py
index c954fb622a9..fbca13eabbd 100644
--- a/g4f/Provider/mini_max/MiniMax.py
+++ b/g4f/Provider/mini_max/MiniMax.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from ..needs_auth.OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class MiniMax(OpenaiTemplate):
     label = "MiniMax API"
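For reference, the ZeroGPU JWT request that `Janus_Pro_7B.create_async_generator` builds can be reproduced in isolation. The expiration parameter is current UTC plus ten minutes, ISO-formatted to milliseconds, then percent-encoded; this sketch only assembles the URL (actually fetching it requires huggingface.co cookies):

```python
from datetime import datetime, timezone, timedelta
import urllib.parse

# Same construction as in the provider: the token must outlive the queued job.
dt = (datetime.now(timezone.utc) + timedelta(minutes=10)).isoformat(timespec="milliseconds")
encoded_dt = urllib.parse.quote(dt)
url = (
    "https://huggingface.co/api/spaces/deepseek-ai/Janus-Pro-7B/jwt"
    f"?expiration={encoded_dt}&include_pro_status=true"
)
print(url)  # GET with huggingface.co cookies returns {"token": "..."}
```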
diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py
index d46eae8d97c..9946ef255d6 100644
--- a/g4f/Provider/needs_auth/CopilotAccount.py
+++ b/g4f/Provider/needs_auth/CopilotAccount.py
@@ -10,6 +10,9 @@
 from ...errors import NoValidHarFileError
 from ... import debug
 
+def cookies_to_dict():
+    return Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}
+
 class CopilotAccount(AsyncAuthedProvider, Copilot):
     needs_auth = True
     use_nodriver = True
@@ -24,21 +27,20 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
 
     @classmethod
     async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
-        if cls._access_token is None:
-            try:
-                cls._access_token, cls._cookies = readHAR(cls.url)
-            except NoValidHarFileError as h:
-                debug.log(f"Copilot: {h}")
-                if has_nodriver:
-                    login_url = os.environ.get("G4F_LOGIN_URL")
-                    if login_url:
-                        yield RequestLogin(cls.label, login_url)
-                    cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
-                else:
-                    raise h
+        try:
+            Copilot._access_token, Copilot._cookies = readHAR(cls.url)
+        except NoValidHarFileError as h:
+            debug.log(f"Copilot: {h}")
+            if has_nodriver:
+                login_url = os.environ.get("G4F_LOGIN_URL")
+                if login_url:
+                    yield RequestLogin(cls.label, login_url)
+                Copilot._access_token, Copilot._cookies = await get_access_token_and_cookies(cls.url, proxy)
+            else:
+                raise h
         yield AuthResult(
-            api_key=cls._access_token,
-            cookies=cls._cookies,
+            api_key=Copilot._access_token,
+            cookies=cookies_to_dict()
         )
 
     @classmethod
@@ -54,4 +56,4 @@ async def create_authed(
         Copilot.needs_auth = cls.needs_auth
         for chunk in Copilot.create_completion(model, messages, **kwargs):
             yield chunk
-        auth_result.cookies = Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}
\ No newline at end of file
+        auth_result.cookies = cookies_to_dict()
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Custom.py b/g4f/Provider/needs_auth/Custom.py
index 2bd7c0147cf..4e86e523cc0 100644
--- a/g4f/Provider/needs_auth/Custom.py
+++ b/g4f/Provider/needs_auth/Custom.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class Custom(OpenaiTemplate):
     label = "Custom Provider"
diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
index 6c077a5ec86..a5f6350a52d 100644
--- a/g4f/Provider/needs_auth/DeepInfra.py
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -4,7 +4,7 @@
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession, raise_for_status
 from ...image import ImageResponse
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class DeepInfra(OpenaiTemplate):
     url = "https://deepinfra.com"
diff --git a/g4f/Provider/needs_auth/DeepSeekAPI.py b/g4f/Provider/needs_auth/DeepSeekAPI.py
new file mode 100644
index 00000000000..ad7f32fe97c
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepSeekAPI.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import os
+import json
+import time
+from typing import AsyncIterator
+import asyncio
+
+from ..base_provider import AsyncAuthedProvider
+from ...requests import get_args_from_nodriver
+from ...providers.response import AuthResult, RequestLogin, Reasoning, JsonConversation, FinishReason
+from ...typing import AsyncResult, Messages
+
+try:
+    from curl_cffi import requests
+    from dsk.api import DeepSeekAPI, AuthenticationError, DeepSeekPOW
+
+    class DeepSeekAPIArgs(DeepSeekAPI):
+        def __init__(self, args: dict):
+            args.pop("headers")
+            self.auth_token = args.pop("api_key")
+            if not self.auth_token or not isinstance(self.auth_token, str):
+                raise AuthenticationError("Invalid auth token provided")
+            self.args = args
+            self.pow_solver = DeepSeekPOW()
+
+        def _make_request(self, method: str, endpoint: str, json_data: dict, pow_required: bool = False):
+            url = f"{self.BASE_URL}{endpoint}"
+            headers = self._get_headers()
+            if pow_required:
+                challenge = self._get_pow_challenge()
+                pow_response = self.pow_solver.solve_challenge(challenge)
+                headers = self._get_headers(pow_response)
+
+            response = requests.request(
+                method=method,
+                url=url,
+                json=json_data, **{
+                    "headers": headers,
+                    "impersonate": 'chrome',
+                    "timeout": None,
+                    **self.args
+                }
+            )
+            return response.json()
+except ImportError:
+    pass
+
+class DeepSeekAPI(AsyncAuthedProvider):
+    url = "https://chat.deepseek.com"
+    working = False
+    needs_auth = True
+    use_nodriver = True
+    _access_token = None
+
+    @classmethod
+    async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
+        yield RequestLogin(cls.__name__, os.environ.get("G4F_LOGIN_URL") or "")
+        async def callback(page):
+            while True:
+                await asyncio.sleep(1)
+                cls._access_token = json.loads(await page.evaluate("localStorage.getItem('userToken')") or "{}").get("value")
+                if cls._access_token:
+                    break
+        args = await get_args_from_nodriver(cls.url, proxy, callback=callback)
+        yield AuthResult(
+            api_key=cls._access_token,
+            **args
+        )
+
+    @classmethod
+    async def create_authed(
+        cls,
+        model: str,
+        messages: Messages,
+        auth_result: AuthResult,
+        conversation: JsonConversation = None,
+        **kwargs
+    ) -> AsyncResult:
+        # Initialize with your auth token
+        api = DeepSeekAPIArgs(auth_result.get_dict())
+
+        # Create a new chat session
+        if conversation is None:
+            chat_id = api.create_chat_session()
+            conversation = JsonConversation(chat_id=chat_id)
+
+        is_thinking = 0
+        for chunk in api.chat_completion(
+            conversation.chat_id,
+            messages[-1]["content"],
+            thinking_enabled=True
+        ):
+            if chunk['type'] == 'thinking':
+                if not is_thinking:
+                    yield Reasoning(None, "Is thinking...")
+                    is_thinking = time.time()
+                yield Reasoning(chunk['content'])
+            elif chunk['type'] == 'text':
+                if is_thinking:
+                    yield Reasoning(None, f"Thought for {time.time() - is_thinking:.2f}s")
+                    is_thinking = 0
+                yield chunk['content']
+            if chunk['finish_reason']:
+                yield FinishReason(chunk['finish_reason'])
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/GlhfChat.py b/g4f/Provider/needs_auth/GlhfChat.py
index fbd7ebcd075..3d2c7bbf86f 100644
--- a/g4f/Provider/needs_auth/GlhfChat.py
+++ b/g4f/Provider/needs_auth/GlhfChat.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class GlhfChat(OpenaiTemplate):
     url = "https://glhf.chat"
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index 4299d2117dc..a2af3941674 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class Groq(OpenaiTemplate):
     url = "https://console.groq.com/playground"
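The thinking/text bookkeeping in `DeepSeekAPI.create_authed` can be exercised without the `dsk` dependency. A sketch with hand-made chunks in the shape the loop above expects (`type`, `content`, `finish_reason` keys are taken from the diff; the sample data is invented):

```python
import time
from g4f.providers.response import Reasoning, FinishReason

def tag_chunks(chunks):
    # Mirrors the create_authed loop: wrap 'thinking' chunks in Reasoning,
    # report elapsed thinking time once 'text' starts, pass text through.
    is_thinking = 0
    for chunk in chunks:
        if chunk["type"] == "thinking":
            if not is_thinking:
                yield Reasoning(None, "Is thinking...")
                is_thinking = time.time()
            yield Reasoning(chunk["content"])
        elif chunk["type"] == "text":
            if is_thinking:
                yield Reasoning(None, f"Thought for {time.time() - is_thinking:.2f}s")
                is_thinking = 0
            yield chunk["content"]
        if chunk["finish_reason"]:
            yield FinishReason(chunk["finish_reason"])

sample = [
    {"type": "thinking", "content": "weighing options", "finish_reason": None},
    {"type": "text", "content": "Answer.", "finish_reason": "stop"},
]
for out in tag_chunks(sample):
    print(out if isinstance(out, str) else repr(out))
```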
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
index 59c8f6ee9a1..c9a15260541 100644
--- a/g4f/Provider/needs_auth/HuggingFaceAPI.py
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 from .HuggingChat import HuggingChat
 from ...providers.types import Messages
 from ... import debug
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 1720564edcf..34b1c73948a 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class OpenaiAPI(OpenaiTemplate):
     label = "OpenAI API"
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 57b67cb4375..4d8e0ccd5b6 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 class PerplexityApi(OpenaiTemplate):
     label = "Perplexity API"
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 50b95a1c575..41717bd2753 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -2,7 +2,7 @@
 
 from ...typing import CreateResult, Messages
 from ..helper import filter_none
-from .OpenaiTemplate import OpenaiTemplate
+from ..template import OpenaiTemplate
 
 models = {
     "theb-ai": "TheB.AI",
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 348d9056558..691bc5bebb7 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -6,6 +6,7 @@
 from .Custom import Feature
 from .DeepInfra import DeepInfra
 from .DeepSeek import DeepSeek
+from .DeepSeekAPI import DeepSeekAPI
 from .Gemini import Gemini
 from .GeminiPro import GeminiPro
 from .GigaChat import GigaChat
diff --git a/g4f/Provider/needs_auth/xAI.py b/g4f/Provider/needs_auth/xAI.py
index 154d18d024d..8f21e9cc7bd 100644
--- a/g4f/Provider/needs_auth/xAI.py
+++ b/g4f/Provider/needs_auth/xAI.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from .OpenaiTemplate import OpenaiTemplate
+from ..template.OpenaiTemplate import OpenaiTemplate
 
 class xAI(OpenaiTemplate):
     url = "https://console.x.ai"
diff --git a/g4f/Provider/template/BackendApi.py b/g4f/Provider/template/BackendApi.py
new file mode 100644
index 00000000000..caf282194a5
--- /dev/null
+++ b/g4f/Provider/template/BackendApi.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+
+import re
+import json
+import time
+from urllib.parse import quote_plus
+
+from ...typing import Messages, AsyncResult
+from ...requests import StreamSession
+from ...providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...providers.response import ProviderInfo, JsonConversation, PreviewResponse, SynthesizeData, TitleGeneration, RequestLogin
+from ...providers.response import Parameters, FinishReason, Usage, Reasoning
+from ...errors import ModelNotSupportedError
+from ..needs_auth.OpenaiAccount import OpenaiAccount
+from ..needs_auth.HuggingChat import HuggingChat
+from ... import debug
+
+class BackendApi(AsyncGeneratorProvider, ProviderModelMixin):
+    ssl = False
+
+    models = [
+        *OpenaiAccount.get_models(),
+        *HuggingChat.get_models(),
+        "flux",
+        "flux-pro",
+        "MiniMax-01",
+        "Microsoft Copilot",
+    ]
+
+    @classmethod
+    def get_model(cls, model: str):
+        if "MiniMax" in model:
+            model = "MiniMax"
+        elif "Copilot" in model:
+            model = "Copilot"
+        elif "FLUX" in model:
+            model = f"flux-{model.split('-')[-1]}"
+        elif "flux" in model:
+            model = model.split(' ')[-1]
+        elif model in OpenaiAccount.get_models():
+            pass
+        elif model in HuggingChat.get_models():
+            pass
+        else:
+            raise ModelNotSupportedError(f"Model: {model}")
+        return model
+
+    @classmethod
+    def get_provider(cls, model: str):
+        if model.startswith("MiniMax"):
+            return "HailuoAI"
+        elif model == "Copilot":
+            return "CopilotAccount"
+        elif model in OpenaiAccount.get_models():
+            return "OpenaiAccount"
+        elif model in HuggingChat.get_models():
+            return "HuggingChat"
+        return None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        api_key: str = None,
+        proxy: str = None,
+        timeout: int = 0,
+        **kwargs
+    ) -> AsyncResult:
+        debug.log(f"{__name__}: {api_key}")
+
+        async with StreamSession(
+            proxy=proxy,
+            headers={"Accept": "text/event-stream"},
+            timeout=timeout
+        ) as session:
+            model = cls.get_model(model)
+            provider = cls.get_provider(model)
+            async with session.post(f"{cls.url}/backend-api/v2/conversation", json={
+                "model": model,
+                "messages": messages,
+                "provider": provider,
+                **kwargs
+            }, ssl=cls.ssl) as response:
+                async for line in response.iter_lines():
+                    data = json.loads(line)
+                    data_type = data.pop("type")
+                    if data_type == "provider":
+                        yield ProviderInfo(**data[data_type])
+                        provider = data[data_type]["name"]
+                    elif data_type == "conversation":
+                        yield JsonConversation(**data[data_type][provider] if provider in data[data_type] else data[data_type][""])
+                    elif data_type == "conversation_id":
+                        pass
+                    elif data_type == "message":
+                        yield Exception(data)
+                    elif data_type == "preview":
+                        yield PreviewResponse(data[data_type])
+                    elif data_type == "content":
+                        def on_image(match):
+                            extension = match.group(3).split(".")[-1].split("?")[0]
+                            extension = "" if not extension or len(extension) > 4 else f".{extension}"
+                            filename = f"{int(time.time())}_{quote_plus(match.group(1)[:100], '')}{extension}"
+                            download_url = f"/download/{filename}?url={cls.url}{match.group(3)}"
+                            return f"[![{match.group(1)}]({download_url})](/images/{filename})"
+                        yield re.sub(r'\[\!\[(.+?)\]\(([^)]+?)\)\]\(([^)]+?)\)', on_image, data["content"])
+                    elif data_type == "synthesize":
+                        yield SynthesizeData(**data[data_type])
+                    elif data_type == "parameters":
+                        yield Parameters(**data[data_type])
+                    elif data_type == "usage":
+                        yield Usage(**data[data_type])
+                    elif data_type == "reasoning":
+                        yield Reasoning(**data)
+                    elif data_type == "login":
+                        pass
+                    elif data_type == "title":
+                        yield TitleGeneration(data[data_type])
+                    elif data_type == "finish":
+                        yield FinishReason(data[data_type]["reason"])
+                    elif data_type == "log":
+                        debug.log(data[data_type])
+                    else:
+                        debug.log(f"Unknown data: ({data_type}) {data}")
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py
similarity index 100%
rename from g4f/Provider/needs_auth/OpenaiTemplate.py
rename to g4f/Provider/template/OpenaiTemplate.py
diff --git a/g4f/Provider/template/__init__.py b/g4f/Provider/template/__init__.py
new file mode 100644
index 00000000000..758bc5b45d7
--- /dev/null
+++ b/g4f/Provider/template/__init__.py
@@ -0,0 +1,2 @@
+from .BackendApi import BackendApi
+from .OpenaiTemplate import OpenaiTemplate
\ No newline at end of file
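The `content` branch of `BackendApi` rewrites markdown image links so media is proxied through the local backend. A standalone check of that regex and callback, with `BASE_URL` standing in for `cls.url` (which subclasses are expected to set; the sample markdown is invented):

```python
import re
import time
from urllib.parse import quote_plus

BASE_URL = "http://localhost:1337"  # assumption: stands in for cls.url

def on_image(match):
    extension = match.group(3).split(".")[-1].split("?")[0]
    extension = "" if not extension or len(extension) > 4 else f".{extension}"
    filename = f"{int(time.time())}_{quote_plus(match.group(1)[:100], '')}{extension}"
    download_url = f"/download/{filename}?url={BASE_URL}{match.group(3)}"
    return f"[![{match.group(1)}]({download_url})](/images/{filename})"

content = "[![a cat](/thumbnails/cat.jpg)](/media/cat.jpg)"
print(re.sub(r'\[\!\[(.+?)\]\(([^)]+?)\)\]\(([^)]+?)\)', on_image, content))
# -> [![a cat](/download/<ts>_a+cat.jpg?url=http://localhost:1337/media/cat.jpg)](/images/<ts>_a+cat.jpg)
```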
diff --git a/g4f/cookies.py b/g4f/cookies.py
index 2328f0f225f..88d1f019df8 100644
--- a/g4f/cookies.py
+++ b/g4f/cookies.py
@@ -60,6 +60,8 @@ class CookiesConfig():
     "chatgpt.com",
     ".cerebras.ai",
     "github.com",
+    "huggingface.co",
+    ".huggingface.co"
 ]
 
 if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 12ec1b60d8c..713cce7258c 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -157,6 +157,8 @@ def decorated_log(text: str):
                 yield self._format_json('error', type(e).__name__, message=get_error_message(e))
                 return
             if not isinstance(provider_handler, BaseRetryProvider):
+                if not provider:
+                    provider = provider_handler.__name__
                 yield self.handle_provider(provider_handler, model)
                 if hasattr(provider_handler, "get_parameters"):
                     yield self._format_json("parameters", provider_handler.get_parameters(as_json=True))
@@ -221,6 +223,7 @@ def _format_json(self, response_type: str, content = None, **kwargs):
             return {
                 'type': response_type,
                 response_type: content,
+                **kwargs
             }
         return {
             'type': response_type,
diff --git a/g4f/models.py b/g4f/models.py
index 98b642c4241..183be9a1d31 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -198,13 +198,13 @@ class VisionModel(Model):
 llama_3_1_8b = Model(
     name = "llama-3.1-8b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, PollinationsAI, Cloudflare, PerplexityLabs])
+    best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, PollinationsAI, Cloudflare])
 )
 
 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([DDG, Jmuz, Blackbox, TeachAnything, DarkAI, PerplexityLabs])
+    best_provider = IterListProvider([DDG, Jmuz, Blackbox, TeachAnything, DarkAI])
 )
 
 llama_3_1_405b = Model(
@@ -242,7 +242,7 @@ class VisionModel(Model):
 llama_3_3_70b = Model(
     name = "llama-3.3-70b",
     base_provider = "Meta Llama",
-    best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat, HuggingFace, PerplexityLabs])
+    best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat, HuggingFace])
 )
 
 ### Mistral ###
@@ -484,14 +484,14 @@ class VisionModel(Model):
 )
 
 ### Perplexity AI ###
-sonar_online = Model(
-    name = 'sonar-online',
+sonar = Model(
+    name = 'sonar',
     base_provider = 'Perplexity AI',
     best_provider = PerplexityLabs
 )
 
-sonar_chat = Model(
-    name = 'sonar-chat',
+sonar_pro = Model(
+    name = 'sonar-pro',
     base_provider = 'Perplexity AI',
     best_provider = PerplexityLabs
 )
@@ -503,13 +503,6 @@ class VisionModel(Model):
     best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
 )
 
-### Liquid ###
-lfm_40b = Model(
-    name = 'lfm-40b',
-    base_provider = 'Liquid',
-    best_provider = PerplexityLabs
-)
-
 ### Databricks ###
 dbrx_instruct = Model(
     name = 'dbrx-instruct',
@@ -736,15 +729,14 @@ class ModelUtils:
         grok_2.name: grok_2,
 
         ### Perplexity AI ###
-        sonar_online.name: sonar_online,
-        sonar_chat.name: sonar_chat,
+        sonar.name: sonar,
+        sonar_pro.name: sonar_pro,
 
         ### DeepSeek ###
         deepseek_chat.name: deepseek_chat,
         deepseek_r1.name: deepseek_r1,
 
         nemotron_70b.name: nemotron_70b, ### Nvidia ###
-        lfm_40b.name: lfm_40b, ### Liquid ###
         dbrx_instruct.name: dbrx_instruct, ### Databricks ###
         p1.name: p1, ### PollinationsAI ###
         cably_80b.name: cably_80b, ### CablyAI ###
diff --git a/g4f/providers/base_provider.py b/g4f/providers/base_provider.py
index d46ed3a8c00..d27b90352a8 100644
--- a/g4f/providers/base_provider.py
+++ b/g4f/providers/base_provider.py
@@ -415,10 +415,10 @@ def create_completion(
         model: str,
         messages: Messages,
         **kwargs
-    ) -> CreateResult:
+    ) -> CreateResult:
+        auth_result = AuthResult()
+        cache_file = cls.get_cache_file()
         try:
-            auth_result = AuthResult()
-            cache_file = cls.get_cache_file()
             if cache_file.exists():
                 with cache_file.open("r") as f:
                     auth_result = AuthResult(**json.load(f))
diff --git a/g4f/providers/retry_provider.py b/g4f/providers/retry_provider.py
index dcd46d3c778..d80db83f314 100644
--- a/g4f/providers/retry_provider.py
+++ b/g4f/providers/retry_provider.py
@@ -87,7 +87,7 @@ async def create_async_generator(
         for provider in self.get_providers(stream and not ignore_stream, ignored):
             self.last_provider = provider
             debug.log(f"Using {provider.__name__} provider")
-            yield ProviderInfo(provider.get_dict())
+            yield ProviderInfo(**provider.get_dict())
             try:
                 response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
                 if hasattr(response, "__aiter__"):
diff --git a/g4f/requests/__init__.py b/g4f/requests/__init__.py
index 0dfe6e68d26..03516951b8b 100644
--- a/g4f/requests/__init__.py
+++ b/g4f/requests/__init__.py
@@ -167,7 +167,7 @@ async def get_nodriver(
         )
     except:
         if util.get_registered_instances():
-            browser = util.get_registered_instances()[-1]
+            browser = util.get_registered_instances().pop()
        else:
            raise
    stop = browser.stop
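With the registry updated, the renamed Perplexity models resolve by name through the regular client. A quick smoke-test sketch (output varies; per `models.py`, `sonar-pro` routes to PerplexityLabs):

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="sonar-pro",  # replaces the removed "sonar-online"/"sonar-chat" aliases
    messages=[{"role": "user", "content": "One sentence about Mars."}],
)
print(response.choices[0].message.content)
```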