Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add Janus_Pro_7B provider #2643

Merged
merged 5 commits into from
Jan 30, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion g4f/Provider/CablyAI.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .needs_auth.OpenaiTemplate import OpenaiTemplate
from .template import OpenaiTemplate

class CablyAI(OpenaiTemplate):
url = "https://cablyai.com"
Expand Down
4 changes: 3 additions & 1 deletion g4f/Provider/Copilot.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
from .helper import format_prompt_max_length
from .openai.har_file import get_headers, get_har_files
from ..typing import CreateResult, Messages, ImagesType
from ..errors import MissingRequirementsError, NoValidHarFileError
from ..errors import MissingRequirementsError, NoValidHarFileError, MissingAuthError
from ..requests.raise_for_status import raise_for_status
from ..providers.response import BaseConversation, JsonConversation, RequestLogin, Parameters
from ..providers.asyncio import get_running_loop
Expand Down Expand Up @@ -119,6 +119,8 @@ def create_completion(
# else:
# clarity_token = None
response = session.get("https://copilot.microsoft.com/c/api/user")
if response.status_code == 401:
raise MissingAuthError("Status 401: Invalid access token")
raise_for_status(response)
user = response.json().get('firstName')
if user is None:
Expand Down
2 changes: 1 addition & 1 deletion g4f/Provider/DeepInfraChat.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .needs_auth.OpenaiTemplate import OpenaiTemplate
from .template import OpenaiTemplate

class DeepInfraChat(OpenaiTemplate):
url = "https://deepinfra.com/chat"
Expand Down
2 changes: 1 addition & 1 deletion g4f/Provider/Jmuz.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .needs_auth.OpenaiTemplate import OpenaiTemplate
from .template import OpenaiTemplate

class Jmuz(OpenaiTemplate):
url = "https://discord.gg/Ew6JzjA2NR"
Expand Down
2 changes: 1 addition & 1 deletion g4f/Provider/Mhystical.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .needs_auth.OpenaiTemplate import OpenaiTemplate
from .template import OpenaiTemplate

class Mhystical(OpenaiTemplate):
url = "https://mhystical.cc"
Expand Down
2 changes: 1 addition & 1 deletion g4f/Provider/OIVSCode.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from __future__ import annotations

from .needs_auth.OpenaiTemplate import OpenaiTemplate
from .template import OpenaiTemplate

class OIVSCode(OpenaiTemplate):
label = "OI VSCode Server"
Expand Down
45 changes: 27 additions & 18 deletions g4f/Provider/PerplexityLabs.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
from ..providers.response import Reasoning, FinishReason
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

API_URL = "https://www.perplexity.ai/socket.io/"
Expand All @@ -13,25 +14,16 @@
class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://labs.perplexity.ai"
working = True
default_model = "llama-3.1-70b-instruct"

default_model = "sonar-pro"
models = [
"llama-3.1-sonar-large-128k-online",
"llama-3.1-sonar-small-128k-online",
"llama-3.1-sonar-large-128k-chat",
"llama-3.1-sonar-small-128k-chat",
"llama-3.1-8b-instruct",
"llama-3.1-70b-instruct",
"llama-3.3-70b-instruct",
"/models/LiquidCloud",
default_model,
"sonar",
"sonar-reasoning",
]

model_aliases = {
"sonar-online": "llama-3.1-sonar-large-128k-online",
"sonar-chat": "llama-3.1-sonar-large-128k-chat",
"llama-3.3-70b": "llama-3.3-70b-instruct",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"llama-3.1-70b": "llama-3.1-70b-instruct",
"lfm-40b": "/models/LiquidCloud",
"sonar-online": default_model,
"sonar-chat": default_model,
}

@classmethod
Expand Down Expand Up @@ -78,13 +70,14 @@ async def create_async_generator(
assert(await ws.receive_str())
assert(await ws.receive_str() == "6")
message_data = {
"version": "2.13",
"version": "2.16",
"source": "default",
"model": model,
"messages": messages
}
await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
last_message = 0
is_thinking = False
while True:
message = await ws.receive_str()
if message == "2":
Expand All @@ -94,9 +87,25 @@ async def create_async_generator(
continue
try:
data = json.loads(message[2:])[1]
yield data["output"][last_message:]
new_content = data["output"][last_message:]

if "<think>" in new_content:
yield Reasoning(None, "thinking")
is_thinking = True
if "</think>" in new_content:
new_content = new_content.split("</think>", 1)
yield Reasoning(f"{new_content[0]}</think>")
yield Reasoning(None, "finished")
yield new_content[1]
is_thinking = False
elif is_thinking:
yield Reasoning(new_content)
else:
yield new_content

last_message = len(data["output"])
if data["final"]:
yield FinishReason("stop")
break
except:
raise RuntimeError(f"Message: {message}")
2 changes: 2 additions & 0 deletions g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from .local import *
from .hf_space import HuggingSpace
from .mini_max import HailuoAI, MiniMax
from .template import OpenaiTemplate, BackendApi

from .AIChatFree import AIChatFree
from .AIUncensored import AIUncensored
Expand Down Expand Up @@ -55,6 +56,7 @@
if isinstance(provider, type)
and issubclass(provider, BaseProvider)
]
__providers__ = __providers__ + HuggingSpace.providers
__all__: list[str] = [
provider.__name__ for provider in __providers__
]
Expand Down
144 changes: 144 additions & 0 deletions g4f/Provider/hf_space/Janus_Pro_7B.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,144 @@
from __future__ import annotations

import json
import uuid
import re
from datetime import datetime, timezone, timedelta
import urllib.parse

from ...typing import AsyncResult, Messages, Cookies
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ...providers.response import JsonConversation, ImageResponse
from ...requests.aiohttp import StreamSession, StreamResponse
from ...requests.raise_for_status import raise_for_status
from ...cookies import get_cookies
from ...errors import ResponseError
from ... import debug

class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider backed by the deepseek-ai/Janus-Pro-7B Hugging Face space.

    Talks to the space's Gradio queue API over HTTP + server-sent events.
    Supports both a text model and an image-generation model; which endpoint
    is used depends on the requested model / presence of an image prompt.
    """
    url = "https://huggingface.co/spaces/deepseek-ai/Janus-Pro-7B"
    api_url = "https://deepseek-ai-janus-pro-7b.hf.space"
    referer = f"{api_url}?__theme=light"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "janus-pro-7b"
    default_image_model = "janus-pro-7b-image"
    models = [default_model, default_image_model]
    image_models = [default_image_model]

    @classmethod
    def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation):
        """Issue one Gradio queue request.

        method: "post" queues a text generation, "image" queues an image
        generation, anything else opens the SSE stream that delivers results
        for this conversation's session_hash.
        Returns the (unawaited) StreamSession request context manager.
        """
        if method == "post":
            # fn_index/trigger_id identify the text-generation endpoint in the space.
            return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
                "headers": {
                    "content-type": "application/json",
                    "x-zerogpu-token": conversation.zerogpu_token,
                    "x-zerogpu-uuid": conversation.uuid,
                    "referer": cls.referer,
                },
                "json": {"data":[None,prompt,42,0.95,0.1],"event_data":None,"fn_index":2,"trigger_id":10,"session_hash":conversation.session_hash},
            })
        elif method == "image":
            # fn_index/trigger_id identify the image-generation endpoint in the space.
            return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
                "headers": {
                    "content-type": "application/json",
                    "x-zerogpu-token": conversation.zerogpu_token,
                    "x-zerogpu-uuid": conversation.uuid,
                    "referer": cls.referer,
                },
                "json": {"data":[prompt,1234,5,1],"event_data":None,"fn_index":3,"trigger_id":20,"session_hash":conversation.session_hash},
            })
        # Default: subscribe to the event stream for queued results.
        return session.get(f"{cls.api_url}/gradio_api/queue/data?session_hash={conversation.session_hash}", **{
            "headers": {
                "accept": "text/event-stream",
                "content-type": "application/json",
                "referer": cls.referer,
            }
        })

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        prompt: str = None,
        proxy: str = None,
        cookies: Cookies = None,
        return_conversation: bool = False,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        """Stream text chunks (or yield an ImageResponse) from the space.

        Acquires a ZeroGPU token (via Hugging Face cookies when available,
        otherwise scraped from the space's HTML), queues the request, then
        relays the SSE results. Raises ResponseError on a reported failure.
        """
        def generate_session_hash():
            """Generate a unique session hash."""
            return str(uuid.uuid4()).replace('-', '')[:12]

        # Image endpoint is used for the image model or when an explicit prompt is given.
        method = "post"
        if model == cls.default_image_model or prompt is not None:
            method = "image"

        # First turn: send the whole formatted history; follow-ups: only the last message.
        prompt = format_prompt(messages) if prompt is None and conversation is None else prompt
        prompt = messages[-1]["content"] if prompt is None else prompt

        async with StreamSession(proxy=proxy, impersonate="chrome") as session:
            session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
            user_uuid = None if conversation is None else getattr(conversation, "user_uuid", None)
            # Placeholder the space accepts when no real token could be obtained.
            zerogpu_token = "[object Object]"

            cookies = get_cookies("huggingface.co", raise_requirements_error=False) if cookies is None else cookies
            if cookies:
                # Request a JWT valid for the next 10 minutes (UTC).
                dt = (datetime.now(timezone.utc) + timedelta(minutes=10)).isoformat(timespec='milliseconds')
                encoded_dt = urllib.parse.quote(dt)
                async with session.get(f"https://huggingface.co/api/spaces/deepseek-ai/Janus-Pro-7B/jwt?expiration={encoded_dt}&include_pro_status=true", cookies=cookies) as response:
                    zerogpu_token = (await response.json())
                    zerogpu_token = zerogpu_token["token"]
            if user_uuid is None:
                # Fall back to scraping token/session uuid out of the space's HTML.
                async with session.get(cls.url, cookies=cookies) as response:
                    # Read the body once and reuse it for both searches.
                    page = await response.text()
                    match = re.search(r"&quot;token&quot;:&quot;([^&]+?)&quot;", page)
                    if match:
                        zerogpu_token = match.group(1)
                    match = re.search(r"&quot;sessionUuid&quot;:&quot;([^&]+?)&quot;", page)
                    if match:
                        user_uuid = match.group(1)

            if conversation is None or not hasattr(conversation, "session_hash"):
                conversation = JsonConversation(session_hash=session_hash, zerogpu_token=zerogpu_token, uuid=user_uuid)
            # Always refresh the token on an existing conversation.
            conversation.zerogpu_token = zerogpu_token
            if return_conversation:
                yield conversation

            async with cls.run(method, session, prompt, conversation) as response:
                await raise_for_status(response)

            async with cls.run("get", session, prompt, conversation) as response:
                response: StreamResponse = response
                async for line in response.iter_lines():
                    decoded_line = line.decode(errors="replace")
                    if decoded_line.startswith('data: '):
                        try:
                            json_data = json.loads(decoded_line[6:])
                            if json_data.get('msg') == 'log':
                                debug.log(json_data["log"])

                            if json_data.get('msg') == 'process_generating':
                                if 'output' in json_data and 'data' in json_data['output']:
                                    yield f"data: {json.dumps(json_data['output']['data'])}"

                            if json_data.get('msg') == 'process_completed':
                                if 'output' in json_data and 'error' in json_data['output']:
                                    raise ResponseError("Text model is not working. Try out image model" if "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
                                if 'output' in json_data and 'data' in json_data['output']:
                                    if "image" in json_data['output']['data'][0][0]:
                                        yield ImageResponse([image["image"]["url"] for image in json_data['output']['data'][0]], prompt)
                                    else:
                                        yield f"data: {json.dumps(json_data['output']['data'])}"
                                break

                        except json.JSONDecodeError:
                            debug.log("Could not parse JSON:", decoded_line)
8 changes: 6 additions & 2 deletions g4f/Provider/hf_space/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from .BlackForestLabsFlux1Schnell import BlackForestLabsFlux1Schnell
from .VoodoohopFlux1Schnell import VoodoohopFlux1Schnell
from .CohereForAI import CohereForAI
from .Janus_Pro_7B import Janus_Pro_7B
from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5M_Demo import Qwen_Qwen_2_5M_Demo
from .Qwen_Qwen_2_72B_Instruct import Qwen_Qwen_2_72B_Instruct
Expand All @@ -25,8 +26,11 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
default_image_model = BlackForestLabsFlux1Dev.default_model
default_vision_model = Qwen_QVQ_72B.default_model
providers = [
BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell, VoodoohopFlux1Schnell,
CohereForAI, Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct, StableDiffusion35Large
BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell,
VoodoohopFlux1Schnell,
CohereForAI, Janus_Pro_7B,
Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct,
StableDiffusion35Large
]

@classmethod
Expand Down
2 changes: 1 addition & 1 deletion g4f/Provider/mini_max/MiniMax.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from __future__ import annotations

from ..needs_auth.OpenaiTemplate import OpenaiTemplate
from ..template import OpenaiTemplate

class MiniMax(OpenaiTemplate):
label = "MiniMax API"
Expand Down
32 changes: 17 additions & 15 deletions g4f/Provider/needs_auth/CopilotAccount.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@
from ...errors import NoValidHarFileError
from ... import debug

def cookies_to_dict():
    """Return Copilot's stored cookies as a plain name -> value mapping."""
    stored = Copilot._cookies
    if isinstance(stored, dict):
        return stored
    return {cookie.name: cookie.value for cookie in stored}

class CopilotAccount(AsyncAuthedProvider, Copilot):
needs_auth = True
use_nodriver = True
Expand All @@ -24,21 +27,20 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):

@classmethod
async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
if cls._access_token is None:
try:
cls._access_token, cls._cookies = readHAR(cls.url)
except NoValidHarFileError as h:
debug.log(f"Copilot: {h}")
if has_nodriver:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield RequestLogin(cls.label, login_url)
cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
else:
raise h
try:
Copilot._access_token, Copilot._cookies = readHAR(cls.url)
except NoValidHarFileError as h:
debug.log(f"Copilot: {h}")
if has_nodriver:
login_url = os.environ.get("G4F_LOGIN_URL")
if login_url:
yield RequestLogin(cls.label, login_url)
Copilot._access_token, Copilot._cookies = await get_access_token_and_cookies(cls.url, proxy)
else:
raise h
yield AuthResult(
api_key=cls._access_token,
cookies=cls._cookies,
api_key=Copilot._access_token,
cookies=cookies_to_dict()
)

@classmethod
Expand All @@ -54,4 +56,4 @@ async def create_authed(
Copilot.needs_auth = cls.needs_auth
for chunk in Copilot.create_completion(model, messages, **kwargs):
yield chunk
auth_result.cookies = Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}
auth_result.cookies = cookies_to_dict()
2 changes: 1 addition & 1 deletion g4f/Provider/needs_auth/Custom.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from __future__ import annotations

from .OpenaiTemplate import OpenaiTemplate
from ..template import OpenaiTemplate

class Custom(OpenaiTemplate):
label = "Custom Provider"
Expand Down
Loading