Skip to content

Commit

Permalink
Add OpenRouter and DeepInfraImage Provider (#1814)
Browse files Browse the repository at this point in the history
  • Loading branch information
hlohaus authored Apr 10, 2024
1 parent 84475b4 commit 00951eb
Show file tree
Hide file tree
Showing 14 changed files with 164 additions and 38 deletions.
2 changes: 1 addition & 1 deletion g4f/Provider/Bing.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def create_async_generator(
proxy: str = None,
timeout: int = 900,
api_key: str = None,
cookies: Cookies = None,
cookies: Cookies = {},
connector: BaseConnector = None,
tone: str = None,
image: ImageType = None,
Expand Down
74 changes: 74 additions & 0 deletions g4f/Provider/DeepInfraImage.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
from __future__ import annotations

import requests

from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
from ..image import ImageResponse

class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
    """Image-generation provider backed by the DeepInfra inference API."""
    url = "https://deepinfra.com"
    working = True
    default_model = 'stability-ai/sdxl'

    @classmethod
    def get_models(cls):
        """Fetch and cache the names of DeepInfra's featured text-to-image models.

        The list is stored on the class, so the HTTP request is made at most
        once per process.
        """
        if not cls.models:
            url = 'https://api.deepinfra.com/models/featured'
            # Explicit timeout so a hung endpoint cannot block the caller forever.
            models = requests.get(url, timeout=30).json()
            cls.models = [
                model['model_name']
                for model in models
                if model["reported_type"] == "text-to-image"
            ]
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        **kwargs
    ) -> AsyncResult:
        """Yield a single ImageResponse generated from the last message's content."""
        yield await cls.create_async(messages[-1]["content"], model, **kwargs)

    @classmethod
    async def create_async(
        cls,
        prompt: str,
        model: str,
        api_key: str = None,
        api_base: str = "https://api.deepinfra.com/v1/inference",
        proxy: str = None,
        timeout: int = 180,
        extra_data: dict = None,
        **kwargs
    ) -> ImageResponse:
        """Generate image(s) for *prompt* with the given DeepInfra model.

        Args:
            prompt: Text prompt for the image model.
            api_key: Optional bearer token; sent as an Authorization header.
            api_base: Inference endpoint base URL.
            proxy: Optional proxy URL applied to all schemes.
            timeout: Request timeout in seconds.
            extra_data: Extra fields merged into the request payload.

        Returns:
            ImageResponse holding one URL (or a list of URLs) and the prompt.
        """
        # `None` default instead of `{}` avoids sharing one mutable dict
        # across all calls (mutable-default-argument pitfall).
        if extra_data is None:
            extra_data = {}
        headers = {
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US',
            'Connection': 'keep-alive',
            'Origin': 'https://deepinfra.com',
            'Referer': 'https://deepinfra.com/',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-site',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'X-Deepinfra-Source': 'web-embed',
            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
        }
        if api_key is not None:
            headers["Authorization"] = f"Bearer {api_key}"
        async with StreamSession(
            proxies={"all": proxy},
            headers=headers,
            timeout=timeout
        ) as session:
            model = cls.get_model(model)
            data = {"prompt": prompt, **extra_data}
            # The default SDXL endpoint expects the payload nested under "input".
            data = {"input": data} if model == cls.default_model else data
            async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
                await raise_for_status(response)
                data = await response.json()
                # Response schema differs per model: "output" vs "images".
                images = data["output"] if "output" in data else data["images"]
                # Unwrap a single-image list so callers get a plain URL.
                images = images[0] if len(images) == 1 else images
                return ImageResponse(images, prompt)
11 changes: 7 additions & 4 deletions g4f/Provider/You.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,9 @@
from ..typing import AsyncResult, Messages, ImageType, Cookies
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..image import to_bytes, ImageResponse
from ..image import ImageResponse, to_bytes, is_accepted_format
from ..requests import StreamSession, FormData, raise_for_status
from ..errors import MissingRequirementsError

from .you.har_file import get_dfp_telemetry_id

Expand Down Expand Up @@ -46,6 +47,7 @@ async def create_async_generator(
image: ImageType = None,
image_name: str = None,
proxy: str = None,
timeout: int = 240,
chat_mode: str = "default",
**kwargs,
) -> AsyncResult:
Expand All @@ -55,12 +57,14 @@ async def create_async_generator(
...
elif model.startswith("dall-e"):
chat_mode = "create"
messages = [messages[-1]]
else:
chat_mode = "custom"
model = cls.get_model(model)
async with StreamSession(
proxies={"all": proxy},
impersonate="chrome"
impersonate="chrome",
timeout=(30, timeout)
) as session:
cookies = await cls.get_cookies(session) if chat_mode != "default" else None
upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""
Expand All @@ -73,7 +77,6 @@ async def create_async_generator(
"q": format_prompt(messages),
"domain": "youchat",
"selectedChatMode": chat_mode,
#"chat": json.dumps(chat),
}
params = {
"userFiles": upload,
Expand Down Expand Up @@ -113,7 +116,7 @@ async def upload_file(cls, client: StreamSession, cookies: Cookies, file: bytes,
await raise_for_status(response)
upload_nonce = await response.text()
data = FormData()
data.add_field('file', file, filename=filename)
data.add_field('file', file, content_type=is_accepted_format(file), filename=filename)
async with client.post(
f"{cls.url}/api/upload",
data=data,
Expand Down
1 change: 1 addition & 0 deletions g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
from .ChatgptNext import ChatgptNext
from .ChatgptX import ChatgptX
from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .DuckDuckGo import DuckDuckGo
from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt
Expand Down
31 changes: 31 additions & 0 deletions g4f/Provider/needs_auth/OpenRouter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
from __future__ import annotations

import requests

from .Openai import Openai
from ...typing import AsyncResult, Messages

class OpenRouter(Openai):
    """OpenRouter chat provider — a thin wrapper that points the Openai
    base implementation at OpenRouter's OpenAI-compatible API."""
    url = "https://openrouter.ai"
    working = True
    default_model = "openrouter/auto"

    @classmethod
    def get_models(cls):
        """Return the ids of all models exposed by OpenRouter, cached on the class."""
        if not cls.models:
            listing = requests.get('https://openrouter.ai/api/v1/models').json()
            cls.models = [entry['id'] for entry in listing["data"]]
        return cls.models

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = "https://openrouter.ai/api/v1",
        **kwargs
    ) -> AsyncResult:
        """Delegate to the Openai implementation, defaulting to OpenRouter's API base."""
        return super().create_async_generator(model, messages, api_base=api_base, **kwargs)
13 changes: 3 additions & 10 deletions g4f/Provider/needs_auth/Openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,10 @@

import json

from ..helper import filter_none
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
from ...typing import Union, Optional, AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ...requests import StreamSession
from ...requests import StreamSession, raise_for_status
from ...errors import MissingAuthError, ResponseError

class Openai(AsyncGeneratorProvider, ProviderModelMixin):
Expand Down Expand Up @@ -98,11 +98,4 @@ def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) ->
else {}
),
**({} if headers is None else headers)
}

def filter_none(**kwargs) -> dict:
return {
key: value
for key, value in kwargs.items()
if value is not None
}
}
2 changes: 1 addition & 1 deletion g4f/Provider/needs_auth/OpenaiChat.py
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ async def create_async_generator(
RuntimeError: If an error occurs during processing.
"""
async with StreamSession(
proxies={"https": proxy},
proxies={"all": proxy},
impersonate="chrome",
timeout=timeout
) as session:
Expand Down
3 changes: 2 additions & 1 deletion g4f/Provider/needs_auth/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,5 @@
from .OpenaiChat import OpenaiChat
from .Poe import Poe
from .Openai import Openai
from .Groq import Groq
from .Groq import Groq
from .OpenRouter import OpenRouter
2 changes: 1 addition & 1 deletion g4f/api/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ async def read_root_v1():
@self.app.get("/v1/models")
async def models():
model_list = dict(
(model, g4f.ModelUtils.convert[model])
(model, g4f.models.ModelUtils.convert[model])
for model in g4f.Model.__all__()
)
model_list = [{
Expand Down
8 changes: 8 additions & 0 deletions g4f/gui/client/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -132,10 +132,18 @@
<label for="GeminiPro-api_key" class="label" title="">GeminiPro: api_key</label>
<textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="OpenRouter-api_key" class="label" title="">OpenRouter: api_key</label>
<textarea id="OpenRouter-api_key" name="OpenRouter[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="HuggingFace-api_key" class="label" title="">HuggingFace: api_key</label>
<textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="DeepInfra-api_key" class="label" title="">DeepInfra: api_key</label>
<textarea id="DeepInfra-api_key" name="DeepInfra[api_key]" placeholder="..."></textarea>
</div>
</div>
<div class="bottom_buttons">
<button onclick="delete_conversations()">
Expand Down
10 changes: 7 additions & 3 deletions g4f/gui/client/static/css/style.css
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ body {
}

.conversations {
max-width: 280px;
max-width: 300px;
padding: var(--section-gap);
overflow: auto;
flex-shrink: 0;
Expand Down Expand Up @@ -207,9 +207,9 @@ body {
gap: 4px;
}

.conversations .convo .fa-trash {
.conversations .convo .fa-ellipsis-vertical {
position: absolute;
right: 8px;
right: 14px;
}

.conversations .convo .choise {
Expand Down Expand Up @@ -1075,6 +1075,10 @@ a:-webkit-any-link {
resize: vertical;
}

.settings textarea {
height: 51px;
}

.settings {
width: 100%;
display: flex;
Expand Down
32 changes: 18 additions & 14 deletions g4f/gui/client/static/js/chat.v1.js
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ appStorage = window.localStorage || {
const markdown = window.markdownit();
const markdown_render = (content) => {
return markdown.render(content
.replaceAll(/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, "")
.replaceAll(/<!-- generated images start -->|<!-- generated images end -->/gm, "")
.replaceAll(/<img data-prompt="[^>]+">/gm, "")
)
.replaceAll("<a href=", '<a target="_blank" href=')
Expand Down Expand Up @@ -127,9 +127,6 @@ const register_message_buttons = async () => {
sound.controls = 'controls';
sound.src = url;
sound.type = 'audio/wav';
if (ended && !stopped) {
sound.autoplay = true;
}
sound.onended = function() {
ended = true;
};
Expand All @@ -140,6 +137,9 @@ const register_message_buttons = async () => {
container.classList.add("audio");
container.appendChild(sound);
content_el.appendChild(container);
if (ended && !stopped) {
sound.play();
}
}
if (lines.length < 1 || stopped) {
el.classList.remove("active");
Expand Down Expand Up @@ -608,12 +608,11 @@ async function get_messages(conversation_id) {
}

async function add_conversation(conversation_id, content) {
if (content.length > 17) {
title = content.substring(0, 17) + '...'
if (content.length > 18) {
title = content.substring(0, 18) + '...'
} else {
title = content + '&nbsp;'.repeat(19 - content.length)
title = content + '&nbsp;'.repeat(20 - content.length)
}

if (appStorage.getItem(`conversation:${conversation_id}`) == null) {
await save_conversation(conversation_id, {
id: conversation_id,
Expand All @@ -623,7 +622,6 @@ async function add_conversation(conversation_id, content) {
items: [],
});
}

history.pushState({}, null, `/chat/${conversation_id}`);
}

Expand Down Expand Up @@ -695,27 +693,31 @@ const load_conversations = async () => {

await clear_conversations();

for (conversation of conversations) {
conversations.sort((a, b) => (b.updated||0)-(a.updated||0));

let html = "";
conversations.forEach((conversation) => {
let updated = "";
if (conversation.updated) {
const date = new Date(conversation.updated);
updated = date.toLocaleString('en-GB', {dateStyle: 'short', timeStyle: 'short', monthStyle: 'short'});
updated = updated.replace("/" + date.getFullYear(), "")
}
box_conversations.innerHTML += `
html += `
<div class="convo" id="convo-${conversation.id}">
<div class="left" onclick="set_conversation('${conversation.id}')">
<i class="fa-regular fa-comments"></i>
<span class="convo-title"><span class="datetime">${updated}</span> ${conversation.title}</span>
</div>
<i onclick="show_option('${conversation.id}')" class="fa-regular fa-trash" id="conv-${conversation.id}"></i>
<i onclick="show_option('${conversation.id}')" class="fa-solid fa-ellipsis-vertical" id="conv-${conversation.id}"></i>
<div id="cho-${conversation.id}" class="choise" style="display:none;">
<i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-check"></i>
<i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-trash"></i>
<i onclick="hide_option('${conversation.id}')" class="fa-regular fa-x"></i>
</div>
</div>
`;
}
});
box_conversations.innerHTML = html;
};

document.getElementById("cancelButton").addEventListener("click", async () => {
Expand Down Expand Up @@ -804,6 +806,7 @@ const register_settings_storage = async () => {
appStorage.setItem(element.id, element.selectedIndex);
break;
case "text":
case "number":
appStorage.setItem(element.id, element.value);
break;
default:
Expand All @@ -828,6 +831,7 @@ const load_settings_storage = async () => {
element.selectedIndex = parseInt(value);
break;
case "text":
case "number":
case "textarea":
element.value = value;
break;
Expand Down
Loading

0 comments on commit 00951eb

Please sign in to comment.