diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index 503a6dcc6d6..00000000000
--- a/Dockerfile
+++ /dev/null
@@ -1,33 +0,0 @@
-# Use the official lightweight Python image.
-# https://hub.docker.com/_/python
-FROM python:3.9-slim
-
-# Ensure Python outputs everything immediately (useful for real-time logging in Docker).
-ENV PYTHONUNBUFFERED 1
-
-# Set the working directory in the container.
-WORKDIR /app
-
-# Update the system packages and install system-level dependencies required for compilation.
-# gcc: Compiler required for some Python packages.
-# build-essential: Contains necessary tools and libraries for building software.
-RUN apt-get update && apt-get install -y --no-install-recommends \
- gcc \
- build-essential \
- && rm -rf /var/lib/apt/lists/*
-
-# Copy the project's requirements file into the container.
-COPY requirements.txt /app/
-
-# Upgrade pip for the latest features and install the project's Python dependencies.
-RUN pip install --upgrade pip && pip install -r requirements.txt
-
-# Copy the entire project into the container.
-# This may include all code, assets, and configuration files required to run the application.
-COPY . /app/
-
-# Expose port 80 and 1337
-EXPOSE 80 1337
-
-# Define the default command to run the app using Python's module mode.
-ENTRYPOINT ["python", "-m", "g4f.cli"]
diff --git a/README.md b/README.md
index be45c06aa3d..30c1c8e58bc 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-![248433934-7886223b-c1d1-4260-82aa-da5741f303bb](https://github.com/xtekky/gpt4free/assets/98614666/ea012c87-76e0-496a-8ac4-e2de090cc6c9)
+![g4f](g4f.png)
diff --git a/docker-compose.yml b/docker-compose.yml
index 43aa6c02eb0..89b11f06709 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,18 +1,17 @@
version: '3'
services:
- gpt4free-api: &gpt4free
+ gpt4free:
image: gpt4free:latest
+ shm_size: 2gb
build:
context: .
- dockerfile: Dockerfile
+ dockerfile: docker/Dockerfile
cache_from:
- gpt4free:latest
- ports:
- - '1337:1337'
- command: api
- gpt4free-gui:
- <<: *gpt4free
+ volumes:
+ - .:/app
ports:
- '8080:80'
- command: gui
+ - '1337:1337'
+ - '7900:7900'
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 00000000000..354f66d1aed
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,42 @@
+FROM selenium/node-chrome
+
+ENV SE_SCREEN_WIDTH=1920
+ENV G4F_LOGIN_URL="http://localhost:7900/?autoconnect=1&resize=scale&password=secret"
+
+USER root
+
+# Python packages
+RUN apt-get -qqy update \
+  && apt-get -qqy install --no-install-recommends \
+ python3 \
+ python-is-python3 \
+ pip
+
+# Cleanup
+RUN rm -rf /var/lib/apt/lists/* /var/cache/apt/* \
+ && apt-get -qyy autoremove \
+ && apt-get -qyy clean
+
+# Update entrypoint
+COPY docker/start-selenium-node.sh /opt/bin/
+
+# Change background image
+COPY g4f.png /usr/share/images/fluxbox/ubuntu-light.png
+
+# Switch user
+USER 1200
+
+# Set the working directory in the container.
+WORKDIR /app
+
+# Copy the project's requirements file into the container.
+COPY requirements.txt /app/
+
+# Upgrade pip for the latest features and install the project's Python dependencies.
+RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r requirements.txt
+
+# Copy the entire package into the container.
+COPY g4f /app/g4f
+
+# Expose ports
+EXPOSE 80 1337
\ No newline at end of file
diff --git a/docker/start-selenium-node.sh b/docker/start-selenium-node.sh
new file mode 100755
index 00000000000..a02d0e82d25
--- /dev/null
+++ b/docker/start-selenium-node.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Start the pulseaudio server
+pulseaudio -D --exit-idle-time=-1
+
+# Load the virtual sink and set it as default
+pacmd load-module module-virtual-sink sink_name=v1
+pacmd set-default-sink v1
+
+# Set the monitor of v1 sink to be the default source
+pacmd set-default-source v1.monitor
+
+rm -f /tmp/.X*lock
+
+# Start app servers
+python -m g4f.cli api &
+python -m g4f.cli gui
\ No newline at end of file
diff --git a/g4f.png b/g4f.png
new file mode 100644
index 00000000000..41bf9e6b806
Binary files /dev/null and b/g4f.png differ
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index b790a6d22b3..9e3e7405d5e 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -156,8 +156,11 @@ async def delete_conversation(session: ClientSession, conversation: Conversation
"optionsSets": ["autosave"]
}
async with session.post(url, json=json, proxy=proxy) as response:
- response = await response.json()
- return response["result"]["value"] == "Success"
+ try:
+ response = await response.json()
+ return response["result"]["value"] == "Success"
+    except Exception:
+ return False
class Defaults:
delimiter = "\x1e"
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index 941ca6d4e4c..ad629aa8507 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -1,6 +1,10 @@
from __future__ import annotations
import time
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
@@ -27,11 +31,6 @@ def create_completion(
**kwargs
) -> CreateResult:
with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
- from selenium.webdriver.common.keys import Keys
-
prompt = format_prompt(messages)
driver.get(f"{cls.url}/")
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 2171f0b787b..61d9cb624f4 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -6,6 +6,7 @@
import random
import string
import secrets
+import os
from os import path
from asyncio import AbstractEventLoop
from platformdirs import user_config_dir
@@ -18,7 +19,7 @@
edge,
vivaldi,
firefox,
- BrowserCookieError
+ _LinuxPasswordManager
)
from ..typing import Dict, Messages
@@ -81,6 +82,10 @@ def open_urls_in_browser(browser):
except webbrowser.Error:
continue
+# Check for broken dbus address in docker image
+if os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
+ _LinuxPasswordManager.get_password = lambda a, b: b"secret"
+
# Load cookies for a domain from all supported browsers.
# Cache the results in the "_cookies" variable.
def get_cookies(domain_name=''):
@@ -100,7 +105,7 @@ def g4f(domain_name):
for cookie in cookie_jar:
if cookie.name not in cookies:
cookies[cookie.name] = cookie.value
- except BrowserCookieError as e:
+        except Exception:
pass
_cookies[domain_name] = cookies
return _cookies[domain_name]
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 877af37e2bb..48e535dd3c7 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -1,6 +1,11 @@
from __future__ import annotations
import time
+import os
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.keys import Keys
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
@@ -27,10 +32,6 @@ def create_completion(
prompt = format_prompt(messages)
session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-
try:
driver.get(f"{cls.url}/chat")
wait = WebDriverWait(driver, 10 if headless else 240)
@@ -40,6 +41,9 @@ def create_completion(
if not webdriver:
driver = session.reopen()
driver.get(f"{cls.url}/chat")
+ login_url = os.environ.get("G4F_LOGIN_URL")
+ if login_url:
+ yield f"Please login: [Google Bard]({login_url})\n\n"
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
else:
@@ -61,8 +65,8 @@ def create_completion(
driver.execute_script(script)
# Submit prompt
- driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
- driver.find_element(By.CSS_SELECTOR, "button.send-button").click()
+ driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(prompt)
+ driver.find_element(By.CSS_SELECTOR, "div.ql-editor.textarea").send_keys(Keys.ENTER)
# Yield response
while True:
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 59e2da73529..530069c0be5 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -11,7 +11,6 @@
class HuggingChat(AsyncGeneratorProvider):
url = "https://huggingface.co/chat"
- needs_auth = True
working = True
model = "meta-llama/Llama-2-70b-chat-hf"
@@ -22,12 +21,11 @@ async def create_async_generator(
messages: Messages,
stream: bool = True,
proxy: str = None,
+ web_search: bool = False,
cookies: dict = None,
**kwargs
) -> AsyncResult:
model = model if model else cls.model
- if proxy and "://" not in proxy:
- proxy = f"http://{proxy}"
if not cookies:
cookies = get_cookies(".huggingface.co")
@@ -46,7 +44,7 @@ async def create_async_generator(
"inputs": format_prompt(messages),
"is_retry": False,
"response_id": str(uuid.uuid4()),
- "web_search": False
+ "web_search": web_search
}
async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
async for line in response.content:
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index af62382a371..818c163fec8 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -1,12 +1,15 @@
from __future__ import annotations
-import uuid, json, asyncio
+import uuid, json, asyncio, os
from py_arkose_generator.arkose import get_values_for_request
from asyncstdlib.itertools import tee
from async_property import async_cached_property
-
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+
from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_event_loop
+from ..helper import get_event_loop, format_prompt
from ...webdriver import get_browser
from ...typing import AsyncResult, Messages
from ...requests import StreamSession
@@ -84,7 +87,12 @@ async def create_async_generator(
if not parent_id:
parent_id = str(uuid.uuid4())
if not access_token:
- access_token = await cls.get_access_token(proxy)
+ access_token = cls._access_token
+ if not access_token:
+ login_url = os.environ.get("G4F_LOGIN_URL")
+ if login_url:
+ yield f"Please login: [ChatGPT]({login_url})\n\n"
+ access_token = cls._access_token = await cls.browse_access_token(proxy)
headers = {
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
@@ -106,10 +114,11 @@ async def create_async_generator(
"history_and_training_disabled": history_disabled and not auto_continue,
}
if action != "continue":
+ prompt = format_prompt(messages) if not conversation_id else messages[-1]["content"]
data["messages"] = [{
"id": str(uuid.uuid4()),
"author": {"role": "user"},
- "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+ "content": {"content_type": "text", "parts": [prompt]},
}]
async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
try:
@@ -155,14 +164,7 @@ async def create_async_generator(
@classmethod
async def browse_access_token(cls, proxy: str = None) -> str:
def browse() -> str:
- try:
- from selenium.webdriver.common.by import By
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-
- driver = get_browser(proxy=proxy)
- except ImportError:
- return
+ driver = get_browser(proxy=proxy)
try:
driver.get(f"{cls.url}/")
WebDriverWait(driver, 1200).until(
@@ -177,15 +179,6 @@ def browse() -> str:
None,
browse
)
-
- @classmethod
- async def get_access_token(cls, proxy: str = None) -> str:
- if not cls._access_token:
- cls._access_token = await cls.browse_access_token(proxy)
- if not cls._access_token:
- raise RuntimeError("Read access token failed")
- return cls._access_token
-
async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
config = {
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 4c47fe7d9c1..1ea6d3a3d05 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -25,7 +25,8 @@ def get_model_and_provider(model : Union[Model, str],
provider : Union[type[BaseProvider], None],
stream : bool,
ignored : List[str] = None,
- ignore_working: bool = False) -> tuple[Model, type[BaseProvider]]:
+ ignore_working: bool = False,
+ ignore_stream: bool = False) -> tuple[Model, type[BaseProvider]]:
if isinstance(model, str):
if model in ModelUtils.convert:
@@ -45,7 +46,7 @@ def get_model_and_provider(model : Union[Model, str],
if not provider.working and not ignore_working:
raise RuntimeError(f'{provider.__name__} is not working')
- if not provider.supports_stream and stream:
+ if not ignore_stream and not provider.supports_stream and stream:
raise ValueError(f'{provider.__name__} does not support "stream" argument')
if debug.logging:
@@ -61,15 +62,17 @@ def create(model : Union[Model, str],
stream : bool = False,
auth : Union[str, None] = None,
ignored : List[str] = None,
- ignore_working: bool = False, **kwargs) -> Union[CreateResult, str]:
+ ignore_working: bool = False,
+ ignore_stream_and_auth: bool = False,
+ **kwargs) -> Union[CreateResult, str]:
- model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working)
+ model, provider = get_model_and_provider(model, provider, stream, ignored, ignore_working, ignore_stream_and_auth)
- if provider.needs_auth and not auth:
+ if not ignore_stream_and_auth and provider.needs_auth and not auth:
raise ValueError(
f'{provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
- if provider.needs_auth:
+ if auth:
kwargs['auth'] = auth
result = provider.create_completion(model.name, messages, stream, **kwargs)
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 5b7a0bf00d6..2b1fdcb081e 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -161,7 +161,7 @@ const ask_gpt = async (txtMsgs) => {
text += chunk;
- document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text);
+ document.getElementById(`gpt_${window.token}`).innerHTML = markdown.render(text).replace(" {
+ response = await fetch('/backend-api/v2/models')
+ models = await response.json()
+ let select = document.getElementById('model');
+ select.textContent = '';
+
+ let auto = document.createElement('option');
+ auto.value = '';
+ auto.text = 'Default Model';
+ select.appendChild(auto);
for (model of models) {
- let model_info = document.createElement('option');
- model_info.value = model
- model_info.text = model
+ let option = document.createElement('option');
+ option.value = option.text = model;
+ select.appendChild(option);
+ }
+})();
- MODELS_SELECT.appendChild(model_info);
+(async () => {
+ response = await fetch('/backend-api/v2/providers')
+ providers = await response.json()
+
+ let select = document.getElementById('provider');
+ select.textContent = '';
+
+ let auto = document.createElement('option');
+ auto.value = '';
+ auto.text = 'Provider: Auto';
+ select.appendChild(auto);
+
+ for (provider of providers) {
+ let option = document.createElement('option');
+ option.value = option.text = provider;
+ select.appendChild(option);
}
-}
\ No newline at end of file
+})();
\ No newline at end of file
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 3d7bfedc592..033632019ae 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -3,9 +3,8 @@
from flask import request
from .internet import search
from .config import special_instructions
-from .provider import get_provider
-g4f.logging = True
+g4f.debug.logging = True
class Backend_Api:
def __init__(self, app) -> None:
@@ -15,6 +14,10 @@ def __init__(self, app) -> None:
'function': self.models,
'methods' : ['GET']
},
+ '/backend-api/v2/providers': {
+ 'function': self.providers,
+ 'methods' : ['GET']
+ },
'/backend-api/v2/conversation': {
'function': self._conversation,
'methods': ['POST']
@@ -37,6 +40,9 @@ def error(self):
def models(self):
return g4f._all_models
+ def providers(self):
+ return [provider.__name__ for provider in g4f.Provider.__providers__ if provider.working]
+
def _gen_title(self):
return {
'title': ''
@@ -47,26 +53,26 @@ def _conversation(self):
#jailbreak = request.json['jailbreak']
#internet_access = request.json['meta']['content']['internet_access']
#conversation = request.json['meta']['content']['conversation']
- prompt = request.json['meta']['content']['parts']
- model = request.json['model']
- provider = request.json.get('provider').split('g4f.Provider.')[1]
-
- messages = prompt
- print(messages)
+ messages = request.json['meta']['content']['parts']
+ model = request.json.get('model')
+ model = model if model else g4f.models.default
+ provider = request.json.get('provider', 'Auto').replace('g4f.Provider.', '')
+ provider = provider if provider != "Auto" else None
+        if provider is not None:
+ provider = g4f.Provider.ProviderUtils.convert.get(provider)
- def stream():
- yield from g4f.ChatCompletion.create(
- model=model,
- provider=get_provider(provider),
- messages=messages,
- stream=True,
- ) if provider else g4f.ChatCompletion.create(
- model=model, messages=messages, stream=True
- )
+ response = g4f.ChatCompletion.create(
+ model=model,
+ provider=provider,
+ messages=messages,
+ stream=True,
+ ignore_stream_and_auth=True
+ )
- return self.app.response_class(stream(), mimetype='text/event-stream')
+ return self.app.response_class(response, mimetype='text/event-stream')
- except Exception as e:
+ except Exception as e:
+ print(e)
return {
'code' : 'G4F_ERROR',
'_action': '_ask',
diff --git a/g4f/gui/server/provider.py b/g4f/gui/server/provider.py
deleted file mode 100644
index 8c7ac7551c4..00000000000
--- a/g4f/gui/server/provider.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from __future__ import annotations
-
-import g4f
-from g4f import BaseProvider
-
-
-def get_provider(provider: str) -> BaseProvider | None:
- if not isinstance(provider, str):
- return None
- print(provider)
- if provider == 'g4f.Provider.Auto':
- return None
-
- return g4f.Provider.ProviderUtils.convert.get(provider)
diff --git a/g4f/models.py b/g4f/models.py
index 2f86891d86c..a6cd724bf49 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -7,6 +7,7 @@
ChatgptDemoAi,
ChatAnywhere,
ChatgptNext,
+ HuggingChat,
GptForLove,
ChatgptAi,
DeepInfra,
@@ -100,7 +101,7 @@ def __all__() -> list[str]:
llama2_70b = Model(
name = "meta-llama/Llama-2-70b-chat-hf",
base_provider = "huggingface",
- best_provider = RetryProvider([Llama2, DeepInfra]))
+ best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]))
# Bard
palm = Model(
diff --git a/g4f/webdriver.py b/g4f/webdriver.py
index 288eed0e71b..f0fa1fbafde 100644
--- a/g4f/webdriver.py
+++ b/g4f/webdriver.py
@@ -4,6 +4,8 @@
from platformdirs import user_config_dir
from selenium.webdriver.remote.webdriver import WebDriver
from undetected_chromedriver import Chrome, ChromeOptions
+import os.path
+from . import debug
try:
from pyvirtualdisplay import Display
@@ -19,12 +21,16 @@ def get_browser(
) -> WebDriver:
if user_data_dir == None:
user_data_dir = user_config_dir("g4f")
+ if debug.logging:
+ print("Open browser with config dir:", user_data_dir)
if not options:
options = ChromeOptions()
- options.add_argument("window-size=1920,1080");
if proxy:
options.add_argument(f'--proxy-server={proxy}')
- return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
+ driver = '/usr/bin/chromedriver'
+ if not os.path.isfile(driver):
+ driver = None
+ return Chrome(options=options, user_data_dir=user_data_dir, driver_executable_path=driver, headless=headless)
class WebDriverSession():
def __init__(
diff --git a/ptest.py b/ptest.py
deleted file mode 100644
index 38dd2aa9ea0..00000000000
--- a/ptest.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import requests, json
-
-
-headers = {
- 'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'Cache-Control': 'no-cache',
- 'Connection': 'keep-alive',
- 'Content-Type': 'application/json',
- 'Origin': 'https://deepinfra.com',
- 'Pragma': 'no-cache',
- 'Referer': 'https://deepinfra.com/',
- 'Sec-Fetch-Dest': 'empty',
- 'Sec-Fetch-Mode': 'cors',
- 'Sec-Fetch-Site': 'same-site',
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
- 'X-Deepinfra-Source': 'web-embed',
- 'accept': 'text/event-stream',
- 'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
-}
-
-json_data = json.dumps({
- 'model': 'meta-llama/Llama-2-70b-chat-hf',
- 'messages': [
- {
- 'role': 'user',
- 'content': 'what is the meaning of life ?',
- },
- ],
- 'stream': True}, separators=(',', ':'))
-
-response = requests.post('https://api.deepinfra.com/v1/openai/chat/completions',
- headers=headers, data=json_data, stream=True)
-
-response.raise_for_status()
-first = True
-
-for line in response.iter_content(chunk_size=1024):
- if line.startswith(b"data: [DONE]"):
- break
-
- elif line.startswith(b"data: "):
- chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
-
- if chunk:
- if first:
- chunk = chunk.lstrip()
- if chunk:
- first = False
-
- print(chunk)
-
-# Note: json_data will not be serialized by requests
-# exactly as it was in the original request.
-#data = '{"model":"meta-llama/Llama-2-70b-chat-hf","messages":[{"role":"user","content":"what is the meaning of life ?"},{"role":"assistant","content":" The meaning of life is a question that has puzzled philosophers, religious leaders, scientists, and many others for centuries. There are many different perspectives on this question, and there is no one definitive answer. However, here are some possible approaches to understanding the meaning of life:\\n\\n1. Religious or spiritual perspective: Many people believe that the meaning of life is to fulfill a divine or spiritual purpose. According to this view, our lives have a higher purpose, which is to serve a deity or follow a set of moral principles. The meaning of life is then found in fulfilling this purpose, whether it be through prayer, service, or good works.\\n2. Personal fulfillment: Some people believe that the meaning of life is to find personal fulfillment and happiness. According to this view, we should pursue our passions and interests, cultivate strong relationships, and strive to live a life that is rich in experience and personal growth.\\n3. Social or cultural perspective: Others argue that the meaning of life is tied to our social and cultural contexts. From this perspective, the meaning of life is to contribute to the greater good of society, to uphold certain values or traditions, or to leave a lasting legacy.\\n4. Existentialism: Existentialists believe that the meaning of life is not predetermined or inherent, but rather something that each individual must create for themselves. According to this view, life has no inherent meaning, but we can give it meaning through our choices, actions, and experiences.\\n5. Scientific perspective: Some people believe that the meaning of life can be understood through scientific inquiry. 
From this perspective, the meaning of life might be seen as a product of evolution, a byproduct of the natural processes that gave rise to consciousness and intelligence.\\n6. Humanistic perspective: Humanists believe that the meaning of life is to promote human flourishing and well-being. According to this view, we should strive to create a world where individuals can live fulfilling lives, pursue their goals and dreams, and reach their full potential.\\n7. Nihilistic perspective: A minority perspective is that life has no inherent meaning and that any attempt to impose meaning on it is ultimately futile. This view is often associated with nihilism, which argues that traditional beliefs and values are baseless and that morality is an illusion.\\n8. Mystical perspective: Finally, some people believe that the meaning of life cannot be expressed in words or concepts, but can only be experienced directly through mystical practices such as meditation or psychedelic experiences.\\n\\nUltimately, the meaning of life may be something that each person must determine for themselves based on their own unique experiences, values, and beliefs. It could also be said that life has multiple meanings, and that these meanings can change throughout our lives as we grow, learn, and evolve as individuals."},{"role":"user","content":"welll well well"},{"role":"assistant","content":" I\'m glad you\'re feeling enthusiastic! 
\\"Well\\" can be used in various ways to express excitement or surprise, such as:\\n\\n* \\"Well, well, well!\\" - This phrase is often used to express surprise or intrigue, especially when something unexpected happens or when you\'re presented with new information.\\n* \\"Look at you, well done!\\" - Here, \\"well\\" is used to convey approval or admiration for someone\'s achievement or effort.\\n* \\"You think you\'re so smart, don\'t you?\\" - In this sentence, \\"well\\" is used to express sarcasm or skepticism towards someone who thinks they know everything.\\n\\nHowever, I\'d like to point out that using \\"well\\" too frequently can come across as insincere or filler words, especially in formal writing or professional settings. It\'s important to use language that accurately conveys your intended message without relying on unnecessary filler words. Is there anything else you\'d like me to help you with?"},{"role":"user","content":"nice"}],"stream":true}'
-#response = requests.post('https://api.deepinfra.com/v1/openai/chat/completions', headers=headers, data=data)
\ No newline at end of file