Support options like max_depth in downloads.json
Automatically add created buckets to messages
Don't add images without a title to the scrape result
hlohaus committed Jan 28, 2025
1 parent d4f736e · commit efacc15
Showing 4 changed files with 35 additions and 16 deletions.
g4f/gui/client/static/img/site.webmanifest (3 additions & 1 deletion)

@@ -21,7 +21,9 @@
       "method": "GET",
       "enctype": "application/x-www-form-urlencoded",
       "params": {
-        "title": "prompt"
+        "title": "title",
+        "text": "prompt",
+        "url": "url"
       }
     }
   }
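For context: this block is the PWA share_target configuration, and each params entry maps a field of the shared data to a query-string parameter. Sharing a page to the installed app therefore issues a GET request carrying the page title, the shared text (as prompt), and the URL. A minimal sketch of that request, with an assumed action path and illustrative values:

# Sketch of the GET request a browser issues for this share_target config.
# The "/chat/" action path and the shared values are illustrative.
from urllib.parse import urlencode

shared = {
    "title": "Example page",         # shared title -> param "title"
    "prompt": "Some selected text",  # shared text  -> param "prompt"
    "url": "https://example.com",    # shared url   -> param "url"
}
print(f"/chat/?{urlencode(shared)}")
# /chat/?title=Example+page&prompt=Some+selected+text&url=https%3A%2F%2Fexample.com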
g4f/gui/client/static/js/chat.v1.js (22 additions & 8 deletions)

@@ -792,7 +792,7 @@ function is_stopped() {
 const ask_gpt = async (message_id, message_index = -1, regenerate = false, provider = null, model = null, action = null) => {
     if (!model && !provider) {
         model = get_selected_model()?.value || null;
-        provider = providerSelect.options[providerSelect.selectedIndex].value;
+        provider = providerSelect.options[providerSelect.selectedIndex]?.value;
     }
     let conversation = await get_conversation(window.conversation_id);
     if (!conversation) {
@@ -1815,8 +1815,14 @@ async function on_load() {
     let chat_url = new URL(window.location.href)
     let chat_params = new URLSearchParams(chat_url.search);
     if (chat_params.get("prompt")) {
-        messageInput.value = chat_params.get("prompt");
-        await handle_ask();
+        messageInput.value = chat_params.title
+            + chat_params.title ? "\n\n\n" : ""
+            + chat_params.prompt
+            + chat_params.prompt && chat_params.url ? "\n\n\n" : ""
+            + chat_params.url;
+        messageInput.style.height = messageInput.scrollHeight + "px";
+        messageInput.focus();
+        //await handle_ask();
     } else {
         say_hello()
     }
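The new branch joins the title, prompt, and url query parameters with blank lines instead of auto-submitting. Note that URLSearchParams values are normally read with .get(...), and the unparenthesized ternaries bind loosely, so the committed expression may not concatenate exactly as intended. A minimal Python sketch of the apparent intent, with illustrative values:

# Join title, prompt, and url with blank-line separators, skipping empty parts.
def build_message(title: str, prompt: str, url: str) -> str:
    parts = [p for p in (title, prompt, url) if p]
    return "\n\n\n".join(parts)

print(build_message("Example page", "Some selected text", "https://example.com"))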
Expand Down Expand Up @@ -1871,7 +1877,6 @@ async function on_api() {
setTimeout(()=>prompt_lock=false, 3000);
await handle_ask();
} else {
messageInput.style.removeProperty("height");
messageInput.style.height = messageInput.scrollHeight + "px";
}
});
Expand Down Expand Up @@ -1970,7 +1975,11 @@ async function on_api() {
provider_options[provider.name] = option;
}
});
}
if (appStorage.getItem("provider")) {
await load_provider_models(appStorage.getItem("provider"))
} else {
providerSelect.selectedIndex = 0;
}
for (let [name, [label, login_url, childs]] of Object.entries(login_urls)) {
if (!login_url && !is_demo) {
@@ -1990,7 +1999,7 @@
     }

     register_settings_storage();
-    await load_settings_storage()
+    await load_settings_storage();
     Object.entries(provider_options).forEach(
         ([provider_name, option]) => load_provider_option(option.querySelector("input"), provider_name)
     );
Expand Down Expand Up @@ -2156,9 +2165,14 @@ async function upload_files(fileInput) {
}
appStorage.setItem(`bucket:${bucket_id}`, data.size);
inputCount.innerText = "Files are loaded successfully";
messageInput.value += (messageInput.value ? "\n" : "") + JSON.stringify({bucket_id: bucket_id}) + "\n";
paperclip.classList.remove("blink");
fileInput.value = "";
if (!messageInput.value) {
messageInput.value = JSON.stringify({bucket_id: bucket_id});
handle_ask(false);
} else {
messageInput.value += (messageInput.value ? "\n" : "") + JSON.stringify({bucket_id: bucket_id}) + "\n";
paperclip.classList.remove("blink");
fileInput.value = "";
}
}
};
eventSource.onerror = (event) => {
Expand Down
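This is the "automatically add created buckets to messages" change: when the message box is empty after an upload finishes, the bucket reference is written into it and submitted right away rather than waiting for the user. A sketch of the injected reference, with a hypothetical bucket id (presumably resolved by the backend to the uploaded files when the message is processed):

import json

bucket_id = "3f2c..."  # hypothetical id returned by the upload endpoint
message = json.dumps({"bucket_id": bucket_id})
print(message)  # {"bucket_id": "3f2c..."}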
g4f/tools/files.py (9 additions & 6 deletions)

@@ -481,18 +481,21 @@ def get_downloads_urls(bucket_dir: Path, delete_files: bool = False) -> Iterator
     if isinstance(data, list):
         for item in data:
             if "url" in item:
-                yield item["url"]
+                yield {"urls": [item.pop("url")], **item}
+            elif "urls" in item:
+                yield item

 def read_and_download_urls(bucket_dir: Path, event_stream: bool = False) -> Iterator[str]:
     urls = get_downloads_urls(bucket_dir)
     if urls:
         count = 0
         with open(os.path.join(bucket_dir, FILE_LIST), 'a') as f:
-            for filename in to_sync_generator(download_urls(bucket_dir, urls)):
-                f.write(f"{filename}\n")
-                if event_stream:
-                    count += 1
-                    yield f'data: {json.dumps({"action": "download", "count": count})}\n\n'
+            for url in urls:
+                for filename in to_sync_generator(download_urls(bucket_dir, **url)):
+                    f.write(f"{filename}\n")
+                    if event_stream:
+                        count += 1
+                        yield f'data: {json.dumps({"action": "download", "count": count})}\n\n'

 async def async_read_and_download_urls(bucket_dir: Path, event_stream: bool = False) -> AsyncIterator[str]:
     urls = get_downloads_urls(bucket_dir)
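This is the change behind the commit title: every downloads.json entry is normalized into a dict with a "urls" list, so any extra keys an entry carries (such as max_depth) travel along and are forwarded to download_urls(bucket_dir, **url) as keyword options. A runnable sketch of the normalization, with illustrative entries:

# Normalize downloads.json entries the way get_downloads_urls now does:
# "url" entries become {"urls": [...]} and keep their extra options.
def normalize(items):
    for item in items:
        if "url" in item:
            yield {"urls": [item.pop("url")], **item}
        elif "urls" in item:
            yield item

downloads = [
    {"url": "https://example.com/docs", "max_depth": 2},
    {"urls": ["https://example.com/a", "https://example.com/b"]},
]
for entry in normalize(downloads):
    print(entry)
# {'urls': ['https://example.com/docs'], 'max_depth': 2}
# {'urls': ['https://example.com/a', 'https://example.com/b']}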
g4f/tools/web_search.py (1 addition & 1 deletion)

@@ -96,7 +96,7 @@ def scrape_text(html: str, max_words: int = None, add_source=True, count_images:
         if count_images > 0:
             image = paragraph.select_one(image_select)
             if image:
-                title = paragraph.get("title") or paragraph.text
+                title = str(paragraph.get("title", paragraph.text))
                 if title:
                     yield f"!{format_link(image['src'], title)}\n"
         if max_words is not None:
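This implements "don't add images without a title to the scrape result": an image line is only yielded when the enclosing tag produces a non-empty title, taken from its title attribute with a fallback to its text. A small sketch of the guard using BeautifulSoup (HTML snippets illustrative):

from bs4 import BeautifulSoup

for html in ('<p title="A chart"><img src="a.png"></p>',
             '<p><img src="b.png"></p>'):  # no title attribute, no text
    paragraph = BeautifulSoup(html, "html.parser").p
    title = str(paragraph.get("title", paragraph.text))
    print(repr(title), "-> emitted" if title else "-> skipped")
# 'A chart' -> emitted
# '' -> skipped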
