
Commit efacc15

Support options like max_depth in downloads.json
Add created buckets to messages automatically. Don't add images without a title to the scrape result.
1 parent d4f736e commit efacc15

4 files changed: +35 −16 lines changed


g4f/gui/client/static/img/site.webmanifest

Lines changed: 3 additions & 1 deletion
@@ -21,7 +21,9 @@
     "method": "GET",
     "enctype": "application/x-www-form-urlencoded",
     "params": {
-      "title": "prompt"
+      "title": "title",
+      "text": "prompt",
+      "url": "url"
     }
   }
 }
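
The new params block maps the Web Share Target fields so that a shared page's title, text, and url each arrive in their own query parameter (title, prompt, url) instead of everything being funneled into prompt. Below is a minimal Python sketch of the GET request a browser would construct from a share; the /chat/ action path is an assumption, since that line sits outside this hunk:

from urllib.parse import urlencode

# Data a user shares from another app or site (illustrative values)
share = {"title": "Interesting article", "text": "Please summarize this", "url": "https://example.com/post"}

# Field-to-query-parameter mapping taken from the patched manifest
params = {"title": "title", "text": "prompt", "url": "url"}

query = urlencode({params[field]: value for field, value in share.items()})
print(f"/chat/?{query}")
# /chat/?title=Interesting+article&prompt=Please+summarize+this&url=https%3A%2F%2Fexample.com%2Fpost

The on_load() change in chat.v1.js below then builds the message input value from these parameters.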

g4f/gui/client/static/js/chat.v1.js

Lines changed: 22 additions & 8 deletions
@@ -792,7 +792,7 @@ function is_stopped() {
 const ask_gpt = async (message_id, message_index = -1, regenerate = false, provider = null, model = null, action = null) => {
     if (!model && !provider) {
         model = get_selected_model()?.value || null;
-        provider = providerSelect.options[providerSelect.selectedIndex].value;
+        provider = providerSelect.options[providerSelect.selectedIndex]?.value;
     }
     let conversation = await get_conversation(window.conversation_id);
     if (!conversation) {
@@ -1815,8 +1815,14 @@ async function on_load() {
     let chat_url = new URL(window.location.href)
     let chat_params = new URLSearchParams(chat_url.search);
     if (chat_params.get("prompt")) {
-        messageInput.value = chat_params.get("prompt");
-        await handle_ask();
+        messageInput.value = chat_params.title
+            + chat_params.title ? "\n\n\n" : ""
+            + chat_params.prompt
+            + chat_params.prompt && chat_params.url ? "\n\n\n" : ""
+            + chat_params.url;
+        messageInput.style.height = messageInput.scrollHeight + "px";
+        messageInput.focus();
+        //await handle_ask();
     } else {
         say_hello()
     }
@@ -1871,7 +1877,6 @@ async function on_api() {
             setTimeout(()=>prompt_lock=false, 3000);
             await handle_ask();
         } else {
-            messageInput.style.removeProperty("height");
             messageInput.style.height = messageInput.scrollHeight + "px";
         }
     });
@@ -1970,7 +1975,11 @@ async function on_api() {
             provider_options[provider.name] = option;
         }
     });
+    }
+    if (appStorage.getItem("provider")) {
         await load_provider_models(appStorage.getItem("provider"))
+    } else {
+        providerSelect.selectedIndex = 0;
     }
     for (let [name, [label, login_url, childs]] of Object.entries(login_urls)) {
         if (!login_url && !is_demo) {
@@ -1990,7 +1999,7 @@ async function on_api() {
     }
 
     register_settings_storage();
-    await load_settings_storage()
+    await load_settings_storage();
     Object.entries(provider_options).forEach(
         ([provider_name, option]) => load_provider_option(option.querySelector("input"), provider_name)
     );
@@ -2156,9 +2165,14 @@ async function upload_files(fileInput) {
             }
             appStorage.setItem(`bucket:${bucket_id}`, data.size);
             inputCount.innerText = "Files are loaded successfully";
-            messageInput.value += (messageInput.value ? "\n" : "") + JSON.stringify({bucket_id: bucket_id}) + "\n";
-            paperclip.classList.remove("blink");
-            fileInput.value = "";
+            if (!messageInput.value) {
+                messageInput.value = JSON.stringify({bucket_id: bucket_id});
+                handle_ask(false);
+            } else {
+                messageInput.value += (messageInput.value ? "\n" : "") + JSON.stringify({bucket_id: bucket_id}) + "\n";
+                paperclip.classList.remove("blink");
+                fileInput.value = "";
+            }
         }
     };
     eventSource.onerror = (event) => {

g4f/tools/files.py

Lines changed: 9 additions & 6 deletions
@@ -481,18 +481,21 @@ def get_downloads_urls(bucket_dir: Path, delete_files: bool = False) -> Iterator
         if isinstance(data, list):
             for item in data:
                 if "url" in item:
-                    yield item["url"]
+                    yield {"urls": [item.pop("url")], **item}
+                elif "urls" in item:
+                    yield item
 
 def read_and_download_urls(bucket_dir: Path, event_stream: bool = False) -> Iterator[str]:
     urls = get_downloads_urls(bucket_dir)
     if urls:
         count = 0
         with open(os.path.join(bucket_dir, FILE_LIST), 'a') as f:
-            for filename in to_sync_generator(download_urls(bucket_dir, urls)):
-                f.write(f"{filename}\n")
-                if event_stream:
-                    count += 1
-                    yield f'data: {json.dumps({"action": "download", "count": count})}\n\n'
+            for url in urls:
+                for filename in to_sync_generator(download_urls(bucket_dir, **url)):
+                    f.write(f"{filename}\n")
+                    if event_stream:
+                        count += 1
+                        yield f'data: {json.dumps({"action": "download", "count": count})}\n\n'
 
 async def async_read_and_download_urls(bucket_dir: Path, event_stream: bool = False) -> AsyncIterator[str]:
     urls = get_downloads_urls(bucket_dir)
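
A minimal sketch of how a bucket's downloads.json entries might look after this change, and how the patched get_downloads_urls() normalizes them: a single "url" is rewritten into a "urls" list, and any remaining keys such as max_depth ride along in the dict, which read_and_download_urls() later expands into download_urls(bucket_dir, **url) as keyword arguments. The example file content and the normalize() helper below are illustrative, not part of the commit:

import json

# Hypothetical downloads.json content (shape inferred from the diff)
downloads = json.loads("""
[
    {"url": "https://example.com/docs/index.html", "max_depth": 2},
    {"urls": ["https://example.com/a.pdf", "https://example.com/b.pdf"]}
]
""")

# Same normalization as the patched get_downloads_urls()
def normalize(data):
    if isinstance(data, list):
        for item in data:
            if "url" in item:
                yield {"urls": [item.pop("url")], **item}
            elif "urls" in item:
                yield item

for entry in normalize(downloads):
    # read_and_download_urls() calls download_urls(bucket_dir, **entry),
    # so max_depth becomes a keyword argument alongside urls.
    print(entry)
# {'urls': ['https://example.com/docs/index.html'], 'max_depth': 2}
# {'urls': ['https://example.com/a.pdf', 'https://example.com/b.pdf']}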

g4f/tools/web_search.py

Lines changed: 1 addition & 1 deletion
@@ -96,7 +96,7 @@ def scrape_text(html: str, max_words: int = None, add_source=True, count_images:
         if count_images > 0:
             image = paragraph.select_one(image_select)
             if image:
-                title = paragraph.get("title") or paragraph.text
+                title = str(paragraph.get("title", paragraph.text))
                 if title:
                     yield f"!{format_link(image['src'], title)}\n"
                     if max_words is not None:
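
For the scrape_text() change, a short sketch of the behavioral difference, assuming BeautifulSoup Tag objects as the surrounding select_one()/get() calls suggest: the old expression fell back to the paragraph text whenever the title attribute was missing or empty, while the new one only falls back when the attribute is absent, so an image inside an element with an explicitly empty title now gets an empty string and is skipped by the following "if title:" check.

from bs4 import BeautifulSoup

html = '<p title="">caption-less text<img src="/a.png"></p><p title="A chart"><img src="/b.png"></p>'
for paragraph in BeautifulSoup(html, "html.parser").select("p"):
    old_title = paragraph.get("title") or paragraph.text        # before the patch
    new_title = str(paragraph.get("title", paragraph.text))     # after the patch
    print(repr(old_title), repr(new_title))
# 'caption-less text' ''   -> empty title attribute: the image is now dropped by "if title:"
# 'A chart' 'A chart'      -> an explicit title still yields the image link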
