From 6700d95f8d5b1fa3373a34a18c4bf3a6233817d5 Mon Sep 17 00:00:00 2001
From: Julien Chaumond
Date: Thu, 6 Feb 2025 13:12:23 +0100
Subject: [PATCH] Remove trailing commas and /v1 suffix from snippet fixtures

---
 .../0.huggingface.js.hf-inference.js  |  4 ++--
 .../0.huggingface.js.together.js      |  4 ++--
 .../0.huggingface_hub.hf-inference.py |  2 +-
 .../0.huggingface_hub.together.py     |  2 +-
 .../1.openai.hf-inference.js          | 12 +++++------
 .../1.openai.hf-inference.py          |  4 ++--
 .../1.openai.together.js              |  4 ++--
 .../1.openai.together.py              |  2 +-
 .../0.huggingface.js.hf-inference.js  |  2 +-
 .../1.openai.hf-inference.js          | 12 +++++------
 .../1.openai.hf-inference.py          |  2 +-
 .../0.huggingface.js.hf-inference.js  |  4 ++--
 .../0.huggingface_hub.hf-inference.py |  2 +-
 .../1.openai.hf-inference.js          | 20 +++++++++----------
 .../1.openai.hf-inference.py          |  4 ++--
 .../0.huggingface.js.hf-inference.js  |  2 +-
 .../1.openai.hf-inference.js          | 20 +++++++++----------
 .../1.openai.hf-inference.py          |  2 +-
 18 files changed, 52 insertions(+), 52 deletions(-)

diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.hf-inference.js
index 15361c3e4..0bb99c2c8 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.hf-inference.js
@@ -11,7 +11,7 @@ const chatCompletion = await client.chatCompletion({
         }
     ],
     provider: "hf-inference",
-    max_tokens: 500,
+    max_tokens: 500
 });
 
-console.log(chatCompletion.choices[0].message);
+console.log(chatCompletion.choices[0].message);
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.together.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.together.js
index 9d64919ed..fb00ca5d8 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.together.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface.js.together.js
@@ -11,7 +11,7 @@ const chatCompletion = await client.chatCompletion({
         }
     ],
     provider: "together",
-    max_tokens: 500,
+    max_tokens: 500
 });
 
-console.log(chatCompletion.choices[0].message);
+console.log(chatCompletion.choices[0].message);
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py
index 5edd9b64e..a4e9d17d6 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.hf-inference.py
@@ -15,7 +15,7 @@
 completion = client.chat.completions.create(
     model="meta-llama/Llama-3.1-8B-Instruct",
     messages=messages,
-    max_tokens=500,
+    max_tokens=500
 )
 
 print(completion.choices[0].message)
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py
index 8a1753c52..7464a8b22 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/0.huggingface_hub.together.py
@@ -15,7 +15,7 @@
 completion = client.chat.completions.create(
     model="meta-llama/Llama-3.1-8B-Instruct",
     messages=messages,
-    max_tokens=500,
+    max_tokens=500
 )
 
 print(completion.choices[0].message)
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.js
index d7007dd46..94144ea41 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.js
@@ -1,8 +1,8 @@
 import { OpenAI } from "openai";
 
 const client = new OpenAI({
-    baseURL: "https://router.huggingface.co/hf-inference/v1",
-    apiKey: "api_token",
+    baseURL: "https://router.huggingface.co/hf-inference",
+    apiKey: "api_token"
 });
 
 const chatCompletion = await client.chat.completions.create({
@@ -10,10 +10,10 @@ const chatCompletion = await client.chat.completions.create({
     messages: [
         {
             role: "user",
-            content: "What is the capital of France?",
-        },
+            content: "What is the capital of France?"
+        }
     ],
-    max_tokens: 500,
+    max_tokens: 500
 });
 
-console.log(chatCompletion.choices[0].message);
+console.log(chatCompletion.choices[0].message);
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.py
index 4d557b289..153fc238a 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.hf-inference.py
@@ -1,7 +1,7 @@
 from openai import OpenAI
 
 client = OpenAI(
-    base_url="https://router.huggingface.co/hf-inference/v1",
+    base_url="https://router.huggingface.co/hf-inference",
     api_key="api_token"
 )
 
@@ -15,7 +15,7 @@
 completion = client.chat.completions.create(
     model="meta-llama/Llama-3.1-8B-Instruct",
     messages=messages,
-    max_tokens=500,
+    max_tokens=500
 )
 
 print(completion.choices[0].message)
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.js
index 020c6d0be..e53221ea6 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.js
@@ -13,7 +13,7 @@ const chatCompletion = await client.chat.completions.create({
             content: "What is the capital of France?"
         }
     ],
-    max_tokens: 500,
+    max_tokens: 500
 });
 
-console.log(chatCompletion.choices[0].message);
+console.log(chatCompletion.choices[0].message);
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.py
index 735b2f16c..282579990 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/1.openai.together.py
@@ -15,7 +15,7 @@
 completion = client.chat.completions.create(
     model="meta-llama/Llama-3.1-8B-Instruct",
     messages=messages,
-    max_tokens=500,
+    max_tokens=500
 )
 
 print(completion.choices[0].message)
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface.js.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface.js.hf-inference.js
index 4bdd0c143..86a645670 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface.js.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/0.huggingface.js.hf-inference.js
@@ -13,7 +13,7 @@ const stream = client.chatCompletionStream({
         }
     ],
     provider: "hf-inference",
-    max_tokens: 500,
+    max_tokens: 500
 });
 
 for await (const chunk of stream) {
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.js
index 1aa7c8cbf..ef4877084 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.js
@@ -1,8 +1,8 @@
 import { OpenAI } from "openai";
 
 const client = new OpenAI({
-    baseURL: "https://router.huggingface.co/hf-inference/v1",
-    apiKey: "api_token",
+    baseURL: "https://router.huggingface.co/hf-inference",
+    apiKey: "api_token"
 });
 
 let out = "";
@@ -12,8 +12,8 @@ const stream = await client.chat.completions.create({
     messages: [
         {
             role: "user",
-            content: "What is the capital of France?",
-        },
+            content: "What is the capital of France?"
+        }
     ],
     max_tokens: 500,
     stream: true,
@@ -24,5 +24,5 @@ for await (const chunk of stream) {
         const newContent = chunk.choices[0].delta.content;
         out += newContent;
         console.log(newContent);
-    }
-}
+    }
+}
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.py
index 12167b83b..f2fcddbcb 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/1.openai.hf-inference.py
@@ -1,7 +1,7 @@
 from openai import OpenAI
 
 client = OpenAI(
-    base_url="https://router.huggingface.co/hf-inference/v1",
+    base_url="https://router.huggingface.co/hf-inference",
     api_key="api_token"
 )
 
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface.js.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface.js.hf-inference.js
index 2a69d4d82..5c7c69335 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface.js.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface.js.hf-inference.js
@@ -22,7 +22,7 @@ const chatCompletion = await client.chatCompletion({
         }
     ],
     provider: "hf-inference",
-    max_tokens: 500,
+    max_tokens: 500
 });
 
-console.log(chatCompletion.choices[0].message);
+console.log(chatCompletion.choices[0].message);
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py
index 726de28b4..5dc7d45c9 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/0.huggingface_hub.hf-inference.py
@@ -26,7 +26,7 @@
 completion = client.chat.completions.create(
     model="meta-llama/Llama-3.2-11B-Vision-Instruct",
     messages=messages,
-    max_tokens=500,
+    max_tokens=500
 )
 
 print(completion.choices[0].message)
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.js
index b61bb889a..da6286936 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.js
@@ -1,8 +1,8 @@
 import { OpenAI } from "openai";
 
 const client = new OpenAI({
-    baseURL: "https://router.huggingface.co/hf-inference/v1",
-    apiKey: "api_token",
+    baseURL: "https://router.huggingface.co/hf-inference",
+    apiKey: "api_token"
 });
 
 const chatCompletion = await client.chat.completions.create({
@@ -13,18 +13,18 @@ const chatCompletion = await client.chat.completions.create({
             content: [
                 {
                     type: "text",
-                    text: "Describe this image in one sentence.",
+                    text: "Describe this image in one sentence."
                 },
                 {
                     type: "image_url",
                     image_url: {
-                        url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
-                    },
-                },
-            ],
-        },
+                        url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+                    }
+                }
+            ]
+        }
     ],
-    max_tokens: 500,
+    max_tokens: 500
 });
 
-console.log(chatCompletion.choices[0].message);
+console.log(chatCompletion.choices[0].message);
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.py
index 517466190..a784ec449 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/1.openai.hf-inference.py
@@ -1,7 +1,7 @@
 from openai import OpenAI
 
 client = OpenAI(
-    base_url="https://router.huggingface.co/hf-inference/v1",
+    base_url="https://router.huggingface.co/hf-inference",
     api_key="api_token"
 )
 
@@ -26,7 +26,7 @@
 completion = client.chat.completions.create(
     model="meta-llama/Llama-3.2-11B-Vision-Instruct",
     messages=messages,
-    max_tokens=500,
+    max_tokens=500
 )
 
 print(completion.choices[0].message)
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface.js.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface.js.hf-inference.js
index 45886e770..bcab793db 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface.js.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/0.huggingface.js.hf-inference.js
@@ -24,7 +24,7 @@ const stream = client.chatCompletionStream({
         }
     ],
     provider: "hf-inference",
-    max_tokens: 500,
+    max_tokens: 500
 });
 
 for await (const chunk of stream) {
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.js
index e73d6c746..2dc229e63 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.js
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.js
@@ -1,8 +1,8 @@
 import { OpenAI } from "openai";
 
 const client = new OpenAI({
-    baseURL: "https://router.huggingface.co/hf-inference/v1",
-    apiKey: "api_token",
+    baseURL: "https://router.huggingface.co/hf-inference",
+    apiKey: "api_token"
 });
 
 let out = "";
@@ -15,16 +15,16 @@ const stream = await client.chat.completions.create({
             content: [
                 {
                     type: "text",
-                    text: "Describe this image in one sentence.",
+                    text: "Describe this image in one sentence."
                 },
                 {
                     type: "image_url",
                     image_url: {
-                        url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
-                    },
-                },
-            ],
-        },
+                        url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+                    }
+                }
+            ]
+        }
     ],
     max_tokens: 500,
     stream: true,
@@ -35,5 +35,5 @@ for await (const chunk of stream) {
         const newContent = chunk.choices[0].delta.content;
         out += newContent;
         console.log(newContent);
-    }
-}
+    }
+}
\ No newline at end of file
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.py
index 6c788816e..ca0a0609d 100644
--- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.py
+++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/1.openai.hf-inference.py
@@ -1,7 +1,7 @@
 from openai import OpenAI
 
 client = OpenAI(
-    base_url="https://router.huggingface.co/hf-inference/v1",
+    base_url="https://router.huggingface.co/hf-inference",
     api_key="api_token"
 )
 