Commit

dinguerie
julien-c committed Feb 6, 2025
1 parent bd147a9 commit 6700d95
Showing 18 changed files with 52 additions and 52 deletions.
@@ -11,7 +11,7 @@ const chatCompletion = await client.chatCompletion({
}
],
provider: "hf-inference",
- max_tokens: 500,
+ max_tokens: 500
});

- console.log(chatCompletion.choices[0].message);
+ console.log(chatCompletion.choices[0].message);
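
For reference, the hunk above shows only the tail of this snippet. A minimal sketch of the full updated call, assuming the hidden top of the file uses HfInference from @huggingface/inference, the Llama-3.1-8B-Instruct model used in the other snippets of this commit, and the same example question as the OpenAI-client variants:

import { HfInference } from "@huggingface/inference";

// Assumption: the client is created in the hidden lines, roughly like this.
const client = new HfInference("api_token");

const chatCompletion = await client.chatCompletion({
  model: "meta-llama/Llama-3.1-8B-Instruct", // assumed; the model line is outside the visible hunk
  messages: [
    {
      role: "user",
      content: "What is the capital of France?" // assumed from the sibling snippets in this commit
    }
  ],
  provider: "hf-inference",
  max_tokens: 500
});

console.log(chatCompletion.choices[0].message);
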
(next changed file)
@@ -11,7 +11,7 @@ const chatCompletion = await client.chatCompletion({
}
],
provider: "together",
- max_tokens: 500,
+ max_tokens: 500
});

- console.log(chatCompletion.choices[0].message);
+ console.log(chatCompletion.choices[0].message);
(next changed file)
@@ -15,7 +15,7 @@
completion = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
- max_tokens=500,
+ max_tokens=500
)

print(completion.choices[0].message)
(next changed file)
@@ -15,7 +15,7 @@
completion = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
- max_tokens=500,
+ max_tokens=500
)

print(completion.choices[0].message)
(next changed file)
@@ -1,19 +1,19 @@
import { OpenAI } from "openai";

const client = new OpenAI({
baseURL: "https://router.huggingface.co/hf-inference/v1",
apiKey: "api_token",
baseURL: "https://router.huggingface.co/hf-inference",
apiKey: "api_token"
});

const chatCompletion = await client.chat.completions.create({
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [
{
role: "user",
content: "What is the capital of France?",
},
content: "What is the capital of France?"
}
],
- max_tokens: 500,
+ max_tokens: 500
});

- console.log(chatCompletion.choices[0].message);
+ console.log(chatCompletion.choices[0].message);
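
This hunk spans the whole snippet, so the updated file can be read off the added lines directly. Reassembled (indentation approximate):

import { OpenAI } from "openai";

const client = new OpenAI({
  baseURL: "https://router.huggingface.co/hf-inference",
  apiKey: "api_token"
});

const chatCompletion = await client.chat.completions.create({
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [
    {
      role: "user",
      content: "What is the capital of France?"
    }
  ],
  max_tokens: 500
});

console.log(chatCompletion.choices[0].message);

Besides the dropped trailing commas, the substantive change is the baseURL losing its /v1 suffix.
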
(next changed file)
@@ -1,7 +1,7 @@
from openai import OpenAI

client = OpenAI(
base_url="https://router.huggingface.co/hf-inference/v1",
base_url="https://router.huggingface.co/hf-inference",
api_key="api_token"
)

@@ -15,7 +15,7 @@
completion = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
- max_tokens=500,
+ max_tokens=500
)

print(completion.choices[0].message)
(next changed file)
@@ -13,7 +13,7 @@ const chatCompletion = await client.chat.completions.create({
content: "What is the capital of France?"
}
],
- max_tokens: 500,
+ max_tokens: 500
});

- console.log(chatCompletion.choices[0].message);
+ console.log(chatCompletion.choices[0].message);
(next changed file)
@@ -15,7 +15,7 @@
completion = client.chat.completions.create(
model="meta-llama/Llama-3.1-8B-Instruct",
messages=messages,
- max_tokens=500,
+ max_tokens=500
)

print(completion.choices[0].message)
(next changed file)
@@ -13,7 +13,7 @@ const stream = client.chatCompletionStream({
}
],
provider: "hf-inference",
- max_tokens: 500,
+ max_tokens: 500
});

for await (const chunk of stream) {
(next changed file)
@@ -1,8 +1,8 @@
import { OpenAI } from "openai";

const client = new OpenAI({
baseURL: "https://router.huggingface.co/hf-inference/v1",
apiKey: "api_token",
baseURL: "https://router.huggingface.co/hf-inference",
apiKey: "api_token"
});

let out = "";
@@ -12,8 +12,8 @@ const stream = await client.chat.completions.create({
messages: [
{
role: "user",
content: "What is the capital of France?",
},
content: "What is the capital of France?"
}
],
max_tokens: 500,
stream: true,
@@ -24,5 +24,5 @@ for await (const chunk of stream) {
const newContent = chunk.choices[0].delta.content;
out += newContent;
console.log(newContent);
- }
- }
+ }
+ }
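
Parts of the streaming snippet sit outside the visible hunks. A sketch of the updated file, with the hidden model line and chunk-handling guard filled in as assumptions:

import { OpenAI } from "openai";

const client = new OpenAI({
  baseURL: "https://router.huggingface.co/hf-inference",
  apiKey: "api_token"
});

let out = "";

const stream = await client.chat.completions.create({
  model: "meta-llama/Llama-3.1-8B-Instruct", // assumption: the model line is in the collapsed lines
  messages: [
    {
      role: "user",
      content: "What is the capital of France?"
    }
  ],
  max_tokens: 500,
  stream: true // the diff's context keeps a trailing comma here, so further options may follow in the hidden lines
});

for await (const chunk of stream) {
  // Assumption: the collapsed lines guard against empty chunks roughly like this.
  if (chunk.choices && chunk.choices.length > 0) {
    const newContent = chunk.choices[0].delta.content;
    out += newContent;
    console.log(newContent);
  }
}
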
(next changed file)
@@ -1,7 +1,7 @@
from openai import OpenAI

client = OpenAI(
base_url="https://router.huggingface.co/hf-inference/v1",
base_url="https://router.huggingface.co/hf-inference",
api_key="api_token"
)

(next changed file)
@@ -22,7 +22,7 @@ const chatCompletion = await client.chatCompletion({
}
],
provider: "hf-inference",
- max_tokens: 500,
+ max_tokens: 500
});

- console.log(chatCompletion.choices[0].message);
+ console.log(chatCompletion.choices[0].message);
(next changed file)
@@ -26,7 +26,7 @@
completion = client.chat.completions.create(
model="meta-llama/Llama-3.2-11B-Vision-Instruct",
messages=messages,
- max_tokens=500,
+ max_tokens=500
)

print(completion.choices[0].message)
(next changed file)
@@ -1,8 +1,8 @@
import { OpenAI } from "openai";

const client = new OpenAI({
baseURL: "https://router.huggingface.co/hf-inference/v1",
apiKey: "api_token",
baseURL: "https://router.huggingface.co/hf-inference",
apiKey: "api_token"
});

const chatCompletion = await client.chat.completions.create({
@@ -13,18 +13,18 @@ const chatCompletion = await client.chat.completions.create({
content: [
{
type: "text",
text: "Describe this image in one sentence.",
text: "Describe this image in one sentence."
},
{
type: "image_url",
image_url: {
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
},
],
},
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
}
}
]
}
],
- max_tokens: 500,
+ max_tokens: 500
});

- console.log(chatCompletion.choices[0].message);
+ console.log(chatCompletion.choices[0].message);
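
The image-input variant can likewise be reassembled from the added lines of its two hunks; the lines hidden between them (the model and the opening of the messages array) are assumptions, with the model name taken from the neighbouring Python snippet in this commit:

import { OpenAI } from "openai";

const client = new OpenAI({
  baseURL: "https://router.huggingface.co/hf-inference",
  apiKey: "api_token"
});

const chatCompletion = await client.chat.completions.create({
  model: "meta-llama/Llama-3.2-11B-Vision-Instruct", // assumption: taken from the Python snippet below
  messages: [
    {
      role: "user", // assumption: hidden in the collapsed lines
      content: [
        {
          type: "text",
          text: "Describe this image in one sentence."
        },
        {
          type: "image_url",
          image_url: {
            url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
          }
        }
      ]
    }
  ],
  max_tokens: 500
});

console.log(chatCompletion.choices[0].message);
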
(next changed file)
@@ -1,7 +1,7 @@
from openai import OpenAI

client = OpenAI(
base_url="https://router.huggingface.co/hf-inference/v1",
base_url="https://router.huggingface.co/hf-inference",
api_key="api_token"
)

@@ -26,7 +26,7 @@
completion = client.chat.completions.create(
model="meta-llama/Llama-3.2-11B-Vision-Instruct",
messages=messages,
- max_tokens=500,
+ max_tokens=500
)

print(completion.choices[0].message)
(next changed file)
@@ -24,7 +24,7 @@ const stream = client.chatCompletionStream({
}
],
provider: "hf-inference",
- max_tokens: 500,
+ max_tokens: 500
});

for await (const chunk of stream) {
(next changed file)
@@ -1,8 +1,8 @@
import { OpenAI } from "openai";

const client = new OpenAI({
baseURL: "https://router.huggingface.co/hf-inference/v1",
apiKey: "api_token",
baseURL: "https://router.huggingface.co/hf-inference",
apiKey: "api_token"
});

let out = "";
@@ -15,16 +15,16 @@ const stream = await client.chat.completions.create({
content: [
{
type: "text",
text: "Describe this image in one sentence.",
text: "Describe this image in one sentence."
},
{
type: "image_url",
image_url: {
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
},
],
},
url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
}
}
]
}
],
max_tokens: 500,
stream: true,
@@ -35,5 +35,5 @@ for await (const chunk of stream) {
const newContent = chunk.choices[0].delta.content;
out += newContent;
console.log(newContent);
- }
- }
+ }
+ }
(next changed file)
@@ -1,7 +1,7 @@
from openai import OpenAI

client = OpenAI(
base_url="https://router.huggingface.co/hf-inference/v1",
base_url="https://router.huggingface.co/hf-inference",
api_key="api_token"
)
