From 795b77856d0563e3c642284e351d1a8098c671af Mon Sep 17 00:00:00 2001
From: David Alonso
Date: Sat, 18 May 2024 10:10:01 +0200
Subject: [PATCH 1/2] update docs & groq model names

---
 docs/docs/plugins/cohere.md     | 14 ++---
 examples/README.md              | 84 ++++++++++++++---------------
 plugins/anthropic/README.md     | 58 +++++++++++++++++---
 plugins/azure-openai/README.md  | 14 ++---
 plugins/cohere/README.md        | 93 +++++++++++++++++++++++++++------
 plugins/groq/README.md          | 93 +++++++++++++++++++++++++++------
 plugins/groq/src/groq_models.ts | 16 +++---
 plugins/groq/src/index.ts       | 10 ++--
 plugins/groq/tests/groq_test.ts |  1 -
 plugins/mistral/README.md       | 55 ++++++++++++++++++-
 plugins/mistral/src/index.ts    | 11 +++-
 plugins/openai/README.md        | 73 +++++++++++++++++++++++---
 12 files changed, 396 insertions(+), 126 deletions(-)

diff --git a/docs/docs/plugins/cohere.md b/docs/docs/plugins/cohere.md
index 84260471..c177fce5 100644
--- a/docs/docs/plugins/cohere.md
+++ b/docs/docs/plugins/cohere.md
@@ -38,11 +38,11 @@ Install the plugin in your project with your favorite package manager:
 
 The simplest way to call the text generation model is by using the helper function `generate`:
 
-```
+```typescript
 // Basic usage of an LLM
 const response = await generate({
-  model: commandRPlus,
-  prompt: 'Tell me a joke.',
+  model: commandRPlus,
+  prompt: 'Tell me a joke.',
 });
 
 console.log(await response.text());
@@ -50,18 +50,18 @@ console.log(await response.text());
 
 Using the same interface, you can prompt a multimodal model:
 
-```
+```typescript
 const response = await generate({
   model: commandRPlus,
   prompt: [
     { text: 'What animal is in the photo?' },
-    { media: { url: imageUrl} },
+    { media: { url: imageUrl } },
   ],
-  config:{
+  config: {
     // control of the level of visual detail when processing image embeddings
     // Low detail level also decreases the token usage
     visualDetailLevel: 'low',
-  }
+  },
 });
 console.log(await response.text());
 ```
diff --git a/examples/README.md b/examples/README.md
index e0818d4e..b9943af3 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -23,8 +23,8 @@ NOTE: as you will see, you do not need to have a Firebase project to use Genkit
 
 Genkit is configured from the `index.ts`, where you can import and initialize the plugin and define prompts, flows, models and other tools which can be accessed directly through the Genkit Dev UI:
 
-```
-import { configureGenkit } from '@genkit-ai/core';
+```typescript
+import { configureGenkit } from '@genkit-ai/core';
 import { openAI } from 'genkitx-openai-plugin';
 import { anthropic } from 'genkitx-anthropicai';
 
@@ -51,14 +51,14 @@ List of all available models as well as their pricing, specification and capabil
 
 The simplest way to call the text generation model is by using the helper function `generate`:
 
-```
+```typescript
 import { generate } from '@genkit-ai/ai';
-import {claude3Haiku} from 'genkitx-anthropicai';
+import { claude3Haiku } from 'genkitx-anthropicai';
 
 // Basic usage of an LLM
 const response = await generate({
-  model: claude3Haiku,
-  prompt: 'Tell me a joke.',
+  model: claude3Haiku,
+  prompt: 'Tell me a joke.',
 });
 
 console.log(await response.text());
@@ -66,14 +66,14 @@ console.log(await response.text());
 
 Using the same interface, you can prompt a multimodal model:
 
-```
-import {gpt4Vision} from 'genkitx-openai-plugin';
+```typescript
+import { gpt4Vision } from 'genkitx-openai-plugin';
 
 const response = await generate({
   model: gpt4Vision,
   prompt: [
     { text: 'What animal is in the photo?' },
-    { media: { url: imageUrl} },
+    { media: { url: imageUrl } },
   ],
 });
 console.log(await response.text());
 ```
 
@@ -81,8 +81,8 @@ console.log(await response.text());
 
 or define a tool in Genkit, test it in the dev UI and then use it in the code:
 
-```
-import {defineTool } from '@genkit-ai/ai';
+```typescript
+import { defineTool } from '@genkit-ai/ai';
 
 // defining the tool
 const tool = defineTool(
@@ -113,8 +113,8 @@ Genkit doesn't prevent you from using any of the available models from various a
 
 One of the main benefits of Genkit is the ability to define the prompt as code and register it with Genkit, and for that you can use the `definePrompt` function:
 
-```
-import {definePrompt} from '@genkit-ai/ai';
+```typescript
+import { definePrompt } from '@genkit-ai/ai';
 
 const helloPrompt = definePrompt(
   {
@@ -127,9 +127,8 @@ const helloPrompt = definePrompt(
 
     return {
       messages: [{ role: 'user', content: [{ text: promptText }] }],
-      config: { temperature: 0.3,
-      }
-    }
+      config: { temperature: 0.3 },
+    };
   }
 );
 ```
@@ -137,16 +136,16 @@ const helloPrompt = definePrompt(
 
 In this way, you can test your prompts independently of the code or specific model in the Genkit Dev UI. This also enables the definition of input schemas, which enable you to customize each prompt call with a specific set of arguments, or specify the output format, as showcased a bit later below. To use this prompt in your development, you can use the `renderPrompt` function:
 
-```
+```typescript
 import { generate, renderPrompt } from '@genkit-ai/ai';
-import {gemma_7b} from 'genkitx-groq';
+import { gemma7b } from 'genkitx-groq';
 
 const response = await generate(
-  renderPrompt({
-    prompt: helloPrompt,
-    input: { name: 'Fred' },
-    model: gemma_7b
-  })
+  renderPrompt({
+    prompt: helloPrompt,
+    input: { name: 'Fred' },
+    model: gemma7b,
+  })
 );
 console.log(await response.text());
 ```
@@ -155,12 +154,10 @@ console.log(await response.text());
 
 ### Dotprompt
 
 Genkit introduced a concept of Dotprompt, which is a plugin that enables you to store prompts in dedicated files, track changes and organize them in a JSON format or as code. To use it, you must enable the Dotprompt plugin first:
 
-```
-import {dotprompt} from '@genkit-ai/dotprompt';
+```typescript
+import { dotprompt } from '@genkit-ai/dotprompt';
 export default configureGenkit({
-  plugins: [
-    dotprompt(),
-  ],
+  plugins: [dotprompt()],
 });
 ```
@@ -184,8 +181,8 @@ Greet a guest named {{name}}.
 
 To register it with Genkit and use it in development, you could use the `prompt` helper function from the Dotprompt plugin:
 
-```
-import {prompt} from '@genkit-ai/dotprompt';
+```typescript
+import { prompt } from '@genkit-ai/dotprompt';
 
 const greetingPrompt = await prompt('basic');
 ```
 
 where `basic` represents the name of the file, `/prompts/basic.prompt`, in which the Dotprompt is stored.
 This plugin also enables you to write prompts directly as code:
 
-```
-import {defineDotprompt} from '@genkit-ai/dotprompt';
+```typescript
+import { defineDotprompt } from '@genkit-ai/dotprompt';
 
 const codeDotPrompt = defineDotprompt(
   {
@@ -228,19 +225,18 @@ const codeDotPrompt = defineDotprompt(
 
 Finally, you can use the same `generate` helper function to call the model with the given Dotprompt:
 
-```
+```typescript
 const response = await codeDotPrompt.generate({
-    input:{
-      object_name: 'Ball',
-      image_url: 'https://example_url.jpg',
-    }
-  }
-);
+  input: {
+    object_name: 'Ball',
+    image_url: 'https://example_url.jpg',
+  },
+});
 ```
 
 In this case, to obtain the structured output which we specified in a prompt, we can run:
 
-```
+```typescript
 console.log(await response.output());
 ```
 
@@ -248,7 +244,7 @@ console.log(await response.output());
 
 ### Flows
 
 Flows are the enhanced version of the standard functions, which are strongly typed, streamable, and locally and remotely callable. They can also be registered and later tested in the Genkit Dev UI. To define and run a flow, one can use the `defineFlow` and `runFlow` functions:
 
-```
+```typescript
 import { defineFlow, runFlow } from '@genkit-ai/flow';
 import {llama_3_70b} from 'genkitx-groq';
 // define Flow
@@ -280,19 +276,19 @@ Apart from the text generation models, Genkit also features the access to the te
 
 implements retrievers which can retrieve documents, given a query. To use the text embedding models, you should utilize the `embed` method:
 
-```
+```typescript
 import { textEmbedding3Small } from 'genkitx-openai-plugin';
 import { embed } from '@genkit-ai/ai/embedder';
 
 const embedding = embed({
   embedder: textEmbedding3Small,
-  content: "Embed this text.",
+  content: 'Embed this text.',
 });
 ```
 
 Here, the variable `embedding` will be a vector of numbers, which is a latent space representation of the given text, which can find use in many downstream tasks.
In this case, we can use the text embeddings in a retriever, to query similar documents from Firestore based on the extracted embeddings: -``` +```typescript import { embed } from '@genkit-ai/ai/embedder'; import { Document, defineRetriever } from '@genkit-ai/ai/retriever'; import { textEmbedding3Small } from 'genkitx-openai-plugin'; diff --git a/plugins/anthropic/README.md b/plugins/anthropic/README.md index f75fe672..73bae734 100644 --- a/plugins/anthropic/README.md +++ b/plugins/anthropic/README.md @@ -27,38 +27,80 @@ Install the plugin in your project with your favorite package manager: ## Usage +### Initialize + +```typescript +import 'dotenv/config'; + +import { configureGenkit } from '@genkit-ai/core'; +import { defineFlow, startFlowsServer } from '@genkit-ai/flow'; +import { anthropic } from 'genkitx-anthropicai'; + +configureGenkit({ + plugins: [ + // Anthropic API key is required and defaults to the ANTHROPIC_API_KEY environment variable + anthropic({ apiKey: process.env.ANTHROPIC_API_KEY }), + ], + logLevel: 'debug', + enableTracingAndMetrics: true, +}); +``` + ### Basic examples The simplest way to call the text generation model is by using the helper function `generate`: -``` +```typescript // Basic usage of an LLM const response = await generate({ - model: claude3Haiku, - prompt: 'Tell me a joke.', + model: claude3Haiku, // model imported from genkitx-anthropicai + prompt: 'Tell me a joke.', }); console.log(await response.text()); ``` -Using the same interface, you can prompt a multimodal model: +### Multi-modal prompt -``` +```typescript const response = await generate({ model: claude3Haiku, prompt: [ { text: 'What animal is in the photo?' }, - { media: { url: imageUrl} }, + { media: { url: imageUrl } }, ], - config:{ + config: { // control of the level of visual detail when processing image embeddings // Low detail level also decreases the token usage visualDetailLevel: 'low', - } + }, }); console.log(await response.text()); ``` +### Within a flow + +```typescript +// ...configure Genkit (as shown above)... + +export const myFlow = defineFlow( + { + name: 'menuSuggestionFlow', + inputSchema: z.string(), + outputSchema: z.string(), + }, + async (subject) => { + const llmResponse = await generate({ + prompt: `Suggest an item for the menu of a ${subject} themed restaurant`, + model: claude3Open, + }); + + return llmResponse.text(); + } +); +startFlowsServer(); +``` + ## Contributing Want to contribute to the project? That's awesome! Head over to our [Contribution Guidelines](CONTRIBUTING.md). diff --git a/plugins/azure-openai/README.md b/plugins/azure-openai/README.md index ee9bd868..539eb341 100644 --- a/plugins/azure-openai/README.md +++ b/plugins/azure-openai/README.md @@ -90,11 +90,11 @@ export default configureGenkit({ The simplest way to call the text generation model is by using the helper function `generate`: -``` +```typescript // Basic usage of an LLM const response = await generate({ - model: gpt35Turbo, - prompt: 'Tell me a joke.', + model: gpt35Turbo, + prompt: 'Tell me a joke.', }); console.log(await response.text()); @@ -102,18 +102,18 @@ console.log(await response.text()); Using the same interface, you can prompt a multimodal model: -``` +```typescript const response = await generate({ model: gpt4o, prompt: [ { text: 'What animal is in the photo?' 
},
-    { media: { url: imageUrl} },
+    { media: { url: imageUrl } },
   ],
-  config:{
+  config: {
     // control of the level of visual detail when processing image embeddings
     // Low detail level also decreases the token usage
     visualDetailLevel: 'low',
-  }
+  },
 });
 console.log(await response.text());
 ```
diff --git a/plugins/cohere/README.md b/plugins/cohere/README.md
index 2c26a92a..ed82076d 100644
--- a/plugins/cohere/README.md
+++ b/plugins/cohere/README.md
@@ -29,36 +29,95 @@ Install the plugin in your project with your favorite package manager:
 
 ## Usage
 
+### Initialize
+
+```typescript
+import 'dotenv/config';
+
+import { configureGenkit } from '@genkit-ai/core';
+import { defineFlow, startFlowsServer } from '@genkit-ai/flow';
+import { cohere } from 'genkitx-cohere';
+
+configureGenkit({
+  plugins: [
+    // Cohere API key is required and defaults to the COHERE_API_KEY environment variable
+    cohere({ apiKey: process.env.COHERE_API_KEY }),
+  ],
+  logLevel: 'debug',
+  enableTracingAndMetrics: true,
+});
+```
+
 ### Basic examples
 
 The simplest way to call the text generation model is by using the helper function `generate`:
 
-```
-// Basic usage of an LLM
+```typescript
+// ...configure Genkit (as shown above)...
+
 const response = await generate({
-  model: commandRPlus,
-  prompt: 'Tell me a joke.',
+  model: commandRPlus, // model imported from genkitx-cohere
+  prompt: 'Tell me a joke.',
 });
 
 console.log(await response.text());
 ```
 
-Using the same interface, you can prompt a multimodal model:
+### Within a flow
 
-```
-const response = await generate({
-  model: commandRPlus,
-  prompt: [
-    { text: 'What animal is in the photo?' },
-    { media: { url: imageUrl} },
-  ],
-  config:{
-    // control of the level of visual detail when processing image embeddings
-    // Low detail level also decreases the token usage
-    visualDetailLevel: 'low',
+```typescript
+// ...configure Genkit (as shown above)...
+
+export const myFlow = defineFlow(
+  {
+    name: 'menuSuggestionFlow',
+    inputSchema: z.string(),
+    outputSchema: z.string(),
+  },
+  async (subject) => {
+    const llmResponse = await generate({
+      prompt: `Suggest an item for the menu of a ${subject} themed restaurant`,
+      model: commandRPlus,
+    });
+
+    return llmResponse.text();
   }
+);
+startFlowsServer();
+```
+
+### Tool use
+
+```typescript
+// ...configure Genkit (as shown above)...
+
+const createReminder = defineTool(
+  {
+    name: 'createReminder',
+    description: 'Use this to create reminders for things in the future',
+    inputSchema: z.object({
+      time: z
+        .string()
+        .describe('ISO timestamp string, e.g. 2024-04-03T12:23:00Z'),
+      reminder: z.string().describe('the content of the reminder'),
+    }),
+    outputSchema: z.number().describe('the ID of the created reminder'),
+  },
+  (reminder) => Promise.resolve(3)
+);
+
+const result = await generate({
+  model: commandRPlus,
+  tools: [createReminder],
+  prompt: `
+    You are a reminder assistant.
+    If you create a reminder, describe in text the reminder you created as a response.
+
+    Query: I have a meeting with Anna at 3 for dinner - can you set a reminder for the time?
+  `,
 });
-console.log(await response.text());
+
+console.log(result.text());
 ```
 
 ## Contributing
diff --git a/plugins/groq/README.md b/plugins/groq/README.md
index 40762b4e..4e71df9a 100644
--- a/plugins/groq/README.md
+++ b/plugins/groq/README.md
@@ -31,36 +31,95 @@ Install the plugin in your project with your favorite package manager:
 
 ## Usage
 
+### Initialize
+
+```typescript
+import 'dotenv/config';
+
+import { configureGenkit } from '@genkit-ai/core';
+import { defineFlow, startFlowsServer } from '@genkit-ai/flow';
+import { groq } from 'genkitx-groq';
+
+configureGenkit({
+  plugins: [
+    // Groq API key is required and defaults to the GROQ_API_KEY environment variable
+    groq({ apiKey: process.env.GROQ_API_KEY }),
+  ],
+  logLevel: 'debug',
+  enableTracingAndMetrics: true,
+});
+```
+
 ### Basic examples
 
 The simplest way to call the text generation model is by using the helper function `generate`:
 
-```
-// Basic usage of an LLM
+```typescript
+// ...configure Genkit (as shown above)...
+
 const response = await generate({
-  model: llama_3_70b,
-  prompt: 'Tell me a joke.',
+  model: llama3x70b, // model imported from genkitx-groq
+  prompt: 'Tell me a joke.',
 });
 
 console.log(await response.text());
 ```
 
-Using the same interface, you can prompt a multimodal model:
+### Within a flow
 
-```
-const response = await generate({
-  model: llama_3_70b,
-  prompt: [
-    { text: 'What animal is in the photo?' },
-    { media: { url: imageUrl} },
-  ],
-  config:{
-    // control of the level of visual detail when processing image embeddings
-    // Low detail level also decreases the token usage
-    visualDetailLevel: 'low',
+```typescript
+// ...configure Genkit (as shown above)...
+
+export const myFlow = defineFlow(
+  {
+    name: 'menuSuggestionFlow',
+    inputSchema: z.string(),
+    outputSchema: z.string(),
+  },
+  async (subject) => {
+    const llmResponse = await generate({
+      prompt: `Suggest an item for the menu of a ${subject} themed restaurant`,
+      model: llama3x70b,
+    });
+
+    return llmResponse.text();
   }
+);
+startFlowsServer();
+```
+
+### Tool use
+
+```typescript
+// ...configure Genkit (as shown above)...
+
+const createReminder = defineTool(
+  {
+    name: 'createReminder',
+    description: 'Use this to create reminders for things in the future',
+    inputSchema: z.object({
+      time: z
+        .string()
+        .describe('ISO timestamp string, e.g. 2024-04-03T12:23:00Z'),
+      reminder: z.string().describe('the content of the reminder'),
+    }),
+    outputSchema: z.number().describe('the ID of the created reminder'),
+  },
+  (reminder) => Promise.resolve(3)
+);
+
+const result = await generate({
+  model: llama3x70b,
+  tools: [createReminder],
+  prompt: `
+    You are a reminder assistant.
+    If you create a reminder, describe in text the reminder you created as a response.
+
+    Query: I have a meeting with Anna at 3 for dinner - can you set a reminder for the time?
+  `,
 });
-console.log(await response.text());
+
+console.log(result.text());
 ```
 
 ## Contributing
diff --git a/plugins/groq/src/groq_models.ts b/plugins/groq/src/groq_models.ts
index c2786197..ce75280a 100644
--- a/plugins/groq/src/groq_models.ts
+++ b/plugins/groq/src/groq_models.ts
@@ -56,7 +56,7 @@ export const GroqConfigSchema = z.object({
 });
 
 // Worst at JSON mode
-export const llama_3_8b = modelRef({
+export const llama3x8b = modelRef({
   name: 'groq/llama-3-8b',
   info: {
     versions: ['llama3-8b-8192'],
@@ -74,7 +74,7 @@ export const llama_3_8b = modelRef({
 
 // Worst at JSON mode
 // Only model recommended for Tool Use
-export const llama_3_70b = modelRef({
+export const llama3x70b = modelRef({
   name: 'groq/llama-3-70b',
   info: {
     versions: ['llama3-70b-8192'],
@@ -91,7 +91,7 @@ export const llama_3_70b = modelRef({
 });
 
 // Best at JSON mode
-export const mixtral_8_7b = modelRef({
+export const mixtral8x7b = modelRef({
   name: 'groq/mixtral-8x7b-32768',
   info: {
     versions: ['mixtral-8x7b-32768'],
@@ -108,7 +108,7 @@ export const mixtral_8_7b = modelRef({
 });
 
 // Runner up at JSON mode
-export const gemma_7b = modelRef({
+export const gemma7b = modelRef({
   name: 'groq/gemma-7b-it',
   info: {
     versions: ['gemma-7b-it'],
@@ -125,10 +125,10 @@ export const gemma_7b = modelRef({
 });
 
 export const SUPPORTED_GROQ_MODELS = {
-  'llama-3-8b': llama_3_8b,
-  'llama-3-70b': llama_3_70b,
-  'mixtral-8-7b': mixtral_8_7b,
-  'gemma-7b': gemma_7b,
+  'llama-3-8b': llama3x8b,
+  'llama-3-70b': llama3x70b,
+  'mixtral-8-7b': mixtral8x7b,
+  'gemma-7b': gemma7b,
 };
 
 export const DEFAULT_MODEL_VERSION = {
diff --git a/plugins/groq/src/index.ts b/plugins/groq/src/index.ts
index df1046c7..48962da3 100644
--- a/plugins/groq/src/index.ts
+++ b/plugins/groq/src/index.ts
@@ -17,15 +17,15 @@ import { genkitPlugin, Plugin } from '@genkit-ai/core';
 import Groq from 'groq-sdk';
 
 import {
-  llama_3_70b,
-  llama_3_8b,
-  gemma_7b,
-  mixtral_8_7b,
+  llama3x70b,
+  llama3x8b,
+  gemma7b,
+  mixtral8x7b,
   groqModel,
   SUPPORTED_GROQ_MODELS,
 } from './groq_models';
 
-export { llama_3_70b, llama_3_8b, gemma_7b, mixtral_8_7b };
+export { llama3x70b, llama3x8b, gemma7b, mixtral8x7b };
 
 export interface PluginOptions {
   /**
diff --git a/plugins/groq/tests/groq_test.ts b/plugins/groq/tests/groq_test.ts
index cbdacd3b..9cf9afb4 100644
--- a/plugins/groq/tests/groq_test.ts
+++ b/plugins/groq/tests/groq_test.ts
@@ -11,7 +11,6 @@ import {
   toGroqRole,
   toGroqTool,
   toGroqMessages,
-  groqModel,
 } from '../src/groq_models';
 import { ChatCompletionCreateParamsBase } from 'groq-sdk/resources/chat/completions.mjs';
diff --git a/plugins/mistral/README.md b/plugins/mistral/README.md
index fb400f99..8c5031c1 100644
--- a/plugins/mistral/README.md
+++ b/plugins/mistral/README.md
@@ -29,8 +29,59 @@ Install the plugin in your project with your favorite package manager:
 
 ## Usage
 
-> \[!WARNING\]\
-> Documentation is currently work in progress.
+### Initialize + +```typescript +import 'dotenv/config'; + +import { configureGenkit } from '@genkit-ai/core'; +import { defineFlow, startFlowsServer } from '@genkit-ai/flow'; +import { mistral } from 'genkitx-mistral'; + +configureGenkit({ + plugins: [ + // Mistral API key is required and defaults to the MISTRAL_API_KEY environment variable + mistral({ apiKey: process.env.MISTRAL_API_KEY }), + ], + logLevel: 'debug', + enableTracingAndMetrics: true, +}); +``` + +### Basic examples + +The simplest way to call the text generation model is by using the helper function `generate`: + +```typescript +// Basic usage of an LLM +const response = await generate({ + model: openMixtral8x22B, // model imported from genkitx-mistral + prompt: 'Tell me a joke.', +}); + +console.log(await response.text()); +``` + +### Within a flow + +```typescript +export const myFlow = defineFlow( + { + name: 'menuSuggestionFlow', + inputSchema: z.string(), + outputSchema: z.string(), + }, + async (subject) => { + const llmResponse = await generate({ + prompt: `Suggest an item for the menu of a ${subject} themed restaurant`, + model: openMixtral8x22B, + }); + + return llmResponse.text(); + } +); +startFlowsServer(); +``` ## Contributing diff --git a/plugins/mistral/src/index.ts b/plugins/mistral/src/index.ts index 638bd015..08aa2d2a 100644 --- a/plugins/mistral/src/index.ts +++ b/plugins/mistral/src/index.ts @@ -15,8 +15,15 @@ */ import { genkitPlugin, Plugin } from '@genkit-ai/core'; -import { SUPPORTED_MISTRAL_MODELS, mistralModel } from './mistral_llms'; +import { + openMistral7B, + openMistral8x7B, + openMixtral8x22B, + SUPPORTED_MISTRAL_MODELS, + mistralModel, +} from './mistral_llms'; +export { openMistral7B, openMistral8x7B, openMixtral8x22B }; export interface PluginOptions { apiKey?: string; } @@ -27,7 +34,7 @@ export const mistral: Plugin<[PluginOptions] | []> = genkitPlugin( let apiKey = options?.apiKey || process.env.MISTRAL_API_KEY; if (!apiKey) throw new Error( - 'Please pass in the API key or set the MISTRALAI_API_KEY environment variable' + 'Please pass in the API key or set the MISTRAL_API_KEY environment variable' ); // Dynamically import the MistralClient const { default: MistralClient } = await import('@mistralai/mistralai'); diff --git a/plugins/openai/README.md b/plugins/openai/README.md index 6e34c82f..ad96ed95 100644 --- a/plugins/openai/README.md +++ b/plugins/openai/README.md @@ -37,34 +37,91 @@ Install the plugin in your project with your favorite package manager: The simplest way to call the text generation model is by using the helper function `generate`: -``` +```typescript // Basic usage of an LLM const response = await generate({ - model: gpt35Turbo, - prompt: 'Tell me a joke.', + model: gpt4o, // model imported from genkitx-openai-plugin + prompt: 'Tell me a joke.', }); console.log(await response.text()); ``` -Using the same interface, you can prompt a multimodal model: +### Multi-modal prompt -``` +```typescript const response = await generate({ model: gpt4o, prompt: [ { text: 'What animal is in the photo?' }, - { media: { url: imageUrl} }, + { media: { url: imageUrl } }, ], - config:{ + config: { // control of the level of visual detail when processing image embeddings // Low detail level also decreases the token usage visualDetailLevel: 'low', - } + }, }); console.log(await response.text()); ``` +### Within a flow + +```typescript +// ...configure Genkit (as shown above)... 
+
+export const myFlow = defineFlow(
+  {
+    name: 'menuSuggestionFlow',
+    inputSchema: z.string(),
+    outputSchema: z.string(),
+  },
+  async (subject) => {
+    const llmResponse = await generate({
+      prompt: `Suggest an item for the menu of a ${subject} themed restaurant`,
+      model: gpt4o,
+    });
+
+    return llmResponse.text();
+  }
+);
+startFlowsServer();
+```
+
+### Tool use
+
+```typescript
+// ...configure Genkit (as shown above)...
+
+const createReminder = defineTool(
+  {
+    name: 'createReminder',
+    description: 'Use this to create reminders for things in the future',
+    inputSchema: z.object({
+      time: z
+        .string()
+        .describe('ISO timestamp string, e.g. 2024-04-03T12:23:00Z'),
+      reminder: z.string().describe('the content of the reminder'),
+    }),
+    outputSchema: z.number().describe('the ID of the created reminder'),
+  },
+  (reminder) => Promise.resolve(3)
+);
+
+const result = await generate({
+  model: gpt4o,
+  tools: [createReminder],
+  prompt: `
+    You are a reminder assistant.
+    If you create a reminder, describe in text the reminder you created as a response.
+
+    Query: I have a meeting with Anna at 3 for dinner - can you set a reminder for the time?
+  `,
+});
+
+console.log(result.text());
+```
+
 For more detailed examples and the explanation of other functionalities, refer to the examples in the [official Github repo of the plugin](https://github.com/TheFireCo/genkit-plugins/blob/main/examples/README.md) or in the [official Genkit documentation](https://firebase.google.com/docs/genkit/get-started).
 
 ## Contributing

From 53c59317d9e99556aaa99f804bac1da3b0581cd6 Mon Sep 17 00:00:00 2001
From: David Alonso
Date: Sat, 18 May 2024 10:21:04 +0200
Subject: [PATCH 2/2] small fixes

---
 plugins/anthropic/README.md | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/plugins/anthropic/README.md b/plugins/anthropic/README.md
index 73bae734..40b25451 100644
--- a/plugins/anthropic/README.md
+++ b/plugins/anthropic/README.md
@@ -51,7 +51,8 @@ configureGenkit({
 The simplest way to call the text generation model is by using the helper function `generate`:
 
 ```typescript
-// Basic usage of an LLM
+// ...configure Genkit (as shown above)...
+
 const response = await generate({
   model: claude3Haiku, // model imported from genkitx-anthropicai
   prompt: 'Tell me a joke.',
@@ -63,6 +64,8 @@ console.log(await response.text());
 ### Multi-modal prompt
 
 ```typescript
+// ...configure Genkit (as shown above)...
+
 const response = await generate({
   model: claude3Haiku,
   prompt: [
@@ -92,7 +95,7 @@ export const myFlow = defineFlow(
   async (subject) => {
     const llmResponse = await generate({
       prompt: `Suggest an item for the menu of a ${subject} themed restaurant`,
-      model: claude3Open,
+      model: claude3Opus,
     });
 
     return llmResponse.text();