From 1a77c72776d53739847b42128d8ed627587dcdea Mon Sep 17 00:00:00 2001
From: Christina Holland
Date: Fri, 12 Jul 2024 11:50:13 -0700
Subject: [PATCH 1/4] Add import comments to all samples (#207)

---
 samples/cache.js                        |  17 +++
 samples/chat.js                         |   6 +
 samples/code_execution.js               |   6 +
 samples/controlled_generation.js        |   4 +
 samples/count_tokens.js                 |  19 +++
 samples/embed.js                        |   4 +
 samples/files.js                        |  18 +++
 samples/function_calling.js             |   2 +
 samples/model_configuration.js          |   2 +
 samples/package.json                    |   1 +
 samples/safety_settings.js              |   4 +
 samples/system_instruction.js           |   2 +
 samples/text_generation.js              |  20 ++++
 samples/utils/check-samples.js          |  70 ++++++-----
 samples/utils/common.js                 |  64 ++++++++++
 samples/utils/insert-import-comments.js | 148 ++++++++++++++++++++++++
 16 files changed, 351 insertions(+), 36 deletions(-)
 create mode 100644 samples/utils/common.js
 create mode 100644 samples/utils/insert-import-comments.js

diff --git a/samples/cache.js b/samples/cache.js
index 2e991f38..2e783aea 100644
--- a/samples/cache.js
+++ b/samples/cache.js
@@ -28,6 +28,9 @@ const mediaPath = __dirname + "/media";
 
 async function cacheCreate() {
   // [START cache_create]
+  // Make sure to include these imports:
+  // import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
@@ -66,6 +69,9 @@ async function cacheCreate() {
 
 async function cacheCreateFromName() {
   // [START cache_create_from_name]
+  // Make sure to include these imports:
+  // import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
@@ -102,6 +108,9 @@ async function cacheCreateFromName() {
 
 async function cacheCreateFromChat() {
   // [START cache_create_from_chat]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
+  // import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
@@ -147,6 +156,8 @@ async function cacheCreateFromChat() {
 
 async function cacheDelete() {
   // [START cache_delete]
+  // Make sure to include these imports:
+  // import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
@@ -176,6 +187,8 @@ async function cacheDelete() {
 
 async function cacheGet() {
   // [START cache_get]
+  // Make sure to include these imports:
+  // import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
@@ -207,6 +220,8 @@ async function cacheGet() {
 
 async function cacheList() {
   // [START cache_list]
+  // Make sure to include these imports:
+  // import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
@@ -241,6 +256,8 @@ async function cacheList() {
 
 async function cacheUpdate() {
   // [START cache_update]
+  // Make sure to include these imports:
+  // import { GoogleAICacheManager, GoogleAIFileManager } from "@google/generative-ai/server";
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
diff --git a/samples/chat.js b/samples/chat.js
index b3b8712d..6f0ca5ff 100644
--- a/samples/chat.js
+++ b/samples/chat.js
@@ -25,6 +25,8 @@ const mediaPath = __dirname + "/media";
 
 async function chat() {
   // [START chat]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
   const chat = model.startChat({
@@ -48,6 +50,8 @@ async function chat() {
 
 async function chatStreaming() {
   // [START chat_streaming]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
   const chat = model.startChat({
@@ -77,6 +81,8 @@ async function chatStreaming() {
 
 async function chatStreamingWithImages() {
   // [START chat_streaming_with_images]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
   const chat = model.startChat();
diff --git a/samples/code_execution.js b/samples/code_execution.js
index cd3fdbbb..83a2b174 100644
--- a/samples/code_execution.js
+++ b/samples/code_execution.js
@@ -19,6 +19,8 @@ import { GoogleGenerativeAI } from "@google/generative-ai";
 
 async function codeExecutionBasic() {
   // [START code_execution_basic]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
@@ -37,6 +39,8 @@ async function codeExecutionBasic() {
 
 async function codeExecutionRequestOverride() {
   // [START code_execution_request_override]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
@@ -65,6 +69,8 @@ async function codeExecutionRequestOverride() {
 
 async function codeExecutionChat() {
   // [START code_execution_chat]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
diff --git a/samples/controlled_generation.js b/samples/controlled_generation.js
index 533f881f..f1f1ce75 100644
--- a/samples/controlled_generation.js
+++ b/samples/controlled_generation.js
@@ -22,6 +22,8 @@ import {
 
 async function jsonControlledGeneration() {
   // [START json_controlled_generation]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI, FunctionDeclarationSchemaType } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
 
   const schema = {
@@ -57,6 +59,8 @@ async function jsonNoSchema() {
   // [START json_no_schema]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
 
   const model = genAI.getGenerativeModel({
diff --git a/samples/count_tokens.js b/samples/count_tokens.js
index ab83c91c..d93a4c2d 100644
--- a/samples/count_tokens.js
+++ b/samples/count_tokens.js
@@ -30,6 +30,8 @@ const mediaPath = __dirname + "/media";
 
 async function tokensTextOnly() {
   // [START tokens_text_only]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
@@ -57,6 +59,8 @@ async function tokensTextOnly() {
 
 async function tokensChat() {
   // [START tokens_chat]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
@@ -70,6 +74,8 @@ async function tokensChat() {
 
 async function tokensMultimodalImageInline() {
   // [START tokens_multimodal_image_inline]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
@@ -99,6 +105,9 @@ async function tokensMultimodalImageInline() {
 
 async function tokensMultimodalImageFileApi() {
   // [START tokens_multimodal_image_file_api]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const uploadResult = await fileManager.uploadFile(
@@ -129,6 +138,9 @@ async function tokensMultimodalImageFileApi() {
 
 async function tokensMultimodalVideoAudioFileApi() {
   // [START tokens_multimodal_video_audio_file_api]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager, FileState } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   function waitForProcessing(fileName) {
@@ -195,6 +207,9 @@ async function tokensMultimodalVideoAudioFileApi() {
 
 async function tokensCachedContent() {
   // [START tokens_cached_content]
+  // Make sure to include these imports:
+  // import { GoogleAICacheManager } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   // Generate a very long string
   let longContentString = "";
   for (let i = 0; i < 32001; i++) {
@@ -235,6 +250,8 @@ async function tokensCachedContent() {
 
 async function tokensSystemInstruction() {
   // [START tokens_system_instruction]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "models/gemini-1.5-flash",
@@ -266,6 +283,8 @@ async function tokensSystemInstruction() {
 
 async function tokensTools() {
   // [START tokens_tools]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "models/gemini-1.5-flash",
diff --git a/samples/embed.js b/samples/embed.js
index 0b39f88d..890465cb 100644
--- a/samples/embed.js
+++ b/samples/embed.js
@@ -19,6 +19,8 @@ import { GoogleGenerativeAI } from "@google/generative-ai";
 
 async function embedContent() {
   // [START embed_content]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "text-embedding-004",
@@ -32,6 +34,8 @@ async function embedContent() {
 
 async function batchEmbedContents() {
   // [START batch_embed_contents]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "text-embedding-004",
diff --git a/samples/files.js b/samples/files.js
index 8bf99b82..71a899e7 100644
--- a/samples/files.js
+++ b/samples/files.js
@@ -25,6 +25,9 @@ const mediaPath = __dirname + "/media";
 
 async function filesCreateImage() {
   // [START files_create_image]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const uploadResult = await fileManager.uploadFile(
@@ -56,6 +59,9 @@ async function filesCreateImage() {
 
 async function filesCreateAudio() {
   // [START files_create_audio]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager, FileState } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const uploadResult = await fileManager.uploadFile(
@@ -101,6 +107,9 @@ async function filesCreateAudio() {
 
 async function filesCreateText() {
   // [START files_create_text]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
@@ -129,6 +138,9 @@ async function filesCreateText() {
 
 async function filesCreateVideo() {
   // [START files_create_video]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager, FileState } from "@google/generative-ai/server";
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const uploadResult = await fileManager.uploadFile(
@@ -174,6 +186,8 @@ async function filesCreateVideo() {
 
 async function filesList() {
   // [START files_list]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager } from "@google/generative-ai/server";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const listFilesResponse = await fileManager.listFiles();
@@ -187,6 +201,8 @@ async function filesList() {
 
 async function filesGet() {
   // [START files_get]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager } from "@google/generative-ai/server";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const uploadResponse = await fileManager.uploadFile(
@@ -209,6 +225,8 @@ async function filesGet() {
 
 async function filesDelete() {
   // [START files_delete]
+  // Make sure to include these imports:
+  // import { GoogleAIFileManager } from "@google/generative-ai/server";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
   const uploadResult = await fileManager.uploadFile(
diff --git a/samples/function_calling.js b/samples/function_calling.js
index 98e34fb3..a864efb5 100644
--- a/samples/function_calling.js
+++ b/samples/function_calling.js
@@ -19,6 +19,8 @@ import { GoogleGenerativeAI } from "@google/generative-ai";
 
 async function functionCalling() {
   // [START function_calling]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   async function setLightValues(brightness, colorTemperature) {
     // This mock API returns the requested lighting values
     return {
diff --git a/samples/model_configuration.js b/samples/model_configuration.js
index a2b76910..2146f902 100644
--- a/samples/model_configuration.js
+++ b/samples/model_configuration.js
@@ -19,6 +19,8 @@ import { GoogleGenerativeAI } from "@google/generative-ai";
 
 async function configureModel() {
   // [START configure_model]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
diff --git a/samples/package.json b/samples/package.json
index aadabcd0..6a79a2a3 100644
--- a/samples/package.json
+++ b/samples/package.json
@@ -5,6 +5,7 @@
   },
   "scripts": {
     "check-samples": "node ./utils/check-samples.js",
+    "import-comments": "node ./utils/insert-import-comments.js",
     "test": "yarn check-samples"
   }
 }
diff --git a/samples/safety_settings.js b/samples/safety_settings.js
index 6a6d2355..0a6070c9 100644
--- a/samples/safety_settings.js
+++ b/samples/safety_settings.js
@@ -23,6 +23,8 @@ import {
 
 async function safetySettings() {
   // [START safety_settings]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
@@ -52,6 +54,8 @@ async function safetySettings() {
 
 async function safetySettingsMulti() {
   // [START safety_settings_multi]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
diff --git a/samples/system_instruction.js b/samples/system_instruction.js
index 8a7b682b..bbca5f75 100644
--- a/samples/system_instruction.js
+++ b/samples/system_instruction.js
@@ -19,6 +19,8 @@ import { GoogleGenerativeAI } from "@google/generative-ai";
 
 async function systemInstruction() {
   // [START system_instruction]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
diff --git a/samples/text_generation.js b/samples/text_generation.js
index 1049a2b5..23fc52d5 100644
--- a/samples/text_generation.js
+++ b/samples/text_generation.js
@@ -26,6 +26,8 @@ const mediaPath = __dirname + "/media";
 
 async function textGenTextOnlyPrompt() {
   // [START text_gen_text_only_prompt]
+  // Make sure to include these imports:
+  // import { GoogleGenerativeAI } from "@google/generative-ai";
   const genAI = new GoogleGenerativeAI(process.env.API_KEY);
   const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });
"gemini-1.5-flash" }); @@ -38,6 +40,8 @@ async function textGenTextOnlyPrompt() { async function textGenTextOnlyPromptStreaming() { // [START text_gen_text_only_prompt_streaming] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); @@ -55,6 +59,8 @@ async function textGenTextOnlyPromptStreaming() { async function textGenMultimodalOneImagePrompt() { // [START text_gen_multimodal_one_image_prompt] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); @@ -81,6 +87,8 @@ async function textGenMultimodalOneImagePrompt() { async function textGenMultimodalOneImagePromptStreaming() { // [START text_gen_multimodal_one_image_prompt_streaming] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); @@ -112,6 +120,8 @@ async function textGenMultimodalOneImagePromptStreaming() { async function textGenMultimodalMultiImagePrompt() { // [START text_gen_multimodal_multi_image_prompt] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); @@ -142,6 +152,8 @@ async function textGenMultimodalMultiImagePrompt() { async function textGenMultimodalMultiImagePromptStreaming() { // [START text_gen_multimodal_multi_image_prompt_streaming] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); @@ -177,6 +189,8 @@ async function textGenMultimodalMultiImagePromptStreaming() { async function textGenMultimodalAudio() { // [START text_gen_multimodal_audio] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); @@ -203,6 +217,9 @@ async function textGenMultimodalAudio() { async function textGenMultimodalVideoPrompt() { // [START text_gen_multimodal_video_prompt] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; + // import { GoogleAIFileManager, FileState } from "@google/generative-ai/server"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); @@ -242,6 +259,9 @@ async function textGenMultimodalVideoPrompt() { async function textGenMultimodalVideoPromptStreaming() { // [START text_gen_multimodal_video_prompt_streaming] + // Make sure to include these imports: + // import { GoogleGenerativeAI } from "@google/generative-ai"; + // import { GoogleAIFileManager, FileState } from "@google/generative-ai/server"; const genAI = new GoogleGenerativeAI(process.env.API_KEY); const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" }); diff --git a/samples/utils/check-samples.js b/samples/utils/check-samples.js index 
47b03448..efd3559f 100644 --- a/samples/utils/check-samples.js +++ b/samples/utils/check-samples.js @@ -15,48 +15,43 @@ * limitations under the License. */ +import { findFunctions, samplesDir } from "./common.js"; import fs from "fs"; -import { dirname, join } from "path"; -import { fileURLToPath } from "url"; - -const __dirname = dirname(fileURLToPath(import.meta.url)); -const samplesDir = join(__dirname, '../') +import { join } from "path"; +/** + * Checks samples to make sure they have region tags and the tags match the + * function name. + */ async function checkSamples() { const files = fs.readdirSync(samplesDir); for (const filename of files) { - if (filename.match(/.+\.js$/) && !filename.includes('-')) { - const file = fs.readFileSync(join(samplesDir, filename), 'utf-8'); - const lines = file.split('\n'); - let currentFunctionName = ''; - let currentStartTag = ''; - let tagsOk = false; - for (const line of lines) { - const functionStartParts = line.match(/^(async function|function) (.+)\(/); - if (functionStartParts) { - currentFunctionName = functionStartParts[2]; + if (filename.match(/.+\.js$/) && !filename.includes("-")) { + const file = fs.readFileSync(join(samplesDir, filename), "utf-8"); + const functions = findFunctions(file); + for (const sampleFn in functions) { + if (sampleFn === "runAll" || sampleFn === "run") { + continue; } - const tagStartParts = line.match(/\/\/ \[START (.+)\]/); - if (tagStartParts) { - currentStartTag = tagStartParts[1]; - if (camelCaseToUnderscore(currentFunctionName) !== currentStartTag) { - console.error(`[${filename}]: Region start tag ${currentStartTag} doesn't match function name ${currentFunctionName}`); - } + if (!functions[sampleFn].startTag || !functions[sampleFn].endTag) { + console.error( + `[${filename}]: Start and end tag not found or not correct in function ${sampleFn}`, + ); } - const tagEndParts = line.match(/\/\/ \[END (.+)\]/); - if (tagEndParts) { - if (tagEndParts[1] !== currentStartTag) { - console.error(`[${filename}]: Region end tag ${currentEndTag} doesn't match start tag ${currentStartTag}`); - } else { - tagsOk = true; - } + if ( + camelCaseToUnderscore(sampleFn) !== functions[sampleFn].startTag.tag + ) { + console.error( + `[${filename}]: Region start tag ${functions[sampleFn].startTag.tag} doesn't match function name ${sampleFn}`, + ); } - if (line.match(/^}$/)) { - if (!tagsOk && currentFunctionName !== 'runAll') { - console.error(`[${filename}]: Start and end tag not found or not correct in function ${currentFunctionName}`); - } - currentFunctionName = ''; - tagsOk = false; + if ( + functions[sampleFn].startTag.tag !== functions[sampleFn].endTag.tag || + functions[sampleFn].endTag.line <= functions[sampleFn].startTag.line + ) { + console.error( + `[${filename}]: Region end tag ${functions[sampleFn].endTag.tag} doesn't match start tag ${functions[sampleFn].startTag.tag}`, + ); } } } @@ -64,7 +59,10 @@ async function checkSamples() { } function camelCaseToUnderscore(camelCaseName) { - return camelCaseName.split(/\.?(?=[A-Z])/).join('_').toLowerCase(); + return camelCaseName + .split(/\.?(?=[A-Z])/) + .join("_") + .toLowerCase(); } -checkSamples(); \ No newline at end of file +checkSamples(); diff --git a/samples/utils/common.js b/samples/utils/common.js new file mode 100644 index 00000000..f7eeba99 --- /dev/null +++ b/samples/utils/common.js @@ -0,0 +1,64 @@ +/** + * @license + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in 
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { dirname, join } from "path";
+import { fileURLToPath } from "url";
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
+export const samplesDir = join(__dirname, "../");
+
+/**
+ * Extracts individual function information, given the text of a samples file.
+ */
+export function findFunctions(fileText) {
+  const lines = fileText.split("\n");
+  const functions = {};
+  let currentFunctionName = "";
+  for (const [index, line] of lines.entries()) {
+    const functionStartParts = line.match(/^(async function|function) (.+)\(/);
+    if (functionStartParts) {
+      currentFunctionName = functionStartParts[2];
+      functions[currentFunctionName] = { body: [] };
+    } else if (line.match(/^}$/)) {
+      currentFunctionName = "";
+    } else if (currentFunctionName) {
+      const tagStartParts = line.match(/\/\/ \[START (.+)\]/);
+      const tagEndParts = line.match(/\/\/ \[END (.+)\]/);
+      const importHead = line.match(/\/\/ Make sure to include/);
+      const importComment = line.match(/\/\/ import /);
+      if (tagStartParts) {
+        functions[currentFunctionName].startTag = {
+          line: index,
+          tag: tagStartParts[1],
+        };
+      } else if (tagEndParts) {
+        functions[currentFunctionName].endTag = {
+          line: index,
+          tag: tagEndParts[1],
+        };
+      } else if (importHead || importComment) {
+        if (!functions[currentFunctionName].importComments) {
+          functions[currentFunctionName].importComments = [];
+        }
+        functions[currentFunctionName].importComments.push(line);
+      } else {
+        functions[currentFunctionName].body.push(line);
+      }
+    }
+  }
+  return functions;
+}
diff --git a/samples/utils/insert-import-comments.js b/samples/utils/insert-import-comments.js
new file mode 100644
index 00000000..54fd1ea3
--- /dev/null
+++ b/samples/utils/insert-import-comments.js
@@ -0,0 +1,148 @@
+/**
+ * @license
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import { findFunctions, samplesDir } from "./common.js";
+import fs from "fs";
+import ts from "typescript";
+import { dirname, join } from "path";
+import { createRequire } from "module";
+
+const require = createRequire(import.meta.url);
+
+function getTopLevelSymbols(filePath) {
+  const typings = fs.readFileSync(filePath, "utf-8");
+
+  const sourceFile = ts.createSourceFile(
+    filePath,
+    typings,
+    ts.ScriptTarget.ES2015,
+  );
+  let symbols = [];
+  ts.forEachChild(sourceFile, (node) => {
+    if (node.name) {
+      symbols.push(node.name.text);
+    }
+  });
+  return symbols;
+}
+
+export function getAvailableSymbols() {
+  let packagePath = require.resolve('@google/generative-ai/package.json');
+  const pkg = require(packagePath);
+  const coreSymbols = getTopLevelSymbols(join(dirname(packagePath), pkg.exports["."].types));
+  const serverSymbolsRaw = getTopLevelSymbols(join(dirname(packagePath), pkg.exports["./server"].types));
+  const serverSymbols = serverSymbolsRaw.filter(
+    (serverSymbol) => !coreSymbols.includes(serverSymbol),
+  );
+  return [
+    {
+      importPath: "@google/generative-ai",
+      symbols: coreSymbols,
+    },
+    {
+      importPath: "@google/generative-ai/server",
+      symbols: serverSymbols,
+    },
+  ];
+}
+
+const requiredImports = getAvailableSymbols();
+
+function listRequiredImports(line) {
+  const results = [];
+  for (const requiredImport of requiredImports) {
+    for (const symbol of requiredImport.symbols) {
+      if (line.match(new RegExp(`[^a-zA-Z0-9]${symbol}[^a-zA-Z0-9]`))) {
+        if (!results[requiredImport.importPath]) {
+          results[requiredImport.importPath] = [];
+        }
+        results[requiredImport.importPath].push(symbol);
+        results.push({ symbol, importPath: requiredImport.importPath });
+      }
+    }
+  }
+  return results;
+}
+
+/**
+ * Inserts comments describing the required imports for making the code
+ * sample work, since we cannot add actual import statements inside
+ * the samples.
+ */
+async function insertImportComments() {
+  const files = fs.readdirSync(samplesDir);
+  for (const filename of files) {
+    if (filename.match(/.+\.js$/) && !filename.includes("-")) {
+      const file = fs.readFileSync(join(samplesDir, filename), "utf-8");
+      const functions = findFunctions(file);
+      for (const fnName in functions) {
+        const sampleFn = functions[fnName];
+        let results = [];
+        for (const line of sampleFn.body) {
+          results = results.concat(listRequiredImports(line));
+        }
+        if (results.length > 0) {
+          functions[fnName].requiredImports = {};
+          for (const result of results) {
+            if (!functions[fnName].requiredImports[result.importPath]) {
+              functions[fnName].requiredImports[result.importPath] = new Set();
+            }
+            functions[fnName].requiredImports[result.importPath].add(
+              result.symbol,
+            );
+          }
+        }
+      }
+      const fileLines = file.split("\n");
+      const newFileLines = [];
+      for (const fileLine of fileLines) {
+        const importHead = fileLine.match(/\/\/ Make sure to include/);
+        const importComment = fileLine.match(/\/\/ import /);
+        if (!importHead && !importComment) {
+          newFileLines.push(fileLine);
+        }
+        const tagStartParts = fileLine.match(/\/\/ \[START (.+)\]/);
+        if (tagStartParts) {
+          const fnName = underscoreToCamelCase(tagStartParts[1]);
+          if (functions[fnName].requiredImports) {
+            newFileLines.push(`  // Make sure to include these imports:`);
+            for (const importPath in functions[fnName].requiredImports) {
+              const symbols = Array.from(
+                functions[fnName].requiredImports[importPath],
+              );
+              newFileLines.push(
+                `  // import { ${symbols.join(", ")} } from "${importPath}";`,
+              );
+            }
+          }
+        }
+      }
+      fs.writeFileSync(join(samplesDir, filename), newFileLines.join("\n"));
+    }
+  }
+}
+
+function underscoreToCamelCase(underscoreName) {
+  return underscoreName
+    .split("_")
+    .map((part, i) =>
+      i === 0 ? part : part.charAt(0).toUpperCase() + part.slice(1),
+    )
+    .join("");
+}
+
+insertImportComments();

From 8eff40abdf4344f4e413caaee70c1cc414f85bdd Mon Sep 17 00:00:00 2001
From: Christina Holland
Date: Fri, 12 Jul 2024 10:15:07 -0700
Subject: [PATCH 2/4] Update count_tokens to match python changes

---
 samples/count_tokens.js | 218 +++++++++++++++++++++++++++-------------
 1 file changed, 149 insertions(+), 69 deletions(-)

diff --git a/samples/count_tokens.js b/samples/count_tokens.js
index d93a4c2d..976aec86 100644
--- a/samples/count_tokens.js
+++ b/samples/count_tokens.js
@@ -45,13 +45,15 @@ async function tokensTextOnly() {
 
   console.log(countResult.totalTokens); // 11
   console.log(countResult.contentTokens[0]); // { partTokens: [ 10 ], roleTokens: 1 }
-
-  // Retrieve token count data (including a count of tokens in response) after
-  // text generation.
   const generateResult = await model.generateContent(
     "The quick brown fox jumps over the lazy dog.",
   );
+
+  // On the response for `generateContent`, use `usageMetadata`
+  // to get separate input and output token counts
+  // (`promptTokenCount` and `candidatesTokenCount`, respectively),
+  // as well as the combined token count (`totalTokenCount`).
   console.log(generateResult.response.usageMetadata);
   // { promptTokenCount: 11, candidatesTokenCount: 131, totalTokenCount: 142 }
   // [END tokens_text_only]
 }
 
@@ -65,10 +67,35 @@ async function tokensChat() {
   const model = genAI.getGenerativeModel({
     model: "gemini-1.5-flash",
   });
-  const chat = model.startChat();
-  const result = await chat.sendMessage("Hi, my name is Bob.");
-  console.log(result.response.usageMetadata);
-  // { promptTokenCount: 8, candidatesTokenCount: 20, totalTokenCount: 28 }
+
+  const chat = model.startChat({
+    history: [
+      {
+        role: "user",
+        parts: [{ text: "Hi my name is Bob" }],
+      },
+      {
+        role: "model",
+        parts: [{ text: "Hi Bob!" }],
+      },
+    ],
+  });
+
+  const countResult = await model.countTokens({
+    generateContentRequest: { contents: await chat.getHistory() },
+  });
+  console.log(countResult.totalTokens); // 10
+
+  const chatResult = await chat.sendMessage(
+    "In one sentence, explain how a computer works to a young child.",
+  );
+
+  // On the response for `sendMessage`, use `usageMetadata`
+  // to get separate input and output token counts
+  // (`promptTokenCount` and `candidatesTokenCount`, respectively),
+  // as well as the combined token count (`totalTokenCount`).
+  console.log(chatResult.response.usageMetadata);
+  // { promptTokenCount: 25, candidatesTokenCount: 22, totalTokenCount: 47 }
   // [END tokens_chat]
 }
 
@@ -95,11 +122,23 @@ async function tokensMultimodalImageInline() {
     "image/jpeg",
   );
 
-  const result = await model.countTokens([
-    "Tell me about this image.",
-    imagePart,
-  ]);
-  console.log(result.totalTokens);
+  const prompt = "Tell me about this image.";
+
+  // Call `countTokens` to get the input token count
+  // of the combined text and file (`totalTokens`).
+  // An image's display or file size does not affect its token count.
+  // Optionally, you can call `countTokens` for the text and file separately.
+  const countResult = await model.countTokens([prompt, imagePart]);
+  console.log(countResult.totalTokens); // 265
+
+  const generateResult = await model.generateContent([prompt, imagePart]);
+
+  // On the response for `generateContent`, use `usageMetadata`
+  // to get separate input and output token counts
+  // (`promptTokenCount` and `candidatesTokenCount`, respectively),
+  // as well as the combined token count (`totalTokenCount`).
+  console.log(generateResult.response.usageMetadata);
+  // { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 }
   // [END tokens_multimodal_image_inline]
 }
 
@@ -127,13 +166,26 @@ async function tokensMultimodalImageFileApi() {
     model: "gemini-1.5-flash",
   });
 
-  const result = await model.countTokens([
-    "Tell me about this image.",
-    imagePart,
-  ]);
+  const prompt = "Tell me about this image.";
+
+  // Call `countTokens` to get the input token count
+  // of the combined text and file (`totalTokens`).
+  // An image's display or file size does not affect its token count.
+  // Optionally, you can call `countTokens` for the text and file separately.
+  const countResult = await model.countTokens([prompt, imagePart]);
+
+  console.log(countResult.totalTokens); // 265
 
-  console.log(result.totalTokens);
+  const generateResult = await model.generateContent([prompt, imagePart]);
+
+  // On the response for `generateContent`, use `usageMetadata`
+  // to get separate input and output token counts
+  // (`promptTokenCount` and `candidatesTokenCount`, respectively),
+  // as well as the combined token count (`totalTokenCount`).
+  console.log(generateResult.response.usageMetadata);
+  // { promptTokenCount: 265, candidatesTokenCount: 157, totalTokenCount: 422 }
   // [END tokens_multimodal_image_file_api]
+  await fileManager.deleteFile(uploadResult.file.name);
 }
 
 async function tokensMultimodalVideoAudioFileApi() {
@@ -143,45 +195,28 @@ async function tokensMultimodalVideoAudioFileApi() {
   // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
-  function waitForProcessing(fileName) {
-    return new Promise(async (resolve, reject) => {
-      let file = await fileManager.getFile(fileName);
-      while (file.state === FileState.PROCESSING) {
-        process.stdout.write(".");
-        // Sleep for 10 seconds
-        await new Promise((resolve) => setTimeout(resolve, 10_000));
-        // Fetch the file from the API again
-        file = await fileManager.getFile(fileName);
-      }
-
-      if (file.state === FileState.FAILED) {
-        reject(new Error("Video processing failed."));
-      }
-      resolve();
-    });
-  }
-
-  const uploadAudioResult = await fileManager.uploadFile(
-    `${mediaPath}/samplesmall.mp3`,
-    { mimeType: "audio/mp3" },
-  );
+  function waitForProcessing(fileName) {}
 
   const uploadVideoResult = await fileManager.uploadFile(
     `${mediaPath}/Big_Buck_Bunny.mp4`,
     { mimeType: "video/mp4" },
   );
 
-  await Promise.all([
-    waitForProcessing(uploadAudioResult.file.name),
-    waitForProcessing(uploadVideoResult.file.name),
-  ]);
+  let file = await fileManager.getFile(uploadVideoResult.file.name);
+  process.stdout.write("processing video");
+  while (file.state === FileState.PROCESSING) {
+    process.stdout.write(".");
+    // Sleep for 10 seconds
+    await new Promise((resolve) => setTimeout(resolve, 10_000));
+    // Fetch the file from the API again
+    file = await fileManager.getFile(uploadVideoResult.file.name);
+  }
 
-  const audioPart = {
-    fileData: {
-      fileUri: uploadAudioResult.file.uri,
-      mimeType: uploadAudioResult.file.mimeType,
-    },
-  };
+  if (file.state === FileState.FAILED) {
+    throw new Error("Video processing failed.");
+  } else {
+    process.stdout.write("\n");
+  }
 
   const videoPart = {
     fileData: {
@@ -195,14 +230,26 @@ async function tokensMultimodalVideoAudioFileApi() {
     model: "gemini-1.5-flash",
   });
 
-  const result = await model.countTokens([
-    "Tell me about this audio and video.",
-    audioPart,
-    videoPart,
-  ]);
+  const prompt = "Tell me about this video.";
+
+  // Call `countTokens` to get the input token count
+  // of the combined text and file (`totalTokens`).
+  // A video or audio file's display or file size does not affect its token count.
+  // Optionally, you can call `countTokens` for the text and file separately.
+  const countResult = await model.countTokens([prompt, videoPart]);
+
+  console.log(countResult.totalTokens); // 302
 
-  console.log(result.totalTokens);
+  const generateResult = await model.generateContent([prompt, videoPart]);
+
+  // On the response for `generateContent`, use `usageMetadata`
+  // to get separate input and output token counts
+  // (`promptTokenCount` and `candidatesTokenCount`, respectively),
+  // as well as the combined token count (`totalTokenCount`).
+  console.log(generateResult.response.usageMetadata);
+  // { promptTokenCount: 302, candidatesTokenCount: 46, totalTokenCount: 348 }
   // [END tokens_multimodal_video_audio_file_api]
+  await fileManager.deleteFile(uploadVideoResult.file.name);
 }
 
 async function tokensCachedContent() {
@@ -210,13 +257,14 @@ async function tokensCachedContent() {
   // Make sure to include these imports:
   // import { GoogleAICacheManager } from "@google/generative-ai/server";
   // import { GoogleGenerativeAI } from "@google/generative-ai";
-  // Generate a very long string
-  let longContentString = "";
-  for (let i = 0; i < 32001; i++) {
-    longContentString += "Purple cats drink lemonade.";
-    longContentString += i % 8 === 7 ? "\n" : " ";
-  }
 
+  // Upload large text file.
+  const fileManager = new GoogleAIFileManager(process.env.API_KEY);
+  const uploadResult = await fileManager.uploadFile(`${mediaPath}/a11.txt`, {
+    mimeType: "text/plain",
+  });
+
+  // Create a cache that uses the uploaded file.
   const cacheManager = new GoogleAICacheManager(process.env.API_KEY);
   const cacheResult = await cacheManager.create({
     ttlSeconds: 600,
@@ -224,7 +272,18 @@ async function tokensCachedContent() {
     contents: [
       {
         role: "user",
-        parts: [{ text: longContentString }],
+        parts: [{ text: "Here's the Apollo 11 transcript:" }],
+      },
+      {
+        role: "user",
+        parts: [
+          {
+            fileData: {
+              fileUri: uploadResult.file.uri,
+              mimeType: uploadResult.file.mimeType,
+            },
+          },
+        ],
       },
     ],
   });
@@ -234,16 +293,33 @@ async function tokensCachedContent() {
     model: "models/gemini-1.5-flash",
   });
 
+  // Call `countTokens` to get the input token count
+  // of the combined text and file (`totalTokens`).
   const result = await model.countTokens({
     generateContentRequest: {
      contents: [
-        { role: "user", parts: [{ text: "What do purple cats drink?" }] },
+        {
+          role: "user",
+          parts: [{ text: "Please give a short summary of this file." }],
+        },
      ],
      cachedContent: cacheResult.name,
     },
   });
 
-  console.log(result.totalTokens);
+  console.log(result.totalTokens); // 10
+
+  const generateResult = await model.generateContent(
+    "Please give a short summary of this file.",
+  );
+
+  // On the response for `generateContent`, use `usageMetadata`
+  // to get separate input and output token counts
+  // (`promptTokenCount` and `candidatesTokenCount`, respectively),
+  // as well as the combined token count (`totalTokenCount`).
+  console.log(generateResult.response.usageMetadata);
+  // { promptTokenCount: 10, candidatesTokenCount: 31, totalTokenCount: 41 }
+
+  await cacheManager.delete(cacheResult.name);
   // [END tokens_cached_content]
 }
 
@@ -267,15 +343,15 @@ async function tokensSystemInstruction() {
      ],
      systemInstruction: {
        role: "system",
-        parts: [{ text: "Talk like a pirate!" }],
+        parts: [{ text: "You are a cat. Your name is Neko." }],
      },
    },
   });
 
   console.log(result);
   // {
-  //   totalTokens: 17,
-  //   systemInstructionsTokens: { partTokens: [ 5 ], roleTokens: 1 },
+  //   totalTokens: 23,
+  //   systemInstructionsTokens: { partTokens: [ 11 ], roleTokens: 1 },
   //   contentTokens: [ { partTokens: [Array], roleTokens: 1 } ]
   // }
   // [END tokens_system_instruction]
 
@@ -302,7 +378,11 @@ async function tokensTools() {
    contents: [
      {
        role: "user",
-        parts: [{ text: "The quick brown fox jumps over the lazy dog." }],
+        parts: [
+          {
+            text: "I have 57 cats, each owns 44 mittens, how many mittens is that in total?",
+          },
+        ],
      },
    ],
    tools: [{ functionDeclarations }],
@@ -311,7 +391,7 @@ async function tokensTools() {
 
   console.log(result);
   // {
-  //   totalTokens: 87,
+  //   totalTokens: 99,
   //   systemInstructionsTokens: {},
   //   contentTokens: [ { partTokens: [Array], roleTokens: 1 } ],
   //   toolTokens: [ { functionDeclarationTokens: [Array] } ]

From f72b3b0caf8d6db53667a5f8c735e9c74f184602 Mon Sep 17 00:00:00 2001
From: Christina Holland
Date: Fri, 12 Jul 2024 10:16:00 -0700
Subject: [PATCH 3/4] update import comments

---
 samples/count_tokens.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/count_tokens.js b/samples/count_tokens.js
index 976aec86..ed7ffbc6 100644
--- a/samples/count_tokens.js
+++ b/samples/count_tokens.js
@@ -255,7 +255,7 @@ async function tokensMultimodalVideoAudioFileApi() {
 async function tokensCachedContent() {
   // [START tokens_cached_content]
   // Make sure to include these imports:
-  // import { GoogleAICacheManager } from "@google/generative-ai/server";
+  // import { GoogleAIFileManager, GoogleAICacheManager } from "@google/generative-ai/server";
   // import { GoogleGenerativeAI } from "@google/generative-ai";
 
   // Upload large text file.

From 81729da045dfa2d374132167b3dae3eda448f7c7 Mon Sep 17 00:00:00 2001
From: Christina Holland
Date: Fri, 12 Jul 2024 13:10:23 -0700
Subject: [PATCH 4/4] remove empty function

---
 samples/count_tokens.js | 2 --
 1 file changed, 2 deletions(-)

diff --git a/samples/count_tokens.js b/samples/count_tokens.js
index ed7ffbc6..447f76ea 100644
--- a/samples/count_tokens.js
+++ b/samples/count_tokens.js
@@ -195,8 +195,6 @@ async function tokensMultimodalVideoAudioFileApi() {
   // import { GoogleGenerativeAI } from "@google/generative-ai";
   const fileManager = new GoogleAIFileManager(process.env.API_KEY);
 
-  function waitForProcessing(fileName) {}
-
   const uploadVideoResult = await fileManager.uploadFile(
     `${mediaPath}/Big_Buck_Bunny.mp4`,
     { mimeType: "video/mp4" },