diff --git a/.changeset/tough-beds-serve.md b/.changeset/tough-beds-serve.md new file mode 100644 index 00000000..2b35de93 --- /dev/null +++ b/.changeset/tough-beds-serve.md @@ -0,0 +1,5 @@ +--- +"@google/generative-ai": minor +--- + +Adds `SingleRequestOptions` with `AbortSignal` support to most of the asynchronous methods of `GenerativeModel`, `GoogleAIFileManager` and `ChatSession`. diff --git a/common/api-review/generative-ai-server.api.md b/common/api-review/generative-ai-server.api.md index ae98439c..456c5273 100644 --- a/common/api-review/generative-ai-server.api.md +++ b/common/api-review/generative-ai-server.api.md @@ -355,8 +355,9 @@ export class GoogleAIFileManager { // (undocumented) apiKey: string; deleteFile(fileId: string): Promise; - getFile(fileId: string): Promise; - listFiles(listParams?: ListParams): Promise; + getFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; + // Warning: (ae-forgotten-export) The symbol "SingleRequestOptions" needs to be exported by the entry point index.d.ts + listFiles(listParams?: ListParams, requestOptions?: SingleRequestOptions): Promise; uploadFile(filePath: string, fileMetadata: FileMetadata): Promise; } diff --git a/common/api-review/generative-ai.api.md b/common/api-review/generative-ai.api.md index 83f151fc..2275a977 100644 --- a/common/api-review/generative-ai.api.md +++ b/common/api-review/generative-ai.api.md @@ -62,16 +62,14 @@ export interface CachedContentBase { // @public export class ChatSession { - constructor(apiKey: string, model: string, params?: StartChatParams, requestOptions?: RequestOptions); + constructor(apiKey: string, model: string, params?: StartChatParams, _requestOptions?: RequestOptions); getHistory(): Promise; // (undocumented) model: string; // (undocumented) params?: StartChatParams; - // (undocumented) - requestOptions?: RequestOptions; - sendMessage(request: string | Array): Promise; - sendMessageStream(request: string | Array): Promise; + sendMessage(request: 
string | Array, requestOptions?: SingleRequestOptions): Promise; + sendMessageStream(request: string | Array, requestOptions?: SingleRequestOptions): Promise; } // @public @@ -462,16 +460,16 @@ export interface GenerativeContentBlob { // @public export class GenerativeModel { - constructor(apiKey: string, modelParams: ModelParams, requestOptions?: RequestOptions); + constructor(apiKey: string, modelParams: ModelParams, _requestOptions?: RequestOptions); // (undocumented) apiKey: string; - batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest): Promise; + batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest, requestOptions?: SingleRequestOptions): Promise; // (undocumented) cachedContent: CachedContent; - countTokens(request: CountTokensRequest | string | Array): Promise; - embedContent(request: EmbedContentRequest | string | Array): Promise; - generateContent(request: GenerateContentRequest | string | Array): Promise; - generateContentStream(request: GenerateContentRequest | string | Array): Promise; + countTokens(request: CountTokensRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; + embedContent(request: EmbedContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; + generateContent(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; + generateContentStream(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; // (undocumented) generationConfig: GenerationConfig; // (undocumented) @@ -667,6 +665,11 @@ export interface Schema { type?: FunctionDeclarationSchemaType; } +// @public +export interface SingleRequestOptions extends RequestOptions { + signal?: AbortSignal; +} + // @public export interface StartChatParams extends BaseParams { cachedContent?: string; diff --git a/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md 
b/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md index e5ecfdad..0fae42e6 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.deletefile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.deleteFile() method -Delete file with given ID +Delete file with given ID. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. **Signature:** ```typescript -deleteFile(fileId: string): Promise; +deleteFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ deleteFile(fileId: string): Promise; | Parameter | Type | Description | | --- | --- | --- | | fileId | string | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/files/generative-ai.googleaifilemanager.getfile.md b/docs/reference/files/generative-ai.googleaifilemanager.getfile.md index 613d6b6c..a8a08472 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.getfile.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.getfile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.getFile() method -Get metadata for file with given ID +Get metadata for file with given ID. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. 
**Signature:** ```typescript -getFile(fileId: string): Promise; +getFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ getFile(fileId: string): Promise; | Parameter | Type | Description | | --- | --- | --- | | fileId | string | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md b/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md index ef116fb6..1c229fbb 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.listfiles.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.listFiles() method -List all uploaded files +List all uploaded files. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. 
**Signature:** ```typescript -listFiles(listParams?: ListParams): Promise; +listFiles(listParams?: ListParams, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ listFiles(listParams?: ListParams): Promise; | Parameter | Type | Description | | --- | --- | --- | | listParams | [ListParams](./generative-ai.listparams.md) | _(Optional)_ | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/files/generative-ai.googleaifilemanager.md b/docs/reference/files/generative-ai.googleaifilemanager.md index 655c8d0c..e0f3144c 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.md @@ -28,8 +28,8 @@ export declare class GoogleAIFileManager | Method | Modifiers | Description | | --- | --- | --- | -| [deleteFile(fileId)](./generative-ai.googleaifilemanager.deletefile.md) | | Delete file with given ID | -| [getFile(fileId)](./generative-ai.googleaifilemanager.getfile.md) | | Get metadata for file with given ID | -| [listFiles(listParams)](./generative-ai.googleaifilemanager.listfiles.md) | | List all uploaded files | -| [uploadFile(filePath, fileMetadata)](./generative-ai.googleaifilemanager.uploadfile.md) | | Upload a file | +| [deleteFile(fileId, requestOptions)](./generative-ai.googleaifilemanager.deletefile.md) | |

Delete file with given ID.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [getFile(fileId, requestOptions)](./generative-ai.googleaifilemanager.getfile.md) | |

Get metadata for file with given ID.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [listFiles(listParams, requestOptions)](./generative-ai.googleaifilemanager.listfiles.md) | |

List all uploaded files.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [uploadFile(filePath, fileMetadata, requestOptions)](./generative-ai.googleaifilemanager.uploadfile.md) | |

Upload a file.

Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| diff --git a/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md b/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md index 90648e90..0b29cb68 100644 --- a/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md +++ b/docs/reference/files/generative-ai.googleaifilemanager.uploadfile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.uploadFile() method -Upload a file +Upload a file. + +Any fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. **Signature:** ```typescript -uploadFile(filePath: string, fileMetadata: FileMetadata): Promise; +uploadFile(filePath: string, fileMetadata: FileMetadata, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -18,6 +20,7 @@ uploadFile(filePath: string, fileMetadata: FileMetadata): Promise + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) + +## SingleRequestOptions interface + +Params passed to atomic asynchronous operations. + +**Signature:** + +```typescript +export interface SingleRequestOptions extends RequestOptions +``` +**Extends:** [RequestOptions](./generative-ai.requestoptions.md) + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [signal?](./generative-ai.singlerequestoptions.signal.md) | | AbortSignal | _(Optional)_ An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided, and if the timeout occurs first. 
| + diff --git a/docs/reference/files/generative-ai.singlerequestoptions.signal.md b/docs/reference/files/generative-ai.singlerequestoptions.signal.md new file mode 100644 index 00000000..9f0672b7 --- /dev/null +++ b/docs/reference/files/generative-ai.singlerequestoptions.signal.md @@ -0,0 +1,13 @@ + + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) > [signal](./generative-ai.singlerequestoptions.signal.md) + +## SingleRequestOptions.signal property + +An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided, and if the timeout occurs first. + +**Signature:** + +```typescript +signal?: AbortSignal; +``` diff --git a/docs/reference/main/generative-ai.chatsession._constructor_.md b/docs/reference/main/generative-ai.chatsession._constructor_.md index 918fd310..8bd24a6e 100644 --- a/docs/reference/main/generative-ai.chatsession._constructor_.md +++ b/docs/reference/main/generative-ai.chatsession._constructor_.md @@ -9,7 +9,7 @@ Constructs a new instance of the `ChatSession` class **Signature:** ```typescript -constructor(apiKey: string, model: string, params?: StartChatParams, requestOptions?: RequestOptions); +constructor(apiKey: string, model: string, params?: StartChatParams, _requestOptions?: RequestOptions); ``` ## Parameters @@ -19,5 +19,5 @@ constructor(apiKey: string, model: string, params?: StartChatParams, requestOpti | apiKey | string | | | model | string | | | params | [StartChatParams](./generative-ai.startchatparams.md) | _(Optional)_ | -| requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | +| \_requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | diff --git a/docs/reference/main/generative-ai.chatsession.md b/docs/reference/main/generative-ai.chatsession.md index 948cd4cd..360a0ef9 100644 --- 
a/docs/reference/main/generative-ai.chatsession.md +++ b/docs/reference/main/generative-ai.chatsession.md @@ -16,7 +16,7 @@ export declare class ChatSession | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(apiKey, model, params, requestOptions)](./generative-ai.chatsession._constructor_.md) | | Constructs a new instance of the ChatSession class | +| [(constructor)(apiKey, model, params, \_requestOptions)](./generative-ai.chatsession._constructor_.md) | | Constructs a new instance of the ChatSession class | ## Properties @@ -24,13 +24,12 @@ export declare class ChatSession | --- | --- | --- | --- | | [model](./generative-ai.chatsession.model.md) | | string | | | [params?](./generative-ai.chatsession.params.md) | | [StartChatParams](./generative-ai.startchatparams.md) | _(Optional)_ | -| [requestOptions?](./generative-ai.chatsession.requestoptions.md) | | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | ## Methods | Method | Modifiers | Description | | --- | --- | --- | | [getHistory()](./generative-ai.chatsession.gethistory.md) | | Gets the chat history so far. Blocked prompts are not added to history. Blocked candidates are not added to history, nor are the prompts that generated them. | -| [sendMessage(request)](./generative-ai.chatsession.sendmessage.md) | | Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md) | -| [sendMessageStream(request)](./generative-ai.chatsession.sendmessagestream.md) | | Sends a chat message and receives the response as a [GenerateContentStreamResult](./generative-ai.generatecontentstreamresult.md) containing an iterable stream and a response promise. | +| [sendMessage(request, requestOptions)](./generative-ai.chatsession.sendmessage.md) | |

Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md).

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [ChatSession](./generative-ai.chatsession.md) initialization.

| +| [sendMessageStream(request, requestOptions)](./generative-ai.chatsession.sendmessagestream.md) | |

Sends a chat message and receives the response as a [GenerateContentStreamResult](./generative-ai.generatecontentstreamresult.md) containing an iterable stream and a response promise.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [ChatSession](./generative-ai.chatsession.md) initialization.

| diff --git a/docs/reference/main/generative-ai.chatsession.requestoptions.md b/docs/reference/main/generative-ai.chatsession.requestoptions.md deleted file mode 100644 index bc7402c4..00000000 --- a/docs/reference/main/generative-ai.chatsession.requestoptions.md +++ /dev/null @@ -1,11 +0,0 @@ - - -[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [ChatSession](./generative-ai.chatsession.md) > [requestOptions](./generative-ai.chatsession.requestoptions.md) - -## ChatSession.requestOptions property - -**Signature:** - -```typescript -requestOptions?: RequestOptions; -``` diff --git a/docs/reference/main/generative-ai.chatsession.sendmessage.md b/docs/reference/main/generative-ai.chatsession.sendmessage.md index 08a5ff6c..dfa66713 100644 --- a/docs/reference/main/generative-ai.chatsession.sendmessage.md +++ b/docs/reference/main/generative-ai.chatsession.sendmessage.md @@ -4,12 +4,14 @@ ## ChatSession.sendMessage() method -Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md) +Sends a chat message and receives a non-streaming [GenerateContentResult](./generative-ai.generatecontentresult.md). + +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
**Signature:** ```typescript -sendMessage(request: string | Array): Promise; +sendMessage(request: string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ sendMessage(request: string | Array): Promise> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.chatsession.sendmessagestream.md b/docs/reference/main/generative-ai.chatsession.sendmessagestream.md index e6f7cbe3..ac409f77 100644 --- a/docs/reference/main/generative-ai.chatsession.sendmessagestream.md +++ b/docs/reference/main/generative-ai.chatsession.sendmessagestream.md @@ -6,10 +6,12 @@ Sends a chat message and receives the response as a [GenerateContentStreamResult](./generative-ai.generatecontentstreamresult.md) containing an iterable stream and a response promise. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -sendMessageStream(request: string | Array): Promise; +sendMessageStream(request: string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ sendMessageStream(request: string | Array): Promise> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel._constructor_.md b/docs/reference/main/generative-ai.generativemodel._constructor_.md index 47030ab7..1a410e65 100644 --- a/docs/reference/main/generative-ai.generativemodel._constructor_.md +++ b/docs/reference/main/generative-ai.generativemodel._constructor_.md @@ -9,7 +9,7 @@ Constructs a new instance of the `GenerativeModel` class **Signature:** ```typescript -constructor(apiKey: string, modelParams: ModelParams, requestOptions?: RequestOptions); +constructor(apiKey: string, modelParams: ModelParams, _requestOptions?: RequestOptions); ``` ## Parameters @@ -18,5 +18,5 @@ constructor(apiKey: string, modelParams: ModelParams, requestOptions?: RequestOp | --- | --- | --- | | apiKey | string | | | modelParams | [ModelParams](./generative-ai.modelparams.md) | | -| requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | +| \_requestOptions | [RequestOptions](./generative-ai.requestoptions.md) | _(Optional)_ | diff --git a/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md b/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md index cd3ccadc..76d31f98 100644 --- a/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md +++ b/docs/reference/main/generative-ai.generativemodel.batchembedcontents.md @@ -6,10 +6,12 @@ Embeds an array of [EmbedContentRequest](./generative-ai.embedcontentrequest.md)s. 
+Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. + **Signature:** ```typescript -batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest): Promise; +batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ batchEmbedContents(batchEmbedContentRequest: BatchEmbedContentsRequest): Promise | Parameter | Type | Description | | --- | --- | --- | | batchEmbedContentRequest | [BatchEmbedContentsRequest](./generative-ai.batchembedcontentsrequest.md) | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.counttokens.md b/docs/reference/main/generative-ai.generativemodel.counttokens.md index 1e3b982d..d81236a2 100644 --- a/docs/reference/main/generative-ai.generativemodel.counttokens.md +++ b/docs/reference/main/generative-ai.generativemodel.counttokens.md @@ -6,10 +6,12 @@ Counts the tokens in the provided request. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -countTokens(request: CountTokensRequest | string | Array): Promise; +countTokens(request: CountTokensRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ countTokens(request: CountTokensRequest | string | Array): Promis | Parameter | Type | Description | | --- | --- | --- | | request | [CountTokensRequest](./generative-ai.counttokensrequest.md) \| string \| Array<string \| [Part](./generative-ai.part.md)> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.embedcontent.md b/docs/reference/main/generative-ai.generativemodel.embedcontent.md index 445d130d..8c2105d8 100644 --- a/docs/reference/main/generative-ai.generativemodel.embedcontent.md +++ b/docs/reference/main/generative-ai.generativemodel.embedcontent.md @@ -6,10 +6,12 @@ Embeds the provided content. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -embedContent(request: EmbedContentRequest | string | Array): Promise; +embedContent(request: EmbedContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ embedContent(request: EmbedContentRequest | string | Array): Prom | Parameter | Type | Description | | --- | --- | --- | | request | [EmbedContentRequest](./generative-ai.embedcontentrequest.md) \| string \| Array<string \| [Part](./generative-ai.part.md)> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.generatecontent.md b/docs/reference/main/generative-ai.generativemodel.generatecontent.md index 86d0ac70..8cc74496 100644 --- a/docs/reference/main/generative-ai.generativemodel.generatecontent.md +++ b/docs/reference/main/generative-ai.generativemodel.generatecontent.md @@ -6,10 +6,12 @@ Makes a single non-streaming call to the model and returns an object containing a single [GenerateContentResponse](./generative-ai.generatecontentresponse.md). +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -generateContent(request: GenerateContentRequest | string | Array): Promise; +generateContent(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ generateContent(request: GenerateContentRequest | string | Array) | Parameter | Type | Description | | --- | --- | --- | | request | [GenerateContentRequest](./generative-ai.generatecontentrequest.md) \| string \| Array<string \| [Part](./generative-ai.part.md)> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md b/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md index 5288ad82..6cfd125c 100644 --- a/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md +++ b/docs/reference/main/generative-ai.generativemodel.generatecontentstream.md @@ -6,10 +6,12 @@ Makes a single streaming call to the model and returns an object containing an iterable stream that iterates over all chunks in the streaming response as well as a promise that returns the final aggregated response. +Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the initialization. 
+ **Signature:** ```typescript -generateContentStream(request: GenerateContentRequest | string | Array): Promise; +generateContentStream(request: GenerateContentRequest | string | Array, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ generateContentStream(request: GenerateContentRequest | string | Array> | | +| requestOptions | [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | _(Optional)_ | **Returns:** diff --git a/docs/reference/main/generative-ai.generativemodel.md b/docs/reference/main/generative-ai.generativemodel.md index f78e6b8a..c822694c 100644 --- a/docs/reference/main/generative-ai.generativemodel.md +++ b/docs/reference/main/generative-ai.generativemodel.md @@ -16,7 +16,7 @@ export declare class GenerativeModel | Constructor | Modifiers | Description | | --- | --- | --- | -| [(constructor)(apiKey, modelParams, requestOptions)](./generative-ai.generativemodel._constructor_.md) | | Constructs a new instance of the GenerativeModel class | +| [(constructor)(apiKey, modelParams, \_requestOptions)](./generative-ai.generativemodel._constructor_.md) | | Constructs a new instance of the GenerativeModel class | ## Properties @@ -36,10 +36,10 @@ export declare class GenerativeModel | Method | Modifiers | Description | | --- | --- | --- | -| [batchEmbedContents(batchEmbedContentRequest)](./generative-ai.generativemodel.batchembedcontents.md) | | Embeds an array of [EmbedContentRequest](./generative-ai.embedcontentrequest.md)s. | -| [countTokens(request)](./generative-ai.generativemodel.counttokens.md) | | Counts the tokens in the provided request. | -| [embedContent(request)](./generative-ai.generativemodel.embedcontent.md) | | Embeds the provided content. | -| [generateContent(request)](./generative-ai.generativemodel.generatecontent.md) | | Makes a single non-streaming call to the model and returns an object containing a single [GenerateContentResponse](./generative-ai.generatecontentresponse.md). 
| -| [generateContentStream(request)](./generative-ai.generativemodel.generatecontentstream.md) | | Makes a single streaming call to the model and returns an object containing an iterable stream that iterates over all chunks in the streaming response as well as a promise that returns the final aggregated response. | +| [batchEmbedContents(batchEmbedContentRequest, requestOptions)](./generative-ai.generativemodel.batchembedcontents.md) | |

Embeds an array of [EmbedContentRequest](./generative-ai.embedcontentrequest.md)s.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [countTokens(request, requestOptions)](./generative-ai.generativemodel.counttokens.md) | |

Counts the tokens in the provided request.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [embedContent(request, requestOptions)](./generative-ai.generativemodel.embedcontent.md) | |

Embeds the provided content.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [generateContent(request, requestOptions)](./generative-ai.generativemodel.generatecontent.md) | |

Makes a single non-streaming call to the model and returns an object containing a single [GenerateContentResponse](./generative-ai.generatecontentresponse.md).

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| +| [generateContentStream(request, requestOptions)](./generative-ai.generativemodel.generatecontentstream.md) | |

Makes a single streaming call to the model and returns an object containing an iterable stream that iterates over all chunks in the streaming response as well as a promise that returns the final aggregated response.

Fields set in the optional [SingleRequestOptions](./generative-ai.singlerequestoptions.md) parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GenerativeModel](./generative-ai.generativemodel.md) initialization.

| | [startChat(startChatParams)](./generative-ai.generativemodel.startchat.md) | | Gets a new [ChatSession](./generative-ai.chatsession.md) instance which can be used for multi-turn chats. | diff --git a/docs/reference/main/generative-ai.md b/docs/reference/main/generative-ai.md index b4a67bb9..8c02731c 100644 --- a/docs/reference/main/generative-ai.md +++ b/docs/reference/main/generative-ai.md @@ -81,6 +81,7 @@ | [SafetyRating](./generative-ai.safetyrating.md) | A safety rating associated with a [GenerateContentCandidate](./generative-ai.generatecontentcandidate.md) | | [SafetySetting](./generative-ai.safetysetting.md) | Safety setting that can be sent as part of request parameters. | | [Schema](./generative-ai.schema.md) | Schema is used to define the format of input/output data. Represents a select subset of an OpenAPI 3.0 schema object. More fields may be added in the future as needed. | +| [SingleRequestOptions](./generative-ai.singlerequestoptions.md) | Params passed to atomic asynchronous operations. | | [StartChatParams](./generative-ai.startchatparams.md) | Params for [GenerativeModel.startChat()](./generative-ai.generativemodel.startchat.md). | | [TextPart](./generative-ai.textpart.md) | Content part interface if the part represents a text string. | | [ToolConfig](./generative-ai.toolconfig.md) | Tool config. This config is shared for all tools provided in the request. | diff --git a/docs/reference/main/generative-ai.singlerequestoptions.md b/docs/reference/main/generative-ai.singlerequestoptions.md new file mode 100644 index 00000000..4d9a23d3 --- /dev/null +++ b/docs/reference/main/generative-ai.singlerequestoptions.md @@ -0,0 +1,21 @@ + + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) + +## SingleRequestOptions interface + +Params passed to atomic asynchronous operations. 
+ +**Signature:** + +```typescript +export interface SingleRequestOptions extends RequestOptions +``` +**Extends:** [RequestOptions](./generative-ai.requestoptions.md) + +## Properties + +| Property | Modifiers | Type | Description | +| --- | --- | --- | --- | +| [signal?](./generative-ai.singlerequestoptions.signal.md) | | AbortSignal |

_(Optional)_ An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided.

NOTE: AbortSignal is a client-only operation. Using it to cancel an operation will not cancel the request in the service. You will still be charged usage for any applicable operations.

| + diff --git a/docs/reference/main/generative-ai.singlerequestoptions.signal.md b/docs/reference/main/generative-ai.singlerequestoptions.signal.md new file mode 100644 index 00000000..ac064709 --- /dev/null +++ b/docs/reference/main/generative-ai.singlerequestoptions.signal.md @@ -0,0 +1,15 @@ + + +[Home](./index.md) > [@google/generative-ai](./generative-ai.md) > [SingleRequestOptions](./generative-ai.singlerequestoptions.md) > [signal](./generative-ai.singlerequestoptions.signal.md) + +## SingleRequestOptions.signal property + +An object that may be used to abort asynchronous requests. The request may also be aborted due to the expiration of the timeout value, if provided. + +NOTE: AbortSignal is a client-only operation. Using it to cancel an operation will not cancel the request in the service. You will still be charged usage for any applicable operations. + +**Signature:** + +```typescript +signal?: AbortSignal; +``` diff --git a/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md b/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md index e5ecfdad..acaefb74 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.deletefile.md @@ -4,7 +4,7 @@ ## GoogleAIFileManager.deleteFile() method -Delete file with given ID +Delete file with given ID. **Signature:** diff --git a/docs/reference/server/generative-ai.googleaifilemanager.getfile.md b/docs/reference/server/generative-ai.googleaifilemanager.getfile.md index 613d6b6c..79f46753 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.getfile.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.getfile.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.getFile() method -Get metadata for file with given ID +Get metadata for file with given ID. 
+ +Any fields set in the optional parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. **Signature:** ```typescript -getFile(fileId: string): Promise; +getFile(fileId: string, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ getFile(fileId: string): Promise; | Parameter | Type | Description | | --- | --- | --- | | fileId | string | | +| requestOptions | SingleRequestOptions | _(Optional)_ | **Returns:** diff --git a/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md b/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md index ef116fb6..f8449bd0 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.listfiles.md @@ -4,12 +4,14 @@ ## GoogleAIFileManager.listFiles() method -List all uploaded files +List all uploaded files. + +Any fields set in the optional parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization. 
**Signature:** ```typescript -listFiles(listParams?: ListParams): Promise; +listFiles(listParams?: ListParams, requestOptions?: SingleRequestOptions): Promise; ``` ## Parameters @@ -17,6 +19,7 @@ listFiles(listParams?: ListParams): Promise; | Parameter | Type | Description | | --- | --- | --- | | listParams | [ListParams](./generative-ai.listparams.md) | _(Optional)_ | +| requestOptions | SingleRequestOptions | _(Optional)_ | **Returns:** diff --git a/docs/reference/server/generative-ai.googleaifilemanager.md b/docs/reference/server/generative-ai.googleaifilemanager.md index 655c8d0c..04d22232 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.md @@ -28,8 +28,8 @@ export declare class GoogleAIFileManager | Method | Modifiers | Description | | --- | --- | --- | -| [deleteFile(fileId)](./generative-ai.googleaifilemanager.deletefile.md) | | Delete file with given ID | -| [getFile(fileId)](./generative-ai.googleaifilemanager.getfile.md) | | Get metadata for file with given ID | -| [listFiles(listParams)](./generative-ai.googleaifilemanager.listfiles.md) | | List all uploaded files | -| [uploadFile(filePath, fileMetadata)](./generative-ai.googleaifilemanager.uploadfile.md) | | Upload a file | +| [deleteFile(fileId)](./generative-ai.googleaifilemanager.deletefile.md) | | Delete file with given ID. | +| [getFile(fileId, requestOptions)](./generative-ai.googleaifilemanager.getfile.md) | |

Get metadata for file with given ID.

Any fields set in the optional parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [listFiles(listParams, requestOptions)](./generative-ai.googleaifilemanager.listfiles.md) | |

List all uploaded files.

Any fields set in the optional parameter will take precedence over the [RequestOptions](./generative-ai.requestoptions.md) values provided at the time of the [GoogleAIFileManager](./generative-ai.googleaifilemanager.md) initialization.

| +| [uploadFile(filePath, fileMetadata)](./generative-ai.googleaifilemanager.uploadfile.md) | | Upload a file. | diff --git a/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md b/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md index 90648e90..71e4f76b 100644 --- a/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md +++ b/docs/reference/server/generative-ai.googleaifilemanager.uploadfile.md @@ -4,7 +4,7 @@ ## GoogleAIFileManager.uploadFile() method -Upload a file +Upload a file. **Signature:** diff --git a/packages/main/src/methods/chat-session.ts b/packages/main/src/methods/chat-session.ts index fd469bfe..2d1c7e6b 100644 --- a/packages/main/src/methods/chat-session.ts +++ b/packages/main/src/methods/chat-session.ts @@ -22,6 +22,7 @@ import { GenerateContentStreamResult, Part, RequestOptions, + SingleRequestOptions, StartChatParams, } from "../../types"; import { formatNewContent } from "../requests/request-helpers"; @@ -49,7 +50,7 @@ export class ChatSession { apiKey: string, public model: string, public params?: StartChatParams, - public requestOptions?: RequestOptions, + private _requestOptions: RequestOptions = {}, ) { this._apiKey = apiKey; if (params?.history) { @@ -70,10 +71,15 @@ export class ChatSession { /** * Sends a chat message and receives a non-streaming - * {@link GenerateContentResult} + * {@link GenerateContentResult}. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. 
*/ async sendMessage( request: string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { await this._sendPromise; const newContent = formatNewContent(request); @@ -86,6 +92,10 @@ export class ChatSession { cachedContent: this.params?.cachedContent, contents: [...this._history, newContent], }; + const chatSessionRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; let finalResult; // Add onto the chain. this._sendPromise = this._sendPromise @@ -94,7 +104,7 @@ export class ChatSession { this._apiKey, this.model, generateContentRequest, - this.requestOptions, + chatSessionRequestOptions, ), ) .then((result) => { @@ -128,9 +138,14 @@ export class ChatSession { * Sends a chat message and receives the response as a * {@link GenerateContentStreamResult} containing an iterable stream * and a response promise. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async sendMessageStream( request: string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { await this._sendPromise; const newContent = formatNewContent(request); @@ -143,11 +158,15 @@ export class ChatSession { cachedContent: this.params?.cachedContent, contents: [...this._history, newContent], }; + const chatSessionRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; const streamPromise = generateContentStream( this._apiKey, this.model, generateContentRequest, - this.requestOptions, + chatSessionRequestOptions, ); // Add onto the chain. 
diff --git a/packages/main/src/methods/count-tokens.ts b/packages/main/src/methods/count-tokens.ts index a8a38e93..49d5ba1c 100644 --- a/packages/main/src/methods/count-tokens.ts +++ b/packages/main/src/methods/count-tokens.ts @@ -18,7 +18,7 @@ import { CountTokensRequest, CountTokensResponse, - RequestOptions, + SingleRequestOptions, } from "../../types"; import { Task, makeModelRequest } from "../requests/request"; @@ -26,7 +26,7 @@ export async function countTokens( apiKey: string, model: string, params: CountTokensRequest, - requestOptions?: RequestOptions, + singleRequestOptions: SingleRequestOptions, ): Promise { const response = await makeModelRequest( model, @@ -34,7 +34,7 @@ export async function countTokens( apiKey, false, JSON.stringify(params), - requestOptions, + singleRequestOptions, ); return response.json(); } diff --git a/packages/main/src/methods/generate-content.ts b/packages/main/src/methods/generate-content.ts index 327cd96e..8c1e1393 100644 --- a/packages/main/src/methods/generate-content.ts +++ b/packages/main/src/methods/generate-content.ts @@ -20,7 +20,7 @@ import { GenerateContentResponse, GenerateContentResult, GenerateContentStreamResult, - RequestOptions, + SingleRequestOptions, } from "../../types"; import { Task, makeModelRequest } from "../requests/request"; import { addHelpers } from "../requests/response-helpers"; @@ -30,7 +30,7 @@ export async function generateContentStream( apiKey: string, model: string, params: GenerateContentRequest, - requestOptions?: RequestOptions, + requestOptions: SingleRequestOptions, ): Promise { const response = await makeModelRequest( model, @@ -47,7 +47,7 @@ export async function generateContent( apiKey: string, model: string, params: GenerateContentRequest, - requestOptions?: RequestOptions, + requestOptions?: SingleRequestOptions, ): Promise { const response = await makeModelRequest( model, diff --git a/packages/main/src/models/generative-model.ts b/packages/main/src/models/generative-model.ts index 
bbc17601..375d9ddf 100644 --- a/packages/main/src/models/generative-model.ts +++ b/packages/main/src/models/generative-model.ts @@ -36,6 +36,7 @@ import { Part, RequestOptions, SafetySetting, + SingleRequestOptions, StartChatParams, Tool, ToolConfig, @@ -67,7 +68,7 @@ export class GenerativeModel { constructor( public apiKey: string, modelParams: ModelParams, - requestOptions?: RequestOptions, + private _requestOptions: RequestOptions = {}, ) { if (modelParams.model.includes("/")) { // Models may be named "models/model-name" or "tunedModels/model-name" @@ -84,17 +85,25 @@ export class GenerativeModel { modelParams.systemInstruction, ); this.cachedContent = modelParams.cachedContent; - this.requestOptions = requestOptions || {}; } /** * Makes a single non-streaming call to the model * and returns an object containing a single {@link GenerateContentResponse}. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async generateContent( request: GenerateContentRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatGenerateContentInput(request); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return generateContent( this.apiKey, this.model, @@ -107,20 +116,29 @@ export class GenerativeModel { cachedContent: this.cachedContent?.name, ...formattedParams, }, - this.requestOptions, + generativeModelRequestOptions, ); } /** - * Makes a single streaming call to the model - * and returns an object containing an iterable stream that iterates - * over all chunks in the streaming response as well as - * a promise that returns the final aggregated response. 
+ * Makes a single streaming call to the model and returns an object + * containing an iterable stream that iterates over all chunks in the + * streaming response as well as a promise that returns the final + * aggregated response. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async generateContentStream( request: GenerateContentRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatGenerateContentInput(request); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return generateContentStream( this.apiKey, this.model, @@ -133,7 +151,7 @@ export class GenerativeModel { cachedContent: this.cachedContent?.name, ...formattedParams, }, - this.requestOptions, + generativeModelRequestOptions, ); } @@ -160,9 +178,14 @@ export class GenerativeModel { /** * Counts the tokens in the provided request. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async countTokens( request: CountTokensRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatCountTokensInput(request, { model: this.model, @@ -173,40 +196,62 @@ export class GenerativeModel { systemInstruction: this.systemInstruction, cachedContent: this.cachedContent, }); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return countTokens( this.apiKey, this.model, formattedParams, - this.requestOptions, + generativeModelRequestOptions, ); } /** * Embeds the provided content. 
+ * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async embedContent( request: EmbedContentRequest | string | Array, + requestOptions: SingleRequestOptions = {}, ): Promise { const formattedParams = formatEmbedContentInput(request); + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return embedContent( this.apiKey, this.model, formattedParams, - this.requestOptions, + generativeModelRequestOptions, ); } /** * Embeds an array of {@link EmbedContentRequest}s. + * + * Fields set in the optional {@link SingleRequestOptions} parameter will + * take precedence over the {@link RequestOptions} values provided at the + * time of the {@link GoogleAIFileManager} initialization. */ async batchEmbedContents( batchEmbedContentRequest: BatchEmbedContentsRequest, + requestOptions: SingleRequestOptions = {}, ): Promise { + const generativeModelRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; return batchEmbedContents( this.apiKey, this.model, batchEmbedContentRequest, - this.requestOptions, + generativeModelRequestOptions, ); } } diff --git a/packages/main/src/requests/request.ts b/packages/main/src/requests/request.ts index 9249aba0..0828b65a 100644 --- a/packages/main/src/requests/request.ts +++ b/packages/main/src/requests/request.ts @@ -15,7 +15,7 @@ * limitations under the License. 
*/ -import { RequestOptions } from "../../types"; +import { RequestOptions, SingleRequestOptions } from "../../types"; import { GoogleGenerativeAIError, GoogleGenerativeAIFetchError, @@ -116,7 +116,7 @@ export async function constructModelRequest( apiKey: string, stream: boolean, body: string, - requestOptions?: RequestOptions, + requestOptions: SingleRequestOptions, ): Promise<{ url: string; fetchOptions: RequestInit }> { const url = new RequestUrl(model, task, apiKey, stream, requestOptions); return { @@ -136,7 +136,7 @@ export async function makeModelRequest( apiKey: string, stream: boolean, body: string, - requestOptions?: RequestOptions, + requestOptions: SingleRequestOptions = {}, // Allows this to be stubbed for tests fetchFn = fetch, ): Promise { @@ -217,13 +217,19 @@ async function handleResponseNotOk( * @param requestOptions - The user-defined request options. * @returns The generated request options. */ -function buildFetchOptions(requestOptions?: RequestOptions): RequestInit { +function buildFetchOptions(requestOptions?: SingleRequestOptions): RequestInit { const fetchOptions = {} as RequestInit; - if (requestOptions?.timeout >= 0) { - const abortController = new AbortController(); - const signal = abortController.signal; - setTimeout(() => abortController.abort(), requestOptions.timeout); - fetchOptions.signal = signal; + if (requestOptions?.signal !== undefined || requestOptions?.timeout >= 0) { + const controller = new AbortController(); + if (requestOptions?.timeout >= 0) { + setTimeout(() => controller.abort(), requestOptions.timeout); + } + if (requestOptions?.signal) { + requestOptions.signal.addEventListener("abort", () => { + controller.abort(); + }); + } + fetchOptions.signal = controller.signal; } return fetchOptions; } diff --git a/packages/main/src/server/file-manager.ts b/packages/main/src/server/file-manager.ts index e839b8f7..c34abb2b 100644 --- a/packages/main/src/server/file-manager.ts +++ b/packages/main/src/server/file-manager.ts @@ 
-15,7 +15,7 @@ * limitations under the License. */ -import { RequestOptions } from "../../types"; +import { RequestOptions, SingleRequestOptions } from "../../types"; import { readFileSync } from "fs"; import { FilesRequestUrl, getHeaders, makeServerRequest } from "./request"; import { @@ -44,11 +44,11 @@ export interface UploadMetadata { export class GoogleAIFileManager { constructor( public apiKey: string, - private _requestOptions?: RequestOptions, + private _requestOptions: RequestOptions = {}, ) {} /** - * Upload a file + * Upload a file. */ async uploadFile( filePath: string, @@ -93,13 +93,24 @@ export class GoogleAIFileManager { } /** - * List all uploaded files + * List all uploaded files. + * + * Any fields set in the optional {@link SingleRequestOptions} parameter will take + * precedence over the {@link RequestOptions} values provided at the time of the + * {@link GoogleAIFileManager} initialization. */ - async listFiles(listParams?: ListParams): Promise { + async listFiles( + listParams?: ListParams, + requestOptions: SingleRequestOptions = {}, + ): Promise { + const filesRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; const url = new FilesRequestUrl( RpcTask.LIST, this.apiKey, - this._requestOptions, + filesRequestOptions, ); if (listParams?.pageSize) { url.appendParam("pageSize", listParams.pageSize.toString()); @@ -113,13 +124,24 @@ export class GoogleAIFileManager { } /** - * Get metadata for file with given ID + * Get metadata for file with given ID. + * + * Any fields set in the optional {@link SingleRequestOptions} parameter will take + * precedence over the {@link RequestOptions} values provided at the time of the + * {@link GoogleAIFileManager} initialization. 
*/ - async getFile(fileId: string): Promise { + async getFile( + fileId: string, + requestOptions: SingleRequestOptions = {}, + ): Promise { + const filesRequestOptions: SingleRequestOptions = { + ...this._requestOptions, + ...requestOptions, + }; const url = new FilesRequestUrl( RpcTask.GET, this.apiKey, - this._requestOptions, + filesRequestOptions, ); url.appendPath(parseFileId(fileId)); const uploadHeaders = getHeaders(url); @@ -128,7 +150,7 @@ export class GoogleAIFileManager { } /** - * Delete file with given ID + * Delete file with given ID. */ async deleteFile(fileId: string): Promise { const url = new FilesRequestUrl( diff --git a/packages/main/src/server/request.ts b/packages/main/src/server/request.ts index 1c8a2339..464c1d2e 100644 --- a/packages/main/src/server/request.ts +++ b/packages/main/src/server/request.ts @@ -21,7 +21,7 @@ import { getClientHeaders, makeRequest, } from "../requests/request"; -import { RequestOptions } from "../../types"; +import { RequestOptions, SingleRequestOptions } from "../../types"; import { RpcTask } from "./constants"; const taskToMethod = { @@ -38,7 +38,7 @@ export class ServerRequestUrl { constructor( public task: RpcTask, public apiKey: string, - public requestOptions?: RequestOptions, + public requestOptions?: SingleRequestOptions, ) {} appendPath(path: string): void { @@ -118,13 +118,20 @@ export async function makeServerRequest( } /** - * Get AbortSignal if timeout is specified + * Create an AbortSignal based on the timeout and signal in the + * RequestOptions. 
*/ -function getSignal(requestOptions?: RequestOptions): AbortSignal | null { - if (requestOptions?.timeout >= 0) { - const abortController = new AbortController(); - const signal = abortController.signal; - setTimeout(() => abortController.abort(), requestOptions.timeout); - return signal; +function getSignal(requestOptions?: SingleRequestOptions): AbortSignal | null { + if (requestOptions?.signal !== undefined || requestOptions?.timeout >= 0) { + const controller = new AbortController(); + if (requestOptions?.timeout >= 0) { + setTimeout(() => controller.abort(), requestOptions.timeout); + } + if (requestOptions.signal) { + requestOptions.signal.addEventListener("abort", () => { + controller.abort(); + }); + } + return controller.signal; } } diff --git a/packages/main/test-integration/node/abort-signal.test.ts b/packages/main/test-integration/node/abort-signal.test.ts new file mode 100644 index 00000000..d540b2df --- /dev/null +++ b/packages/main/test-integration/node/abort-signal.test.ts @@ -0,0 +1,250 @@ +/** + * @license + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { expect, use } from "chai"; +import * as chaiAsPromised from "chai-as-promised"; +import { + GoogleGenerativeAI, + RequestOptions, + SingleRequestOptions, +} from "../.."; +import { GoogleAIFileManager } from "../../dist/server"; + +use(chaiAsPromised); + +/** + * Integration tests against live backend. 
+ */ +describe("signal", function () { + this.timeout(60e3); + this.slow(10e3); + /* GoogleAIFileManager */ + it("GoogleAIFileManager getFile() SingleRequestOption.timeout", async () => { + // Ensure SingleRequestOptions.timeout takes precendence over the value of + // RequestOptions.timeout configured at construction. Also, a control test + // to ensure that timeout still works without an AbortSignal present. + const requestOptions: RequestOptions = { timeout: 9000 }; + const fileManager = new GoogleAIFileManager( + process.env.GEMINI_API_KEY, + requestOptions, + ); + // Ensure the file isn't hosted on the service. + try { + await fileManager.deleteFile("files/signal"); + } catch (error) {} + const singleRequestOptions: SingleRequestOptions = { timeout: 1 }; + // Use getFile, which should fail with a fetch error since the file + // doesn't exist. This should let us discern if the error was + // a timeout abort, or the fetch failure in our expect() below. + const promise = fileManager.getFile("signal.jpg", singleRequestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GoogleAIFileManager getFile() aborted", async () => { + const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const promise = fileManager.getFile("signal.jpg", requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GoogleAIFileManager getFile() timeout before signal aborts", async () => { + // Ensure the manually configured timeout works in conjunction with the + // AbortSignal timeout. + const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY); + // Ensure the file isn't hosted on the service. 
+ try { + await fileManager.deleteFile("files/signal"); + } catch (error) {} + const signal = AbortSignal.timeout(9000); + const requestOptions: SingleRequestOptions = { timeout: 1, signal }; + const promise = fileManager.getFile("signal.jpg", requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GoogleAIFileManager listFiles() aborted", async () => { + const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const promise = fileManager.listFiles(/* listParams= */ {}, requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GoogleAIFileManager listFiles() timeout before signal aborts", async () => { + const fileManager = new GoogleAIFileManager(process.env.GEMINI_API_KEY); + const signal = AbortSignal.timeout(9000); + const requestOptions: SingleRequestOptions = { timeout: 1, signal }; + const promise = fileManager.listFiles(/* listParams= */ {}, requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + + /* GenerativeModel */ + it("GenerativeModel generateContent() SingleRequestOption.timeout", async () => { + // Ensure SingleRequestOptions.timeout takes precendence over the value of + // RequestOptions.timeout configured at construction. Also, a control test + // to ensure that timeout still works without an AbortSignal present. + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const requestOptions: RequestOptions = { + timeout: 9000, // This is much longer than a generateContent request. 
+ }; + const model = genAI.getGenerativeModel( + { + model: "gemini-1.5-flash-latest", + }, + requestOptions, + ); + const singleRequestOptions: SingleRequestOptions = { timeout: 1 }; + const promise = model.generateContent( + "This is not an image", + singleRequestOptions, + ); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GenerativeModel generateContent() aborted", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const promise = model.generateContent( + "This is not an image", + requestOptions, + ); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GenerativeModel generateContent() timeout before signal aborts", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const signal = AbortSignal.timeout(9000); + const requestOptions: SingleRequestOptions = { timeout: 1, signal }; + const promise = model.generateContent( + "This is not an image", + requestOptions, + ); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GenerativeModel countTokens() aborted", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const promise = model.countTokens("This is not an image", requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GenerativeModel embedContent() aborted", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + 
const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const promise = model.embedContent("This is not an image", requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("GenerativeModel batchEmbedContents() aborted", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const content1 = { + content: { role: "user", parts: [{ text: "embed me" }] }, + }; + const content2 = { + content: { role: "user", parts: [{ text: "embed me" }] }, + }; + const promise = model.batchEmbedContents( + { + requests: [content1, content2], + }, + requestOptions, + ); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + + /* ChatSession */ + it("ChatSession sendMessage() SingleRequestOptions.timeout", async () => { + // Ensure SingleRequestOptions.timeout takes precedence over the value of + // RequestOptions.timeout configured at construction. Also, a control test + // to ensure that timeout still works without an AbortSignal present. + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const requestOptions: RequestOptions = { + timeout: 9000, // This is much longer than a generateContent request. 
+ }; + const model = genAI.getGenerativeModel( + { + model: "gemini-1.5-flash-latest", + }, + requestOptions, + ); + const question1 = "What is the capital of Oregon?"; + const chat = model.startChat(); + const singleRequestOptions: SingleRequestOptions = { timeout: 1 }; + const promise = chat.sendMessage(question1, singleRequestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("ChatSession sendMessage() aborted", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const question1 = "What is the capital of Oregon?"; + const chat = model.startChat(); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const promise = chat.sendMessage(question1, requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("ChatSession sendMessage() timeout before signal aborts", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const question1 = "What is the capital of Oregon?"; + const chat = model.startChat(); + const signal = AbortSignal.timeout(9000); + const requestOptions: SingleRequestOptions = { timeout: 1, signal }; + const promise = chat.sendMessage(question1, requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("ChatSession sendMessageStream() aborted", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const question1 = "What is the capital of Oregon?"; + const chat = model.startChat(); + const signal = AbortSignal.timeout(1); + const requestOptions: SingleRequestOptions = { signal }; + const promise = 
chat.sendMessageStream(question1, requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); + it("ChatSession sendMessageStream() timeout before signal aborts", async () => { + const genAI = new GoogleGenerativeAI(process.env.GEMINI_API_KEY || ""); + const model = genAI.getGenerativeModel({ + model: "gemini-1.5-flash-latest", + }); + const question1 = "What is the capital of Oregon?"; + const chat = model.startChat(); + const signal = AbortSignal.timeout(9000); + const requestOptions: SingleRequestOptions = { timeout: 1, signal }; + const promise = chat.sendMessageStream(question1, requestOptions); + await expect(promise).to.be.rejectedWith("This operation was aborted"); + }); +}); diff --git a/packages/main/types/requests.ts b/packages/main/types/requests.ts index 9fefde23..4eec1e9c 100644 --- a/packages/main/types/requests.ts +++ b/packages/main/types/requests.ts @@ -189,6 +189,22 @@ export interface RequestOptions { customHeaders?: Headers | Record; } +/** + * Params passed to atomic asynchronous operations. + * @public + */ +export interface SingleRequestOptions extends RequestOptions { + /** + * An object that may be used to abort asynchronous requests. The request may + * also be aborted due to the expiration of the timeout value, if provided. + * + * NOTE: AbortSignal is a client-only operation. Using it to cancel an + * operation will not cancel the request in the service. You will still + * be charged usage for any applicable operations. + */ + signal?: AbortSignal; +} + /** * Defines a tool that model can call to access external knowledge. * @public