diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts
index 5036d4f60..7d88b27cb 100644
--- a/src/lib/ChatCompletionStream.ts
+++ b/src/lib/ChatCompletionStream.ts
@@ -743,7 +743,7 @@ export interface ChatCompletionSnapshot {
    * Can be used in conjunction with the `seed` request parameter to understand when
    * backend changes have been made that might impact determinism.
    */
-  system_fingerprint?: string;
+  system_fingerprint?: string | null;
 }
 
 export namespace ChatCompletionSnapshot {
diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts
index 82bb2e27c..e8480dd4c 100644
--- a/src/resources/chat/completions/completions.ts
+++ b/src/resources/chat/completions/completions.ts
@@ -280,9 +280,9 @@ export interface ChatCompletion {
    * utilize scale tier credits until they are exhausted.
    * - If set to 'auto', and the Project is not Scale tier enabled, the request will
    *   be processed using the default service tier with a lower uptime SLA and no
-   *   latency guarentee.
+   *   latency guarantee.
    * - If set to 'default', the request will be processed using the default service
-   *   tier with a lower uptime SLA and no latency guarentee.
+   *   tier with a lower uptime SLA and no latency guarantee.
    * - If set to 'flex', the request will be processed with the Flex Processing
    *   service tier.
    *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -299,7 +299,7 @@ export interface ChatCompletion {
    * Can be used in conjunction with the `seed` request parameter to understand when
    * backend changes have been made that might impact determinism.
    */
-  system_fingerprint?: string;
+  system_fingerprint?: string | null;
 
   /**
    * Usage statistics for the completion request.
@@ -531,9 +531,9 @@ export interface ChatCompletionChunk {
    * utilize scale tier credits until they are exhausted.
    * - If set to 'auto', and the Project is not Scale tier enabled, the request will
    *   be processed using the default service tier with a lower uptime SLA and no
-   *   latency guarentee.
+   *   latency guarantee.
    * - If set to 'default', the request will be processed using the default service
-   *   tier with a lower uptime SLA and no latency guarentee.
+   *   tier with a lower uptime SLA and no latency guarantee.
    * - If set to 'flex', the request will be processed with the Flex Processing
    *   service tier.
    *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -549,7 +549,7 @@ export interface ChatCompletionChunk {
    * Can be used in conjunction with the `seed` request parameter to understand when
    * backend changes have been made that might impact determinism.
    */
-  system_fingerprint?: string;
+  system_fingerprint?: string | null;
 
   /**
    * An optional field that will only be present when you set
@@ -1453,9 +1453,9 @@ export interface ChatCompletionCreateParamsBase {
    * utilize scale tier credits until they are exhausted.
    * - If set to 'auto', and the Project is not Scale tier enabled, the request will
    *   be processed using the default service tier with a lower uptime SLA and no
-   *   latency guarentee.
+   *   latency guarantee.
    * - If set to 'default', the request will be processed using the default service
-   *   tier with a lower uptime SLA and no latency guarentee.
+   *   tier with a lower uptime SLA and no latency guarantee.
    * - If set to 'flex', the request will be processed with the Flex Processing
    *   service tier.
    *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 042e51693..8a0862ad0 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -71,7 +71,7 @@ export interface Completion {
    * Can be used in conjunction with the `seed` request parameter to understand when
    * backend changes have been made that might impact determinism.
    */
-  system_fingerprint?: string;
+  system_fingerprint?: string | null;
 
   /**
    * Usage statistics for the completion request.
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index e5400dcea..bde199252 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -488,9 +488,9 @@ export interface Response {
    * utilize scale tier credits until they are exhausted.
    * - If set to 'auto', and the Project is not Scale tier enabled, the request will
    *   be processed using the default service tier with a lower uptime SLA and no
-   *   latency guarentee.
+   *   latency guarantee.
    * - If set to 'default', the request will be processed using the default service
-   *   tier with a lower uptime SLA and no latency guarentee.
+   *   tier with a lower uptime SLA and no latency guarantee.
    * - If set to 'flex', the request will be processed with the Flex Processing
    *   service tier.
    *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -4631,9 +4631,9 @@ export interface ResponseCreateParamsBase {
    * utilize scale tier credits until they are exhausted.
    * - If set to 'auto', and the Project is not Scale tier enabled, the request will
    *   be processed using the default service tier with a lower uptime SLA and no
-   *   latency guarentee.
+   *   latency guarantee.
    * - If set to 'default', the request will be processed using the default service
-   *   tier with a lower uptime SLA and no latency guarentee.
+   *   tier with a lower uptime SLA and no latency guarantee.
    * - If set to 'flex', the request will be processed with the Flex Processing
    *   service tier.
    *   [Learn more](https://platform.openai.com/docs/guides/flex-processing).