Skip to content

Commit

Permalink
feat(api): add 'gpt-3.5-turbo-instruct', fine-tune error objects, update documentation (#329)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-bot authored Sep 21, 2023
1 parent fd72de1 commit e5f3852
Show file tree
Hide file tree
Showing 6 changed files with 74 additions and 34 deletions.
22 changes: 12 additions & 10 deletions src/resources/chat/completions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,11 @@ export namespace ChatCompletion {
/**
* The reason the model stopped generating tokens. This will be `stop` if the model
* hit a natural stop point or a provided stop sequence, `length` if the maximum
* number of tokens specified in the request was reached, or `function_call` if the
* model called a function.
* number of tokens specified in the request was reached, `content_filter` if
* content was omitted due to a flag from our content filters, or `function_call`
* if the model called a function.
*/
finish_reason: 'stop' | 'length' | 'function_call';
finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter';

/**
* The index of the choice in the list of choices.
Expand Down Expand Up @@ -134,8 +135,9 @@ export namespace ChatCompletionChunk {
/**
* The reason the model stopped generating tokens. This will be `stop` if the model
* hit a natural stop point or a provided stop sequence, `length` if the maximum
* number of tokens specified in the request was reached, or `function_call` if the
* model called a function.
* number of tokens specified in the request was reached, `content_filter` if
* content was omitted due to a flag from our content filters, or `function_call`
* if the model called a function.
*/
finish_reason: 'stop' | 'length' | 'function_call' | null;

Expand Down Expand Up @@ -331,11 +333,11 @@ export interface ChatCompletionCreateParamsBase {
frequency_penalty?: number | null;

/**
* Controls how the model responds to function calls. "none" means the model does
* not call a function, and responds to the end-user. "auto" means the model can
* Controls how the model responds to function calls. `none` means the model does
* not call a function, and responds to the end-user. `auto` means the model can
* pick between an end-user or calling a function. Specifying a particular function
* via `{"name": "my_function"}` forces the model to call that function. "none" is
* the default when no functions are present. "auto" is the default if functions
* via `{"name": "my_function"}` forces the model to call that function. `none` is
* the default when no functions are present. `auto` is the default if functions
* are present.
*/
function_call?: 'none' | 'auto' | ChatCompletionCreateParams.FunctionCallOption;
Expand Down Expand Up @@ -365,7 +367,7 @@ export interface ChatCompletionCreateParamsBase {
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
* for counting tokens.
*/
max_tokens?: number;
max_tokens?: number | null;

/**
* How many chat completion choices to generate for each input message.
Expand Down
8 changes: 5 additions & 3 deletions src/resources/completions.ts
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,11 @@ export interface Completion {
export interface CompletionChoice {
/**
* The reason the model stopped generating tokens. This will be `stop` if the model
* hit a natural stop point or a provided stop sequence, or `length` if the maximum
* number of tokens specified in the request was reached.
* hit a natural stop point or a provided stop sequence, `length` if the maximum
* number of tokens specified in the request was reached, or `content_filter` if
* content was omitted due to a flag from our content filters.
*/
finish_reason: 'stop' | 'length';
finish_reason: 'stop' | 'length' | 'content_filter';

index: number;

Expand Down Expand Up @@ -126,6 +127,7 @@ export interface CompletionCreateParamsBase {
| (string & {})
| 'babbage-002'
| 'davinci-002'
| 'gpt-3.5-turbo-instruct'
| 'text-davinci-003'
| 'text-davinci-002'
| 'text-davinci-001'
Expand Down
5 changes: 3 additions & 2 deletions src/resources/edits.ts
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,9 @@ export namespace Edit {
export interface Choice {
/**
* The reason the model stopped generating tokens. This will be `stop` if the model
* hit a natural stop point or a provided stop sequence, or `length` if the maximum
* number of tokens specified in the request was reached.
* hit a natural stop point or a provided stop sequence, `length` if the maximum
* number of tokens specified in the request was reached, or `content_filter` if
* content was omitted due to a flag from our content filters.
*/
finish_reason: 'stop' | 'length';

Expand Down
2 changes: 1 addition & 1 deletion src/resources/embeddings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ export interface EmbeddingCreateParams {
* Input text to embed, encoded as a string or array of tokens. To embed multiple
* inputs in a single request, pass an array of strings or array of token arrays.
* Each input must not exceed the max input tokens for the model (8191 tokens for
* `text-embedding-ada-002`).
* `text-embedding-ada-002`) and cannot be an empty string.
* [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
* for counting tokens.
*/
Expand Down
2 changes: 1 addition & 1 deletion src/resources/files.ts
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ export class Files extends APIResource {
}

/**
* Returns the contents of the specified file
* Returns the contents of the specified file.
*/
retrieveContent(fileId: string, options?: Core.RequestOptions): Core.APIPromise<string> {
return this.get(`/files/${fileId}/content`, {
Expand Down
69 changes: 52 additions & 17 deletions src/resources/fine-tuning/jobs.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import * as Core from 'openai/core';
import { APIResource } from 'openai/resource';
import { isRequestOptions } from 'openai/core';
import * as Files from 'openai/resources/files';
import * as API from './index';
import { CursorPage, CursorPageParams } from 'openai/pagination';

Expand Down Expand Up @@ -105,10 +104,23 @@ export interface FineTuningJob {
created_at: number;

/**
* The name of the fine-tuned model that is being created.
* For fine-tuning jobs that have `failed`, this will contain more information on
* the cause of the failure.
*/
error: FineTuningJob.Error | null;

/**
* The name of the fine-tuned model that is being created. The value will be null
* if the fine-tuning job is still running.
*/
fine_tuned_model: string | null;

/**
* The Unix timestamp (in seconds) for when the fine-tuning job was finished. The
* value will be null if the fine-tuning job is still running.
*/
finished_at: number | null;

/**
* The hyperparameters used for the fine-tuning job. See the
* [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for
Expand All @@ -132,38 +144,61 @@ export interface FineTuningJob {
organization_id: string;

/**
* The compiled results files for the fine-tuning job.
* The compiled results file ID(s) for the fine-tuning job. You can retrieve the
* results with the
* [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
*/
result_files: Array<Files.FileObject>;
result_files: Array<string>;

/**
* The current status of the fine-tuning job, which can be either `created`,
* `pending`, `running`, `succeeded`, `failed`, or `cancelled`.
* The current status of the fine-tuning job, which can be either
* `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
*/
status: string;

/**
* The total number of billable tokens processed by this fine tuning job.
* The total number of billable tokens processed by this fine-tuning job. The value
* will be null if the fine-tuning job is still running.
*/
trained_tokens: number;
trained_tokens: number | null;

/**
* The file ID used for training.
* The file ID used for training. You can retrieve the training data with the
* [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
*/
training_file: string;

/**
* The file ID used for validation.
* The file ID used for validation. You can retrieve the validation results with
* the
* [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
*/
validation_file: string | null;
}

export namespace FineTuningJob {
/**
* The Unix timestamp (in seconds) for when the fine-tuning job was finished.
* For fine-tuning jobs that have `failed`, this will contain more information on
* the cause of the failure.
*/
finished_at?: number;
}
export interface Error {
/**
* A machine-readable error code.
*/
code: string;

/**
* A human-readable error message.
*/
message: string;

/**
* The parameter that was invalid, usually `training_file` or `validation_file`.
* This field will be null if the failure was not parameter-specific.
*/
param: string | null;
}

export namespace FineTuningJob {
/**
* The hyperparameters used for the fine-tuning job. See the
* [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for
Expand All @@ -172,11 +207,11 @@ export namespace FineTuningJob {
export interface Hyperparameters {
/**
* The number of epochs to train the model for. An epoch refers to one full cycle
* through the training dataset. "Auto" decides the optimal number of epochs based
* through the training dataset. "auto" decides the optimal number of epochs based
* on the size of the dataset. If setting the number manually, we support any
* number between 1 and 50 epochs.
*/
n_epochs?: 'auto' | number;
n_epochs: 'auto' | number;
}
}

Expand Down Expand Up @@ -219,7 +254,7 @@ export interface JobCreateParams {
hyperparameters?: JobCreateParams.Hyperparameters;

/**
* A string of up to 40 characters that will be added to your fine-tuned model
* A string of up to 18 characters that will be added to your fine-tuned model
* name.
*
* For example, a `suffix` of "custom-model-name" would produce a model name like
Expand Down

0 comments on commit e5f3852

Please sign in to comment.