Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add GPT-4-Turbo/Vision + Updated GPT-3.5-Turbo models #406

Merged
merged 10 commits into from
Nov 10, 2023
27 changes: 27 additions & 0 deletions OpenAI.SDK/ObjectModels/Models.cs
Original file line number Diff line number Diff line change
Expand Up @@ -79,13 +79,16 @@ public enum Model
Gpt_3_5_Turbo_16k,
Gpt_3_5_Turbo_16k_0613,
Gpt_3_5_Turbo_0613,
Gpt_3_5_Turbo_1106,

Gpt_4,
Gpt_4_0314,
Gpt_4_0613,
Gpt_4_32k,
Gpt_4_32k_0314,
Gpt_4_32k_0613,
Gpt_4_1106_preview,
Gpt_4_vision_preview,

WhisperV1,

Expand Down Expand Up @@ -152,6 +155,21 @@ public enum Subject
/// </summary>
public static string Gpt_4_32k_0613 => "gpt-4-32k-0613";

/// <summary>
///     The latest GPT-4 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more.
///     Returns a maximum of 4,096 output tokens. This preview model is not yet suited for production traffic.
///     Context window: 128,000 tokens. Training data up to Apr 2023.
/// </summary>
public static string Gpt_4_1106_preview => "gpt-4-1106-preview";

/// <summary>
///     Ability to understand images, in addition to all other GPT-4 Turbo capabilities.
///     Returns a maximum of 4,096 output tokens. This is a preview model version and not yet suited for production traffic.
///     Context window: 128,000 tokens. Training data up to Apr 2023.
/// </summary>
public static string Gpt_4_vision_preview => "gpt-4-vision-preview";



public static string Ada => "ada";
public static string Babbage => "babbage";
Expand Down Expand Up @@ -240,6 +258,12 @@ public enum Subject
/// 4,096 tokens Up to Sep 2021
/// </summary>
public static string Gpt_3_5_Turbo_0613 => "gpt-3.5-turbo-0613";

/// <summary>
///     The latest GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more.
///     Context window: 16,384 tokens. Training data up to Sep 2021.
/// </summary>
public static string Gpt_3_5_Turbo_1106 => "gpt-3.5-turbo-1106";

/// <summary>
/// Snapshot of gpt-3.5-turbo from June 13th 2023 with function calling data. Unlike gpt-3.5-turbo,
Expand Down Expand Up @@ -328,6 +352,7 @@ public static string EnumToString(this Model model)
Model.ChatGpt3_5Turbo0301 => ChatGpt3_5Turbo0301,
Model.Gpt_3_5_Turbo_0301 => Gpt_3_5_Turbo_0301,
Model.Gpt_3_5_Turbo_0613 => Gpt_3_5_Turbo_0613,
Model.Gpt_3_5_Turbo_1106 => Gpt_3_5_Turbo_1106,
Model.Gpt_3_5_Turbo_16k_0613 => Gpt_3_5_Turbo_16k_0613,
Model.Gpt_3_5_Turbo_16k => Gpt_3_5_Turbo_16k,
Model.WhisperV1 => WhisperV1,
Expand All @@ -340,6 +365,8 @@ public static string EnumToString(this Model model)
Model.Gpt_4_32k_0613 => Gpt_4_32k_0613,
Model.Dall_e_2 => Dall_e_2,
Model.Dall_e_3 => Dall_e_3,
Model.Gpt_4_1106_preview => Gpt_4_1106_preview,
Model.Gpt_4_vision_preview => Gpt_4_vision_preview,
_ => throw new ArgumentOutOfRangeException(nameof(model), model, null)
};
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,12 @@ namespace OpenAI.ObjectModels.RequestModels;

public class ChatCompletionCreateRequest : IModelValidate, IOpenAiModels.ITemperature, IOpenAiModels.IModel, IOpenAiModels.IUser
{
/// <summary>
///     Strongly typed alternatives to the raw "text" / "json_object" strings accepted by
///     the API's response_format parameter (see <see cref="ChatResponseFormat" />).
/// </summary>
public enum ResponseFormats
{
    /// <summary>Plain-text output; maps to the wire value "text".</summary>
    Text,
    /// <summary>JSON mode; maps to the wire value "json_object".</summary>
    Json
}

/// <summary>
/// The messages to generate chat completions for, in the chat format.
/// The main input is the messages parameter. Messages must be an array of message objects, where each object has a
Expand Down Expand Up @@ -86,7 +92,7 @@ public IList<string>? StopCalculated

if (Stop != null)
{
return new List<string> {Stop};
return new List<string> { Stop };
}

return StopAsList;
Expand Down Expand Up @@ -146,6 +152,68 @@ public IList<string>? StopCalculated
[JsonPropertyName("function_call")]
public object? FunctionCall { get; set; }

/// <summary>
///     The format that the model must output. Used to enable JSON mode.
///     The <see cref="RequestModels.ResponseFormat.Type" /> value must be "text" or "json_object".<br />
///     See <see cref="StaticValues.CompletionStatics.ResponseFormat" /> for the supported wire values.<br />
///     <example>
///         Sample Usage:<br />
///         new ResponseFormat { Type = StaticValues.CompletionStatics.ResponseFormat.Json }
///     </example>
/// </summary>
[JsonPropertyName("response_format")]
public ResponseFormat? ResponseFormat { get; set; }

/// <summary>
///     Convenience, strongly typed way to set <see cref="ResponseFormat" />. Used to enable JSON mode.
///     Must be one of <see cref="ResponseFormats.Text" /> or <see cref="ResponseFormats.Json" />.
/// </summary>
/// <example>
///     This example shows how to enable JSON mode:
///     <code>
/// var request = new ChatCompletionCreateRequest
/// {
///     ChatResponseFormat = ResponseFormats.Json
/// };
///     </code>
/// </example>
/// <exception cref="ArgumentOutOfRangeException">
///     Thrown when an unsupported <see cref="ResponseFormats" /> value is provided.
/// </exception>
/// <exception cref="ValidationException">
///     Thrown when <see cref="ResponseFormat" /> is already set.
/// </exception>
[JsonIgnore]
public ResponseFormats? ChatResponseFormat
{
    set
    {
        // Assigning null is a no-op; the serialized ResponseFormat is left untouched.
        if (value is null)
        {
            return;
        }

        // Guard against the caller configuring the format through both properties at once.
        if (ResponseFormat?.Type != null)
        {
            throw new ValidationException("ResponseFormat and ChatResponseFormat can not be assigned at the same time. One of them should be null.");
        }

        // Map the enum onto the wire-format string expected by the API.
        ResponseFormat = new ResponseFormat
        {
            Type = value switch
            {
                ResponseFormats.Json => StaticValues.CompletionStatics.ResponseFormat.Json,
                ResponseFormats.Text => StaticValues.CompletionStatics.ResponseFormat.Text,
                _ => throw new ArgumentOutOfRangeException(nameof(value), value, null)
            }
        };
    }
}

/// <summary>
///     This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that
///     repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed,
///     and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
///     Null (the default) omits the parameter from the request.
/// </summary>
[JsonPropertyName("seed")]
public int? Seed { get; set; }

/// <summary>
/// ID of the model to use. For models supported see <see cref="OpenAI.ObjectModels.Models" /> start with <c>Gpt_</c>
/// </summary>
Expand Down
20 changes: 20 additions & 0 deletions OpenAI.SDK/ObjectModels/RequestModels/ResponseFormat.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
using System.Text.Json.Serialization;

namespace OpenAI.ObjectModels.RequestModels;

/// <summary>
///     An object specifying the format that the model must output.
///     Used to enable JSON mode.
/// </summary>
public class ResponseFormat
{
    /// <summary>
    ///     Setting to json_object enables JSON mode.
    ///     This guarantees that the message the model generates is valid JSON.
    ///     Note that the message content may be partial if finish_reason="length",
    ///     which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
    /// </summary>
    [JsonPropertyName("type")]
    public string? Type { get; set; }
}
8 changes: 8 additions & 0 deletions OpenAI.SDK/ObjectModels/StaticValueHelper.cs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,14 @@

public class StaticValues
{
/// <summary>
///     Static string values used by the chat completion endpoint.
/// </summary>
public static class CompletionStatics
{
    /// <summary>
    ///     Wire-format values accepted by the response_format parameter.
    /// </summary>
    public static class ResponseFormat
    {
        /// <summary>Enables JSON mode ("json_object").</summary>
        public static string Json => "json_object";
        /// <summary>Default plain-text output ("text").</summary>
        public static string Text => "text";
    }
}
public static class ImageStatics
{
public static class Size
Expand Down