diff --git a/README.md b/README.md
index f02f0f3..bc5a4c7 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,7 @@ builder.Services.AddChatGpt(options =>
options.DefaultParameters = new ChatGptParameters
{
MaxTokens = 800,
+ //MaxCompletionTokens = 800, // o1 series models support this property instead of MaxTokens
Temperature = 0.7
};
});
@@ -85,6 +86,8 @@ Currently available models are:
- gpt-4-turbo
- gpt-4o
- gpt-4o-mini
+- o1-preview
+- o1-mini
They have fixed names, available in the [OpenAIChatGptModels.cs file](https://github.com/marcominerva/ChatGptNet/blob/master/src/ChatGptNet/Models/OpenAIChatGptModels.cs).
@@ -163,6 +166,7 @@ The configuration can be automatically read from [IConfiguration](https://learn.
// "Temperature": 0.8,
// "TopP": 1,
// "MaxTokens": 500,
+ // "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens
// "PresencePenalty": 0,
// "FrequencyPenalty": 0,
// "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object
diff --git a/samples/ChatGptApi/ChatGptApi.csproj b/samples/ChatGptApi/ChatGptApi.csproj
index 982e462..22e23df 100644
--- a/samples/ChatGptApi/ChatGptApi.csproj
+++ b/samples/ChatGptApi/ChatGptApi.csproj
@@ -9,8 +9,8 @@
-
-
+
+
diff --git a/samples/ChatGptApi/appsettings.json b/samples/ChatGptApi/appsettings.json
index 855cd8e..7446548 100644
--- a/samples/ChatGptApi/appsettings.json
+++ b/samples/ChatGptApi/appsettings.json
@@ -17,6 +17,7 @@
// "Temperature": 0.8,
// "TopP": 1,
// "MaxTokens": 500,
+ // "MaxCompletionTokens": null, // o1 series models supports this property instead of MaxTokens
// "PresencePenalty": 0,
// "FrequencyPenalty": 0,
// "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object
diff --git a/samples/ChatGptBlazor.Wasm/ChatGptBlazor.Wasm.csproj b/samples/ChatGptBlazor.Wasm/ChatGptBlazor.Wasm.csproj
index c062d86..c99c52a 100644
--- a/samples/ChatGptBlazor.Wasm/ChatGptBlazor.Wasm.csproj
+++ b/samples/ChatGptBlazor.Wasm/ChatGptBlazor.Wasm.csproj
@@ -8,7 +8,7 @@
-
+
diff --git a/samples/ChatGptConsole/appsettings.json b/samples/ChatGptConsole/appsettings.json
index e7a2f19..bf70f22 100644
--- a/samples/ChatGptConsole/appsettings.json
+++ b/samples/ChatGptConsole/appsettings.json
@@ -17,6 +17,7 @@
// "Temperature": 0.8,
// "TopP": 1,
// "MaxTokens": 500,
+ // "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens
// "PresencePenalty": 0,
// "FrequencyPenalty": 0,
// "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object
diff --git a/samples/ChatGptFunctionCallingConsole/appsettings.json b/samples/ChatGptFunctionCallingConsole/appsettings.json
index e7a2f19..bf70f22 100644
--- a/samples/ChatGptFunctionCallingConsole/appsettings.json
+++ b/samples/ChatGptFunctionCallingConsole/appsettings.json
@@ -17,6 +17,7 @@
// "Temperature": 0.8,
// "TopP": 1,
// "MaxTokens": 500,
+ // "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens
// "PresencePenalty": 0,
// "FrequencyPenalty": 0,
// "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object
diff --git a/samples/ChatGptStreamConsole/appsettings.json b/samples/ChatGptStreamConsole/appsettings.json
index c58b74d..7988164 100644
--- a/samples/ChatGptStreamConsole/appsettings.json
+++ b/samples/ChatGptStreamConsole/appsettings.json
@@ -17,6 +17,7 @@
// "Temperature": 0.8,
// "TopP": 1,
// "MaxTokens": 500,
+ // "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens
// "PresencePenalty": 0,
// "FrequencyPenalty": 0,
// "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object
diff --git a/src/ChatGptNet/ChatGptClient.cs b/src/ChatGptNet/ChatGptClient.cs
index c9f73e6..c99ef5e 100644
--- a/src/ChatGptNet/ChatGptClient.cs
+++ b/src/ChatGptNet/ChatGptClient.cs
@@ -332,7 +332,8 @@ private async Task> CreateMessageListAsync(Guid conversati
}
private ChatGptRequest CreateChatGptRequest(IEnumerable<ChatGptMessage> messages, ChatGptToolParameters? toolParameters, bool stream, ChatGptParameters? parameters, string? model)
- => new()
+ {
+ var request = new ChatGptRequest()
{
Model = model ?? options.DefaultModel,
Messages = messages,
@@ -362,6 +363,7 @@ private ChatGptRequest CreateChatGptRequest(IEnumerable messages
Temperature = parameters?.Temperature ?? options.DefaultParameters.Temperature,
TopP = parameters?.TopP ?? options.DefaultParameters.TopP,
MaxTokens = parameters?.MaxTokens ?? options.DefaultParameters.MaxTokens,
+ MaxCompletionTokens = parameters?.MaxCompletionTokens ?? options.DefaultParameters.MaxCompletionTokens,
PresencePenalty = parameters?.PresencePenalty ?? options.DefaultParameters.PresencePenalty,
FrequencyPenalty = parameters?.FrequencyPenalty ?? options.DefaultParameters.FrequencyPenalty,
ResponseFormat = parameters?.ResponseFormat ?? options.DefaultParameters.ResponseFormat,
@@ -370,6 +372,19 @@ private ChatGptRequest CreateChatGptRequest(IEnumerable messages
User = options.User
};
+ /*
+ * As of 2024-09-01-preview, Azure OpenAI conditionally supports the use of the new max_completion_tokens property:
+ * - The o1-mini and o1-preview models accept max_completion_tokens and reject max_tokens
+ * - All other models reject max_completion_tokens and accept max_tokens
+ */
+ if (request.MaxCompletionTokens is not null)
+ {
+ request.MaxTokens = null;
+ }
+
+ return request;
+ }
+
private EmbeddingRequest CreateEmbeddingRequest(IEnumerable messages, EmbeddingParameters? parameters, string? model)
=> new()
{
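// Illustrative usage (not part of the diff): because of the guard above, when a caller supplies
// MaxCompletionTokens the configured MaxTokens default is cleared, so the serialized request
// carries max_completion_tokens only. The AskAsync overload shown here (conversation id, message,
// parameters, model) is assumed from the CreateChatGptRequest signature; check IChatGptClient for
// the exact shape. chatGptClient and conversationId are assumed to come from the surrounding app.
var response = await chatGptClient.AskAsync(
    conversationId,
    "Summarize the last answer in three bullet points.",
    new ChatGptParameters { MaxCompletionTokens = 1500 },
    OpenAIChatGptModels.O1_mini);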
diff --git a/src/ChatGptNet/Models/ChatGptParameters.cs b/src/ChatGptNet/Models/ChatGptParameters.cs
index 6d831e1..2ab6d9a 100644
--- a/src/ChatGptNet/Models/ChatGptParameters.cs
+++ b/src/ChatGptNet/Models/ChatGptParameters.cs
@@ -40,8 +40,21 @@ public class ChatGptParameters
/// <summary>
/// Gets or sets the maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.
/// </summary>
+ /// <remarks>
+ /// This value is now deprecated in favor of <see cref="MaxCompletionTokens"/>, and is not compatible with o1 series models.
+ /// </remarks>
+ /// <seealso cref="MaxCompletionTokens"/>
+ [JsonPropertyName("max_tokens")]
public int? MaxTokens { get; set; }
+ /// <summary>
+ /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
+ /// </summary>
+ /// <remarks>o1 series models must use this property instead of <see cref="MaxTokens"/>.</remarks>
+ /// <seealso cref="MaxTokens"/>
+ [JsonPropertyName("max_completion_tokens")]
+ public int? MaxCompletionTokens { get; set; }
+
/// <summary>
/// Gets or sets the presence penalties for chat completion. Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics (default: 0).
/// </summary>
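// Illustrative sketch (not part of the diff): the same property can also come from configuration,
// mirroring the appsettings.json samples above. Assuming the IConfiguration-based AddChatGpt
// overload mentioned in the README, a value under "DefaultParameters" binds to the new property:
//
//   "DefaultParameters": {
//     "MaxCompletionTokens": 800   // leave MaxTokens unset for o1 series models
//   }
//
builder.Services.AddChatGpt(builder.Configuration);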
diff --git a/src/ChatGptNet/Models/ChatGptRequest.cs b/src/ChatGptNet/Models/ChatGptRequest.cs
index 1bbc4ff..0483c40 100644
--- a/src/ChatGptNet/Models/ChatGptRequest.cs
+++ b/src/ChatGptNet/Models/ChatGptRequest.cs
@@ -126,9 +126,21 @@ internal class ChatGptRequest
/// <summary>
/// Gets or sets the maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.
/// </summary>
+ /// <remarks>
+ /// This value is now deprecated in favor of <see cref="MaxCompletionTokens"/>, and is not compatible with o1 series models.
+ /// </remarks>
+ /// <seealso cref="MaxCompletionTokens"/>
[JsonPropertyName("max_tokens")]
public int? MaxTokens { get; set; }
+ /// <summary>
+ /// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
+ /// </summary>
+ /// <remarks>o1 series models must use this property instead of <see cref="MaxTokens"/>.</remarks>
+ /// <seealso cref="MaxTokens"/>
+ [JsonPropertyName("max_completion_tokens")]
+ public int? MaxCompletionTokens { get; set; }
+
/// <summary>
/// Gets or sets the presence penalties for chat completion. A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics (default: 0).
/// </summary>
diff --git a/src/ChatGptNet/Models/OpenAIChatGptModels.cs b/src/ChatGptNet/Models/OpenAIChatGptModels.cs
index 0bc2878..94856b7 100644
--- a/src/ChatGptNet/Models/OpenAIChatGptModels.cs
+++ b/src/ChatGptNet/Models/OpenAIChatGptModels.cs
@@ -69,4 +69,22 @@ public static class OpenAIChatGptModels
/// See GPT-4 for more information.
///
public const string Gpt4_o_mini = "gpt-4o-mini";
+
+ /// <summary>
+ /// Reasoning model designed to solve hard problems across domains.
+ /// </summary>
+ /// <remarks>
+ /// This model supports a context window of 128,000 tokens and returns a maximum of 32,768 output tokens.
+ /// See o1-preview and o1-mini for more information.
+ /// </remarks>
+ public const string O1_preview = "o1-preview";
+
+ /// <summary>
+ /// Faster and cheaper reasoning model particularly good at coding, math, and science.
+ /// </summary>
+ /// <remarks>
+ /// This model supports a context window of 128,000 tokens and returns a maximum of 65,536 output tokens.
+ /// See o1-preview and o1-mini for more information.
+ /// </remarks>
+ public const string O1_mini = "o1-mini";
}
\ No newline at end of file
diff --git a/src/Directory.Build.props b/src/Directory.Build.props
index 60aa7b1..04b73b8 100644
--- a/src/Directory.Build.props
+++ b/src/Directory.Build.props
@@ -9,7 +9,7 @@
-
+