From 1b2c73f5a584f810d6d1783bf8f723cd6e259e99 Mon Sep 17 00:00:00 2001
From: Tolga Kayhan
Date: Tue, 4 Jun 2024 12:04:41 +0100
Subject: [PATCH] Added missing modify Assistant parameters

---
 .../RequestModels/AssistantModifyRequest.cs | 43 +++++++++++++++++--
 1 file changed, 39 insertions(+), 4 deletions(-)

diff --git a/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs b/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs
index 0a9c25b6..2d3f12b1 100644
--- a/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs
+++ b/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs
@@ -3,7 +3,7 @@
 
 namespace OpenAI.ObjectModels.RequestModels;
 
-public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileIds, IOpenAiModels.IMetaData
+public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IMetaData, IOpenAiModels.ITemperature
 {
     /// <summary>
     ///     The name of the assistant. The maximum length is 256
@@ -30,10 +30,12 @@ public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileI
     public List<ToolDefinition>? Tools { get; set; }
 
     /// <summary>
-    ///     A list of File IDs attached to this assistant.
+    ///     A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For
+    ///     example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of
+    ///     vector store IDs.
     /// </summary>
-    [JsonPropertyName("file_ids")]
-    public List<string>? FileIds { get; set; }
+    [JsonPropertyName("tool_resources")]
+    public ToolResources? ToolResources { get; set; }
 
     /// <summary>
     ///     Set of 16 key-value pairs that can be attached to an object.
@@ -46,4 +48,37 @@ public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileI
     /// </summary>
     [JsonPropertyName("model")]
     public string Model { get; set; }
+
+    /// <summary>
+    ///     What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
+    ///     lower values like 0.2 will make it more focused and deterministic.
+    /// </summary>
+    [JsonPropertyName("temperature")]
+    public float? Temperature { get; set; }
+
+    /// <summary>
+    ///     An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
+    ///     tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are
+    ///     considered.
+    ///     We generally recommend altering this or temperature but not both.
+    /// </summary>
+    [JsonPropertyName("top_p")]
+    public double? TopP { get; set; }
+
+    /// <summary>
+    ///     Specifies the format that the model must output. Compatible with
+    ///     GPT-4o,
+    ///     GPT-4 Turbo, and all GPT-3.5 Turbo
+    ///     models since gpt-3.5-turbo-1106.
+    ///     Setting to { "type": "json_object" } enables JSON mode, which guarantees the message the model generates is
+    ///     valid JSON.<br /><br />
+    ///     Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or
+    ///     user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the
+    ///     token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be
+    ///     partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or
+    ///     the
+    ///     conversation exceeded the max context length.
+    /// </summary>
+    [JsonPropertyName("response_format")]
+    public ResponseFormatOneOfType? ResponseFormat { get; set; }
 }
\ No newline at end of file
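
A minimal usage sketch of the newly added properties (not part of the patch): it builds a modify-assistant payload and prints the JSON it serializes to. The model name, field values, serializer options, and console-app framing are illustrative assumptions; ToolResources and ResponseFormat are left unset because their shapes are defined elsewhere in the SDK.

    // Build a modify-assistant payload using the parameters added in this patch
    // and inspect the JSON it produces.
    using System;
    using System.Text.Json;
    using System.Text.Json.Serialization;
    using OpenAI.ObjectModels.RequestModels;

    var request = new AssistantModifyRequest
    {
        Model = "gpt-4o",   // target model for the modified assistant (assumed value)
        Temperature = 0.2f, // serialized as "temperature"
        TopP = 0.9          // serialized as "top_p"
    };

    // The JsonPropertyName attributes map the C# properties to the snake_case keys
    // used by the OpenAI "modify assistant" endpoint; null properties are skipped
    // here so only the fields set above appear in the output.
    var json = JsonSerializer.Serialize(request, new JsonSerializerOptions
    {
        DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull
    });
    Console.WriteLine(json);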