fix: Added Assistants.CreateRunAsStream. Fixed AssistantsWithVision.
HavenDV committed Sep 1, 2024
1 parent 7aa699b commit 4b7ac31
Showing 3 changed files with 245 additions and 28 deletions.
210 changes: 210 additions & 0 deletions src/libs/OpenAI/AssistantClient.CreateRun.AsStream.cs
@@ -0,0 +1,210 @@
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.CompilerServices;
using OpenAI.Extensions;

#nullable enable

namespace OpenAI
{
public partial class AssistantsClient
{
/// <summary>
/// Create a run.
/// </summary>
/// <param name="threadId"></param>
/// <param name="include"></param>
/// <param name="request"></param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async IAsyncEnumerable<global::OpenAI.AssistantStreamEvent> CreateRunAsStreamAsync(
string threadId,
global::OpenAI.CreateRunRequest request,
global::System.Collections.Generic.IList<global::OpenAI.CreateRunIncludeItem>? include = default,
[EnumeratorCancellation] global::System.Threading.CancellationToken cancellationToken = default)
{
request = request ?? throw new global::System.ArgumentNullException(nameof(request));
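            // Force streaming: this enumerator only makes sense with server-sent events enabled.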
request.Stream = true;

PrepareArguments(
client: _httpClient);
PrepareCreateRunArguments(
httpClient: _httpClient,
threadId: ref threadId,
include: include,
request: request);

using var httpRequest = new global::System.Net.Http.HttpRequestMessage(
method: global::System.Net.Http.HttpMethod.Post,
requestUri: new global::System.Uri(_httpClient.BaseAddress?.AbsoluteUri.TrimEnd('/') + $"/threads/{threadId}/runs?{string.Join("&", include?.Select(static x => $"include={x}") ?? global::System.Array.Empty<string>())}", global::System.UriKind.RelativeOrAbsolute));
var __httpRequestContentBody = global::System.Text.Json.JsonSerializer.Serialize(request, global::OpenAI.SourceGenerationContext.Default.CreateRunRequest);
var __httpRequestContent = new global::System.Net.Http.StringContent(
content: __httpRequestContentBody,
encoding: global::System.Text.Encoding.UTF8,
mediaType: "application/json");
httpRequest.Content = __httpRequestContent;

PrepareRequest(
client: _httpClient,
request: httpRequest);
PrepareCreateRunRequest(
httpClient: _httpClient,
httpRequestMessage: httpRequest,
threadId: threadId,
include: include,
request: request);

using var response = await _httpClient.SendAsync(
request: httpRequest,
completionOption: global::System.Net.Http.HttpCompletionOption.ResponseHeadersRead,
cancellationToken: cancellationToken).ConfigureAwait(false);

ProcessResponse(
client: _httpClient,
response: response);
ProcessCreateRunResponse(
httpClient: _httpClient,
httpResponseMessage: response);

response.EnsureSuccessStatusCode();

#if NET6_0_OR_GREATER
            using var stream = await response.Content.ReadAsStreamAsync(cancellationToken).ConfigureAwait(false);
#else
            using var stream = await response.Content.ReadAsStreamAsync().ConfigureAwait(false);
#endif
using var reader = new StreamReader(stream);

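            // Read the body as a server-sent-events stream, line by line.
            // .NET 8 adds a cancellable ReadLineAsync overload; older targets fall back
            // to the ThrowIfCancellationRequested check at the top of the loop body.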
#if NET8_0_OR_GREATER
while (await reader.ReadLineAsync(cancellationToken).ConfigureAwait(false) is { } streamData)
#else
while (await reader.ReadLineAsync().ConfigureAwait(false) is { } streamData)
#endif
{
cancellationToken.ThrowIfCancellationRequested();

if (!streamData.TryGetEventStreamData(out var eventData)) { continue; }
if (string.IsNullOrWhiteSpace(eventData)) { continue; }

var partialResponse =
global::System.Text.Json.JsonSerializer.Deserialize(eventData, global::OpenAI.SourceGenerationContext.Default.NullableAssistantStreamEvent) ??
throw new global::System.InvalidOperationException($"Response deserialization failed for \"{eventData}\" ");

yield return partialResponse;
}
}

/// <summary>
/// Create a run.
/// </summary>
/// <param name="threadId"></param>
/// <param name="include"></param>
/// <param name="assistantId">
/// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run.
/// </param>
/// <param name="model">
/// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.<br/>
/// Example: gpt-4o
/// </param>
/// <param name="instructions">
/// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis.
/// </param>
/// <param name="additionalInstructions">
/// Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions.
/// </param>
/// <param name="additionalMessages">
/// Adds additional messages to the thread before creating the run.
/// </param>
/// <param name="tools">
/// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
/// </param>
/// <param name="metadata">
        /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
/// </param>
/// <param name="temperature">
/// empty<br/>
/// Default Value: 1<br/>
/// Example: 1
/// </param>
/// <param name="topP">
/// empty<br/>
/// Default Value: 1<br/>
/// Example: 1
/// </param>
/// <param name="stream">
/// If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message.
/// </param>
/// <param name="maxPromptTokens">
/// The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.
/// </param>
/// <param name="maxCompletionTokens">
/// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.
/// </param>
/// <param name="truncationStrategy">
        /// Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.
/// </param>
/// <param name="toolChoice">
/// Controls which (if any) tool is called by the model.<br/>
/// `none` means the model will not call any tools and instead generates a message.<br/>
/// `auto` is the default value and means the model can pick between generating a message or calling one or more tools.<br/>
/// `required` means the model must call one or more tools before responding to the user.<br/>
/// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
/// </param>
/// <param name="parallelToolCalls">
/// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.
/// </param>
/// <param name="responseFormat">
/// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
/// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
/// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.<br/>
/// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
/// </param>
/// <param name="cancellationToken">The token to cancel the operation with</param>
/// <exception cref="global::System.InvalidOperationException"></exception>
public async IAsyncEnumerable<global::OpenAI.AssistantStreamEvent> CreateRunAsStreamAsync(
string threadId,
string assistantId,
global::System.Collections.Generic.IList<global::OpenAI.CreateRunIncludeItem>? include = default,
global::System.AnyOf<string, global::OpenAI.CreateRunRequestModel?>? model = default,
string? instructions = default,
string? additionalInstructions = default,
global::System.Collections.Generic.IList<global::OpenAI.CreateMessageRequest>? additionalMessages = default,
global::System.Collections.Generic.IList<global::System.OneOf<global::OpenAI.AssistantToolsCode, global::OpenAI.AssistantToolsFileSearch, global::OpenAI.AssistantToolsFunction>>? tools = default,
global::OpenAI.CreateRunRequestMetadata? metadata = default,
double? temperature = 1,
double? topP = 1,
bool? stream = default,
int? maxPromptTokens = default,
int? maxCompletionTokens = default,
global::OpenAI.TruncationObject? truncationStrategy = default,
global::OpenAI.AssistantsApiToolChoiceOption? toolChoice = default,
bool? parallelToolCalls = default,
global::OpenAI.AssistantsApiResponseFormatOption? responseFormat = default,
[EnumeratorCancellation] global::System.Threading.CancellationToken cancellationToken = default)
{
var request = new global::OpenAI.CreateRunRequest
{
AssistantId = assistantId,
Model = model,
Instructions = instructions,
AdditionalInstructions = additionalInstructions,
AdditionalMessages = additionalMessages,
Tools = tools,
Metadata = metadata,
Temperature = temperature,
TopP = topP,
Stream = stream,
MaxPromptTokens = maxPromptTokens,
MaxCompletionTokens = maxCompletionTokens,
TruncationStrategy = truncationStrategy,
ToolChoice = toolChoice,
ParallelToolCalls = parallelToolCalls,
ResponseFormat = responseFormat,
};

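            // Delegate to the request-based overload above, which forces Stream = true
            // regardless of the `stream` argument passed here.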
await foreach (var response in CreateRunAsStreamAsync(
threadId: threadId,
include: include,
request: request,
cancellationToken: cancellationToken))
{
yield return response;
}
}
}
}
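Because the iterator is decorated with [EnumeratorCancellation], a token supplied at enumeration time flows into the method as well. Below is a minimal consumption sketch, not part of the commit; it assumes an already-configured `api` client plus an existing `thread` and `assistant`, as in the test change further down:

using System;
using System.Threading;
using System.Threading.Tasks;

// Sketch only: cancel the enumeration after 30 seconds (assumed timeout).
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30));

await foreach (var update in api.Assistants
    .CreateRunAsStreamAsync(threadId: thread.Id, assistantId: assistant.Id)
    .WithCancellation(cts.Token))
{
    // AssistantStreamEvent is a generated discriminated union; IsRun is one of its checks.
    if (update.IsRun) Console.WriteLine("run event received");
}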
5 changes: 0 additions & 5 deletions src/libs/OpenAI/ChatClient.CreateChatCompletion.AsStream.cs
@@ -71,11 +71,6 @@ public partial class ChatClient
if (!streamData.TryGetEventStreamData(out var eventData)) { continue; }
if (string.IsNullOrWhiteSpace(eventData)) { continue; }

-                ProcessCreateChatCompletionResponseContent(
-                    httpClient: _httpClient,
-                    httpResponseMessage: response,
-                    content: ref eventData);

var partialResponse =
global::System.Text.Json.JsonSerializer.Deserialize(eventData, global::OpenAI.SourceGenerationContext.Default.CreateChatCompletionStreamResponse) ??
throw new global::System.InvalidOperationException($"Response deserialization failed for \"{eventData}\" ");
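Both streaming readers consume standard server-sent-events framing: TryGetEventStreamData is expected to extract the JSON payload from `data:` lines, and blank keep-alive lines are skipped before deserialization. A sketch of the wire format being parsed (illustrative, not captured from a real response):

event: thread.run.created
data: {"id":"run_abc123","object":"thread.run"}

data: [DONE]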
35 changes: 35 additions & 23 deletions
@@ -52,33 +52,45 @@ public async Task AssistantsWithVision()
pictureOfOrangeFile,
]
});

-        // AsyncResultCollection<StreamingUpdate> streamingUpdates = api.Assistants.CreateRunStreamingAsync(
-        //     thread,
-        //     assistant,
-        //     new RunCreationOptions()
-        //     {
-        //         AdditionalInstructions = "When possible, try to sneak in puns if you're asked to compare things.",
-        //     });
-        //
-        // await foreach (StreamingUpdate streamingUpdate in streamingUpdates)
-        // {
-        //     if (streamingUpdate.UpdateKind == StreamingUpdateReason.RunCreated)
-        //     {
-        //         Console.WriteLine($"--- Run started! ---");
-        //     }
-        //     if (streamingUpdate is MessageContentUpdate contentUpdate)
-        //     {
-        //         Console.Write(contentUpdate.Text);
-        //     }
-        // }

-        RunObject response = await api.Assistants.CreateRunAsync(
        var streamingUpdates = api.Assistants.CreateRunAsStreamAsync(
threadId: thread.Id,
assistantId: assistant.Id,
instructions: "When possible, try to sneak in puns if you're asked to compare things.");

-        Console.WriteLine(response[0].Content);

await foreach (AssistantStreamEvent streamingUpdate in streamingUpdates)
{
if (streamingUpdate.IsRun && streamingUpdate.Run.Value.IsValue1) // RunCreated
{
Console.WriteLine("--- Run started! ---");
}
if (streamingUpdate is { IsMessage: true, Message: var messageStreamEvent } &&
messageStreamEvent.Value is { IsValue3: true, Value3: var delta })
{
foreach (var deltaVariation in delta.Data.Delta.Content ?? [])
{
if (deltaVariation.IsValue1)
{
Console.WriteLine();
Console.WriteLine(deltaVariation.Value1.ImageFile?.FileId);
}
if (deltaVariation.IsValue2)
{
Console.Write(deltaVariation.Value2.Text?.Value);
}
if (deltaVariation.IsValue3)
{
Console.WriteLine();
Console.WriteLine(deltaVariation.Value3.Refusal);
}
if (deltaVariation.IsValue4)
{
Console.WriteLine();
Console.WriteLine(deltaVariation.Value4.ImageUrl?.Url);
}
}
}
}

_ = await api.Files.DeleteFileAsync(pictureOfAppleFile.Id);
_ = await api.Assistants.DeleteThreadAsync(thread.Id);
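A note on the generated union accessors in the loop above: in the order checked, Value1 is an image-file delta (printed via ImageFile.FileId), Value2 a text delta (Text.Value), Value3 a refusal, and Value4 an image-URL delta (ImageUrl.Url), so each streamed content fragment is printed according to its kind.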
