diff --git a/.github/workflows/dotnet-build-and-test.yml b/.github/workflows/dotnet-build-and-test.yml index 5abfe2a879..8c9fe22ffc 100644 --- a/.github/workflows/dotnet-build-and-test.yml +++ b/.github/workflows/dotnet-build-and-test.yml @@ -8,11 +8,11 @@ name: dotnet-build-and-test on: workflow_dispatch: pull_request: - branches: ["main"] + branches: ["main", "feature*"] merge_group: - branches: ["main"] + branches: ["main", "feature*"] push: - branches: ["main"] + branches: ["main", "feature*"] schedule: - cron: "0 0 * * *" # Run at midnight UTC daily diff --git a/.github/workflows/python-test-coverage-report.yml b/.github/workflows/python-test-coverage-report.yml index 9ea5b8022d..81a506f277 100644 --- a/.github/workflows/python-test-coverage-report.yml +++ b/.github/workflows/python-test-coverage-report.yml @@ -39,7 +39,7 @@ jobs: echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV - name: Pytest coverage comment id: coverageComment - uses: MishaKav/pytest-coverage-comment@v1.1.57 + uses: MishaKav/pytest-coverage-comment@v1.1.59 with: github-token: ${{ secrets.GH_ACTIONS_PR_WRITE }} issue-number: ${{ env.PR_NUMBER }} diff --git a/docs/assets/Agentic-framework_high-res.png b/docs/assets/Agentic-framework_high-res.png new file mode 100644 index 0000000000..cdb53b11bf Binary files /dev/null and b/docs/assets/Agentic-framework_high-res.png differ diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index d110ec4426..bf3ff94eba 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -44,7 +44,7 @@ - + @@ -68,15 +68,15 @@ - - - - - - - - + + + + + + + + @@ -86,7 +86,7 @@ - + @@ -104,8 +104,8 @@ - - + + @@ -135,7 +135,7 @@ all runtime; build; native; contentfiles; analyzers; buildtransitive - + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/dotnet/agent-framework-dotnet.slnx b/dotnet/agent-framework-dotnet.slnx index 3e845c75d9..71c79efc44 100644 --- a/dotnet/agent-framework-dotnet.slnx 
+++ b/dotnet/agent-framework-dotnet.slnx @@ -20,6 +20,7 @@ + @@ -47,8 +48,7 @@ - - + @@ -58,20 +58,22 @@ - + - - - - + + + + + + @@ -80,7 +82,8 @@ - + + @@ -155,10 +158,10 @@ - - - - + + + + diff --git a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 0e92f8ef06..cbcc78bb9d 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -2,9 +2,9 @@ 1.0.0 - $(VersionPrefix)-$(VersionSuffix).251107.1 - $(VersionPrefix)-preview.251107.1 - 1.0.0-preview.251107.1 + $(VersionPrefix)-$(VersionSuffix).251110.2 + $(VersionPrefix)-preview.251110.2 + 1.0.0-preview.251110.2 Debug;Release;Publish true diff --git a/dotnet/samples/.gitignore b/dotnet/samples/.gitignore deleted file mode 100644 index 8392c905c6..0000000000 --- a/dotnet/samples/.gitignore +++ /dev/null @@ -1 +0,0 @@ -launchSettings.json \ No newline at end of file diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/AGUIDojoServer.csproj b/dotnet/samples/AGUIClientServer/AGUIDojoServer/AGUIDojoServer.csproj new file mode 100644 index 0000000000..0513374a93 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/AGUIDojoServer.csproj @@ -0,0 +1,24 @@ + + + + Exe + net9.0 + enable + enable + b9c3f1e1-2fb4-5g29-0e52-53e2b7g9gf21 + + + + + + + + + + + + + + + + diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/AGUIDojoServerSerializerContext.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/AGUIDojoServerSerializerContext.cs new file mode 100644 index 0000000000..af86dc2598 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/AGUIDojoServerSerializerContext.cs @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text.Json.Serialization; + +namespace AGUIDojoServer; + +[JsonSerializable(typeof(WeatherInfo))] +[JsonSerializable(typeof(Recipe))] +[JsonSerializable(typeof(Ingredient))] +[JsonSerializable(typeof(RecipeResponse))] +internal sealed partial class AGUIDojoServerSerializerContext : JsonSerializerContext; diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/ChatClientAgentFactory.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/ChatClientAgentFactory.cs new file mode 100644 index 0000000000..5145c5559d --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/ChatClientAgentFactory.cs @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.ComponentModel; +using System.Text.Json; +using Azure.AI.OpenAI; +using Azure.Identity; +using Microsoft.Agents.AI; +using Microsoft.Extensions.AI; +using ChatClient = OpenAI.Chat.ChatClient; + +namespace AGUIDojoServer; + +internal static class ChatClientAgentFactory +{ + private static AzureOpenAIClient? s_azureOpenAIClient; + private static string? s_deploymentName; + + public static void Initialize(IConfiguration configuration) + { + string endpoint = configuration["AZURE_OPENAI_ENDPOINT"] ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); + s_deploymentName = configuration["AZURE_OPENAI_DEPLOYMENT_NAME"] ?? 
throw new InvalidOperationException("AZURE_OPENAI_DEPLOYMENT_NAME is not set."); + + s_azureOpenAIClient = new AzureOpenAIClient( + new Uri(endpoint), + new DefaultAzureCredential()); + } + + public static ChatClientAgent CreateAgenticChat() + { + ChatClient chatClient = s_azureOpenAIClient!.GetChatClient(s_deploymentName!); + + return chatClient.AsIChatClient().CreateAIAgent( + name: "AgenticChat", + description: "A simple chat agent using Azure OpenAI"); + } + + public static ChatClientAgent CreateBackendToolRendering() + { + ChatClient chatClient = s_azureOpenAIClient!.GetChatClient(s_deploymentName!); + + return chatClient.AsIChatClient().CreateAIAgent( + name: "BackendToolRenderer", + description: "An agent that can render backend tools using Azure OpenAI", + tools: [AIFunctionFactory.Create( + GetWeather, + name: "get_weather", + description: "Get the weather for a given location.", + AGUIDojoServerSerializerContext.Default.Options)]); + } + + public static ChatClientAgent CreateHumanInTheLoop() + { + ChatClient chatClient = s_azureOpenAIClient!.GetChatClient(s_deploymentName!); + + return chatClient.AsIChatClient().CreateAIAgent( + name: "HumanInTheLoopAgent", + description: "An agent that involves human feedback in its decision-making process using Azure OpenAI"); + } + + public static ChatClientAgent CreateToolBasedGenerativeUI() + { + ChatClient chatClient = s_azureOpenAIClient!.GetChatClient(s_deploymentName!); + + return chatClient.AsIChatClient().CreateAIAgent( + name: "ToolBasedGenerativeUIAgent", + description: "An agent that uses tools to generate user interfaces using Azure OpenAI"); + } + + public static ChatClientAgent CreateAgenticUI() + { + ChatClient chatClient = s_azureOpenAIClient!.GetChatClient(s_deploymentName!); + + return chatClient.AsIChatClient().CreateAIAgent( + name: "AgenticUIAgent", + description: "An agent that generates agentic user interfaces using Azure OpenAI"); + } + + public static AIAgent 
CreateSharedState(JsonSerializerOptions options) + { + ChatClient chatClient = s_azureOpenAIClient!.GetChatClient(s_deploymentName!); + + var baseAgent = chatClient.AsIChatClient().CreateAIAgent( + name: "SharedStateAgent", + description: "An agent that demonstrates shared state patterns using Azure OpenAI"); + + return new SharedStateAgent(baseAgent, options); + } + + [Description("Get the weather for a given location.")] + private static WeatherInfo GetWeather([Description("The location to get the weather for.")] string location) => new() + { + Temperature = 20, + Conditions = "sunny", + Humidity = 50, + WindSpeed = 10, + FeelsLike = 25 + }; +} diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/Ingredient.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Ingredient.cs new file mode 100644 index 0000000000..4be57405ae --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Ingredient.cs @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json.Serialization; + +namespace AGUIDojoServer; + +internal sealed class Ingredient +{ + [JsonPropertyName("icon")] + public string Icon { get; set; } = string.Empty; + + [JsonPropertyName("name")] + public string Name { get; set; } = string.Empty; + + [JsonPropertyName("amount")] + public string Amount { get; set; } = string.Empty; +} diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/Program.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Program.cs new file mode 100644 index 0000000000..57cc409c58 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Program.cs @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using AGUIDojoServer; +using Microsoft.Agents.AI.Hosting.AGUI.AspNetCore; +using Microsoft.AspNetCore.HttpLogging; +using Microsoft.Extensions.Options; + +WebApplicationBuilder builder = WebApplication.CreateBuilder(args); + +builder.Services.AddHttpLogging(logging => +{ + logging.LoggingFields = HttpLoggingFields.RequestPropertiesAndHeaders | HttpLoggingFields.RequestBody + | HttpLoggingFields.ResponsePropertiesAndHeaders | HttpLoggingFields.ResponseBody; + logging.RequestBodyLogLimit = int.MaxValue; + logging.ResponseBodyLogLimit = int.MaxValue; +}); + +builder.Services.AddHttpClient().AddLogging(); +builder.Services.ConfigureHttpJsonOptions(options => options.SerializerOptions.TypeInfoResolverChain.Add(AGUIDojoServerSerializerContext.Default)); +builder.Services.AddAGUI(); + +WebApplication app = builder.Build(); + +app.UseHttpLogging(); + +// Initialize the factory +ChatClientAgentFactory.Initialize(app.Configuration); + +// Map the AG-UI agent endpoints for different scenarios +app.MapAGUI("/agentic_chat", ChatClientAgentFactory.CreateAgenticChat()); + +app.MapAGUI("/backend_tool_rendering", ChatClientAgentFactory.CreateBackendToolRendering()); + +app.MapAGUI("/human_in_the_loop", ChatClientAgentFactory.CreateHumanInTheLoop()); + +app.MapAGUI("/tool_based_generative_ui", ChatClientAgentFactory.CreateToolBasedGenerativeUI()); + +app.MapAGUI("/agentic_generative_ui", ChatClientAgentFactory.CreateAgenticUI()); + +var jsonOptions = app.Services.GetRequiredService>(); +app.MapAGUI("/shared_state", ChatClientAgentFactory.CreateSharedState(jsonOptions.Value.SerializerOptions)); + +await app.RunAsync(); + +public partial class Program { } diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/Properties/launchSettings.json b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Properties/launchSettings.json new file mode 100644 index 0000000000..d1c2dbfa92 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Properties/launchSettings.json @@ -0,0 +1,12 
@@ +{ + "profiles": { + "AGUIDojoServer": { + "commandName": "Project", + "launchBrowser": true, + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + }, + "applicationUrl": "http://localhost:5018" + } + } +} \ No newline at end of file diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/Recipe.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Recipe.cs new file mode 100644 index 0000000000..9af4f6eae9 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/Recipe.cs @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json.Serialization; + +namespace AGUIDojoServer; + +internal sealed class Recipe +{ + [JsonPropertyName("title")] + public string Title { get; set; } = string.Empty; + + [JsonPropertyName("skill_level")] + public string SkillLevel { get; set; } = string.Empty; + + [JsonPropertyName("cooking_time")] + public string CookingTime { get; set; } = string.Empty; + + [JsonPropertyName("special_preferences")] + public List SpecialPreferences { get; set; } = []; + + [JsonPropertyName("ingredients")] + public List Ingredients { get; set; } = []; + + [JsonPropertyName("instructions")] + public List Instructions { get; set; } = []; +} diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/RecipeResponse.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/RecipeResponse.cs new file mode 100644 index 0000000000..0e9b2f2fff --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/RecipeResponse.cs @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text.Json.Serialization; + +namespace AGUIDojoServer; + +#pragma warning disable CA1812 // Used for the JsonSchema response format +internal sealed class RecipeResponse +#pragma warning restore CA1812 +{ + [JsonPropertyName("recipe")] + public Recipe Recipe { get; set; } = new(); +} diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/SharedStateAgent.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/SharedStateAgent.cs new file mode 100644 index 0000000000..ea2f1d319f --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/SharedStateAgent.cs @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Text.Json; +using Microsoft.Agents.AI; +using Microsoft.Extensions.AI; + +namespace AGUIDojoServer; + +[SuppressMessage("Performance", "CA1812:Avoid uninstantiated internal classes", Justification = "Instantiated by ChatClientAgentFactory.CreateSharedState")] +internal sealed class SharedStateAgent : DelegatingAIAgent +{ + private readonly JsonSerializerOptions _jsonSerializerOptions; + + public SharedStateAgent(AIAgent innerAgent, JsonSerializerOptions jsonSerializerOptions) + : base(innerAgent) + { + this._jsonSerializerOptions = jsonSerializerOptions; + } + + public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + { + return this.RunStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); + } + + public override async IAsyncEnumerable RunStreamingAsync( + IEnumerable messages, + AgentThread? thread = null, + AgentRunOptions? 
options = null, + [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + if (options is not ChatClientAgentRunOptions { ChatOptions.AdditionalProperties: { } properties } chatRunOptions || + !properties.TryGetValue("ag_ui_state", out JsonElement state)) + { + await foreach (var update in this.InnerAgent.RunStreamingAsync(messages, thread, options, cancellationToken).ConfigureAwait(false)) + { + yield return update; + } + yield break; + } + + var firstRunOptions = new ChatClientAgentRunOptions + { + ChatOptions = chatRunOptions.ChatOptions.Clone(), + AllowBackgroundResponses = chatRunOptions.AllowBackgroundResponses, + ContinuationToken = chatRunOptions.ContinuationToken, + ChatClientFactory = chatRunOptions.ChatClientFactory, + }; + + // Configure JSON schema response format for structured state output + firstRunOptions.ChatOptions.ResponseFormat = ChatResponseFormat.ForJsonSchema( + schemaName: "RecipeResponse", + schemaDescription: "A response containing a recipe with title, skill level, cooking time, preferences, ingredients, and instructions"); + + ChatMessage stateUpdateMessage = new( + ChatRole.System, + [ + new TextContent("Here is the current state in JSON format:"), + new TextContent(state.GetRawText()), + new TextContent("The new state is:") + ]); + + var firstRunMessages = messages.Append(stateUpdateMessage); + + var allUpdates = new List(); + await foreach (var update in this.InnerAgent.RunStreamingAsync(firstRunMessages, thread, firstRunOptions, cancellationToken).ConfigureAwait(false)) + { + allUpdates.Add(update); + + // Yield all non-text updates (tool calls, etc.) 
+ bool hasNonTextContent = update.Contents.Any(c => c is not TextContent); + if (hasNonTextContent) + { + yield return update; + } + } + + var response = allUpdates.ToAgentRunResponse(); + + if (response.TryDeserialize(this._jsonSerializerOptions, out JsonElement stateSnapshot)) + { + byte[] stateBytes = JsonSerializer.SerializeToUtf8Bytes( + stateSnapshot, + this._jsonSerializerOptions.GetTypeInfo(typeof(JsonElement))); + yield return new AgentRunResponseUpdate + { + Contents = [new DataContent(stateBytes, "application/json")] + }; + } + else + { + yield break; + } + + var secondRunMessages = messages.Concat(response.Messages).Append( + new ChatMessage( + ChatRole.System, + [new TextContent("Please provide a concise summary of the state changes in at most two sentences.")])); + + await foreach (var update in this.InnerAgent.RunStreamingAsync(secondRunMessages, thread, options, cancellationToken).ConfigureAwait(false)) + { + yield return update; + } + } +} diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/WeatherInfo.cs b/dotnet/samples/AGUIClientServer/AGUIDojoServer/WeatherInfo.cs new file mode 100644 index 0000000000..e5b4811739 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/WeatherInfo.cs @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text.Json.Serialization; + +namespace AGUIDojoServer; + +internal sealed class WeatherInfo +{ + [JsonPropertyName("temperature")] + public int Temperature { get; init; } + + [JsonPropertyName("conditions")] + public string Conditions { get; init; } = string.Empty; + + [JsonPropertyName("humidity")] + public int Humidity { get; init; } + + [JsonPropertyName("wind_speed")] + public int WindSpeed { get; init; } + + [JsonPropertyName("feelsLike")] + public int FeelsLike { get; init; } +} diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/appsettings.Development.json b/dotnet/samples/AGUIClientServer/AGUIDojoServer/appsettings.Development.json new file mode 100644 index 0000000000..3e805edef8 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/appsettings.Development.json @@ -0,0 +1,9 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning", + "Microsoft.AspNetCore.HttpLogging.HttpLoggingMiddleware": "Information" + } + } +} diff --git a/dotnet/samples/AGUIClientServer/AGUIDojoServer/appsettings.json b/dotnet/samples/AGUIClientServer/AGUIDojoServer/appsettings.json new file mode 100644 index 0000000000..bb20fb69dd --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIDojoServer/appsettings.json @@ -0,0 +1,10 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning", + "Microsoft.AspNetCore.HttpLogging.HttpLoggingMiddleware": "Information" + } + }, + "AllowedHosts": "*" +} diff --git a/dotnet/samples/AGUIClientServer/AGUIServer/Properties/launchSettings.json b/dotnet/samples/AGUIClientServer/AGUIServer/Properties/launchSettings.json new file mode 100644 index 0000000000..6e38bd9975 --- /dev/null +++ b/dotnet/samples/AGUIClientServer/AGUIServer/Properties/launchSettings.json @@ -0,0 +1,12 @@ +{ + "profiles": { + "AGUIServer": { + "commandName": "Project", + "launchBrowser": true, + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": 
"Development" + }, + "applicationUrl": "http://localhost:5100;https://localhost:5101" + } + } +} \ No newline at end of file diff --git a/dotnet/samples/AgentWebChat/AgentWebChat.AgentHost/Program.cs b/dotnet/samples/AgentWebChat/AgentWebChat.AgentHost/Program.cs index cb1a7e3cd9..46af2a5b19 100644 --- a/dotnet/samples/AgentWebChat/AgentWebChat.AgentHost/Program.cs +++ b/dotnet/samples/AgentWebChat/AgentWebChat.AgentHost/Program.cs @@ -107,8 +107,8 @@ Once the user has deduced what type (knight or knave) both Alice and Bob are, te app.UseExceptionHandler(); // attach a2a with simple message communication -app.MapA2A(agentName: "pirate", path: "/a2a/pirate"); -app.MapA2A(agentName: "knights-and-knaves", path: "/a2a/knights-and-knaves", agentCard: new() +app.MapA2A(pirateAgentBuilder, path: "/a2a/pirate"); +app.MapA2A(knightsKnavesAgentBuilder, path: "/a2a/knights-and-knaves", agentCard: new() { Name = "Knights and Knaves", Description = "An agent that helps you solve the knights and knaves puzzle.", diff --git a/dotnet/samples/GettingStarted/AgentOpenTelemetry/README.md b/dotnet/samples/GettingStarted/AgentOpenTelemetry/README.md index 3542bf5b30..8f675a20d1 100644 --- a/dotnet/samples/GettingStarted/AgentOpenTelemetry/README.md +++ b/dotnet/samples/GettingStarted/AgentOpenTelemetry/README.md @@ -142,11 +142,11 @@ You: Besides the Aspire Dashboard and the Application Insights native UI, you can also use Grafana to visualize the telemetry data in Application Insights. 
There are two tailored dashboards for you to get started quickly: ### Agent Overview dashboard -Grafana Dashboard Gallery link: +Open dashboard in Azure portal: ![Agent Overview dashboard](https://github.com/Azure/azure-managed-grafana/raw/main/samples/assets/grafana-af-agent.gif) ### Workflow Overview dashboard -Grafana Dashboard Gallery link: +Open dashboard in Azure portal: ![Workflow Overview dashboard](https://github.com/Azure/azure-managed-grafana/raw/main/samples/assets/grafana-af-workflow.gif) ## Key Features Demonstrated diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step21_ChatHistoryMemoryProvider/Agent_Step21_ChatHistoryMemoryProvider.csproj b/dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step01_ChatHistoryMemory/AgentWithMemory_Step01_ChatHistoryMemory.csproj similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step21_ChatHistoryMemoryProvider/Agent_Step21_ChatHistoryMemoryProvider.csproj rename to dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step01_ChatHistoryMemory/AgentWithMemory_Step01_ChatHistoryMemory.csproj diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step21_ChatHistoryMemoryProvider/Program.cs b/dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step01_ChatHistoryMemory/Program.cs similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step21_ChatHistoryMemoryProvider/Program.cs rename to dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step01_ChatHistoryMemory/Program.cs diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step19_Mem0Provider/Agent_Step19_Mem0Provider.csproj b/dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step02_MemoryUsingMem0/AgentWithMemory_Step02_MemoryUsingMem0.csproj similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step19_Mem0Provider/Agent_Step19_Mem0Provider.csproj rename to 
dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step02_MemoryUsingMem0/AgentWithMemory_Step02_MemoryUsingMem0.csproj diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step19_Mem0Provider/Program.cs b/dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step02_MemoryUsingMem0/Program.cs similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step19_Mem0Provider/Program.cs rename to dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step02_MemoryUsingMem0/Program.cs diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step03.1_UsingFunctionTools/Agent_Step03.1_UsingFunctionTools.csproj b/dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step03_CustomMemory/AgentWithMemory_Step03_CustomMemory.csproj similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step03.1_UsingFunctionTools/Agent_Step03.1_UsingFunctionTools.csproj rename to dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step03_CustomMemory/AgentWithMemory_Step03_CustomMemory.csproj diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Program.cs b/dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step03_CustomMemory/Program.cs similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Program.cs rename to dotnet/samples/GettingStarted/AgentWithMemory/AgentWithMemory_Step03_CustomMemory/Program.cs diff --git a/dotnet/samples/GettingStarted/AgentWithMemory/README.md b/dotnet/samples/GettingStarted/AgentWithMemory/README.md new file mode 100644 index 0000000000..903fcf1b78 --- /dev/null +++ b/dotnet/samples/GettingStarted/AgentWithMemory/README.md @@ -0,0 +1,9 @@ +# Agent Framework Retrieval Augmented Generation (RAG) + +These samples show how to create an agent with the Agent Framework that uses Memory to remember previous conversations or facts from previous conversations. 
+ +|Sample|Description| +|---|---| +|[Chat History memory](./AgentWithMemory_Step01_ChatHistoryMemory/)|This sample demonstrates how to enable an agent to remember messages from previous conversations.| +|[Memory with MemoryStore](./AgentWithMemory_Step02_MemoryUsingMem0/)|This sample demonstrates how to create and run an agent that uses the Mem0 service to extract and retrieve individual memories.| +|[Custom Memory Implementation](./AgentWithMemory_Step03_CustomMemory/)|This sample demonstrates how to create a custom memory component and attach it to an agent.| diff --git a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_ExternalDataSourceRAG/AgentWithRAG_Step02_ExternalDataSourceRAG.csproj b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_CustomVectorStoreRAG/AgentWithRAG_Step02_CustomVectorStoreRAG.csproj similarity index 100% rename from dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_ExternalDataSourceRAG/AgentWithRAG_Step02_ExternalDataSourceRAG.csproj rename to dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_CustomVectorStoreRAG/AgentWithRAG_Step02_CustomVectorStoreRAG.csproj diff --git a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_ExternalDataSourceRAG/Program.cs b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_CustomVectorStoreRAG/Program.cs similarity index 97% rename from dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_ExternalDataSourceRAG/Program.cs rename to dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_CustomVectorStoreRAG/Program.cs index 4e8fbf0bde..89ced52b69 100644 --- a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_ExternalDataSourceRAG/Program.cs +++ b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_CustomVectorStoreRAG/Program.cs @@ -1,6 +1,6 @@ // Copyright (c) Microsoft. All rights reserved. 
-// This sample shows how to use Qdrant to add retrieval augmented generation (RAG) capabilities to an AI agent. +// This sample shows how to use Qdrant with a custom schema to add retrieval augmented generation (RAG) capabilities to an AI agent. // While the sample is using Qdrant, it can easily be replaced with any other vector store that implements the Microsoft.Extensions.VectorData abstractions. // The TextSearchProvider runs a search against the vector store before each model invocation and injects the results into the model context. diff --git a/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_ExternalDataSourceRAG/README.md b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_CustomVectorStoreRAG/README.md similarity index 100% rename from dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_ExternalDataSourceRAG/README.md rename to dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step02_CustomVectorStoreRAG/README.md diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Agent_Step13_Memory.csproj b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step03_CustomRAGDataSource/AgentWithRAG_Step03_CustomRAGDataSource.csproj similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step13_Memory/Agent_Step13_Memory.csproj rename to dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step03_CustomRAGDataSource/AgentWithRAG_Step03_CustomRAGDataSource.csproj diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step18_TextSearchRag/Program.cs b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step03_CustomRAGDataSource/Program.cs similarity index 94% rename from dotnet/samples/GettingStarted/Agents/Agent_Step18_TextSearchRag/Program.cs rename to dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step03_CustomRAGDataSource/Program.cs index c56be11fc5..38bc2e09f3 100644 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step18_TextSearchRag/Program.cs +++ 
b/dotnet/samples/GettingStarted/AgentWithRAG/AgentWithRAG_Step03_CustomRAGDataSource/Program.cs @@ -1,11 +1,11 @@ // Copyright (c) Microsoft. All rights reserved. // This sample shows how to use TextSearchProvider to add retrieval augmented generation (RAG) -// capabilities to an AI agent. The provider runs a search against an external knowledge base +// capabilities to an AI agent. This shows a mock implementation of a search function, +// which can be replaced with any custom search logic to query any external knowledge base. +// The provider invokes the custom search function // before each model invocation and injects the results into the model context. -// Also see the AgentWithRAG folder for more advanced RAG scenarios. - using Azure.AI.OpenAI; using Azure.Identity; using Microsoft.Agents.AI; diff --git a/dotnet/samples/GettingStarted/AgentWithRAG/README.md b/dotnet/samples/GettingStarted/AgentWithRAG/README.md index f45c2c2540..bf2a8f9b11 100644 --- a/dotnet/samples/GettingStarted/AgentWithRAG/README.md +++ b/dotnet/samples/GettingStarted/AgentWithRAG/README.md @@ -5,4 +5,5 @@ These samples show how to create an agent with the Agent Framework that uses Ret |Sample|Description| |---|---| |[Basic Text RAG](./AgentWithRAG_Step01_BasicTextRAG/)|This sample demonstrates how to create and run a basic agent with simple text Retrieval Augmented Generation (RAG).| -|[RAG with external Vector Store and custom schema](./AgentWithRAG_Step02_ExternalDataSourceRAG/)|This sample demonstrates how to create and run an agent that uses Retrieval Augmented Generation (RAG) with an external vector store. It also uses a custom schema for the documents stored in the vector store.| +|[RAG with Vector Store and custom schema](./AgentWithRAG_Step02_CustomVectorStoreRAG/)|This sample demonstrates how to create and run an agent that uses Retrieval Augmented Generation (RAG) with a vector store. 
It also uses a custom schema for the documents stored in the vector store.| +|[RAG with custom RAG data source](./AgentWithRAG_Step03_CustomRAGDataSource/)|This sample demonstrates how to create and run an agent that uses Retrieval Augmented Generation (RAG) with a custom RAG data source.| diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/Agent_Step03.2_UsingFunctionTools_FromOpenAPI.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/Agent_Step03.2_UsingFunctionTools_FromOpenAPI.csproj deleted file mode 100644 index e2edbb2f8d..0000000000 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/Agent_Step03.2_UsingFunctionTools_FromOpenAPI.csproj +++ /dev/null @@ -1,28 +0,0 @@ - - - - Exe - net9.0 - - enable - enable - - - - - - - - - - - - - - - - PreserveNewest - - - - diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/OpenAPISpec.json b/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/OpenAPISpec.json deleted file mode 100644 index 84715914da..0000000000 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/OpenAPISpec.json +++ /dev/null @@ -1,354 +0,0 @@ -{ - "openapi": "3.0.1", - "info": { - "title": "Github Versions API", - "version": "1.0.0" - }, - "servers": [ - { - "url": "https://api.github.com" - } - ], - "components": { - "schemas": { - "basic-error": { - "title": "Basic Error", - "description": "Basic Error", - "type": "object", - "properties": { - "message": { - "type": "string" - }, - "documentation_url": { - "type": "string" - }, - "url": { - "type": "string" - }, - "status": { - "type": "string" - } - } - }, - "label": { - "title": "Label", - "description": "Color-coded labels help you categorize and filter your issues (just like labels in Gmail).", - "type": "object", - "properties": { - "id": { - "description": 
"Unique identifier for the label.", - "type": "integer", - "format": "int64", - "example": 208045946 - }, - "node_id": { - "type": "string", - "example": "MDU6TGFiZWwyMDgwNDU5NDY=" - }, - "url": { - "description": "URL for the label", - "example": "https://api.github.com/repositories/42/labels/bug", - "type": "string", - "format": "uri" - }, - "name": { - "description": "The name of the label.", - "example": "bug", - "type": "string" - }, - "description": { - "description": "Optional description of the label, such as its purpose.", - "type": "string", - "example": "Something isn't working", - "nullable": true - }, - "color": { - "description": "6-character hex code, without the leading #, identifying the color", - "example": "FFFFFF", - "type": "string" - }, - "default": { - "description": "Whether this label comes by default in a new repository.", - "type": "boolean", - "example": true - } - }, - "required": [ - "id", - "node_id", - "url", - "name", - "description", - "color", - "default" - ] - }, - "tag": { - "title": "Tag", - "description": "Tag", - "type": "object", - "properties": { - "name": { - "type": "string", - "example": "v0.1" - }, - "commit": { - "type": "object", - "properties": { - "sha": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "required": [ - "sha", - "url" - ] - }, - "zipball_url": { - "type": "string", - "format": "uri", - "example": "https://github.com/octocat/Hello-World/zipball/v0.1" - }, - "tarball_url": { - "type": "string", - "format": "uri", - "example": "https://github.com/octocat/Hello-World/tarball/v0.1" - }, - "node_id": { - "type": "string" - } - }, - "required": [ - "name", - "node_id", - "commit", - "zipball_url", - "tarball_url" - ] - } - }, - "examples": { - "label-items": { - "value": [ - { - "id": 208045946, - "node_id": "MDU6TGFiZWwyMDgwNDU5NDY=", - "url": "https://api.github.com/repos/octocat/Hello-World/labels/bug", - "name": "bug", - "description": "Something isn't working", - 
"color": "f29513", - "default": true - }, - { - "id": 208045947, - "node_id": "MDU6TGFiZWwyMDgwNDU5NDc=", - "url": "https://api.github.com/repos/octocat/Hello-World/labels/enhancement", - "name": "enhancement", - "description": "New feature or request", - "color": "a2eeef", - "default": false - } - ] - }, - "tag-items": { - "value": [ - { - "name": "v0.1", - "commit": { - "sha": "c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc", - "url": "https://api.github.com/repos/octocat/Hello-World/commits/c5b97d5ae6c19d5c5df71a34c7fbeeda2479ccbc" - }, - "zipball_url": "https://github.com/octocat/Hello-World/zipball/v0.1", - "tarball_url": "https://github.com/octocat/Hello-World/tarball/v0.1", - "node_id": "MDQ6VXNlcjE=" - } - ] - } - }, - "parameters": { - "owner": { - "name": "owner", - "description": "The account owner of the repository. The name is not case sensitive.", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - }, - "repo": { - "name": "repo", - "description": "The name of the repository without the `.git` extension. The name is not case sensitive.", - "in": "path", - "required": true, - "schema": { - "type": "string" - } - }, - "per-page": { - "name": "per_page", - "description": "The number of results per page (max 100). For more information, see \"[Using pagination in the REST API](https://docs.github.com/rest/using-the-rest-api/using-pagination-in-the-rest-api).\"", - "in": "query", - "schema": { - "type": "integer", - "default": 30 - } - }, - "page": { - "name": "page", - "description": "The page number of the results to fetch. 
For more information, see \"[Using pagination in the REST API](https://docs.github.com/rest/using-the-rest-api/using-pagination-in-the-rest-api).\"", - "in": "query", - "schema": { - "type": "integer", - "default": 1 - } - } - }, - "responses": { - "not_found": { - "description": "Resource not found", - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/basic-error" - } - } - } - } - }, - "headers": { - "link": { - "example": "; rel=\"next\", ; rel=\"last\"", - "schema": { - "type": "string" - } - } - } - }, - "paths": { - "/repos/{owner}/{repo}/tags": { - "get": { - "summary": "List repository tags", - "description": "", - "tags": [ - "repos" - ], - "operationId": "repos/list-tags", - "externalDocs": { - "description": "API method documentation", - "url": "https://docs.github.com/rest/repos/repos#list-repository-tags" - }, - "parameters": [ - { - "$ref": "#/components/parameters/owner" - }, - { - "$ref": "#/components/parameters/repo" - }, - { - "$ref": "#/components/parameters/per-page" - }, - { - "$ref": "#/components/parameters/page" - } - ], - "responses": { - "200": { - "description": "Response", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/tag" - } - }, - "examples": { - "default": { - "$ref": "#/components/examples/tag-items" - } - } - } - }, - "headers": { - "Link": { - "$ref": "#/components/headers/link" - } - } - } - }, - "x-github": { - "githubCloudOnly": false, - "enabledForGitHubApps": true, - "category": "repos", - "subcategory": "repos" - } - } - }, - "/repos/{owner}/{repo}/labels": { - "get": { - "summary": "List labels for a repository", - "description": "Lists all labels for a repository.", - "tags": [ - "issues" - ], - "operationId": "issues/list-labels-for-repo", - "externalDocs": { - "description": "API method documentation", - "url": "https://docs.github.com/rest/issues/labels#list-labels-for-a-repository" - }, - "parameters": [ - { - 
"$ref": "#/components/parameters/owner" - }, - { - "$ref": "#/components/parameters/repo" - }, - { - "$ref": "#/components/parameters/per-page" - }, - { - "$ref": "#/components/parameters/page" - } - ], - "responses": { - "200": { - "description": "Response", - "content": { - "application/json": { - "schema": { - "type": "array", - "items": { - "$ref": "#/components/schemas/label" - } - }, - "examples": { - "default": { - "$ref": "#/components/examples/label-items" - } - } - } - }, - "headers": { - "Link": { - "$ref": "#/components/headers/link" - } - } - }, - "404": { - "$ref": "#/components/responses/not_found" - } - }, - "x-github": { - "githubCloudOnly": false, - "enabledForGitHubApps": true, - "category": "issues", - "subcategory": "labels" - } - } - } - } -} \ No newline at end of file diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/Program.cs deleted file mode 100644 index e61c9f845a..0000000000 --- a/dotnet/samples/GettingStarted/Agents/Agent_Step03.2_UsingFunctionTools_FromOpenAPI/Program.cs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -// This sample demonstrates how to use a ChatClientAgent with function tools provided via an OpenAPI spec. -// It uses functionality from Semantic Kernel to parse the OpenAPI spec and create function tools to use with the Agent Framework Agent. - -using Azure.AI.OpenAI; -using Azure.Identity; -using Microsoft.Agents.AI; -using Microsoft.Extensions.AI; -using Microsoft.SemanticKernel; -using Microsoft.SemanticKernel.Plugins.OpenApi; -using OpenAI; - -var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new InvalidOperationException("AZURE_OPENAI_ENDPOINT is not set."); -var deploymentName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOYMENT_NAME") ?? "gpt-4o-mini"; - -// Load the OpenAPI Spec from a file. 
-KernelPlugin plugin = await OpenApiKernelPluginFactory.CreateFromOpenApiAsync("github", "OpenAPISpec.json"); - -// Convert the Semantic Kernel plugin to Agent Framework function tools. -// This requires a dummy Kernel instance, since KernelFunctions cannot execute without one. -Kernel kernel = new(); -List tools = plugin.Select(x => x.WithKernel(kernel)).Cast().ToList(); - -// Create the chat client and agent, and provide the OpenAPI function tools to the agent. -AIAgent agent = new AzureOpenAIClient( - new Uri(endpoint), - new AzureCliCredential()) - .GetChatClient(deploymentName) - .CreateAIAgent(instructions: "You are a helpful assistant", tools: tools); - -// Run the agent with the OpenAPI function tools. -Console.WriteLine(await agent.RunAsync("Please list the names, colors and descriptions of all the labels available in the microsoft/agent-framework repository on github.")); diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step18_TextSearchRag/Agent_Step18_TextSearchRag.csproj b/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Agent_Step03_UsingFunctionTools.csproj similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step18_TextSearchRag/Agent_Step18_TextSearchRag.csproj rename to dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Agent_Step03_UsingFunctionTools.csproj diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step03.1_UsingFunctionTools/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Program.cs similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step03.1_UsingFunctionTools/Program.cs rename to dotnet/samples/GettingStarted/Agents/Agent_Step03_UsingFunctionTools/Program.cs diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step20_BackgroundResponsesWithToolsAndPersistence/Agent_Step20_BackgroundResponsesWithToolsAndPersistence.csproj 
b/dotnet/samples/GettingStarted/Agents/Agent_Step13_BackgroundResponsesWithToolsAndPersistence/Agent_Step13_BackgroundResponsesWithToolsAndPersistence.csproj similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step20_BackgroundResponsesWithToolsAndPersistence/Agent_Step20_BackgroundResponsesWithToolsAndPersistence.csproj rename to dotnet/samples/GettingStarted/Agents/Agent_Step13_BackgroundResponsesWithToolsAndPersistence/Agent_Step13_BackgroundResponsesWithToolsAndPersistence.csproj diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step20_BackgroundResponsesWithToolsAndPersistence/Program.cs b/dotnet/samples/GettingStarted/Agents/Agent_Step13_BackgroundResponsesWithToolsAndPersistence/Program.cs similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step20_BackgroundResponsesWithToolsAndPersistence/Program.cs rename to dotnet/samples/GettingStarted/Agents/Agent_Step13_BackgroundResponsesWithToolsAndPersistence/Program.cs diff --git a/dotnet/samples/GettingStarted/Agents/Agent_Step20_BackgroundResponsesWithToolsAndPersistence/README.md b/dotnet/samples/GettingStarted/Agents/Agent_Step13_BackgroundResponsesWithToolsAndPersistence/README.md similarity index 100% rename from dotnet/samples/GettingStarted/Agents/Agent_Step20_BackgroundResponsesWithToolsAndPersistence/README.md rename to dotnet/samples/GettingStarted/Agents/Agent_Step13_BackgroundResponsesWithToolsAndPersistence/README.md diff --git a/dotnet/samples/GettingStarted/Agents/README.md b/dotnet/samples/GettingStarted/Agents/README.md index 562b6b2500..b93e9ceb72 100644 --- a/dotnet/samples/GettingStarted/Agents/README.md +++ b/dotnet/samples/GettingStarted/Agents/README.md @@ -28,8 +28,8 @@ Before you begin, ensure you have the following prerequisites: |---|---| |[Running a simple agent](./Agent_Step01_Running/)|This sample demonstrates how to create and run a basic agent with instructions| |[Multi-turn conversation with a simple 
agent](./Agent_Step02_MultiturnConversation/)|This sample demonstrates how to implement a multi-turn conversation with a simple agent| -|[Using function tools with a simple agent](./Agent_Step03.1_UsingFunctionTools/)|This sample demonstrates how to use function tools with a simple agent| -|[Using OpenAPI function tools with a simple agent](./Agent_Step03.2_UsingFunctionTools_FromOpenAPI/)|This sample demonstrates how to create function tools from an OpenAPI spec and use them with a simple agent| +|[Using function tools with a simple agent](./Agent_Step03_UsingFunctionTools/)|This sample demonstrates how to use function tools with a simple agent| +|[Using OpenAPI function tools with a simple agent](https://github.com/microsoft/semantic-kernel/tree/main/dotnet/samples/AgentFrameworkMigration/AzureOpenAI/Step04_ToolCall_WithOpenAPI)|This sample demonstrates how to create function tools from an OpenAPI spec and use them with a simple agent (note that this sample is in the Semantic Kernel repository)| |[Using function tools with approvals](./Agent_Step04_UsingFunctionToolsWithApprovals/)|This sample demonstrates how to use function tools where approvals require human in the loop approvals before execution| |[Structured output with a simple agent](./Agent_Step05_StructuredOutput/)|This sample demonstrates how to use structured output with a simple agent| |[Persisted conversations with a simple agent](./Agent_Step06_PersistedConversations/)|This sample demonstrates how to persist conversations and reload them later. 
This is useful for cases where an agent is hosted in a stateless service| @@ -39,14 +39,11 @@ Before you begin, ensure you have the following prerequisites: |[Exposing a simple agent as MCP tool](./Agent_Step10_AsMcpTool/)|This sample demonstrates how to expose an agent as an MCP tool| |[Using images with a simple agent](./Agent_Step11_UsingImages/)|This sample demonstrates how to use image multi-modality with an AI agent| |[Exposing a simple agent as a function tool](./Agent_Step12_AsFunctionTool/)|This sample demonstrates how to expose an agent as a function tool| -|[Using memory with an agent](./Agent_Step13_Memory/)|This sample demonstrates how to create a simple memory component and use it with an agent| +|[Background responses with tools and persistence](./Agent_Step13_BackgroundResponsesWithToolsAndPersistence/)|This sample demonstrates advanced background response scenarios including function calling during background operations and state persistence| |[Using middleware with an agent](./Agent_Step14_Middleware/)|This sample demonstrates how to use middleware with an agent| |[Using plugins with an agent](./Agent_Step15_Plugins/)|This sample demonstrates how to use plugins with an agent| |[Reducing chat history size](./Agent_Step16_ChatReduction/)|This sample demonstrates how to reduce the chat history to constrain its size, where chat history is maintained locally| |[Background responses](./Agent_Step17_BackgroundResponses/)|This sample demonstrates how to use background responses for long-running operations with polling and resumption support| -|[Adding RAG with text search](./Agent_Step18_TextSearchRag/)|This sample demonstrates how to enrich agent responses with retrieval augmented generation using the text search provider| -|[Using Mem0-backed memory](./Agent_Step19_Mem0Provider/)|This sample demonstrates how to use the Mem0Provider to persist and recall memories across conversations| -|[Background responses with tools and 
persistence](./Agent_Step20_BackgroundResponsesWithToolsAndPersistence/)|This sample demonstrates advanced background response scenarios including function calling during background operations and state persistence| ## Running the samples from the console diff --git a/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/Program.cs b/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/Program.cs index 68a3043f3f..0415f0e0e0 100644 --- a/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/Program.cs +++ b/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/Program.cs @@ -64,13 +64,14 @@ private static void Main(string[] args) return AgentWorkflowBuilder.BuildSequential(workflowName: key, agents: agents); }).AddAsAIAgent(); - if (builder.Environment.IsDevelopment()) - { - builder.AddDevUI(); - } + builder.Services.AddOpenAIResponses(); + builder.Services.AddOpenAIConversations(); var app = builder.Build(); + app.MapOpenAIResponses(); + app.MapOpenAIConversations(); + if (builder.Environment.IsDevelopment()) { app.MapDevUI(); diff --git a/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/Properties/launchSettings.json b/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/Properties/launchSettings.json new file mode 100644 index 0000000000..fd55d5d1f0 --- /dev/null +++ b/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/Properties/launchSettings.json @@ -0,0 +1,13 @@ +{ + "profiles": { + "DevUI_Step01_BasicUsage": { + "commandName": "Project", + "launchUrl": "devui", + "launchBrowser": true, + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + }, + "applicationUrl": "https://localhost:50516;http://localhost:50518" + } + } +} \ No newline at end of file diff --git a/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/README.md b/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/README.md index 2b6cc28644..0bf24dfb26 100644 --- 
a/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/README.md +++ b/dotnet/samples/GettingStarted/DevUI/DevUI_Step01_BasicUsage/README.md @@ -63,17 +63,23 @@ To add DevUI to your ASP.NET Core application: .AddAsAIAgent(); ``` -3. Add DevUI services and map the endpoint: +3. Add OpenAI services and map the endpoints for OpenAI and DevUI: ```csharp - builder.AddDevUI(); + // Register services for OpenAI responses and conversations (also required for DevUI) + builder.Services.AddOpenAIResponses(); + builder.Services.AddOpenAIConversations(); + var app = builder.Build(); - - app.MapDevUI(); - - // Add required endpoints - app.MapEntities(); + + // Map endpoints for OpenAI responses and conversations (also required for DevUI) app.MapOpenAIResponses(); app.MapOpenAIConversations(); + + if (builder.Environment.IsDevelopment()) + { + // Map DevUI endpoint to /devui + app.MapDevUI(); + } app.Run(); ``` diff --git a/dotnet/samples/GettingStarted/DevUI/README.md b/dotnet/samples/GettingStarted/DevUI/README.md index 155d3f2b9d..45b2f6f63b 100644 --- a/dotnet/samples/GettingStarted/DevUI/README.md +++ b/dotnet/samples/GettingStarted/DevUI/README.md @@ -38,19 +38,22 @@ builder.Services.AddChatClient(chatClient); // Register your agents builder.AddAIAgent("my-agent", "You are a helpful assistant."); -// Add DevUI services -builder.AddDevUI(); +// Register services for OpenAI responses and conversations (also required for DevUI) +builder.Services.AddOpenAIResponses(); +builder.Services.AddOpenAIConversations(); var app = builder.Build(); -// Map the DevUI endpoint -app.MapDevUI(); - -// Add required endpoints -app.MapEntities(); +// Map endpoints for OpenAI responses and conversations (also required for DevUI) app.MapOpenAIResponses(); app.MapOpenAIConversations(); +if (builder.Environment.IsDevelopment()) +{ + // Map DevUI endpoint to /devui + app.MapDevUI(); +} + app.Run(); ``` diff --git a/dotnet/samples/GettingStarted/README.md 
b/dotnet/samples/GettingStarted/README.md index e7249ac33d..4fdf0a3d7f 100644 --- a/dotnet/samples/GettingStarted/README.md +++ b/dotnet/samples/GettingStarted/README.md @@ -9,6 +9,8 @@ of the agent framework. |---|---| |[Agents](./Agents/README.md)|Step by step instructions for getting started with agents| |[Agent Providers](./AgentProviders/README.md)|Getting started with creating agents using various providers| +|[Agents With Retrieval Augmented Generation (RAG)](./AgentWithRAG/README.md)|Adding Retrieval Augmented Generation (RAG) capabilities to your agents.| +|[Agents With Memory](./AgentWithMemory/README.md)|Adding Memory capabilities to your agents.| |[A2A](./A2A/README.md)|Getting started with A2A (Agent-to-Agent) specific features| |[Agent Open Telemetry](./AgentOpenTelemetry/README.md)|Getting started with OpenTelemetry for agents| |[Agent With OpenAI exchange types](./AgentWithOpenAI/README.md)|Using OpenAI exchange types with agents| diff --git a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowFactory.cs b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowFactory.cs index 653ebdf4c2..e418ca7131 100644 --- a/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowFactory.cs +++ b/dotnet/samples/GettingStarted/Workflows/Agents/WorkflowAsAnAgent/WorkflowFactory.cs @@ -16,7 +16,7 @@ internal static class WorkflowFactory internal static Workflow BuildWorkflow(IChatClient chatClient) { // Create executors - var startExecutor = new ConcurrentStartExecutor(); + var startExecutor = new ChatForwardingExecutor("Start"); var aggregationExecutor = new ConcurrentAggregationExecutor(); AIAgent frenchAgent = GetLanguageAgent("French", chatClient); AIAgent englishAgent = GetLanguageAgent("English", chatClient); @@ -38,33 +38,11 @@ internal static Workflow BuildWorkflow(IChatClient chatClient) private static ChatClientAgent GetLanguageAgent(string targetLanguage, IChatClient chatClient) => new(chatClient, 
instructions: $"You're a helpful assistant who always responds in {targetLanguage}.", name: $"{targetLanguage}Agent"); - /// - /// Executor that starts the concurrent processing by sending messages to the agents. - /// - private sealed class ConcurrentStartExecutor() : Executor("ConcurrentStartExecutor") - { - protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) - { - return routeBuilder - .AddHandler>(this.RouteMessages) - .AddHandler(this.RouteTurnTokenAsync); - } - - private ValueTask RouteMessages(List messages, IWorkflowContext context, CancellationToken cancellationToken) - { - return context.SendMessageAsync(messages, cancellationToken: cancellationToken); - } - - private ValueTask RouteTurnTokenAsync(TurnToken token, IWorkflowContext context, CancellationToken cancellationToken) - { - return context.SendMessageAsync(token, cancellationToken: cancellationToken); - } - } - /// /// Executor that aggregates the results from the concurrent agents. /// - private sealed class ConcurrentAggregationExecutor() : Executor>("ConcurrentAggregationExecutor") + private sealed class ConcurrentAggregationExecutor() : + Executor>("ConcurrentAggregationExecutor"), IResettableExecutor { private readonly List _messages = []; @@ -85,5 +63,12 @@ public override async ValueTask HandleAsync(List message, IWorkflow await context.YieldOutputAsync(formattedMessages, cancellationToken); } } + + /// + public ValueTask ResetAsync() + { + this._messages.Clear(); + return default; + } } } diff --git a/dotnet/samples/GettingStarted/Workflows/Declarative/GenerateCode/Program.cs b/dotnet/samples/GettingStarted/Workflows/Declarative/GenerateCode/Program.cs index 859b74b194..54c77d4077 100644 --- a/dotnet/samples/GettingStarted/Workflows/Declarative/GenerateCode/Program.cs +++ b/dotnet/samples/GettingStarted/Workflows/Declarative/GenerateCode/Program.cs @@ -42,7 +42,7 @@ private void Execute() Console.WriteLine(code); } - private const string DefaultWorkflow = 
"HelloWorld.yaml"; + private const string DefaultWorkflow = "Marketing.yaml"; private string WorkflowFile { get; } diff --git a/dotnet/samples/GettingStarted/Workflows/Declarative/README.md b/dotnet/samples/GettingStarted/Workflows/Declarative/README.md index 03023ea847..d2bbaa14a6 100644 --- a/dotnet/samples/GettingStarted/Workflows/Declarative/README.md +++ b/dotnet/samples/GettingStarted/Workflows/Declarative/README.md @@ -92,11 +92,11 @@ The repository has example workflows available in the root [`/workflow-samples`] 2. Run the demo referencing a sample workflow by name: ```sh - dotnet run HelloWorld + dotnet run Marketing ``` 3. Run the demo with a path to any workflow file: ```sh - dotnet run c:/myworkflows/HelloWorld.yaml + dotnet run c:/myworkflows/Marketing.yaml ``` diff --git a/dotnet/samples/Catalog/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj similarity index 100% rename from dotnet/samples/Catalog/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj rename to dotnet/samples/HostedAgents/AgentWithTextSearchRag/AgentWithTextSearchRag.csproj diff --git a/dotnet/samples/Catalog/AgentWithTextSearchRag/Program.cs b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/Program.cs similarity index 100% rename from dotnet/samples/Catalog/AgentWithTextSearchRag/Program.cs rename to dotnet/samples/HostedAgents/AgentWithTextSearchRag/Program.cs diff --git a/dotnet/samples/Catalog/AgentWithTextSearchRag/README.md b/dotnet/samples/HostedAgents/AgentWithTextSearchRag/README.md similarity index 100% rename from dotnet/samples/Catalog/AgentWithTextSearchRag/README.md rename to dotnet/samples/HostedAgents/AgentWithTextSearchRag/README.md diff --git a/dotnet/samples/Catalog/AgentsInWorkflows/AgentsInWorkflows.csproj b/dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj similarity index 100% rename from 
dotnet/samples/Catalog/AgentsInWorkflows/AgentsInWorkflows.csproj rename to dotnet/samples/HostedAgents/AgentsInWorkflows/AgentsInWorkflows.csproj diff --git a/dotnet/samples/Catalog/AgentsInWorkflows/Program.cs b/dotnet/samples/HostedAgents/AgentsInWorkflows/Program.cs similarity index 100% rename from dotnet/samples/Catalog/AgentsInWorkflows/Program.cs rename to dotnet/samples/HostedAgents/AgentsInWorkflows/Program.cs diff --git a/dotnet/samples/Catalog/AgentsInWorkflows/README.md b/dotnet/samples/HostedAgents/AgentsInWorkflows/README.md similarity index 100% rename from dotnet/samples/Catalog/AgentsInWorkflows/README.md rename to dotnet/samples/HostedAgents/AgentsInWorkflows/README.md diff --git a/dotnet/samples/Catalog/DeepResearchAgent/DeepResearchAgent.csproj b/dotnet/samples/HostedAgents/DeepResearchAgent/DeepResearchAgent.csproj similarity index 100% rename from dotnet/samples/Catalog/DeepResearchAgent/DeepResearchAgent.csproj rename to dotnet/samples/HostedAgents/DeepResearchAgent/DeepResearchAgent.csproj diff --git a/dotnet/samples/Catalog/DeepResearchAgent/Program.cs b/dotnet/samples/HostedAgents/DeepResearchAgent/Program.cs similarity index 100% rename from dotnet/samples/Catalog/DeepResearchAgent/Program.cs rename to dotnet/samples/HostedAgents/DeepResearchAgent/Program.cs diff --git a/dotnet/samples/Catalog/DeepResearchAgent/README.md b/dotnet/samples/HostedAgents/DeepResearchAgent/README.md similarity index 100% rename from dotnet/samples/Catalog/DeepResearchAgent/README.md rename to dotnet/samples/HostedAgents/DeepResearchAgent/README.md diff --git a/dotnet/src/Microsoft.Agents.AI.AGUI/AGUIChatClient.cs b/dotnet/src/Microsoft.Agents.AI.AGUI/AGUIChatClient.cs index 11894eb488..a168e2eab6 100644 --- a/dotnet/src/Microsoft.Agents.AI.AGUI/AGUIChatClient.cs +++ b/dotnet/src/Microsoft.Agents.AI.AGUI/AGUIChatClient.cs @@ -4,6 +4,7 @@ using System.Collections.Generic; using System.Linq; using System.Net.Http; +using System.Net.Http.Headers; using 
System.Runtime.CompilerServices; using System.Text.Json; using System.Threading; @@ -152,6 +153,8 @@ private static ChatResponseUpdate CopyResponseUpdate(ChatResponseUpdate source) private sealed class AGUIChatClientHandler : IChatClient { + private static readonly MediaTypeHeaderValue s_json = new("application/json"); + private readonly AGUIHttpService _httpService; private readonly JsonSerializerOptions _jsonSerializerOptions; private readonly ILogger _logger; @@ -199,6 +202,9 @@ public async IAsyncEnumerable GetStreamingResponseAsync( var threadId = ExtractTemporaryThreadId(messagesList) ?? ExtractThreadIdFromOptions(options) ?? $"thread_{Guid.NewGuid():N}"; + // Extract state from the last message if it contains DataContent with application/json + JsonElement state = this.ExtractAndRemoveStateFromMessages(messagesList); + // Create the input for the AGUI service var input = new RunAgentInput { @@ -207,6 +213,7 @@ public async IAsyncEnumerable GetStreamingResponseAsync( ThreadId = threadId, RunId = runId, Messages = messagesList.AsAGUIMessages(this._jsonSerializerOptions), + State = state, }; // Add tools if provided @@ -300,6 +307,51 @@ public async IAsyncEnumerable GetStreamingResponseAsync( return threadId; } + // Extract state from the last message's DataContent with application/json media type + // and remove that message from the list + private JsonElement ExtractAndRemoveStateFromMessages(List messagesList) + { + if (messagesList.Count == 0) + { + return default; + } + + // Check the last message for state DataContent + ChatMessage lastMessage = messagesList[messagesList.Count - 1]; + for (int i = 0; i < lastMessage.Contents.Count; i++) + { + if (lastMessage.Contents[i] is DataContent dataContent && + MediaTypeHeaderValue.TryParse(dataContent.MediaType, out var mediaType) && + mediaType.Equals(s_json)) + { + // Deserialize the state JSON directly from UTF-8 bytes + try + { + JsonElement stateElement = (JsonElement)JsonSerializer.Deserialize( + 
dataContent.Data.Span, + this._jsonSerializerOptions.GetTypeInfo(typeof(JsonElement)))!; + + // Remove the DataContent from the message contents + lastMessage.Contents.RemoveAt(i); + + // If no contents remain, remove the entire message + if (lastMessage.Contents.Count == 0) + { + messagesList.RemoveAt(messagesList.Count - 1); + } + + return stateElement; + } + catch (JsonException ex) + { + throw new InvalidOperationException($"Failed to deserialize state JSON from DataContent: {ex.Message}", ex); + } + } + } + + return default; + } + public void Dispose() { // No resources to dispose @@ -316,7 +368,7 @@ public void Dispose() } } - private class ServerFunctionCallContent(FunctionCallContent functionCall) : AIContent + private sealed class ServerFunctionCallContent(FunctionCallContent functionCall) : AIContent { public FunctionCallContent FunctionCallContent { get; } = functionCall; } diff --git a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIEventTypes.cs b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIEventTypes.cs index 731d8a8f42..1b8958cdf0 100644 --- a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIEventTypes.cs +++ b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIEventTypes.cs @@ -27,4 +27,8 @@ internal static class AGUIEventTypes public const string ToolCallEnd = "TOOL_CALL_END"; public const string ToolCallResult = "TOOL_CALL_RESULT"; + + public const string StateSnapshot = "STATE_SNAPSHOT"; + + public const string StateDelta = "STATE_DELTA"; } diff --git a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIJsonSerializerContext.cs b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIJsonSerializerContext.cs index 7c4338f0c9..0b571c4ff1 100644 --- a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIJsonSerializerContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/AGUIJsonSerializerContext.cs @@ -44,6 +44,8 @@ namespace Microsoft.Agents.AI.AGUI; [JsonSerializable(typeof(ToolCallArgsEvent))] [JsonSerializable(typeof(ToolCallEndEvent))] 
[JsonSerializable(typeof(ToolCallResultEvent))] +[JsonSerializable(typeof(StateSnapshotEvent))] +[JsonSerializable(typeof(StateDeltaEvent))] [JsonSerializable(typeof(IDictionary))] [JsonSerializable(typeof(Dictionary))] [JsonSerializable(typeof(IDictionary))] @@ -57,6 +59,6 @@ namespace Microsoft.Agents.AI.AGUI; [JsonSerializable(typeof(float))] [JsonSerializable(typeof(bool))] [JsonSerializable(typeof(decimal))] -internal partial class AGUIJsonSerializerContext : JsonSerializerContext +internal sealed partial class AGUIJsonSerializerContext : JsonSerializerContext { } diff --git a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/BaseEventJsonConverter.cs b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/BaseEventJsonConverter.cs index af2414d7f0..eca2131f23 100644 --- a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/BaseEventJsonConverter.cs +++ b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/BaseEventJsonConverter.cs @@ -46,6 +46,7 @@ public override BaseEvent Read( AGUIEventTypes.ToolCallArgs => jsonElement.Deserialize(options.GetTypeInfo(typeof(ToolCallArgsEvent))) as ToolCallArgsEvent, AGUIEventTypes.ToolCallEnd => jsonElement.Deserialize(options.GetTypeInfo(typeof(ToolCallEndEvent))) as ToolCallEndEvent, AGUIEventTypes.ToolCallResult => jsonElement.Deserialize(options.GetTypeInfo(typeof(ToolCallResultEvent))) as ToolCallResultEvent, + AGUIEventTypes.StateSnapshot => jsonElement.Deserialize(options.GetTypeInfo(typeof(StateSnapshotEvent))) as StateSnapshotEvent, _ => throw new JsonException($"Unknown BaseEvent type discriminator: '{discriminator}'") }; @@ -95,8 +96,14 @@ public override void Write( case ToolCallResultEvent toolCallResult: JsonSerializer.Serialize(writer, toolCallResult, options.GetTypeInfo(typeof(ToolCallResultEvent))); break; + case StateSnapshotEvent stateSnapshot: + JsonSerializer.Serialize(writer, stateSnapshot, options.GetTypeInfo(typeof(StateSnapshotEvent))); + break; + case StateDeltaEvent stateDelta: + JsonSerializer.Serialize(writer, stateDelta, 
options.GetTypeInfo(typeof(StateDeltaEvent))); + break; default: - throw new JsonException($"Unknown BaseEvent type: {value.GetType().Name}"); + throw new InvalidOperationException($"Unknown event type: {value.GetType().Name}"); } } } diff --git a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/ChatResponseUpdateAGUIExtensions.cs b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/ChatResponseUpdateAGUIExtensions.cs index 9b865afabe..46184a6588 100644 --- a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/ChatResponseUpdateAGUIExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/ChatResponseUpdateAGUIExtensions.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; using System.Diagnostics; +using System.Net.Http.Headers; using System.Runtime.CompilerServices; using System.Text; using System.Text.Json; @@ -18,6 +19,9 @@ namespace Microsoft.Agents.AI.AGUI.Shared; internal static class ChatResponseUpdateAGUIExtensions { + private static readonly MediaTypeHeaderValue? s_jsonPatchMediaType = new("application/json-patch+json"); + private static readonly MediaTypeHeaderValue? 
s_json = new("application/json"); + public static async IAsyncEnumerable AsChatResponseUpdatesAsync( this IAsyncEnumerable events, JsonSerializerOptions jsonSerializerOptions, @@ -70,11 +74,73 @@ public static async IAsyncEnumerable AsChatResponseUpdatesAs case ToolCallResultEvent toolCallResult: yield return toolCallAccumulator.EmitToolCallResult(toolCallResult, jsonSerializerOptions); break; + + // State snapshot events + case StateSnapshotEvent stateSnapshot: + if (stateSnapshot.Snapshot.HasValue) + { + yield return CreateStateSnapshotUpdate(stateSnapshot, conversationId, responseId, jsonSerializerOptions); + } + break; + case StateDeltaEvent stateDelta: + if (stateDelta.Delta.HasValue) + { + yield return CreateStateDeltaUpdate(stateDelta, conversationId, responseId, jsonSerializerOptions); + } + break; } } } - private class TextMessageBuilder() + private static ChatResponseUpdate CreateStateSnapshotUpdate( + StateSnapshotEvent stateSnapshot, + string? conversationId, + string? responseId, + JsonSerializerOptions jsonSerializerOptions) + { + // Serialize JsonElement directly to UTF-8 bytes using AOT-safe overload + byte[] jsonBytes = JsonSerializer.SerializeToUtf8Bytes( + stateSnapshot.Snapshot!.Value, + jsonSerializerOptions.GetTypeInfo(typeof(JsonElement))); + DataContent dataContent = new(jsonBytes, "application/json"); + + return new ChatResponseUpdate(ChatRole.Assistant, [dataContent]) + { + ConversationId = conversationId, + ResponseId = responseId, + CreatedAt = DateTimeOffset.UtcNow, + AdditionalProperties = new AdditionalPropertiesDictionary + { + ["is_state_snapshot"] = true + } + }; + } + + private static ChatResponseUpdate CreateStateDeltaUpdate( + StateDeltaEvent stateDelta, + string? conversationId, + string? 
responseId, + JsonSerializerOptions jsonSerializerOptions) + { + // Serialize JsonElement directly to UTF-8 bytes using AOT-safe overload + byte[] jsonBytes = JsonSerializer.SerializeToUtf8Bytes( + stateDelta.Delta!.Value, + jsonSerializerOptions.GetTypeInfo(typeof(JsonElement))); + DataContent dataContent = new(jsonBytes, "application/json-patch+json"); + + return new ChatResponseUpdate(ChatRole.Assistant, [dataContent]) + { + ConversationId = conversationId, + ResponseId = responseId, + CreatedAt = DateTimeOffset.UtcNow, + AdditionalProperties = new AdditionalPropertiesDictionary + { + ["is_state_delta"] = true + } + }; + } + + private sealed class TextMessageBuilder() { private ChatRole _currentRole; private string? _currentMessageId; @@ -154,7 +220,7 @@ private static ChatResponseUpdate ValidateAndEmitRunFinished(string? conversatio }; } - private class ToolCallBuilder + private sealed class ToolCallBuilder { private string? _conversationId; private string? _responseId; @@ -348,6 +414,55 @@ chatResponse.Contents[0] is TextContent && Role = AGUIRoles.Tool }; } + else if (content is DataContent dataContent) + { + if (MediaTypeHeaderValue.TryParse(dataContent.MediaType, out var mediaType) && mediaType.Equals(s_json)) + { + // State snapshot event + yield return new StateSnapshotEvent + { +#if NET472 || NETSTANDARD2_0 + Snapshot = (JsonElement?)JsonSerializer.Deserialize( + dataContent.Data.ToArray(), + jsonSerializerOptions.GetTypeInfo(typeof(JsonElement))) +#else + Snapshot = (JsonElement?)JsonSerializer.Deserialize( + dataContent.Data.Span, + jsonSerializerOptions.GetTypeInfo(typeof(JsonElement))) +#endif + }; + } + else if (mediaType is { } && mediaType.Equals(s_jsonPatchMediaType)) + { + // State delta event must be a valid JSON patch, + // but it's not up to us to validate that here. 
+ yield return new StateDeltaEvent + { +#if NET472 || NETSTANDARD2_0 + Delta = (JsonElement?)JsonSerializer.Deserialize( + dataContent.Data.ToArray(), + jsonSerializerOptions.GetTypeInfo(typeof(JsonElement))) +#else + Delta = (JsonElement?)JsonSerializer.Deserialize( + dataContent.Data.Span, + jsonSerializerOptions.GetTypeInfo(typeof(JsonElement))) +#endif + }; + } + else + { + // Text content event + yield return new TextMessageContentEvent + { + MessageId = chatResponse.MessageId!, +#if NET472 || NETSTANDARD2_0 + Delta = Encoding.UTF8.GetString(dataContent.Data.ToArray()) +#else + Delta = Encoding.UTF8.GetString(dataContent.Data.Span) +#endif + }; + } + } } } } diff --git a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/StateDeltaEvent.cs b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/StateDeltaEvent.cs new file mode 100644 index 0000000000..98d3b168b3 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/StateDeltaEvent.cs @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json; +using System.Text.Json.Serialization; + +#if ASPNETCORE +namespace Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.Shared; +#else +namespace Microsoft.Agents.AI.AGUI.Shared; +#endif + +internal sealed class StateDeltaEvent : BaseEvent +{ + public StateDeltaEvent() + { + this.Type = AGUIEventTypes.StateDelta; + } + + [JsonPropertyName("delta")] + public JsonElement? Delta { get; set; } +} diff --git a/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/StateSnapshotEvent.cs b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/StateSnapshotEvent.cs new file mode 100644 index 0000000000..dc77e4ba46 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.AGUI/Shared/StateSnapshotEvent.cs @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Text.Json; +using System.Text.Json.Serialization; + +#if ASPNETCORE +namespace Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.Shared; +#else +namespace Microsoft.Agents.AI.AGUI.Shared; +#endif + +internal sealed class StateSnapshotEvent : BaseEvent +{ + public StateSnapshotEvent() + { + this.Type = AGUIEventTypes.StateSnapshot; + } + + [JsonPropertyName("snapshot")] + public JsonElement? Snapshot { get; set; } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunOptions.cs b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunOptions.cs index c6a64915cf..7262979207 100644 --- a/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunOptions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Abstractions/AgentRunOptions.cs @@ -1,6 +1,7 @@ // Copyright (c) Microsoft. All rights reserved. using System; +using Microsoft.Extensions.AI; using Microsoft.Shared.Diagnostics; namespace Microsoft.Agents.AI; @@ -32,6 +33,7 @@ public AgentRunOptions(AgentRunOptions options) _ = Throw.IfNull(options); this.ContinuationToken = options.ContinuationToken; this.AllowBackgroundResponses = options.AllowBackgroundResponses; + this.AdditionalProperties = options.AdditionalProperties?.Clone(); } /// @@ -74,4 +76,18 @@ public AgentRunOptions(AgentRunOptions options) /// /// public bool? AllowBackgroundResponses { get; set; } + + /// + /// Gets or sets additional properties associated with these options. + /// + /// + /// An containing custom properties, + /// or if no additional properties are present. + /// + /// + /// Additional properties provide a way to include custom metadata or provider-specific + /// information that doesn't fit into the standard options schema. This is useful for + /// preserving implementation-specific details or extending the options with custom data. + /// + public AdditionalPropertiesDictionary? 
AdditionalProperties { get; set; } } diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIExtensions.cs b/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIExtensions.cs index 4a85de121a..8d5159cab7 100644 --- a/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/DevUIExtensions.cs @@ -9,32 +9,31 @@ namespace Microsoft.Agents.AI.DevUI; /// public static class DevUIExtensions { - /// - /// Adds the necessary services for the DevUI to the application builder. - /// - public static IHostApplicationBuilder AddDevUI(this IHostApplicationBuilder builder) - { - ArgumentNullException.ThrowIfNull(builder); - builder.Services.AddOpenAIConversations(); - builder.Services.AddOpenAIResponses(); - - return builder; - } - /// /// Maps an endpoint that serves the DevUI from the '/devui' path. /// + /// + /// DevUI requires the OpenAI Responses and Conversations services to be registered with + /// and + /// , + /// and the corresponding endpoints to be mapped using + /// and + /// . + /// /// The to add the endpoint to. /// A that can be used to add authorization or other endpoint configuration. + /// + /// + /// + /// /// Thrown when is null. 
public static IEndpointConventionBuilder MapDevUI( this IEndpointRouteBuilder endpoints) { var group = endpoints.MapGroup(""); group.MapDevUI(pattern: "/devui"); + group.MapMeta(); group.MapEntities(); - group.MapOpenAIConversations(); - group.MapOpenAIResponses(); return group; } diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/Entities/EntitiesJsonContext.cs b/dotnet/src/Microsoft.Agents.AI.DevUI/Entities/EntitiesJsonContext.cs index fc8bbe3864..3acc8d48d3 100644 --- a/dotnet/src/Microsoft.Agents.AI.DevUI/Entities/EntitiesJsonContext.cs +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/Entities/EntitiesJsonContext.cs @@ -15,10 +15,12 @@ namespace Microsoft.Agents.AI.DevUI.Entities; DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull)] [JsonSerializable(typeof(EntityInfo))] [JsonSerializable(typeof(DiscoveryResponse))] +[JsonSerializable(typeof(MetaResponse))] [JsonSerializable(typeof(EnvVarRequirement))] [JsonSerializable(typeof(List))] [JsonSerializable(typeof(List))] [JsonSerializable(typeof(Dictionary))] +[JsonSerializable(typeof(Dictionary))] [JsonSerializable(typeof(JsonElement))] [ExcludeFromCodeCoverage] internal sealed partial class EntitiesJsonContext : JsonSerializerContext; diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/Entities/MetaResponse.cs b/dotnet/src/Microsoft.Agents.AI.DevUI/Entities/MetaResponse.cs new file mode 100644 index 0000000000..6e1260cdc7 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/Entities/MetaResponse.cs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Text.Json.Serialization; + +namespace Microsoft.Agents.AI.DevUI.Entities; + +/// +/// Server metadata response for the /meta endpoint. +/// Provides information about the DevUI server configuration, capabilities, and requirements. 
+/// +/// +/// This response is used by the frontend to: +/// - Determine the UI mode (developer vs user interface) +/// - Check server capabilities (tracing, OpenAI proxy support) +/// - Verify authentication requirements +/// - Display framework and version information +/// +internal sealed record MetaResponse +{ + /// + /// Gets the UI interface mode. + /// "developer" shows debug tools and advanced features, "user" shows a simplified interface. + /// + [JsonPropertyName("ui_mode")] + public string UiMode { get; init; } = "developer"; + + /// + /// Gets the DevUI version string. + /// + [JsonPropertyName("version")] + public string Version { get; init; } = "0.1.0"; + + /// + /// Gets the backend framework identifier. + /// Always "agent_framework" for Agent Framework implementations. + /// + [JsonPropertyName("framework")] + public string Framework { get; init; } = "agent_framework"; + + /// + /// Gets the backend runtime/language. + /// "dotnet" for .NET implementations, "python" for Python implementations. + /// Used by frontend for deployment guides and feature availability. + /// + [JsonPropertyName("runtime")] + public string Runtime { get; init; } = "dotnet"; + + /// + /// Gets the server capabilities dictionary. + /// Key-value pairs indicating which optional features are enabled. + /// + /// + /// Standard capability keys: + /// - "tracing": Whether trace events are emitted for debugging + /// - "openai_proxy": Whether the server can proxy requests to OpenAI + /// + [JsonPropertyName("capabilities")] + public Dictionary Capabilities { get; init; } = new(); + + /// + /// Gets a value indicating whether Bearer token authentication is required for API access. + /// When true, clients must include "Authorization: Bearer {token}" header in requests. 
+ /// + [JsonPropertyName("auth_required")] + public bool AuthRequired { get; init; } +} diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs b/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs index 716dab8542..eb41fe90b8 100644 --- a/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/EntitiesApiExtensions.cs @@ -1,9 +1,11 @@ // Copyright (c) Microsoft. All rights reserved. +using System.Runtime.CompilerServices; using System.Text.Json; using Microsoft.Agents.AI.DevUI.Entities; using Microsoft.Agents.AI.Hosting; +using Microsoft.Agents.AI.Workflows; namespace Microsoft.Agents.AI.DevUI; @@ -56,79 +58,19 @@ private static async Task ListEntitiesAsync( { var entities = new List(); - // Discover agents from the agent catalog - if (agentCatalog is not null) + // Discover agents + await foreach (var agentInfo in DiscoverAgentsAsync(agentCatalog, entityIdFilter: null, cancellationToken).ConfigureAwait(false)) { - await foreach (var agent in agentCatalog.GetAgentsAsync(cancellationToken).ConfigureAwait(false)) - { - if (agent.GetType().Name == "WorkflowHostAgent") - { - // HACK: ignore WorkflowHostAgent instances as they are just wrappers around workflows, - // and workflows are handled below. - continue; - } - - entities.Add(new EntityInfo( - Id: agent.Name ?? agent.Id, - Type: "agent", - Name: agent.Name ?? 
agent.Id, - Description: agent.Description, - Framework: "agent-framework", - Tools: null, - Metadata: [] - ) - { - Source = "in_memory" - }); - } + entities.Add(agentInfo); } - // Discover workflows from the workflow catalog - if (workflowCatalog is not null) + // Discover workflows + await foreach (var workflowInfo in DiscoverWorkflowsAsync(workflowCatalog, entityIdFilter: null, cancellationToken).ConfigureAwait(false)) { - await foreach (var workflow in workflowCatalog.GetWorkflowsAsync(cancellationToken).ConfigureAwait(false)) - { - // Extract executor IDs from the workflow structure - var executorIds = new HashSet { workflow.StartExecutorId }; - var reflectedEdges = workflow.ReflectEdges(); - foreach (var (sourceId, edgeSet) in reflectedEdges) - { - executorIds.Add(sourceId); - foreach (var edge in edgeSet) - { - foreach (var sinkId in edge.Connection.SinkIds) - { - executorIds.Add(sinkId); - } - } - } - - // Create a default input schema (string type) - var defaultInputSchema = new Dictionary - { - ["type"] = "string" - }; - - entities.Add(new EntityInfo( - Id: workflow.Name ?? workflow.StartExecutorId, - Type: "workflow", - Name: workflow.Name ?? workflow.StartExecutorId, - Description: workflow.Description, - Framework: "agent-framework", - Tools: [.. executorIds], - Metadata: [] - ) - { - Source = "in_memory", - WorkflowDump = JsonSerializer.SerializeToElement(workflow.ToDevUIDict()), - InputSchema = JsonSerializer.SerializeToElement(defaultInputSchema), - InputTypeName = "string", - StartExecutorId = workflow.StartExecutorId - }); - } + entities.Add(workflowInfo); } - return Results.Json(new DiscoveryResponse(entities), EntitiesJsonContext.Default.DiscoveryResponse); + return Results.Json(new DiscoveryResponse([.. entities]), EntitiesJsonContext.Default.DiscoveryResponse); } catch (Exception ex) { @@ -141,93 +83,26 @@ private static async Task ListEntitiesAsync( private static async Task GetEntityInfoAsync( string entityId, + string? type, AgentCatalog? 
agentCatalog, WorkflowCatalog? workflowCatalog, CancellationToken cancellationToken) { try { - // Try to find the entity among discovered agents - if (agentCatalog is not null) + if (type is null || string.Equals(type, "agent", StringComparison.OrdinalIgnoreCase)) { - await foreach (var agent in agentCatalog.GetAgentsAsync(cancellationToken).ConfigureAwait(false)) + await foreach (var agentInfo in DiscoverAgentsAsync(agentCatalog, entityId, cancellationToken).ConfigureAwait(false)) { - if (agent.GetType().Name == "WorkflowHostAgent") - { - // HACK: ignore WorkflowHostAgent instances as they are just wrappers around workflows, - // and workflows are handled below. - continue; - } - - if (string.Equals(agent.Name, entityId, StringComparison.OrdinalIgnoreCase) || - string.Equals(agent.Id, entityId, StringComparison.OrdinalIgnoreCase)) - { - var entityInfo = new EntityInfo( - Id: agent.Name ?? agent.Id, - Type: "agent", - Name: agent.Name ?? agent.Id, - Description: agent.Description, - Framework: "agent-framework", - Tools: null, - Metadata: [] - ) - { - Source = "in_memory" - }; - - return Results.Json(entityInfo, EntitiesJsonContext.Default.EntityInfo); - } + return Results.Json(agentInfo, EntitiesJsonContext.Default.EntityInfo); } } - // Try to find the entity among discovered workflows - if (workflowCatalog is not null) + if (type is null || string.Equals(type, "workflow", StringComparison.OrdinalIgnoreCase)) { - await foreach (var workflow in workflowCatalog.GetWorkflowsAsync(cancellationToken).ConfigureAwait(false)) + await foreach (var workflowInfo in DiscoverWorkflowsAsync(workflowCatalog, entityId, cancellationToken).ConfigureAwait(false)) { - var workflowId = workflow.Name ?? 
workflow.StartExecutorId; - if (string.Equals(workflowId, entityId, StringComparison.OrdinalIgnoreCase)) - { - // Extract executor IDs from the workflow structure - var executorIds = new HashSet { workflow.StartExecutorId }; - var reflectedEdges = workflow.ReflectEdges(); - foreach (var (sourceId, edgeSet) in reflectedEdges) - { - executorIds.Add(sourceId); - foreach (var edge in edgeSet) - { - foreach (var sinkId in edge.Connection.SinkIds) - { - executorIds.Add(sinkId); - } - } - } - - // Create a default input schema (string type) - var defaultInputSchema = new Dictionary - { - ["type"] = "string" - }; - - var entityInfo = new EntityInfo( - Id: workflowId, - Type: "workflow", - Name: workflow.Name ?? workflow.StartExecutorId, - Description: workflow.Description, - Framework: "agent-framework", - Tools: [.. executorIds], - Metadata: [] - ) - { - Source = "in_memory", - WorkflowDump = JsonSerializer.SerializeToElement(workflow.ToDevUIDict()), - InputSchema = JsonSerializer.SerializeToElement(defaultInputSchema), - InputTypeName = "Input", - StartExecutorId = workflow.StartExecutorId - }; - - return Results.Json(entityInfo, EntitiesJsonContext.Default.EntityInfo); - } + return Results.Json(workflowInfo, EntitiesJsonContext.Default.EntityInfo); } } @@ -241,4 +116,123 @@ private static async Task GetEntityInfoAsync( title: "Error getting entity info"); } } + + private static async IAsyncEnumerable DiscoverAgentsAsync( + AgentCatalog? agentCatalog, + string? 
entityIdFilter, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + if (agentCatalog is null) + { + yield break; + } + + await foreach (var agent in agentCatalog.GetAgentsAsync(cancellationToken).ConfigureAwait(false)) + { + // If filtering by entity ID, skip non-matching agents + if (entityIdFilter is not null && + !string.Equals(agent.Name, entityIdFilter, StringComparison.OrdinalIgnoreCase) && + !string.Equals(agent.Id, entityIdFilter, StringComparison.OrdinalIgnoreCase)) + { + continue; + } + + yield return CreateAgentEntityInfo(agent); + + // If we found the entity we're looking for, we're done + if (entityIdFilter is not null) + { + yield break; + } + } + } + + private static async IAsyncEnumerable DiscoverWorkflowsAsync( + WorkflowCatalog? workflowCatalog, + string? entityIdFilter, + [EnumeratorCancellation] CancellationToken cancellationToken) + { + if (workflowCatalog is null) + { + yield break; + } + + await foreach (var workflow in workflowCatalog.GetWorkflowsAsync(cancellationToken).ConfigureAwait(false)) + { + var workflowId = workflow.Name ?? workflow.StartExecutorId; + + // If filtering by entity ID, skip non-matching workflows + if (entityIdFilter is not null && !string.Equals(workflowId, entityIdFilter, StringComparison.OrdinalIgnoreCase)) + { + continue; + } + + yield return CreateWorkflowEntityInfo(workflow); + + // If we found the entity we're looking for, we're done + if (entityIdFilter is not null) + { + yield break; + } + } + } + + private static EntityInfo CreateAgentEntityInfo(AIAgent agent) + { + var entityId = agent.Name ?? 
agent.Id; + return new EntityInfo( + Id: entityId, + Type: "agent", + Name: entityId, + Description: agent.Description, + Framework: "agent-framework", + Tools: null, + Metadata: [] + ) + { + Source = "in_memory" + }; + } + + private static EntityInfo CreateWorkflowEntityInfo(Workflow workflow) + { + // Extract executor IDs from the workflow structure + var executorIds = new HashSet { workflow.StartExecutorId }; + var reflectedEdges = workflow.ReflectEdges(); + foreach (var (sourceId, edgeSet) in reflectedEdges) + { + executorIds.Add(sourceId); + foreach (var edge in edgeSet) + { + foreach (var sinkId in edge.Connection.SinkIds) + { + executorIds.Add(sinkId); + } + } + } + + // Create a default input schema (string type) + var defaultInputSchema = new Dictionary + { + ["type"] = "string" + }; + + var workflowId = workflow.Name ?? workflow.StartExecutorId; + return new EntityInfo( + Id: workflowId, + Type: "workflow", + Name: workflowId, + Description: workflow.Description, + Framework: "agent-framework", + Tools: [.. executorIds], + Metadata: [] + ) + { + Source = "in_memory", + WorkflowDump = JsonSerializer.SerializeToElement(workflow.ToDevUIDict()), + InputSchema = JsonSerializer.SerializeToElement(defaultInputSchema), + InputTypeName = "string", + StartExecutorId = workflow.StartExecutorId + }; + } } diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/MetaApiExtensions.cs b/dotnet/src/Microsoft.Agents.AI.DevUI/MetaApiExtensions.cs new file mode 100644 index 0000000000..4a3cfbb8f0 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/MetaApiExtensions.cs @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft. All rights reserved. + +using Microsoft.Agents.AI.DevUI.Entities; + +namespace Microsoft.Agents.AI.DevUI; + +/// +/// Provides extension methods for mapping the server metadata endpoint to an . +/// +internal static class MetaApiExtensions +{ + /// + /// Maps the HTTP API endpoint for retrieving server metadata. + /// + /// The to add the route to. 
+ /// The for method chaining. + /// + /// This extension method registers the following endpoint: + /// + /// GET /meta - Retrieve server metadata including UI mode, version, capabilities, and auth requirements + /// + /// The endpoint is compatible with the Python DevUI frontend and provides essential + /// configuration information needed for proper frontend initialization. + /// + public static IEndpointConventionBuilder MapMeta(this IEndpointRouteBuilder endpoints) + { + return endpoints.MapGet("/meta", GetMeta) + .WithName("GetMeta") + .WithSummary("Get server metadata and configuration") + .WithDescription("Returns server metadata including UI mode, version, framework identifier, capabilities, and authentication requirements. Used by the frontend for initialization and feature detection.") + .Produces(StatusCodes.Status200OK, contentType: "application/json"); + } + + private static IResult GetMeta() + { + // TODO: Consider making these configurable via IOptions + // For now, using sensible defaults that match Python DevUI behavior + + var meta = new MetaResponse + { + UiMode = "developer", // Could be made configurable to support "user" mode + Version = "0.1.0", // TODO: Extract from assembly version attribute + Framework = "agent_framework", + Runtime = "dotnet", // .NET runtime for deployment guides + Capabilities = new Dictionary + { + // Tracing capability - will be enabled when trace event support is added + ["tracing"] = false, + + // OpenAI proxy capability - not currently supported in .NET DevUI + ["openai_proxy"] = false, + + // Deployment capability - not currently supported in .NET DevUI + ["deployment"] = false + }, + AuthRequired = false // Could be made configurable based on authentication middleware + }; + + return Results.Json(meta, EntitiesJsonContext.Default.MetaResponse); + } +} diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/Microsoft.Agents.AI.DevUI.Frontend.targets 
b/dotnet/src/Microsoft.Agents.AI.DevUI/Microsoft.Agents.AI.DevUI.Frontend.targets index 7f3af7decb..c8bdc5dbdf 100644 --- a/dotnet/src/Microsoft.Agents.AI.DevUI/Microsoft.Agents.AI.DevUI.Frontend.targets +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/Microsoft.Agents.AI.DevUI.Frontend.targets @@ -8,11 +8,6 @@ $(FrontendRoot)\node_modules - - - - - @@ -27,18 +22,6 @@ - - - $(BaseIntermediateOutputPath)\frontend.build.marker - - - - - - - - - @@ -47,7 +30,7 @@ - + diff --git a/dotnet/src/Microsoft.Agents.AI.DevUI/README.md b/dotnet/src/Microsoft.Agents.AI.DevUI/README.md index 1f106e29ef..b55869748d 100644 --- a/dotnet/src/Microsoft.Agents.AI.DevUI/README.md +++ b/dotnet/src/Microsoft.Agents.AI.DevUI/README.md @@ -24,14 +24,16 @@ var builder = WebApplication.CreateBuilder(args); // Register your agents builder.AddAIAgent("assistant", "You are a helpful assistant."); -if (builder.Environment.IsDevelopment()) -{ - // Add DevUI services - builder.AddDevUI(); -} +// Register services for OpenAI responses and conversations (also required for DevUI) +builder.Services.AddOpenAIResponses(); +builder.Services.AddOpenAIConversations(); var app = builder.Build(); +// Map endpoints for OpenAI responses and conversations (also required for DevUI) +app.MapOpenAIResponses(); +app.MapOpenAIConversations(); + if (builder.Environment.IsDevelopment()) { // Map DevUI endpoint to /devui diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs index 43376d8fb2..c54af66bb8 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.A2A/AIAgentExtensions.cs @@ -83,7 +83,16 @@ public static ITaskManager MapA2A( { // A2A SDK assigns the url on its own // we can help user if they did not set Url explicitly. 
- agentCard.Url ??= context; + if (string.IsNullOrEmpty(agentCard.Url)) + { + var agentCardUrl = context.TrimEnd('/'); + if (!context.EndsWith("/v1/card", StringComparison.Ordinal)) + { + agentCardUrl += "/v1/card"; + } + + agentCard.Url = agentCardUrl; + } return Task.FromResult(agentCard); }; diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore/AGUIEndpointRouteBuilderExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore/AGUIEndpointRouteBuilderExtensions.cs index 6e356f531d..e20d1ab448 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore/AGUIEndpointRouteBuilderExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore/AGUIEndpointRouteBuilderExtensions.cs @@ -44,22 +44,27 @@ public static IEndpointConventionBuilder MapAGUI( var jsonSerializerOptions = jsonOptions.Value.SerializerOptions; var messages = input.Messages.AsChatMessages(jsonSerializerOptions); - var agent = aiAgent; + var clientTools = input.Tools?.AsAITools().ToList(); - ChatClientAgentRunOptions? runOptions = null; - List? 
clientTools = input.Tools?.AsAITools().ToList(); - if (clientTools?.Count > 0) + // Create run options with AG-UI context in AdditionalProperties + var runOptions = new ChatClientAgentRunOptions { - runOptions = new ChatClientAgentRunOptions + ChatOptions = new ChatOptions { - ChatOptions = new ChatOptions + Tools = clientTools, + AdditionalProperties = new AdditionalPropertiesDictionary { - Tools = clientTools + ["ag_ui_state"] = input.State, + ["ag_ui_context"] = input.Context?.Select(c => new KeyValuePair(c.Description, c.Value)).ToArray(), + ["ag_ui_forwarded_properties"] = input.ForwardedProperties, + ["ag_ui_thread_id"] = input.ThreadId, + ["ag_ui_run_id"] = input.RunId } - }; - } + } + }; - var events = agent.RunStreamingAsync( + // Run the agent and convert to AG-UI events + var events = aiAgent.RunStreamingAsync( messages, options: runOptions, cancellationToken: cancellationToken) diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/ChatCompletions/Models/Tool.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/ChatCompletions/Models/Tool.cs index 470f7d15b0..87b0637b9b 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/ChatCompletions/Models/Tool.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/ChatCompletions/Models/Tool.cs @@ -18,7 +18,7 @@ internal abstract record Tool /// /// The type of the tool. /// - [JsonPropertyName("type")] + [JsonIgnore] public abstract string Type { get; } } @@ -30,7 +30,7 @@ internal sealed record FunctionTool : Tool /// /// The type of the tool. Always "function". /// - [JsonPropertyName("type")] + [JsonIgnore] public override string Type => "function"; /// @@ -88,7 +88,7 @@ internal sealed record CustomTool : Tool /// /// The type of the tool. Always "custom". 
/// - [JsonPropertyName("type")] + [JsonIgnore] public override string Type => "custom"; /// diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/OpenAIHostingJsonUtilities.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/OpenAIHostingJsonUtilities.cs index 49ceef622a..f77143c583 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/OpenAIHostingJsonUtilities.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/OpenAIHostingJsonUtilities.cs @@ -109,6 +109,7 @@ private static JsonSerializerOptions CreateDefaultOptions() [JsonSerializable(typeof(MCPApprovalRequestItemResource))] [JsonSerializable(typeof(MCPApprovalResponseItemResource))] [JsonSerializable(typeof(MCPCallItemResource))] +[JsonSerializable(typeof(ExecutorActionItemResource))] [JsonSerializable(typeof(List))] // ItemParam types [JsonSerializable(typeof(ItemParam))] diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AIAgentResponseExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AIAgentResponseExecutor.cs index 18863034bf..8b909651b9 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AIAgentResponseExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AIAgentResponseExecutor.cs @@ -24,6 +24,10 @@ public AIAgentResponseExecutor(AIAgent agent) this._agent = agent; } + public ValueTask ValidateRequestAsync( + CreateResponse request, + CancellationToken cancellationToken = default) => ValueTask.FromResult(null); + public async IAsyncEnumerable ExecuteAsync( AgentInvocationContext context, CreateResponse request, diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseExtensions.cs index fedaeae1f4..97dcf9740f 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseExtensions.cs @@ 
-56,7 +56,7 @@ public static Response ToResponse( MaxOutputTokens = request.MaxOutputTokens, MaxToolCalls = request.MaxToolCalls, Metadata = request.Metadata is IReadOnlyDictionary metadata ? new Dictionary(metadata) : [], - Model = request.Agent?.Name ?? request.Model, + Model = request.Model, Output = output, ParallelToolCalls = request.ParallelToolCalls ?? true, PreviousResponseId = request.PreviousResponseId, @@ -64,7 +64,7 @@ public static Response ToResponse( PromptCacheKey = request.PromptCacheKey, Reasoning = request.Reasoning, SafetyIdentifier = request.SafetyIdentifier, - ServiceTier = request.ServiceTier ?? "default", + ServiceTier = request.ServiceTier, Status = ResponseStatus.Completed, Store = request.Store ?? true, Temperature = request.Temperature ?? 1.0, diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseUpdateExtensions.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseUpdateExtensions.cs index 252cdc8d92..628b80b340 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseUpdateExtensions.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/AgentRunResponseUpdateExtensions.cs @@ -45,6 +45,9 @@ public static async IAsyncEnumerable ToStreamingResponse var updateEnumerator = updates.GetAsyncEnumerator(cancellationToken); await using var _ = updateEnumerator.ConfigureAwait(false); + // Track active item IDs by executor ID to pair invoked/completed/failed events + Dictionary executorItemIds = []; + AgentRunResponseUpdate? previousUpdate = null; StreamingEventGenerator? generator = null; while (await updateEnumerator.MoveNextAsync().ConfigureAwait(false)) @@ -55,7 +58,92 @@ public static async IAsyncEnumerable ToStreamingResponse // Special-case for agent framework workflow events. 
if (update.RawRepresentation is WorkflowEvent workflowEvent) { - yield return CreateWorkflowEventResponse(workflowEvent, seq.Increment(), outputIndex); + // Convert executor events to standard OpenAI output_item events + if (workflowEvent is ExecutorInvokedEvent invokedEvent) + { + var itemId = IdGenerator.NewId(prefix: "item"); + // Store the item ID for this executor so we can reuse it for completion/failure + executorItemIds[invokedEvent.ExecutorId] = itemId; + + var item = new ExecutorActionItemResource + { + Id = itemId, + ExecutorId = invokedEvent.ExecutorId, + Status = "in_progress", + CreatedAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + }; + + yield return new StreamingOutputItemAdded + { + SequenceNumber = seq.Increment(), + OutputIndex = outputIndex, + Item = item + }; + } + else if (workflowEvent is ExecutorCompletedEvent completedEvent) + { + // Reuse the item ID from the invoked event, or generate a new one if not found + var itemId = executorItemIds.TryGetValue(completedEvent.ExecutorId, out var existingId) + ? existingId + : IdGenerator.NewId(prefix: "item"); + + // Remove from tracking as this executor run is now complete + executorItemIds.Remove(completedEvent.ExecutorId); + JsonElement? 
resultData = null; + if (completedEvent.Data != null && JsonSerializer.IsReflectionEnabledByDefault) + { + resultData = JsonSerializer.SerializeToElement( + completedEvent.Data, + OpenAIHostingJsonUtilities.DefaultOptions.GetTypeInfo(typeof(object))); + } + + var item = new ExecutorActionItemResource + { + Id = itemId, + ExecutorId = completedEvent.ExecutorId, + Status = "completed", + Result = resultData, + CreatedAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + }; + + yield return new StreamingOutputItemDone + { + SequenceNumber = seq.Increment(), + OutputIndex = outputIndex, + Item = item + }; + } + else if (workflowEvent is ExecutorFailedEvent failedEvent) + { + // Reuse the item ID from the invoked event, or generate a new one if not found + var itemId = executorItemIds.TryGetValue(failedEvent.ExecutorId, out var existingId) + ? existingId + : IdGenerator.NewId(prefix: "item"); + + // Remove from tracking as this executor run has now failed + executorItemIds.Remove(failedEvent.ExecutorId); + + var item = new ExecutorActionItemResource + { + Id = itemId, + ExecutorId = failedEvent.ExecutorId, + Status = "failed", + Error = failedEvent.Data?.ToString(), + CreatedAt = DateTimeOffset.UtcNow.ToUnixTimeSeconds() + }; + + yield return new StreamingOutputItemDone + { + SequenceNumber = seq.Increment(), + OutputIndex = outputIndex, + Item = item + }; + } + else + { + // For other workflow events (not executor-specific), keep the old format as fallback + yield return CreateWorkflowEventResponse(workflowEvent, seq.Increment(), outputIndex); + } continue; } @@ -165,7 +253,7 @@ Response CreateResponse(ResponseStatus status = ResponseStatus.Completed, IEnume MaxOutputTokens = request.MaxOutputTokens, MaxToolCalls = request.MaxToolCalls, Metadata = request.Metadata != null ? new Dictionary(request.Metadata) : [], - Model = request.Agent?.Name ?? request.Model, + Model = request.Model, Output = outputs?.ToList() ?? [], ParallelToolCalls = request.ParallelToolCalls ?? 
true, PreviousResponseId = request.PreviousResponseId, diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Converters/ItemResourceConverter.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Converters/ItemResourceConverter.cs index 571e45fa1f..0ca5c05d9b 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Converters/ItemResourceConverter.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Converters/ItemResourceConverter.cs @@ -45,6 +45,7 @@ internal sealed class ItemResourceConverter : JsonConverter MCPApprovalRequestItemResource.ItemType => doc.Deserialize(OpenAIHostingJsonContext.Default.MCPApprovalRequestItemResource), MCPApprovalResponseItemResource.ItemType => doc.Deserialize(OpenAIHostingJsonContext.Default.MCPApprovalResponseItemResource), MCPCallItemResource.ItemType => doc.Deserialize(OpenAIHostingJsonContext.Default.MCPCallItemResource), + ExecutorActionItemResource.ItemType => doc.Deserialize(OpenAIHostingJsonContext.Default.ExecutorActionItemResource), _ => null }; } @@ -106,6 +107,9 @@ public override void Write(Utf8JsonWriter writer, ItemResource value, JsonSerial case MCPCallItemResource mcpCall: JsonSerializer.Serialize(writer, mcpCall, OpenAIHostingJsonContext.Default.MCPCallItemResource); break; + case ExecutorActionItemResource executorAction: + JsonSerializer.Serialize(writer, executorAction, OpenAIHostingJsonContext.Default.ExecutorActionItemResource); + break; default: throw new JsonException($"Unknown item type: {value.GetType().Name}"); } diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/HostedAgentResponseExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/HostedAgentResponseExecutor.cs index 78e4331b6b..f90e47b070 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/HostedAgentResponseExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/HostedAgentResponseExecutor.cs @@ -13,8 +13,9 @@ namespace 
Microsoft.Agents.AI.Hosting.OpenAI.Responses; /// -/// Response executor that routes requests to hosted AIAgent services based on the model or agent.name parameter. +/// Response executor that routes requests to hosted AIAgent services based on agent.name or metadata["entity_id"]. /// This executor resolves agents from keyed services registered via AddAIAgent(). +/// The model field is reserved for actual model names and is never used for entity/agent identification. /// internal sealed class HostedAgentResponseExecutor : IResponseExecutor { @@ -37,16 +38,46 @@ public HostedAgentResponseExecutor( this._logger = logger; } + /// + public ValueTask ValidateRequestAsync( + CreateResponse request, + CancellationToken cancellationToken = default) + { + // Extract agent name from agent.name or model parameter + string? agentName = GetAgentName(request); + + if (string.IsNullOrEmpty(agentName)) + { + return ValueTask.FromResult(new ResponseError + { + Code = "missing_required_parameter", + Message = "No 'agent.name' or 'metadata[\"entity_id\"]' specified in the request." + }); + } + + // Validate that the agent can be resolved + AIAgent? agent = this._serviceProvider.GetKeyedService(agentName); + if (agent is null) + { + this._logger.LogWarning("Failed to resolve agent with name '{AgentName}'", agentName); + return ValueTask.FromResult(new ResponseError + { + Code = "agent_not_found", + Message = $"Agent '{agentName}' not found. Ensure the agent is registered with AddAIAgent()." 
+ }); + } + + return ValueTask.FromResult(null); + } + /// public async IAsyncEnumerable ExecuteAsync( AgentInvocationContext context, CreateResponse request, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - // Validate and resolve agent synchronously to ensure validation errors are thrown immediately - AIAgent agent = this.ResolveAgent(request); - - // Create options with properties from the request + string agentName = GetAgentName(request)!; + AIAgent agent = this._serviceProvider.GetRequiredKeyedService(agentName); var chatOptions = new ChatOptions { ConversationId = request.Conversation?.Id, @@ -57,8 +88,6 @@ public async IAsyncEnumerable ExecuteAsync( ModelId = request.Model, }; var options = new ChatClientAgentRunOptions(chatOptions); - - // Convert input to chat messages var messages = new List(); foreach (var inputMessage in request.Input.GetInputMessages()) @@ -66,7 +95,6 @@ public async IAsyncEnumerable ExecuteAsync( messages.Add(inputMessage.ToChatMessage()); } - // Use the extension method to convert streaming updates to streaming response events await foreach (var streamingEvent in agent.RunStreamingAsync(messages, options: options, cancellationToken: cancellationToken) .ToStreamingResponseAsync(request, context, cancellationToken).ConfigureAwait(false)) { @@ -75,41 +103,20 @@ public async IAsyncEnumerable ExecuteAsync( } /// - /// Resolves an agent from the service provider based on the request. + /// Extracts the agent name for a request from the agent.name property, falling back to metadata["entity_id"]. /// /// The create response request. - /// The resolved AIAgent instance. - /// Thrown when the agent cannot be resolved. - private AIAgent ResolveAgent(CreateResponse request) + /// The agent name. + private static string? GetAgentName(CreateResponse request) { - // Extract agent name from agent.name or model parameter - var agentName = request.Agent?.Name ?? 
request.Model; - if (string.IsNullOrEmpty(agentName)) - { - throw new InvalidOperationException("No 'agent.name' or 'model' specified in the request."); - } + string? agentName = request.Agent?.Name; - // Resolve the keyed agent service - try + // Fall back to metadata["entity_id"] if agent.name is not present + if (string.IsNullOrEmpty(agentName) && request.Metadata?.TryGetValue("entity_id", out string? entityId) == true) { - return this._serviceProvider.GetRequiredKeyedService(agentName); + agentName = entityId; } - catch (InvalidOperationException ex) - { - this._logger.LogError(ex, "Failed to resolve agent with name '{AgentName}'", agentName); - throw new InvalidOperationException($"Agent '{agentName}' not found. Ensure the agent is registered with AddAIAgent().", ex); - } - } - /// - /// Validates that the agent can be resolved without actually resolving it. - /// This allows early validation before starting async execution. - /// - /// The create response request. - /// Thrown when the agent cannot be resolved. - public void ValidateAgent(CreateResponse request) - { - // Use the same logic as ResolveAgent but don't return the agent - _ = this.ResolveAgent(request); + return agentName; } } diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponseExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponseExecutor.cs index ca4da70b88..b96879f4cc 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponseExecutor.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponseExecutor.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.Threading; +using System.Threading.Tasks; using Microsoft.Agents.AI.Hosting.OpenAI.Responses.Models; namespace Microsoft.Agents.AI.Hosting.OpenAI.Responses; @@ -12,6 +13,16 @@ namespace Microsoft.Agents.AI.Hosting.OpenAI.Responses; /// internal interface IResponseExecutor { + /// + /// Validates a create response request before execution. 
+ /// + /// The create response request to validate. + /// Cancellation token. + /// A if validation fails, null if validation succeeds. + ValueTask ValidateRequestAsync( + CreateResponse request, + CancellationToken cancellationToken = default); + /// /// Executes a response generation request and returns streaming events. /// diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponsesService.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponsesService.cs index 67f7b72f20..b1676ac99c 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponsesService.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/IResponsesService.cs @@ -18,6 +18,17 @@ internal interface IResponsesService /// Default limit for list operations. /// const int DefaultListLimit = 20; + + /// + /// Validates a create response request before execution. + /// + /// The create response request to validate. + /// Cancellation token. + /// A ResponseError if validation fails, null if validation succeeds. + ValueTask ValidateRequestAsync( + CreateResponse request, + CancellationToken cancellationToken = default); + /// /// Creates a model response for the given input. 
/// diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/InMemoryResponsesService.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/InMemoryResponsesService.cs index dfb744596a..2f5b3f4660 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/InMemoryResponsesService.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/InMemoryResponsesService.cs @@ -147,18 +147,27 @@ public InMemoryResponsesService(IResponseExecutor executor, InMemoryStorageOptio this._conversationStorage = conversationStorage; } - public async Task CreateResponseAsync( + public async ValueTask ValidateRequestAsync( CreateResponse request, CancellationToken cancellationToken = default) { - ValidateRequest(request); - - // Validate agent resolution early for HostedAgentResponseExecutor - if (this._executor is HostedAgentResponseExecutor hostedExecutor) + if (request.Conversation is not null && !string.IsNullOrEmpty(request.Conversation.Id) && + !string.IsNullOrEmpty(request.PreviousResponseId)) { - hostedExecutor.ValidateAgent(request); + return new ResponseError + { + Code = "invalid_request", + Message = "Mutually exclusive parameters: 'conversation' and 'previous_response_id'. Ensure you are only providing one of: 'previous_response_id' or 'conversation'." + }; } + return await this._executor.ValidateRequestAsync(request, cancellationToken).ConfigureAwait(false); + } + + public async Task CreateResponseAsync( + CreateResponse request, + CancellationToken cancellationToken = default) + { if (request.Stream == true) { throw new InvalidOperationException("Cannot create a streaming response using CreateResponseAsync. 
Use CreateResponseStreamingAsync instead."); @@ -189,8 +198,6 @@ public async IAsyncEnumerable CreateResponseStreamingAsy CreateResponse request, [EnumeratorCancellation] CancellationToken cancellationToken = default) { - ValidateRequest(request); - if (request.Stream == false) { throw new InvalidOperationException("Cannot create a non-streaming response using CreateResponseStreamingAsync. Use CreateResponseAsync instead."); @@ -342,15 +349,6 @@ public Task> ListResponseInputItemsAsync( }); } - private static void ValidateRequest(CreateResponse request) - { - if (request.Conversation is not null && !string.IsNullOrEmpty(request.Conversation.Id) && - !string.IsNullOrEmpty(request.PreviousResponseId)) - { - throw new InvalidOperationException("Mutually exclusive parameters: 'conversation' and 'previous_response_id'. Ensure you are only providing one of: 'previous_response_id' or 'conversation'."); - } - } - private ResponseState InitializeResponse(string responseId, CreateResponse request) { var metadata = request.Metadata ?? []; @@ -371,7 +369,7 @@ private ResponseState InitializeResponse(string responseId, CreateResponse reque MaxOutputTokens = request.MaxOutputTokens, MaxToolCalls = request.MaxToolCalls, Metadata = metadata, - Model = request.Model ?? "default", + Model = request.Model, Output = [], ParallelToolCalls = request.ParallelToolCalls ?? true, PreviousResponseId = request.PreviousResponseId, diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ItemResource.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ItemResource.cs index 0a543e1be9..289bafbc43 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ItemResource.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ItemResource.cs @@ -888,3 +888,47 @@ internal sealed class MCPCallItemResource : ItemResource [JsonPropertyName("error")] public string? 
Error { get; init; } } + +/// +/// An executor action item resource for workflow execution visualization. +/// +internal sealed class ExecutorActionItemResource : ItemResource +{ + /// + /// The constant item type identifier for executor action items. + /// + public const string ItemType = "executor_action"; + + /// + public override string Type => ItemType; + + /// + /// The executor identifier. + /// + [JsonPropertyName("executor_id")] + public required string ExecutorId { get; init; } + + /// + /// The execution status: "in_progress", "completed", "failed", or "cancelled". + /// + [JsonPropertyName("status")] + public required string Status { get; init; } + + /// + /// The executor result data (for completed status). + /// + [JsonPropertyName("result")] + public JsonElement? Result { get; init; } + + /// + /// The error message (for failed status). + /// + [JsonPropertyName("error")] + public string? Error { get; init; } + + /// + /// The creation timestamp. + /// + [JsonPropertyName("created_at")] + public long CreatedAt { get; init; } +} diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ResponseInput.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ResponseInput.cs index d0555a2c00..d291b93528 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ResponseInput.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/ResponseInput.cs @@ -182,7 +182,9 @@ internal sealed class ResponseInputJsonConverter : JsonConverter return messages is not null ? ResponseInput.FromMessages(messages) : null; } - throw new JsonException($"Unexpected token type for ResponseInput: {reader.TokenType}"); + throw new JsonException( + "ResponseInput must be either a string or an array of messages. " + + $"Objects are not supported. 
Received token type: {reader.TokenType}"); } /// diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/StreamingResponseEvent.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/StreamingResponseEvent.cs index 6d41e10aff..f39c6e4bca 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/StreamingResponseEvent.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/Models/StreamingResponseEvent.cs @@ -565,7 +565,7 @@ internal sealed class StreamingWorkflowEventComplete : StreamingResponseEvent /// /// The constant event type identifier for workflow event events. /// - public const string EventType = "response.workflow_event.complete"; + public const string EventType = "response.workflow_event.completed"; /// [JsonIgnore] diff --git a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/ResponsesHttpHandler.cs b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/ResponsesHttpHandler.cs index 31f61e967e..b73cdebda5 100644 --- a/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/ResponsesHttpHandler.cs +++ b/dotnet/src/Microsoft.Agents.AI.Hosting.OpenAI/Responses/ResponsesHttpHandler.cs @@ -34,6 +34,21 @@ public async Task CreateResponseAsync( [FromQuery] bool? stream, CancellationToken cancellationToken) { + // Validate the request first + ResponseError? 
validationError = await this._responsesService.ValidateRequestAsync(request, cancellationToken).ConfigureAwait(false); + if (validationError is not null) + { + return Results.BadRequest(new ErrorResponse + { + Error = new ErrorDetails + { + Message = validationError.Message, + Type = "invalid_request_error", + Code = validationError.Code + } + }); + } + try { // Handle streaming vs non-streaming @@ -55,45 +70,24 @@ public async Task CreateResponseAsync( request, cancellationToken: cancellationToken).ConfigureAwait(false); - return Results.Ok(response); - } - catch (InvalidOperationException ex) when (ex.Message.Contains("Mutually exclusive")) - { - // Return OpenAI-style error for mutual exclusivity violations - return Results.BadRequest(new ErrorResponse + return response.Status switch { - Error = new ErrorDetails - { - Message = ex.Message, - Type = "invalid_request_error", - Code = "mutually_exclusive_parameters" - } - }); - } - catch (InvalidOperationException ex) when (ex.Message.Contains("not found") || ex.Message.Contains("does not exist")) - { - // Return OpenAI-style error for not found errors - return Results.NotFound(new ErrorResponse - { - Error = new ErrorDetails - { - Message = ex.Message, - Type = "invalid_request_error" - } - }); + ResponseStatus.Failed when response.Error is { } error => Results.Problem( + detail: error.Message, + statusCode: StatusCodes.Status500InternalServerError, + title: error.Code ?? 
"Internal Server Error"), + ResponseStatus.Failed => Results.Problem(), + ResponseStatus.Queued => Results.Accepted(value: response), + _ => Results.Ok(response) + }; } - catch (InvalidOperationException ex) when (ex.Message.Contains("No 'agent.name' or 'model' specified")) + catch (Exception ex) { - // Return OpenAI-style error for missing required parameters - return Results.BadRequest(new ErrorResponse - { - Error = new ErrorDetails - { - Message = ex.Message, - Type = "invalid_request_error", - Code = "missing_required_parameter" - } - }); + // Return InternalServerError for unexpected exceptions + return Results.Problem( + detail: ex.Message, + statusCode: StatusCodes.Status500InternalServerError, + title: "Internal Server Error"); } } diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/ChatForwardingExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatForwardingExecutor.cs new file mode 100644 index 0000000000..5bb2f5e237 --- /dev/null +++ b/dotnet/src/Microsoft.Agents.AI.Workflows/ChatForwardingExecutor.cs @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Workflows; + +/// +/// Provides configuration options for . +/// +public class ChatForwardingExecutorOptions +{ + /// + /// Gets or sets the chat role to use when converting string messages to instances. + /// If set, the executor will accept string messages and convert them to chat messages with this role. + /// + public ChatRole? StringMessageChatRole { get; set; } +} + +/// +/// A ChatProtocol executor that forwards all messages it receives. Useful for splitting inputs into parallel +/// processing paths. +/// +/// This executor is designed to be cross-run shareable and can be reset to its initial state. It handles +/// multiple chat-related types, enabling flexible message forwarding scenarios. 
Thread safety and reusability are +/// ensured by its design. +/// The unique identifier for the executor instance. Used to distinguish this executor within the system. +/// Optional configuration settings for the executor. If null, default options are used. +public sealed class ChatForwardingExecutor(string id, ChatForwardingExecutorOptions? options = null) : Executor(id, declareCrossRunShareable: true), IResettableExecutor +{ + private readonly ChatRole? _stringMessageChatRole = options?.StringMessageChatRole; + + /// + protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) + { + if (this._stringMessageChatRole.HasValue) + { + routeBuilder = routeBuilder.AddHandler( + (message, context) => context.SendMessageAsync(new ChatMessage(ChatRole.User, message))); + } + + return routeBuilder.AddHandler(ForwardMessageAsync) + .AddHandler>(ForwardMessagesAsync) + .AddHandler(ForwardMessagesAsync) + .AddHandler>(ForwardMessagesAsync) + .AddHandler(ForwardTurnTokenAsync); + } + + private static ValueTask ForwardMessageAsync(ChatMessage message, IWorkflowContext context, CancellationToken cancellationToken) + => context.SendMessageAsync(message, cancellationToken); + + // Note that this can be used to split a turn into multiple parallel turns taken, which will cause streaming ChatMessages + // to overlap. + private static ValueTask ForwardTurnTokenAsync(TurnToken message, IWorkflowContext context, CancellationToken cancellationToken) + => context.SendMessageAsync(message, cancellationToken); + + // TODO: This is not ideal, but until we have a way of guaranteeing correct routing of interfaces across serialization + // boundaries, we need to do type unification. It behaves better when used as a handler in ChatProtocolExecutor because + // it is a strictly contravariant use, whereas this forces invariance on the type because it is directly forwarded. 
+ private static ValueTask ForwardMessagesAsync(IEnumerable messages, IWorkflowContext context, CancellationToken cancellationToken) + => context.SendMessageAsync(messages is List messageList ? messageList : messages.ToList(), cancellationToken); + + private static ValueTask ForwardMessagesAsync(ChatMessage[] messages, IWorkflowContext context, CancellationToken cancellationToken) + => context.SendMessageAsync(messages, cancellationToken); + + /// + public ValueTask ResetAsync() => default; +} diff --git a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/ChatForwardingExecutor.cs b/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/ChatForwardingExecutor.cs deleted file mode 100644 index b395dd4216..0000000000 --- a/dotnet/src/Microsoft.Agents.AI.Workflows/Specialized/ChatForwardingExecutor.cs +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Microsoft. All rights reserved. - -using System.Collections.Generic; -using System.Threading.Tasks; -using Microsoft.Extensions.AI; - -namespace Microsoft.Agents.AI.Workflows.Specialized; - -/// Executor that forwards all messages. 
-internal sealed class ChatForwardingExecutor(string id) : Executor(id, declareCrossRunShareable: true), IResettableExecutor -{ - protected override RouteBuilder ConfigureRoutes(RouteBuilder routeBuilder) => - routeBuilder - .AddHandler((message, context, cancellationToken) => context.SendMessageAsync(new ChatMessage(ChatRole.User, message), cancellationToken: cancellationToken)) - .AddHandler((message, context, cancellationToken) => context.SendMessageAsync(message, cancellationToken: cancellationToken)) - .AddHandler>((messages, context, cancellationToken) => context.SendMessageAsync(messages, cancellationToken: cancellationToken)) - .AddHandler((turnToken, context, cancellationToken) => context.SendMessageAsync(turnToken, cancellationToken: cancellationToken)); - - public ValueTask ResetAsync() => default; -} diff --git a/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/AGUIChatClientTests.cs b/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/AGUIChatClientTests.cs index 06045343c1..6ce89101a0 100644 --- a/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/AGUIChatClientTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/AGUIChatClientTests.cs @@ -1282,6 +1282,312 @@ public async Task GetStreamingResponseAsync_EnsuresConversationIdIsNull_ForInner // AG-UI requirement: full history on every turn (which happens when ConversationId is null for FunctionInvokingChatClient) Assert.True(captureHandler.RequestWasMade); } + + [Fact] + public async Task GetStreamingResponseAsync_ExtractsStateFromDataContent_AndRemovesStateMessageAsync() + { + // Arrange + var stateData = new { counter = 42, status = "active" }; + string stateJson = JsonSerializer.Serialize(stateData); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + var dataContent = new DataContent(stateBytes, "application/json"); + + var captureHandler = new StateCapturingTestDelegatingHandler(); + captureHandler.AddResponse( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, 
+ new TextMessageStartEvent { MessageId = "msg1", Role = AGUIRoles.Assistant }, + new TextMessageContentEvent { MessageId = "msg1", Delta = "Response" }, + new TextMessageEndEvent { MessageId = "msg1" }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]); + using HttpClient httpClient = new(captureHandler); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, AGUIJsonSerializerContext.Default.Options); + List messages = + [ + new ChatMessage(ChatRole.User, "Hello"), + new ChatMessage(ChatRole.System, [dataContent]) + ]; + + // Act + await foreach (var _ in chatClient.GetStreamingResponseAsync(messages, null)) + { + // Just consume the stream + } + + // Assert + Assert.True(captureHandler.RequestWasMade); + Assert.NotNull(captureHandler.CapturedState); + Assert.Equal(42, captureHandler.CapturedState.Value.GetProperty("counter").GetInt32()); + Assert.Equal("active", captureHandler.CapturedState.Value.GetProperty("status").GetString()); + + // Verify state message was removed - only user message should be in the request + Assert.Equal(1, captureHandler.CapturedMessageCount); + } + + [Fact] + public async Task GetStreamingResponseAsync_WithNoStateDataContent_SendsEmptyStateAsync() + { + // Arrange + var captureHandler = new StateCapturingTestDelegatingHandler(); + captureHandler.AddResponse( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new TextMessageStartEvent { MessageId = "msg1", Role = AGUIRoles.Assistant }, + new TextMessageContentEvent { MessageId = "msg1", Delta = "Response" }, + new TextMessageEndEvent { MessageId = "msg1" }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]); + using HttpClient httpClient = new(captureHandler); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, AGUIJsonSerializerContext.Default.Options); + List messages = [new ChatMessage(ChatRole.User, "Hello")]; + + // Act + await foreach (var _ in 
chatClient.GetStreamingResponseAsync(messages, null)) + { + // Just consume the stream + } + + // Assert + Assert.True(captureHandler.RequestWasMade); + Assert.Null(captureHandler.CapturedState); + } + + [Fact] + public async Task GetStreamingResponseAsync_WithMalformedStateJson_ThrowsInvalidOperationExceptionAsync() + { + // Arrange + byte[] invalidJson = System.Text.Encoding.UTF8.GetBytes("{invalid json"); + var dataContent = new DataContent(invalidJson, "application/json"); + + using HttpClient httpClient = this.CreateMockHttpClient([]); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, AGUIJsonSerializerContext.Default.Options); + List messages = + [ + new ChatMessage(ChatRole.User, "Hello"), + new ChatMessage(ChatRole.System, [dataContent]) + ]; + + // Act & Assert + InvalidOperationException ex = await Assert.ThrowsAsync(async () => + { + await foreach (var _ in chatClient.GetStreamingResponseAsync(messages, null)) + { + // Just consume the stream + } + }); + + Assert.Contains("Failed to deserialize state JSON", ex.Message); + } + + [Fact] + public async Task GetStreamingResponseAsync_WithEmptyStateObject_SendsEmptyObjectAsync() + { + // Arrange + var emptyState = new { }; + string stateJson = JsonSerializer.Serialize(emptyState); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + var dataContent = new DataContent(stateBytes, "application/json"); + + var captureHandler = new StateCapturingTestDelegatingHandler(); + captureHandler.AddResponse( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]); + using HttpClient httpClient = new(captureHandler); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, AGUIJsonSerializerContext.Default.Options); + List messages = + [ + new ChatMessage(ChatRole.User, "Hello"), + new ChatMessage(ChatRole.System, [dataContent]) + ]; + + // Act + await foreach (var _ 
in chatClient.GetStreamingResponseAsync(messages, null)) + { + // Just consume the stream + } + + // Assert + Assert.True(captureHandler.RequestWasMade); + Assert.NotNull(captureHandler.CapturedState); + Assert.Equal(JsonValueKind.Object, captureHandler.CapturedState.Value.ValueKind); + } + + [Fact] + public async Task GetStreamingResponseAsync_OnlyProcessesDataContentFromLastMessage_IgnoresEarlierOnesAsync() + { + // Arrange + var oldState = new { counter = 10 }; + string oldStateJson = JsonSerializer.Serialize(oldState); + byte[] oldStateBytes = System.Text.Encoding.UTF8.GetBytes(oldStateJson); + var oldDataContent = new DataContent(oldStateBytes, "application/json"); + + var newState = new { counter = 20 }; + string newStateJson = JsonSerializer.Serialize(newState); + byte[] newStateBytes = System.Text.Encoding.UTF8.GetBytes(newStateJson); + var newDataContent = new DataContent(newStateBytes, "application/json"); + + var captureHandler = new StateCapturingTestDelegatingHandler(); + captureHandler.AddResponse( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]); + using HttpClient httpClient = new(captureHandler); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, AGUIJsonSerializerContext.Default.Options); + List messages = + [ + new ChatMessage(ChatRole.User, "First message"), + new ChatMessage(ChatRole.System, [oldDataContent]), + new ChatMessage(ChatRole.User, "Second message"), + new ChatMessage(ChatRole.System, [newDataContent]) + ]; + + // Act + await foreach (var _ in chatClient.GetStreamingResponseAsync(messages, null)) + { + // Just consume the stream + } + + // Assert + Assert.True(captureHandler.RequestWasMade); + Assert.NotNull(captureHandler.CapturedState); + // Should use the new state from the last message + Assert.Equal(20, captureHandler.CapturedState.Value.GetProperty("counter").GetInt32()); + + // Should have removed only the 
last state message + Assert.Equal(3, captureHandler.CapturedMessageCount); + } + + [Fact] + public async Task GetStreamingResponseAsync_WithNonJsonMediaType_IgnoresDataContentAsync() + { + // Arrange + byte[] imageData = System.Text.Encoding.UTF8.GetBytes("fake image data"); + var dataContent = new DataContent(imageData, "image/png"); + + var captureHandler = new StateCapturingTestDelegatingHandler(); + captureHandler.AddResponse( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]); + using HttpClient httpClient = new(captureHandler); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, AGUIJsonSerializerContext.Default.Options); + List messages = + [ + new ChatMessage(ChatRole.User, [new TextContent("Hello"), dataContent]) + ]; + + // Act + await foreach (var _ in chatClient.GetStreamingResponseAsync(messages, null)) + { + // Just consume the stream + } + + // Assert + Assert.True(captureHandler.RequestWasMade); + Assert.Null(captureHandler.CapturedState); + // Message should not be removed since it's not state + Assert.Equal(1, captureHandler.CapturedMessageCount); + } + + [Fact] + public async Task GetStreamingResponseAsync_RoundTripState_PreservesJsonStructureAsync() + { + // Arrange - Server returns state snapshot + var returnedState = new { counter = 100, nested = new { value = "test" } }; + JsonElement stateSnapshot = JsonSerializer.SerializeToElement(returnedState); + + var captureHandler = new StateCapturingTestDelegatingHandler(); + captureHandler.AddResponse( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateSnapshotEvent { Snapshot = stateSnapshot }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]); + captureHandler.AddResponse( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run2" }, + new TextMessageStartEvent { MessageId = "msg1", Role = AGUIRoles.Assistant }, + new 
TextMessageContentEvent { MessageId = "msg1", Delta = "Done" }, + new TextMessageEndEvent { MessageId = "msg1" }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run2" } + ]); + using HttpClient httpClient = new(captureHandler); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, AGUIJsonSerializerContext.Default.Options); + List messages = [new ChatMessage(ChatRole.User, "Hello")]; + + // Act - First turn: receive state + DataContent? receivedStateContent = null; + await foreach (var update in chatClient.GetStreamingResponseAsync(messages, null)) + { + if (update.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")) + { + receivedStateContent = (DataContent)update.Contents.First(c => c is DataContent); + } + } + + // Second turn: send the received state back + Assert.NotNull(receivedStateContent); + messages.Add(new ChatMessage(ChatRole.System, [receivedStateContent])); + await foreach (var _ in chatClient.GetStreamingResponseAsync(messages, null)) + { + // Just consume the stream + } + + // Assert - Verify the round-tripped state + Assert.NotNull(captureHandler.CapturedState); + Assert.Equal(100, captureHandler.CapturedState.Value.GetProperty("counter").GetInt32()); + Assert.Equal("test", captureHandler.CapturedState.Value.GetProperty("nested").GetProperty("value").GetString()); + } + + [Fact] + public async Task GetStreamingResponseAsync_ReceivesStateSnapshot_AsDataContentWithAdditionalPropertiesAsync() + { + // Arrange + var state = new { sessionId = "abc123", step = 5 }; + JsonElement stateSnapshot = JsonSerializer.SerializeToElement(state); + + using HttpClient httpClient = this.CreateMockHttpClient( + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateSnapshotEvent { Snapshot = stateSnapshot }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]); + + var chatClient = new AGUIChatClient(httpClient, "http://localhost/agent", null, 
AGUIJsonSerializerContext.Default.Options); + List messages = [new ChatMessage(ChatRole.User, "Test")]; + + // Act + List updates = []; + await foreach (var update in chatClient.GetStreamingResponseAsync(messages, null)) + { + updates.Add(update); + } + + // Assert + ChatResponseUpdate stateUpdate = updates.First(u => u.Contents.Any(c => c is DataContent)); + Assert.NotNull(stateUpdate.AdditionalProperties); + Assert.True((bool)stateUpdate.AdditionalProperties!["is_state_snapshot"]!); + + DataContent dataContent = (DataContent)stateUpdate.Contents[0]; + Assert.Equal("application/json", dataContent.MediaType); + + string jsonText = System.Text.Encoding.UTF8.GetString(dataContent.Data.ToArray()); + JsonElement deserializedState = JsonSerializer.Deserialize(jsonText); + Assert.Equal("abc123", deserializedState.GetProperty("sessionId").GetString()); + Assert.Equal(5, deserializedState.GetProperty("step").GetInt32()); + } } internal sealed class TestDelegatingHandler : DelegatingHandler @@ -1376,3 +1682,58 @@ private static HttpResponseMessage CreateResponse(BaseEvent[] events) }; } } + +internal sealed class StateCapturingTestDelegatingHandler : DelegatingHandler +{ + private readonly Queue>> _responseFactories = new(); + + public bool RequestWasMade { get; private set; } + public JsonElement? CapturedState { get; private set; } + public int CapturedMessageCount { get; private set; } + + public void AddResponse(BaseEvent[] events) + { + this._responseFactories.Enqueue(_ => Task.FromResult(CreateResponse(events))); + } + + protected override async Task SendAsync(HttpRequestMessage request, CancellationToken cancellationToken) + { + this.RequestWasMade = true; + + // Capture the state and message count from the request +#if NET472 || NETSTANDARD2_0 + string requestBody = await request.Content!.ReadAsStringAsync().ConfigureAwait(false); +#else + string requestBody = await request.Content!.ReadAsStringAsync(cancellationToken).ConfigureAwait(false); +#endif + RunAgentInput? 
input = JsonSerializer.Deserialize(requestBody, AGUIJsonSerializerContext.Default.RunAgentInput); + if (input != null) + { + if (input.State.ValueKind != JsonValueKind.Undefined && input.State.ValueKind != JsonValueKind.Null) + { + this.CapturedState = input.State; + } + this.CapturedMessageCount = input.Messages.Count(); + } + + if (this._responseFactories.Count == 0) + { + throw new InvalidOperationException("No more responses configured for StateCapturingTestDelegatingHandler."); + } + + var factory = this._responseFactories.Dequeue(); + return await factory(request); + } + + private static HttpResponseMessage CreateResponse(BaseEvent[] events) + { + string sseContent = string.Join("", events.Select(e => + $"data: {JsonSerializer.Serialize(e, AGUIJsonSerializerContext.Default.BaseEvent)}\n\n")); + + return new HttpResponseMessage + { + StatusCode = HttpStatusCode.OK, + Content = new StringContent(sseContent) + }; + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/ChatResponseUpdateAGUIExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/ChatResponseUpdateAGUIExtensionsTests.cs index c980dbd645..3f6df1eeeb 100644 --- a/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/ChatResponseUpdateAGUIExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.AGUI.UnitTests/ChatResponseUpdateAGUIExtensionsTests.cs @@ -369,4 +369,412 @@ public async Task AsChatResponseUpdatesAsync_WithMultipleSequentialToolCalls_Pro Assert.Equal("call_2", functionCalls[1].CallId); Assert.Equal("Tool2", functionCalls[1].Name); } + + [Fact] + public async Task AsChatResponseUpdatesAsync_ConvertsStateSnapshotEvent_ToDataContentWithJsonAsync() + { + // Arrange + JsonElement stateSnapshot = JsonSerializer.SerializeToElement(new { counter = 42, status = "active" }); + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateSnapshotEvent { Snapshot = stateSnapshot }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + 
]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + ChatResponseUpdate stateUpdate = updates.First(u => u.Contents.Any(c => c is DataContent)); + Assert.Equal(ChatRole.Assistant, stateUpdate.Role); + Assert.Equal("thread1", stateUpdate.ConversationId); + Assert.Equal("run1", stateUpdate.ResponseId); + + DataContent dataContent = Assert.IsType(stateUpdate.Contents[0]); + Assert.Equal("application/json", dataContent.MediaType); + + // Verify the JSON content + string jsonText = System.Text.Encoding.UTF8.GetString(dataContent.Data.ToArray()); + JsonElement deserializedState = JsonSerializer.Deserialize(jsonText); + Assert.Equal(42, deserializedState.GetProperty("counter").GetInt32()); + Assert.Equal("active", deserializedState.GetProperty("status").GetString()); + + // Verify additional properties + Assert.NotNull(stateUpdate.AdditionalProperties); + Assert.True((bool)stateUpdate.AdditionalProperties["is_state_snapshot"]!); + } + + [Fact] + public async Task AsChatResponseUpdatesAsync_WithNullStateSnapshot_DoesNotEmitUpdateAsync() + { + // Arrange + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateSnapshotEvent { Snapshot = null }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + Assert.DoesNotContain(updates, u => u.Contents.Any(c => c is DataContent)); + } + + [Fact] + public async Task AsChatResponseUpdatesAsync_WithEmptyObjectStateSnapshot_EmitsDataContentAsync() + { + // Arrange + JsonElement emptyState = JsonSerializer.SerializeToElement(new { }); + List events = + [ + new RunStartedEvent { ThreadId 
= "thread1", RunId = "run1" }, + new StateSnapshotEvent { Snapshot = emptyState }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + ChatResponseUpdate stateUpdate = updates.First(u => u.Contents.Any(c => c is DataContent)); + DataContent dataContent = Assert.IsType(stateUpdate.Contents[0]); + string jsonText = System.Text.Encoding.UTF8.GetString(dataContent.Data.ToArray()); + Assert.Equal("{}", jsonText); + } + + [Fact] + public async Task AsChatResponseUpdatesAsync_WithComplexStateSnapshot_PreservesJsonStructureAsync() + { + // Arrange + var complexState = new + { + user = new { name = "Alice", age = 30 }, + items = new[] { "item1", "item2", "item3" }, + metadata = new { timestamp = "2024-01-01T00:00:00Z", version = 2 } + }; + JsonElement stateSnapshot = JsonSerializer.SerializeToElement(complexState); + List events = + [ + new StateSnapshotEvent { Snapshot = stateSnapshot } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + ChatResponseUpdate stateUpdate = updates.First(); + DataContent dataContent = Assert.IsType(stateUpdate.Contents[0]); + string jsonText = System.Text.Encoding.UTF8.GetString(dataContent.Data.ToArray()); + JsonElement roundTrippedState = JsonSerializer.Deserialize(jsonText); + + Assert.Equal("Alice", roundTrippedState.GetProperty("user").GetProperty("name").GetString()); + Assert.Equal(30, roundTrippedState.GetProperty("user").GetProperty("age").GetInt32()); + Assert.Equal(3, roundTrippedState.GetProperty("items").GetArrayLength()); + Assert.Equal("item1", roundTrippedState.GetProperty("items")[0].GetString()); + } + + 
[Fact] + public async Task AsChatResponseUpdatesAsync_WithStateSnapshotAndTextMessages_EmitsBothAsync() + { + // Arrange + JsonElement state = JsonSerializer.SerializeToElement(new { step = 1 }); + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new TextMessageStartEvent { MessageId = "msg1", Role = AGUIRoles.Assistant }, + new TextMessageContentEvent { MessageId = "msg1", Delta = "Processing..." }, + new TextMessageEndEvent { MessageId = "msg1" }, + new StateSnapshotEvent { Snapshot = state }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + Assert.Contains(updates, u => u.Contents.Any(c => c is TextContent)); + Assert.Contains(updates, u => u.Contents.Any(c => c is DataContent)); + } + + #region State Delta Tests + + [Fact] + public async Task AsChatResponseUpdatesAsync_ConvertsStateDeltaEvent_ToDataContentWithJsonPatchAsync() + { + // Arrange - Create JSON Patch operations (RFC 6902) + JsonElement stateDelta = JsonSerializer.SerializeToElement(new object[] + { + new { op = "replace", path = "/counter", value = 43 }, + new { op = "add", path = "/newField", value = "test" } + }); + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateDeltaEvent { Delta = stateDelta }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + ChatResponseUpdate deltaUpdate = updates.First(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json-patch+json")); + Assert.Equal(ChatRole.Assistant, 
deltaUpdate.Role); + Assert.Equal("thread1", deltaUpdate.ConversationId); + Assert.Equal("run1", deltaUpdate.ResponseId); + + DataContent dataContent = Assert.IsType(deltaUpdate.Contents[0]); + Assert.Equal("application/json-patch+json", dataContent.MediaType); + + // Verify the JSON Patch content + string jsonText = System.Text.Encoding.UTF8.GetString(dataContent.Data.ToArray()); + JsonElement deserializedDelta = JsonSerializer.Deserialize(jsonText); + Assert.Equal(JsonValueKind.Array, deserializedDelta.ValueKind); + Assert.Equal(2, deserializedDelta.GetArrayLength()); + + // Verify first operation + JsonElement firstOp = deserializedDelta[0]; + Assert.Equal("replace", firstOp.GetProperty("op").GetString()); + Assert.Equal("/counter", firstOp.GetProperty("path").GetString()); + Assert.Equal(43, firstOp.GetProperty("value").GetInt32()); + + // Verify second operation + JsonElement secondOp = deserializedDelta[1]; + Assert.Equal("add", secondOp.GetProperty("op").GetString()); + Assert.Equal("/newField", secondOp.GetProperty("path").GetString()); + Assert.Equal("test", secondOp.GetProperty("value").GetString()); + + // Verify additional properties + Assert.NotNull(deltaUpdate.AdditionalProperties); + Assert.True((bool)deltaUpdate.AdditionalProperties["is_state_delta"]!); + } + + [Fact] + public async Task AsChatResponseUpdatesAsync_WithNullStateDelta_DoesNotEmitUpdateAsync() + { + // Arrange + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateDeltaEvent { Delta = null }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert - Only run started and finished should be present + Assert.Equal(2, updates.Count); + Assert.IsType(updates[0]); // Run started + Assert.IsType(updates[1]); // Run 
finished + Assert.DoesNotContain(updates, u => u.Contents.Any(c => c is DataContent)); + } + + [Fact] + public async Task AsChatResponseUpdatesAsync_WithEmptyStateDelta_EmitsUpdateAsync() + { + // Arrange - Empty JSON Patch array is valid + JsonElement emptyDelta = JsonSerializer.SerializeToElement(Array.Empty()); + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateDeltaEvent { Delta = emptyDelta }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + Assert.Contains(updates, u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json-patch+json")); + } + + [Fact] + public async Task AsChatResponseUpdatesAsync_WithMultipleStateDeltaEvents_ConvertsAllAsync() + { + // Arrange + JsonElement delta1 = JsonSerializer.SerializeToElement(new[] { new { op = "replace", path = "/counter", value = 1 } }); + JsonElement delta2 = JsonSerializer.SerializeToElement(new[] { new { op = "replace", path = "/counter", value = 2 } }); + JsonElement delta3 = JsonSerializer.SerializeToElement(new[] { new { op = "replace", path = "/counter", value = 3 } }); + + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateDeltaEvent { Delta = delta1 }, + new StateDeltaEvent { Delta = delta2 }, + new StateDeltaEvent { Delta = delta3 }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + // Assert + var deltaUpdates = updates.Where(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == 
"application/json-patch+json")).ToList(); + Assert.Equal(3, deltaUpdates.Count); + } + + [Fact] + public async Task AsAGUIEventStreamAsync_ConvertsDataContentWithJsonPatch_ToStateDeltaEventAsync() + { + // Arrange - Create a ChatResponseUpdate with JSON Patch DataContent + JsonElement patchOps = JsonSerializer.SerializeToElement(new object[] + { + new { op = "remove", path = "/oldField" }, + new { op = "add", path = "/newField", value = "newValue" } + }); + byte[] jsonBytes = JsonSerializer.SerializeToUtf8Bytes(patchOps); + DataContent dataContent = new(jsonBytes, "application/json-patch+json"); + + List updates = + [ + new ChatResponseUpdate(ChatRole.Assistant, [dataContent]) + { + MessageId = "msg1" + } + ]; + + // Act + List outputEvents = []; + await foreach (BaseEvent evt in updates.ToAsyncEnumerableAsync().AsAGUIEventStreamAsync("thread1", "run1", AGUIJsonSerializerContext.Default.Options)) + { + outputEvents.Add(evt); + } + + // Assert + StateDeltaEvent? deltaEvent = outputEvents.OfType().FirstOrDefault(); + Assert.NotNull(deltaEvent); + Assert.NotNull(deltaEvent.Delta); + Assert.Equal(JsonValueKind.Array, deltaEvent.Delta.Value.ValueKind); + + // Verify patch operations + JsonElement delta = deltaEvent.Delta.Value; + Assert.Equal(2, delta.GetArrayLength()); + Assert.Equal("remove", delta[0].GetProperty("op").GetString()); + Assert.Equal("/oldField", delta[0].GetProperty("path").GetString()); + Assert.Equal("add", delta[1].GetProperty("op").GetString()); + Assert.Equal("/newField", delta[1].GetProperty("path").GetString()); + } + + [Fact] + public async Task AsAGUIEventStreamAsync_WithBothSnapshotAndDelta_EmitsBothEventsAsync() + { + // Arrange + JsonElement snapshot = JsonSerializer.SerializeToElement(new { counter = 0 }); + byte[] snapshotBytes = JsonSerializer.SerializeToUtf8Bytes(snapshot); + DataContent snapshotContent = new(snapshotBytes, "application/json"); + + JsonElement delta = JsonSerializer.SerializeToElement(new[] { new { op = "replace", path = 
"/counter", value = 1 } }); + byte[] deltaBytes = JsonSerializer.SerializeToUtf8Bytes(delta); + DataContent deltaContent = new(deltaBytes, "application/json-patch+json"); + + List updates = + [ + new ChatResponseUpdate(ChatRole.Assistant, [snapshotContent]) { MessageId = "msg1" }, + new ChatResponseUpdate(ChatRole.Assistant, [deltaContent]) { MessageId = "msg2" } + ]; + + // Act + List outputEvents = []; + await foreach (BaseEvent evt in updates.ToAsyncEnumerableAsync().AsAGUIEventStreamAsync("thread1", "run1", AGUIJsonSerializerContext.Default.Options)) + { + outputEvents.Add(evt); + } + + // Assert + Assert.Contains(outputEvents, e => e is StateSnapshotEvent); + Assert.Contains(outputEvents, e => e is StateDeltaEvent); + } + + [Fact] + public async Task StateDeltaEvent_RoundTrip_PreservesJsonPatchOperationsAsync() + { + // Arrange - Create complex JSON Patch with various operations + JsonElement originalDelta = JsonSerializer.SerializeToElement(new object[] + { + new { op = "add", path = "/user/email", value = "test@example.com" }, + new { op = "remove", path = "/user/tempData" }, + new { op = "replace", path = "/user/lastLogin", value = "2025-11-09T12:00:00Z" }, + new { op = "move", from = "/user/oldAddress", path = "/user/previousAddress" }, + new { op = "copy", from = "/user/name", path = "/user/displayName" }, + new { op = "test", path = "/user/version", value = 2 } + }); + + List events = + [ + new RunStartedEvent { ThreadId = "thread1", RunId = "run1" }, + new StateDeltaEvent { Delta = originalDelta }, + new RunFinishedEvent { ThreadId = "thread1", RunId = "run1" } + ]; + + // Act - Convert to ChatResponseUpdate and back to events + List updates = []; + await foreach (ChatResponseUpdate update in events.ToAsyncEnumerableAsync().AsChatResponseUpdatesAsync(AGUIJsonSerializerContext.Default.Options)) + { + updates.Add(update); + } + + List roundTripEvents = []; + await foreach (BaseEvent evt in updates.ToAsyncEnumerableAsync().AsAGUIEventStreamAsync("thread1", 
"run1", AGUIJsonSerializerContext.Default.Options)) + { + roundTripEvents.Add(evt); + } + + // Assert + StateDeltaEvent? roundTripDelta = roundTripEvents.OfType().FirstOrDefault(); + Assert.NotNull(roundTripDelta); + Assert.NotNull(roundTripDelta.Delta); + + JsonElement delta = roundTripDelta.Delta.Value; + Assert.Equal(6, delta.GetArrayLength()); + + // Verify each operation type + Assert.Equal("add", delta[0].GetProperty("op").GetString()); + Assert.Equal("remove", delta[1].GetProperty("op").GetString()); + Assert.Equal("replace", delta[2].GetProperty("op").GetString()); + Assert.Equal("move", delta[3].GetProperty("op").GetString()); + Assert.Equal("copy", delta[4].GetProperty("op").GetString()); + Assert.Equal("test", delta[5].GetProperty("op").GetString()); + } + + #endregion State Delta Tests } diff --git a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunOptionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunOptionsTests.cs index 40901a4969..32560949fb 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunOptionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Abstractions.UnitTests/AgentRunOptionsTests.cs @@ -18,7 +18,12 @@ public void CloningConstructorCopiesProperties() var options = new AgentRunOptions { ContinuationToken = new object(), - AllowBackgroundResponses = true + AllowBackgroundResponses = true, + AdditionalProperties = new AdditionalPropertiesDictionary + { + ["key1"] = "value1", + ["key2"] = 42 + } }; // Act @@ -28,6 +33,10 @@ public void CloningConstructorCopiesProperties() Assert.NotNull(clone); Assert.Same(options.ContinuationToken, clone.ContinuationToken); Assert.Equal(options.AllowBackgroundResponses, clone.AllowBackgroundResponses); + Assert.NotNull(clone.AdditionalProperties); + Assert.NotSame(options.AdditionalProperties, clone.AdditionalProperties); + Assert.Equal("value1", clone.AdditionalProperties["key1"]); + Assert.Equal(42, clone.AdditionalProperties["key2"]); } 
[Fact] @@ -42,7 +51,12 @@ public void JsonSerializationRoundtrips() var options = new AgentRunOptions { ContinuationToken = ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }), - AllowBackgroundResponses = true + AllowBackgroundResponses = true, + AdditionalProperties = new AdditionalPropertiesDictionary + { + ["key1"] = "value1", + ["key2"] = 42 + } }; // Act @@ -54,5 +68,13 @@ public void JsonSerializationRoundtrips() Assert.NotNull(deserialized); Assert.Equivalent(ResponseContinuationToken.FromBytes(new byte[] { 1, 2, 3 }), deserialized!.ContinuationToken); Assert.Equal(options.AllowBackgroundResponses, deserialized.AllowBackgroundResponses); + Assert.NotNull(deserialized.AdditionalProperties); + Assert.Equal(2, deserialized.AdditionalProperties.Count); + Assert.True(deserialized.AdditionalProperties.TryGetValue("key1", out object? value1)); + Assert.IsType(value1); + Assert.Equal("value1", ((JsonElement)value1!).GetString()); + Assert.True(deserialized.AdditionalProperties.TryGetValue("key2", out object? value2)); + Assert.IsType(value2); + Assert.Equal(42, ((JsonElement)value2!).GetInt32()); } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/A2AIntegrationTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/A2AIntegrationTests.cs new file mode 100644 index 0000000000..48cb19789a --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/A2AIntegrationTests.cs @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System; +using System.Text.Json; +using System.Threading.Tasks; +using A2A; +using Microsoft.Agents.AI.Hosting.A2A.UnitTests.Internal; +using Microsoft.AspNetCore.Builder; +using Microsoft.AspNetCore.Hosting.Server; +using Microsoft.AspNetCore.TestHost; +using Microsoft.Extensions.AI; +using Microsoft.Extensions.DependencyInjection; + +namespace Microsoft.Agents.AI.Hosting.A2A.UnitTests; + +public sealed class A2AIntegrationTests +{ + /// + /// Verifies that calling the A2A card endpoint with MapA2A returns an agent card with a URL populated. + /// + [Fact] + public async Task MapA2A_WithAgentCard_CardEndpointReturnsCardWithUrlAsync() + { + // Arrange + WebApplicationBuilder builder = WebApplication.CreateBuilder(); + builder.WebHost.UseTestServer(); + + IChatClient mockChatClient = new DummyChatClient(); + builder.Services.AddKeyedSingleton("chat-client", mockChatClient); + IHostedAgentBuilder agentBuilder = builder.AddAIAgent("test-agent", "Test instructions", chatClientServiceKey: "chat-client"); + builder.Services.AddLogging(); + + using WebApplication app = builder.Build(); + + var agentCard = new AgentCard + { + Name = "Test Agent", + Description = "A test agent for A2A communication", + Version = "1.0" + }; + + // Map A2A with the agent card + app.MapA2A(agentBuilder, "/a2a/test-agent", agentCard); + + await app.StartAsync(); + + try + { + // Get the test server client + TestServer testServer = app.Services.GetRequiredService() as TestServer + ?? 
throw new InvalidOperationException("TestServer not found"); + var httpClient = testServer.CreateClient(); + + // Act - Query the agent card endpoint + var requestUri = new Uri("/a2a/test-agent/v1/card", UriKind.Relative); + var response = await httpClient.GetAsync(requestUri); + + // Assert + Assert.True(response.IsSuccessStatusCode, $"Expected successful response but got {response.StatusCode}"); + + var content = await response.Content.ReadAsStringAsync(); + var jsonDoc = JsonDocument.Parse(content); + var root = jsonDoc.RootElement; + + // Verify the card has expected properties + Assert.True(root.TryGetProperty("name", out var nameProperty)); + Assert.Equal("Test Agent", nameProperty.GetString()); + + Assert.True(root.TryGetProperty("description", out var descProperty)); + Assert.Equal("A test agent for A2A communication", descProperty.GetString()); + + // Verify the card has a URL property and it's not null/empty + Assert.True(root.TryGetProperty("url", out var urlProperty)); + Assert.NotEqual(JsonValueKind.Null, urlProperty.ValueKind); + + var url = urlProperty.GetString(); + Assert.NotNull(url); + Assert.NotEmpty(url); + Assert.StartsWith("http", url, StringComparison.OrdinalIgnoreCase); + Assert.Equal($"{testServer.BaseAddress.ToString().TrimEnd('/')}/a2a/test-agent/v1/card", url); + } + finally + { + await app.StopAsync(); + } + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/EndpointRouteA2ABuilderExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/EndpointRouteA2ABuilderExtensionsTests.cs index 1ae0dda908..a848528888 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/EndpointRouteA2ABuilderExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/EndpointRouteA2ABuilderExtensionsTests.cs @@ -1,10 +1,8 @@ // Copyright (c) Microsoft. All rights reserved. 
using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; using A2A; +using Microsoft.Agents.AI.Hosting.A2A.UnitTests.Internal; using Microsoft.AspNetCore.Builder; using Microsoft.Extensions.AI; using Microsoft.Extensions.DependencyInjection; @@ -478,25 +476,4 @@ public void MapA2A_WithAgentBuilder_FullAgentCard_Succeeds() var result = app.MapA2A(agentBuilder, "/a2a", agentCard); Assert.NotNull(result); } - - private sealed class DummyChatClient : IChatClient - { - public void Dispose() - { - throw new NotImplementedException(); - } - - public Task GetResponseAsync(IEnumerable messages, ChatOptions? options = null, CancellationToken cancellationToken = default) - { - throw new NotImplementedException(); - } - - public object? GetService(Type serviceType, object? serviceKey = null) => - serviceType.IsInstanceOfType(this) ? this : null; - - public IAsyncEnumerable GetStreamingResponseAsync(IEnumerable messages, ChatOptions? options = null, CancellationToken cancellationToken = default) - { - throw new NotImplementedException(); - } - } } diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Internal/DummyChatClient.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Internal/DummyChatClient.cs new file mode 100644 index 0000000000..efab140b68 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Internal/DummyChatClient.cs @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft. All rights reserved. + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Extensions.AI; + +namespace Microsoft.Agents.AI.Hosting.A2A.UnitTests.Internal; + +internal sealed class DummyChatClient : IChatClient +{ + public void Dispose() + { + throw new NotImplementedException(); + } + + public Task GetResponseAsync(IEnumerable messages, ChatOptions? 
options = null, CancellationToken cancellationToken = default) + { + throw new NotImplementedException(); + } + + public object? GetService(Type serviceType, object? serviceKey = null) => + serviceType.IsInstanceOfType(this) ? this : null; + + public IAsyncEnumerable GetStreamingResponseAsync(IEnumerable messages, ChatOptions? options = null, CancellationToken cancellationToken = default) + { + throw new NotImplementedException(); + } +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Microsoft.Agents.AI.Hosting.A2A.UnitTests.csproj b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Microsoft.Agents.AI.Hosting.A2A.UnitTests.csproj index 63387ae458..07dde4f802 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Microsoft.Agents.AI.Hosting.A2A.UnitTests.csproj +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.A2A.UnitTests/Microsoft.Agents.AI.Hosting.A2A.UnitTests.csproj @@ -1,4 +1,4 @@ - + $(ProjectsCoreTargetFrameworks) @@ -6,6 +6,9 @@ + + + diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/SharedStateTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/SharedStateTests.cs new file mode 100644 index 0000000000..47d9e63520 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests/SharedStateTests.cs @@ -0,0 +1,441 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Net.Http; +using System.Runtime.CompilerServices; +using System.Text.Json; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using Microsoft.Agents.AI.AGUI; +using Microsoft.AspNetCore.Builder; +using Microsoft.AspNetCore.Hosting.Server; +using Microsoft.AspNetCore.TestHost; +using Microsoft.Extensions.AI; +using Microsoft.Extensions.DependencyInjection; + +namespace Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.IntegrationTests; + +public sealed class SharedStateTests : IAsyncDisposable +{ + private WebApplication? _app; + private HttpClient? _client; + + [Fact] + public async Task StateSnapshot_IsReturnedAsDataContent_WithCorrectMediaTypeAsync() + { + // Arrange + var initialState = new { counter = 42, status = "active" }; + var fakeAgent = new FakeStateAgent(); + + await this.SetupTestServerAsync(fakeAgent); + var chatClient = new AGUIChatClient(this._client!, "", null); + AIAgent agent = chatClient.CreateAIAgent(instructions: null, name: "assistant", description: "Sample assistant", tools: []); + ChatClientAgentThread thread = (ChatClientAgentThread)agent.GetNewThread(); + + string stateJson = JsonSerializer.Serialize(initialState); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + DataContent stateContent = new(stateBytes, "application/json"); + ChatMessage stateMessage = new(ChatRole.System, [stateContent]); + ChatMessage userMessage = new(ChatRole.User, "update state"); + + List updates = []; + + // Act + await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync([userMessage, stateMessage], thread, new AgentRunOptions(), CancellationToken.None)) + { + updates.Add(update); + } + + // Assert + updates.Should().NotBeEmpty(); + + // Should receive state snapshot as DataContent with application/json media type + AgentRunResponseUpdate? 
stateUpdate = updates.FirstOrDefault(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + stateUpdate.Should().NotBeNull("should receive state snapshot update"); + + DataContent? dataContent = stateUpdate!.Contents.OfType().FirstOrDefault(dc => dc.MediaType == "application/json"); + dataContent.Should().NotBeNull(); + + // Verify the state content + string receivedJson = System.Text.Encoding.UTF8.GetString(dataContent!.Data.ToArray()); + JsonElement receivedState = JsonSerializer.Deserialize(receivedJson); + receivedState.GetProperty("counter").GetInt32().Should().Be(43, "state should be incremented"); + receivedState.GetProperty("status").GetString().Should().Be("active"); + } + + [Fact] + public async Task StateSnapshot_HasCorrectAdditionalPropertiesAsync() + { + // Arrange + var initialState = new { step = 1 }; + var fakeAgent = new FakeStateAgent(); + + await this.SetupTestServerAsync(fakeAgent); + var chatClient = new AGUIChatClient(this._client!, "", null); + AIAgent agent = chatClient.CreateAIAgent(instructions: null, name: "assistant", description: "Sample assistant", tools: []); + ChatClientAgentThread thread = (ChatClientAgentThread)agent.GetNewThread(); + + string stateJson = JsonSerializer.Serialize(initialState); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + DataContent stateContent = new(stateBytes, "application/json"); + ChatMessage stateMessage = new(ChatRole.System, [stateContent]); + ChatMessage userMessage = new(ChatRole.User, "process"); + + List updates = []; + + // Act + await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync([userMessage, stateMessage], thread, new AgentRunOptions(), CancellationToken.None)) + { + updates.Add(update); + } + + // Assert + AgentRunResponseUpdate? 
stateUpdate = updates.FirstOrDefault(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + stateUpdate.Should().NotBeNull(); + + ChatResponseUpdate chatUpdate = stateUpdate!.AsChatResponseUpdate(); + chatUpdate.AdditionalProperties.Should().NotBeNull(); + chatUpdate.AdditionalProperties.Should().ContainKey("is_state_snapshot"); + ((bool)chatUpdate.AdditionalProperties!["is_state_snapshot"]!).Should().BeTrue(); + } + + [Fact] + public async Task ComplexState_WithNestedObjectsAndArrays_RoundTripsCorrectlyAsync() + { + // Arrange + var complexState = new + { + sessionId = "test-123", + nested = new { value = "test", count = 10 }, + array = new[] { 1, 2, 3 }, + tags = new[] { "tag1", "tag2" } + }; + var fakeAgent = new FakeStateAgent(); + + await this.SetupTestServerAsync(fakeAgent); + var chatClient = new AGUIChatClient(this._client!, "", null); + AIAgent agent = chatClient.CreateAIAgent(instructions: null, name: "assistant", description: "Sample assistant", tools: []); + ChatClientAgentThread thread = (ChatClientAgentThread)agent.GetNewThread(); + + string stateJson = JsonSerializer.Serialize(complexState); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + DataContent stateContent = new(stateBytes, "application/json"); + ChatMessage stateMessage = new(ChatRole.System, [stateContent]); + ChatMessage userMessage = new(ChatRole.User, "process complex state"); + + List updates = []; + + // Act + await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync([userMessage, stateMessage], thread, new AgentRunOptions(), CancellationToken.None)) + { + updates.Add(update); + } + + // Assert + AgentRunResponseUpdate? stateUpdate = updates.FirstOrDefault(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + stateUpdate.Should().NotBeNull(); + + DataContent? 
dataContent = stateUpdate!.Contents.OfType().FirstOrDefault(dc => dc.MediaType == "application/json"); + string receivedJson = System.Text.Encoding.UTF8.GetString(dataContent!.Data.ToArray()); + JsonElement receivedState = JsonSerializer.Deserialize(receivedJson); + + receivedState.GetProperty("sessionId").GetString().Should().Be("test-123"); + receivedState.GetProperty("nested").GetProperty("count").GetInt32().Should().Be(10); + receivedState.GetProperty("array").GetArrayLength().Should().Be(3); + receivedState.GetProperty("tags").GetArrayLength().Should().Be(2); + } + + [Fact] + public async Task StateSnapshot_CanBeUsedInSubsequentRequest_ForStateRoundTripAsync() + { + // Arrange + var initialState = new { counter = 1, sessionId = "round-trip-test" }; + var fakeAgent = new FakeStateAgent(); + + await this.SetupTestServerAsync(fakeAgent); + var chatClient = new AGUIChatClient(this._client!, "", null); + AIAgent agent = chatClient.CreateAIAgent(instructions: null, name: "assistant", description: "Sample assistant", tools: []); + ChatClientAgentThread thread = (ChatClientAgentThread)agent.GetNewThread(); + + string stateJson = JsonSerializer.Serialize(initialState); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + DataContent stateContent = new(stateBytes, "application/json"); + ChatMessage stateMessage = new(ChatRole.System, [stateContent]); + ChatMessage userMessage = new(ChatRole.User, "increment"); + + List firstRoundUpdates = []; + + // Act - First round + await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync([userMessage, stateMessage], thread, new AgentRunOptions(), CancellationToken.None)) + { + firstRoundUpdates.Add(update); + } + + // Extract state snapshot from first round + AgentRunResponseUpdate? firstStateUpdate = firstRoundUpdates.FirstOrDefault(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + firstStateUpdate.Should().NotBeNull(); + DataContent? 
firstStateContent = firstStateUpdate!.Contents.OfType().FirstOrDefault(dc => dc.MediaType == "application/json"); + + // Second round - use returned state + ChatMessage secondStateMessage = new(ChatRole.System, [firstStateContent!]); + ChatMessage secondUserMessage = new(ChatRole.User, "increment again"); + + List secondRoundUpdates = []; + await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync([secondUserMessage, secondStateMessage], thread, new AgentRunOptions(), CancellationToken.None)) + { + secondRoundUpdates.Add(update); + } + + // Assert - Second round should have incremented counter again + AgentRunResponseUpdate? secondStateUpdate = secondRoundUpdates.FirstOrDefault(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + secondStateUpdate.Should().NotBeNull(); + + DataContent? secondStateContent = secondStateUpdate!.Contents.OfType().FirstOrDefault(dc => dc.MediaType == "application/json"); + string secondStateJson = System.Text.Encoding.UTF8.GetString(secondStateContent!.Data.ToArray()); + JsonElement secondState = JsonSerializer.Deserialize(secondStateJson); + + secondState.GetProperty("counter").GetInt32().Should().Be(3, "counter should be incremented twice: 1 -> 2 -> 3"); + } + + [Fact] + public async Task WithoutState_AgentBehavesNormally_NoStateSnapshotReturnedAsync() + { + // Arrange + var fakeAgent = new FakeStateAgent(); + + await this.SetupTestServerAsync(fakeAgent); + var chatClient = new AGUIChatClient(this._client!, "", null); + AIAgent agent = chatClient.CreateAIAgent(instructions: null, name: "assistant", description: "Sample assistant", tools: []); + ChatClientAgentThread thread = (ChatClientAgentThread)agent.GetNewThread(); + + ChatMessage userMessage = new(ChatRole.User, "hello"); + + List updates = []; + + // Act + await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync([userMessage], thread, new AgentRunOptions(), CancellationToken.None)) + { + updates.Add(update); + } + + 
// Assert + updates.Should().NotBeEmpty(); + + // Should NOT have state snapshot when no state is sent + bool hasStateSnapshot = updates.Any(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + hasStateSnapshot.Should().BeFalse("should not return state snapshot when no state is provided"); + + // Should have normal text response + updates.Should().Contain(u => u.Contents.Any(c => c is TextContent)); + } + + [Fact] + public async Task EmptyState_DoesNotTriggerStateHandlingAsync() + { + // Arrange + var emptyState = new { }; + var fakeAgent = new FakeStateAgent(); + + await this.SetupTestServerAsync(fakeAgent); + var chatClient = new AGUIChatClient(this._client!, "", null); + AIAgent agent = chatClient.CreateAIAgent(instructions: null, name: "assistant", description: "Sample assistant", tools: []); + ChatClientAgentThread thread = (ChatClientAgentThread)agent.GetNewThread(); + + string stateJson = JsonSerializer.Serialize(emptyState); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + DataContent stateContent = new(stateBytes, "application/json"); + ChatMessage stateMessage = new(ChatRole.System, [stateContent]); + ChatMessage userMessage = new(ChatRole.User, "hello"); + + List updates = []; + + // Act + await foreach (AgentRunResponseUpdate update in agent.RunStreamingAsync([userMessage, stateMessage], thread, new AgentRunOptions(), CancellationToken.None)) + { + updates.Add(update); + } + + // Assert + updates.Should().NotBeEmpty(); + + // Empty state {} should not trigger state snapshot mechanism + bool hasEmptyStateSnapshot = updates.Any(u => u.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + hasEmptyStateSnapshot.Should().BeFalse("empty state should be treated as no state"); + + // Should have normal response + updates.Should().Contain(u => u.Contents.Any(c => c is TextContent)); + } + + [Fact] + public async Task NonStreamingRunAsync_WithState_ReturnsStateInResponseAsync() + 
{ + // Arrange + var initialState = new { counter = 5 }; + var fakeAgent = new FakeStateAgent(); + + await this.SetupTestServerAsync(fakeAgent); + var chatClient = new AGUIChatClient(this._client!, "", null); + AIAgent agent = chatClient.CreateAIAgent(instructions: null, name: "assistant", description: "Sample assistant", tools: []); + ChatClientAgentThread thread = (ChatClientAgentThread)agent.GetNewThread(); + + string stateJson = JsonSerializer.Serialize(initialState); + byte[] stateBytes = System.Text.Encoding.UTF8.GetBytes(stateJson); + DataContent stateContent = new(stateBytes, "application/json"); + ChatMessage stateMessage = new(ChatRole.System, [stateContent]); + ChatMessage userMessage = new(ChatRole.User, "process"); + + // Act + AgentRunResponse response = await agent.RunAsync([userMessage, stateMessage], thread, new AgentRunOptions(), CancellationToken.None); + + // Assert + response.Should().NotBeNull(); + response.Messages.Should().NotBeEmpty(); + + // Should have message with DataContent containing state + bool hasStateMessage = response.Messages.Any(m => m.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + hasStateMessage.Should().BeTrue("response should contain state message"); + + ChatMessage? stateResponseMessage = response.Messages.FirstOrDefault(m => m.Contents.Any(c => c is DataContent dc && dc.MediaType == "application/json")); + stateResponseMessage.Should().NotBeNull(); + + DataContent? 
dataContent = stateResponseMessage!.Contents.OfType().FirstOrDefault(dc => dc.MediaType == "application/json"); + string receivedJson = System.Text.Encoding.UTF8.GetString(dataContent!.Data.ToArray()); + JsonElement receivedState = JsonSerializer.Deserialize(receivedJson); + receivedState.GetProperty("counter").GetInt32().Should().Be(6); + } + + private async Task SetupTestServerAsync(FakeStateAgent fakeAgent) + { + WebApplicationBuilder builder = WebApplication.CreateBuilder(); + builder.Services.AddAGUI(); + builder.WebHost.UseTestServer(); + + this._app = builder.Build(); + + this._app.MapAGUI("/agent", fakeAgent); + + await this._app.StartAsync(); + + TestServer testServer = this._app.Services.GetRequiredService() as TestServer + ?? throw new InvalidOperationException("TestServer not found"); + + this._client = testServer.CreateClient(); + this._client.BaseAddress = new Uri("http://localhost/agent"); + } + + public async ValueTask DisposeAsync() + { + this._client?.Dispose(); + if (this._app != null) + { + await this._app.DisposeAsync(); + } + } +} + +[SuppressMessage("Performance", "CA1812:Avoid uninstantiated internal classes", Justification = "Instantiated in tests")] +internal sealed class FakeStateAgent : AIAgent +{ + public override string? Description => "Agent for state testing"; + + public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? options = null, CancellationToken cancellationToken = default) + { + return this.RunStreamingAsync(messages, thread, options, cancellationToken).ToAgentRunResponseAsync(cancellationToken); + } + + public override async IAsyncEnumerable RunStreamingAsync( + IEnumerable messages, + AgentThread? thread = null, + AgentRunOptions? 
options = null, + [EnumeratorCancellation] CancellationToken cancellationToken = default) + { + // Check for state in ChatOptions.AdditionalProperties (set by AG-UI hosting layer) + if (options is ChatClientAgentRunOptions { ChatOptions.AdditionalProperties: { } properties } && + properties.TryGetValue("ag_ui_state", out object? stateObj) && + stateObj is JsonElement state && + state.ValueKind == JsonValueKind.Object) + { + // Check if state object has properties (not empty {}) + bool hasProperties = false; + foreach (JsonProperty _ in state.EnumerateObject()) + { + hasProperties = true; + break; + } + + if (hasProperties) + { + // State is present and non-empty - modify it and return as DataContent + Dictionary modifiedState = []; + foreach (JsonProperty prop in state.EnumerateObject()) + { + if (prop.Name == "counter" && prop.Value.ValueKind == JsonValueKind.Number) + { + modifiedState[prop.Name] = prop.Value.GetInt32() + 1; + } + else if (prop.Value.ValueKind == JsonValueKind.Number) + { + modifiedState[prop.Name] = prop.Value.GetInt32(); + } + else if (prop.Value.ValueKind == JsonValueKind.String) + { + modifiedState[prop.Name] = prop.Value.GetString(); + } + else if (prop.Value.ValueKind == JsonValueKind.Object || prop.Value.ValueKind == JsonValueKind.Array) + { + modifiedState[prop.Name] = prop.Value; + } + } + + // Return modified state as DataContent + string modifiedStateJson = JsonSerializer.Serialize(modifiedState); + byte[] modifiedStateBytes = System.Text.Encoding.UTF8.GetBytes(modifiedStateJson); + DataContent modifiedStateContent = new(modifiedStateBytes, "application/json"); + + yield return new AgentRunResponseUpdate + { + MessageId = Guid.NewGuid().ToString("N"), + Role = ChatRole.Assistant, + Contents = [modifiedStateContent] + }; + } + } + + // Always return a text response + string messageId = Guid.NewGuid().ToString("N"); + yield return new AgentRunResponseUpdate + { + MessageId = messageId, + Role = ChatRole.Assistant, + Contents = [new 
TextContent("State processed")] + }; + + await Task.CompletedTask; + } + + public override AgentThread GetNewThread() => new FakeInMemoryAgentThread(); + + public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) + { + return new FakeInMemoryAgentThread(serializedThread, jsonSerializerOptions); + } + + private sealed class FakeInMemoryAgentThread : InMemoryAgentThread + { + public FakeInMemoryAgentThread() + : base() + { + } + + public FakeInMemoryAgentThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) + : base(serializedThread, jsonSerializerOptions) + { + } + } + + public override object? GetService(Type serviceType, object? serviceKey = null) => null; +} diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs index 9a2f8ac763..e5fb206147 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.AGUI.AspNetCore.UnitTests/AGUIEndpointRouteBuilderExtensionsTests.cs @@ -190,6 +190,264 @@ AIAgent factory(IEnumerable messages, IEnumerable tools, IE Assert.Equal("Second", capturedMessages[1].Text); } + [Fact] + public async Task MapAGUIAgent_ProducesValidAGUIEventStream_WithRunStartAndFinishAsync() + { + // Arrange + DefaultHttpContext httpContext = new(); + RunAgentInput input = new() + { + ThreadId = "thread1", + RunId = "run1", + Messages = [new AGUIUserMessage { Id = "m1", Content = "Test" }] + }; + string json = JsonSerializer.Serialize(input, AGUIJsonSerializerContext.Default.RunAgentInput); + httpContext.Request.Body = new MemoryStream(Encoding.UTF8.GetBytes(json)); + MemoryStream responseStream = new(); + httpContext.Response.Body = responseStream; + + 
RequestDelegate handler = this.CreateRequestDelegate((messages, tools, context, props) => new TestAgent()); + + // Act + await handler(httpContext); + + // Assert + responseStream.Position = 0; + string responseContent = Encoding.UTF8.GetString(responseStream.ToArray()); + + List events = ParseSseEvents(responseContent); + + JsonElement runStarted = Assert.Single(events, static e => e.GetProperty("type").GetString() == AGUIEventTypes.RunStarted); + JsonElement runFinished = Assert.Single(events, static e => e.GetProperty("type").GetString() == AGUIEventTypes.RunFinished); + + Assert.Equal("thread1", runStarted.GetProperty("threadId").GetString()); + Assert.Equal("run1", runStarted.GetProperty("runId").GetString()); + Assert.Equal("thread1", runFinished.GetProperty("threadId").GetString()); + Assert.Equal("run1", runFinished.GetProperty("runId").GetString()); + } + + [Fact] + public async Task MapAGUIAgent_ProducesTextMessageEvents_InCorrectOrderAsync() + { + // Arrange + DefaultHttpContext httpContext = new(); + RunAgentInput input = new() + { + ThreadId = "thread1", + RunId = "run1", + Messages = [new AGUIUserMessage { Id = "m1", Content = "Hello" }] + }; + string json = JsonSerializer.Serialize(input, AGUIJsonSerializerContext.Default.RunAgentInput); + httpContext.Request.Body = new MemoryStream(Encoding.UTF8.GetBytes(json)); + MemoryStream responseStream = new(); + httpContext.Response.Body = responseStream; + + RequestDelegate handler = this.CreateRequestDelegate((messages, tools, context, props) => new TestAgent()); + + // Act + await handler(httpContext); + + // Assert + responseStream.Position = 0; + string responseContent = Encoding.UTF8.GetString(responseStream.ToArray()); + + List events = ParseSseEvents(responseContent); + List eventTypes = new(events.Count); + foreach (JsonElement evt in events) + { + eventTypes.Add(evt.GetProperty("type").GetString()); + } + + Assert.Contains(AGUIEventTypes.RunStarted, eventTypes); + 
Assert.Contains(AGUIEventTypes.TextMessageContent, eventTypes); + Assert.Contains(AGUIEventTypes.RunFinished, eventTypes); + + int runStartIndex = eventTypes.IndexOf(AGUIEventTypes.RunStarted); + int firstContentIndex = eventTypes.IndexOf(AGUIEventTypes.TextMessageContent); + int runFinishIndex = eventTypes.LastIndexOf(AGUIEventTypes.RunFinished); + + Assert.True(runStartIndex < firstContentIndex, "Run start should precede text content."); + Assert.True(firstContentIndex < runFinishIndex, "Text content should precede run finish."); + } + + [Fact] + public async Task MapAGUIAgent_EmitsTextMessageContent_WithCorrectDeltaAsync() + { + // Arrange + DefaultHttpContext httpContext = new(); + RunAgentInput input = new() + { + ThreadId = "thread1", + RunId = "run1", + Messages = [new AGUIUserMessage { Id = "m1", Content = "Test" }] + }; + string json = JsonSerializer.Serialize(input, AGUIJsonSerializerContext.Default.RunAgentInput); + httpContext.Request.Body = new MemoryStream(Encoding.UTF8.GetBytes(json)); + MemoryStream responseStream = new(); + httpContext.Response.Body = responseStream; + + RequestDelegate handler = this.CreateRequestDelegate((messages, tools, context, props) => new TestAgent()); + + // Act + await handler(httpContext); + + // Assert + responseStream.Position = 0; + string responseContent = Encoding.UTF8.GetString(responseStream.ToArray()); + + List events = ParseSseEvents(responseContent); + JsonElement textContentEvent = Assert.Single(events, static e => e.GetProperty("type").GetString() == AGUIEventTypes.TextMessageContent); + + Assert.Equal("Test response", textContentEvent.GetProperty("delta").GetString()); + } + + [Fact] + public async Task MapAGUIAgent_WithCustomAgent_ProducesExpectedStreamStructureAsync() + { + // Arrange + AIAgent customAgentFactory(IEnumerable messages, IEnumerable tools, IEnumerable> context, JsonElement props) + { + return new MultiResponseAgent(); + } + + DefaultHttpContext httpContext = new(); + RunAgentInput input = 
new() + { + ThreadId = "custom_thread", + RunId = "custom_run", + Messages = [new AGUIUserMessage { Id = "m1", Content = "Multi" }] + }; + string json = JsonSerializer.Serialize(input, AGUIJsonSerializerContext.Default.RunAgentInput); + httpContext.Request.Body = new MemoryStream(Encoding.UTF8.GetBytes(json)); + MemoryStream responseStream = new(); + httpContext.Response.Body = responseStream; + + RequestDelegate handler = this.CreateRequestDelegate(customAgentFactory); + + // Act + await handler(httpContext); + + // Assert + responseStream.Position = 0; + string responseContent = Encoding.UTF8.GetString(responseStream.ToArray()); + + List events = ParseSseEvents(responseContent); + List contentEvents = new(); + foreach (JsonElement evt in events) + { + if (evt.GetProperty("type").GetString() == AGUIEventTypes.TextMessageContent) + { + contentEvents.Add(evt); + } + } + + Assert.True(contentEvents.Count >= 3, $"Expected at least 3 text_message.content events, got {contentEvents.Count}"); + + List deltas = new(contentEvents.Count); + foreach (JsonElement contentEvent in contentEvents) + { + deltas.Add(contentEvent.GetProperty("delta").GetString()); + } + + Assert.Contains("First", deltas); + Assert.Contains(" part", deltas); + Assert.Contains(" of response", deltas); + } + + [Fact] + public async Task MapAGUIAgent_ProducesCorrectThreadAndRunIds_InAllEventsAsync() + { + // Arrange + DefaultHttpContext httpContext = new(); + RunAgentInput input = new() + { + ThreadId = "test_thread_123", + RunId = "test_run_456", + Messages = [new AGUIUserMessage { Id = "m1", Content = "Test" }] + }; + string json = JsonSerializer.Serialize(input, AGUIJsonSerializerContext.Default.RunAgentInput); + httpContext.Request.Body = new MemoryStream(Encoding.UTF8.GetBytes(json)); + MemoryStream responseStream = new(); + httpContext.Response.Body = responseStream; + + RequestDelegate handler = this.CreateRequestDelegate((messages, tools, context, props) => new TestAgent()); + + // Act + await 
handler(httpContext); + + // Assert + responseStream.Position = 0; + string responseContent = Encoding.UTF8.GetString(responseStream.ToArray()); + + List events = ParseSseEvents(responseContent); + JsonElement runStarted = Assert.Single(events, static e => e.GetProperty("type").GetString() == AGUIEventTypes.RunStarted); + + Assert.Equal("test_thread_123", runStarted.GetProperty("threadId").GetString()); + Assert.Equal("test_run_456", runStarted.GetProperty("runId").GetString()); + } + + private static List ParseSseEvents(string responseContent) + { + List events = []; + using StringReader reader = new(responseContent); + StringBuilder dataBuilder = new(); + string? line; + + while ((line = reader.ReadLine()) != null) + { + if (line.StartsWith("data:", StringComparison.Ordinal)) + { + string payload = line.Length > 5 && line[5] == ' ' + ? line.Substring(6) + : line.Substring(5); + dataBuilder.Append(payload); + } + else if (line.Length == 0 && dataBuilder.Length > 0) + { + using JsonDocument document = JsonDocument.Parse(dataBuilder.ToString()); + events.Add(document.RootElement.Clone()); + dataBuilder.Clear(); + } + } + + if (dataBuilder.Length > 0) + { + using JsonDocument document = JsonDocument.Parse(dataBuilder.ToString()); + events.Add(document.RootElement.Clone()); + } + + return events; + } + + private sealed class MultiResponseAgent : AIAgent + { + public override string Id => "multi-response-agent"; + + public override string? Description => "Agent that produces multiple text chunks"; + + public override AgentThread GetNewThread() => new TestInMemoryAgentThread(); + + public override AgentThread DeserializeThread(JsonElement serializedThread, JsonSerializerOptions? jsonSerializerOptions = null) => + new TestInMemoryAgentThread(serializedThread, jsonSerializerOptions); + + public override Task RunAsync(IEnumerable messages, AgentThread? thread = null, AgentRunOptions? 
options = null, CancellationToken cancellationToken = default) + { + throw new NotImplementedException(); + } + + public override async IAsyncEnumerable RunStreamingAsync( + IEnumerable messages, + AgentThread? thread = null, + AgentRunOptions? options = null, + [System.Runtime.CompilerServices.EnumeratorCancellation] CancellationToken cancellationToken = default) + { + await Task.CompletedTask; + yield return new AgentRunResponseUpdate(new ChatResponseUpdate(ChatRole.Assistant, "First")); + yield return new AgentRunResponseUpdate(new ChatResponseUpdate(ChatRole.Assistant, " part")); + yield return new AgentRunResponseUpdate(new ChatResponseUpdate(ChatRole.Assistant, " of response")); + } + } + private RequestDelegate CreateRequestDelegate( Func, IEnumerable, IEnumerable>, JsonElement, AIAgent> factory) { diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/ConformanceTraces/ChatCompletions/tools/request.json b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/ConformanceTraces/ChatCompletions/tools/request.json new file mode 100644 index 0000000000..b41ac7ab2e --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/ConformanceTraces/ChatCompletions/tools/request.json @@ -0,0 +1,53 @@ +{ + "model": "gpt-4o-mini", + "messages": [ + { + "role": "user", + "content": "What's the weather like in San Francisco?" + } + ], + "max_completion_tokens": 256, + "temperature": 0.7, + "top_p": 1, + "tools": [ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": [ "celsius", "fahrenheit" ], + "description": "Temperature unit" + } + }, + "required": [ "location" ] + } + } + }, + { + "type": "function", + "function": { + "name": "get_time", + "description": "Get the current time in a given timezone", + "parameters": { + "type": "object", + "properties": { + "timezone": { + "type": "string", + "description": "The IANA timezone, e.g. America/Los_Angeles" + } + }, + "required": [ "timezone" ] + } + } + } + ] +} \ No newline at end of file diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/ConformanceTraces/ChatCompletions/tools/response.json b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/ConformanceTraces/ChatCompletions/tools/response.json new file mode 100644 index 0000000000..b86280bca0 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/ConformanceTraces/ChatCompletions/tools/response.json @@ -0,0 +1,42 @@ +{ + "id": "chatcmpl-tools-test-001", + "object": "chat.completion", + "created": 1234567890, + "model": "gpt-4o-mini", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_weather", + "arguments": "{\"location\": \"San Francisco, CA\", \"unit\": \"fahrenheit\"}" + } + } + ] + }, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 85, + "completion_tokens": 32, + "total_tokens": 117, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default" +} \ No newline at end of file diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests.csproj 
b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests.csproj index bd98aebed2..7d64f7ae2b 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests.csproj +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests.csproj @@ -27,16 +27,4 @@ - - - - - - - - - - - - diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIChatCompletionsConformanceTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIChatCompletionsConformanceTests.cs index b777db0ce5..8a38389035 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIChatCompletionsConformanceTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIChatCompletionsConformanceTests.cs @@ -456,6 +456,136 @@ public async Task JsonModeRequestResponseAsync() Assert.Equal(JsonValueKind.String, jsonRoot.GetProperty("occupation").ValueKind); } + [Fact] + public async Task ToolsSerializationDeserializationAsync() + { + // Arrange + string requestJson = LoadChatCompletionsTraceFile("tools/request.json"); + using var expectedResponseDoc = LoadChatCompletionsTraceDocument("tools/response.json"); + + HttpClient client = await this.CreateTestServerAsync( + "tools-agent", + "You are a helpful assistant with access to weather and time tools.", + "tool-call", + (msg) => [new FunctionCallContent("call_abc123", "get_weather", new Dictionary() { + { "location", "San Francisco, CA" }, + { "unit", "fahrenheit" } + })] + ); + + // Act + HttpResponseMessage httpResponse = await this.SendChatCompletionRequestAsync(client, "tools-agent", requestJson); + using var responseDoc = await ParseResponseAsync(httpResponse); + var response = responseDoc.RootElement; + + // Parse the request + using var requestDoc = JsonDocument.Parse(requestJson); + var request = requestDoc.RootElement; + + // Assert - Request has 
tools array with proper structure + AssertJsonPropertyExists(request, "tools"); + var tools = request.GetProperty("tools"); + Assert.Equal(JsonValueKind.Array, tools.ValueKind); + Assert.Equal(2, tools.GetArrayLength()); + + // Assert - First tool (get_weather) + var weatherTool = tools[0]; + AssertJsonPropertyEquals(weatherTool, "type", "function"); + AssertJsonPropertyExists(weatherTool, "function"); + + var weatherFunction = weatherTool.GetProperty("function"); + AssertJsonPropertyEquals(weatherFunction, "name", "get_weather"); + AssertJsonPropertyExists(weatherFunction, "description"); + AssertJsonPropertyExists(weatherFunction, "parameters"); + + var weatherParams = weatherFunction.GetProperty("parameters"); + AssertJsonPropertyEquals(weatherParams, "type", "object"); + AssertJsonPropertyExists(weatherParams, "properties"); + AssertJsonPropertyExists(weatherParams, "required"); + + // Verify location property exists + var properties = weatherParams.GetProperty("properties"); + AssertJsonPropertyExists(properties, "location"); + AssertJsonPropertyExists(properties, "unit"); + + // Assert - Second tool (get_time) + var timeTool = tools[1]; + AssertJsonPropertyEquals(timeTool, "type", "function"); + + var timeFunction = timeTool.GetProperty("function"); + AssertJsonPropertyEquals(timeFunction, "name", "get_time"); + AssertJsonPropertyExists(timeFunction, "description"); + AssertJsonPropertyExists(timeFunction, "parameters"); + + // Assert - Response structure + AssertJsonPropertyExists(response, "id"); + AssertJsonPropertyEquals(response, "object", "chat.completion"); + AssertJsonPropertyExists(response, "created"); + AssertJsonPropertyExists(response, "model"); + + // Assert - Response has tool_calls in choices + var choices = response.GetProperty("choices"); + Assert.Equal(JsonValueKind.Array, choices.ValueKind); + Assert.True(choices.GetArrayLength() > 0); + + var choice = choices[0]; + AssertJsonPropertyExists(choice, "finish_reason"); + 
AssertJsonPropertyEquals(choice, "finish_reason", anyOfValues: ["tool_calls", "stop"]); + AssertJsonPropertyExists(choice, "message"); + + var message = choice.GetProperty("message"); + AssertJsonPropertyEquals(message, "role", "assistant"); + AssertJsonPropertyExists(message, "tool_calls"); + + // Assert - Tool calls array structure + var toolCalls = message.GetProperty("tool_calls"); + Assert.Equal(JsonValueKind.Array, toolCalls.ValueKind); + Assert.True(toolCalls.GetArrayLength() > 0); + + var toolCall = toolCalls[0]; + AssertJsonPropertyExists(toolCall, "id"); + AssertJsonPropertyEquals(toolCall, "type", "function"); + AssertJsonPropertyExists(toolCall, "function"); + + var callFunction = toolCall.GetProperty("function"); + AssertJsonPropertyEquals(callFunction, "name", "get_weather"); + AssertJsonPropertyExists(callFunction, "arguments"); + + // Assert - Tool call arguments are valid JSON + string arguments = callFunction.GetProperty("arguments").GetString()!; + using var argsDoc = JsonDocument.Parse(arguments); + var argsRoot = argsDoc.RootElement; + AssertJsonPropertyExists(argsRoot, "location"); + AssertJsonPropertyEquals(argsRoot, "location", "San Francisco, CA"); + AssertJsonPropertyEquals(argsRoot, "unit", "fahrenheit"); + + // Assert - Message content is null when tool_calls present + if (message.TryGetProperty("content", out var contentProp)) + { + Assert.Equal(JsonValueKind.Null, contentProp.ValueKind); + } + + // Assert - Usage statistics + AssertJsonPropertyExists(response, "usage"); + var usage = response.GetProperty("usage"); + AssertJsonPropertyExists(usage, "prompt_tokens"); + AssertJsonPropertyExists(usage, "completion_tokens"); + AssertJsonPropertyExists(usage, "total_tokens"); + + var promptTokens = usage.GetProperty("prompt_tokens").GetInt32(); + var completionTokens = usage.GetProperty("completion_tokens").GetInt32(); + var totalTokens = usage.GetProperty("total_tokens").GetInt32(); + + Assert.True(promptTokens > 0); + 
Assert.True(completionTokens > 0); + Assert.Equal(promptTokens + completionTokens, totalTokens); + + // Assert - Service tier + AssertJsonPropertyExists(response, "service_tier"); + var serviceTier = response.GetProperty("service_tier").GetString(); + Assert.NotNull(serviceTier); + } + /// /// Helper to parse chat completion chunks from SSE response. /// diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIHttpApiIntegrationTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIHttpApiIntegrationTests.cs index 0c07410b91..1a72b252b5 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIHttpApiIntegrationTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIHttpApiIntegrationTests.cs @@ -45,7 +45,7 @@ public async Task CreateConversationAndResponse_NonStreaming_NonBackground_Updat // Act - Create response (non-streaming, non-background) var createResponseRequest = new { - model = AgentName, + metadata = new { entity_id = AgentName }, conversation = conversationId, input = UserMessage, stream = false @@ -122,7 +122,7 @@ public async Task CreateConversationAndResponse_Streaming_NonBackground_UpdatesC // Act - Create response (streaming, non-background) var createResponseRequest = new { - model = AgentName, + metadata = new { entity_id = AgentName }, conversation = conversationId, input = UserMessage, stream = true @@ -196,7 +196,7 @@ public async Task CreateConversationAndResponse_NonStreaming_Background_UpdatesC // Act - Create response (non-streaming, background) var createResponseRequest = new { - model = AgentName, + metadata = new { entity_id = AgentName }, conversation = conversationId, input = UserMessage, stream = false, diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesAgentResolutionIntegrationTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesAgentResolutionIntegrationTests.cs index 
c8ce5b770e..9ea9541ccb 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesAgentResolutionIntegrationTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesAgentResolutionIntegrationTests.cs @@ -168,23 +168,23 @@ public async Task CreateResponse_WithMultipleAgents_ResolvesCorrectAgentAsync() } /// - /// Verifies that agent resolution using the model property works correctly. + /// Verifies that agent resolution using the metadata.entity_id property works correctly. /// [Fact] - public async Task CreateResponse_WithModelProperty_ResolvesCorrectAgentAsync() + public async Task CreateResponse_WithMetadataEntityId_ResolvesCorrectAgentAsync() { // Arrange - const string AgentName = "model-agent"; + const string AgentName = "metadata-agent"; const string Instructions = "You are a helpful assistant."; - const string ExpectedResponse = "Response via model property"; + const string ExpectedResponse = "Response via metadata.entity_id"; this._httpClient = await this.CreateTestServerWithAgentResolutionAsync( (AgentName, Instructions, ExpectedResponse)); - // Act - Use raw HTTP request to control the model property + // Act - Use raw HTTP request with metadata.entity_id using StringContent requestContent = new(JsonSerializer.Serialize(new { - model = AgentName, + metadata = new { entity_id = AgentName }, input = new[] { new { type = "message", role = "user", content = "Test message" } @@ -235,7 +235,7 @@ public async Task CreateResponse_WithNonExistentAgent_ReturnsNotFoundAsync() using HttpResponseMessage httpResponse = await this._httpClient!.PostAsync(new Uri("/v1/responses", UriKind.Relative), requestContent); // Assert - Assert.Equal(System.Net.HttpStatusCode.NotFound, httpResponse.StatusCode); + Assert.Equal(System.Net.HttpStatusCode.BadRequest, httpResponse.StatusCode); string responseJson = await httpResponse.Content.ReadAsStringAsync(); Assert.Contains("non-existent-agent", responseJson); @@ -268,7 
+268,6 @@ public async Task CreateResponse_WithoutAgentOrModel_ReturnsBadRequestAsync() string responseJson = await httpResponse.Content.ReadAsStringAsync(); Assert.Contains("agent.name", responseJson, StringComparison.OrdinalIgnoreCase); - Assert.Contains("model", responseJson, StringComparison.OrdinalIgnoreCase); } /// diff --git a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesSerializationTests.cs b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesSerializationTests.cs index 9c0bd023df..d3a23a382d 100644 --- a/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesSerializationTests.cs +++ b/dotnet/tests/Microsoft.Agents.AI.Hosting.OpenAI.UnitTests/OpenAIResponsesSerializationTests.cs @@ -344,6 +344,20 @@ public void Deserialize_RefusalStreamingRequest_HasStream() Assert.NotNull(request.Input); } + [Fact] + public void Deserialize_InvalidInputObject_ThrowsHelpfulException() + { + // Arrange + const string Json = "{\"model\":\"gpt-4o-mini\",\"input\":{\"input\":\"testing!\"},\"stream\":true}"; + + // Act & Assert + var exception = Assert.Throws(() => + JsonSerializer.Deserialize(Json, OpenAIHostingJsonContext.Default.CreateResponse)); + + Assert.Contains("ResponseInput must be either a string or an array of messages", exception.Message); + Assert.Contains("Objects are not supported", exception.Message); + } + [Fact] public void Deserialize_AllRequests_CanBeDeserialized() { diff --git a/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/ObjectModel/SetMultipleVariablesExecutorTest.cs b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/ObjectModel/SetMultipleVariablesExecutorTest.cs new file mode 100644 index 0000000000..62f965a5b0 --- /dev/null +++ b/dotnet/tests/Microsoft.Agents.AI.Workflows.Declarative.UnitTests/ObjectModel/SetMultipleVariablesExecutorTest.cs @@ -0,0 +1,154 @@ +// Copyright (c) Microsoft. All rights reserved. 
+ +using System.Threading.Tasks; +using Microsoft.Agents.AI.Workflows.Declarative.ObjectModel; +using Microsoft.Bot.ObjectModel; +using Microsoft.PowerFx.Types; +using Xunit.Abstractions; + +namespace Microsoft.Agents.AI.Workflows.Declarative.UnitTests.ObjectModel; + +/// +/// Tests for . +/// +public sealed class SetMultipleVariablesExecutorTest(ITestOutputHelper output) : WorkflowActionExecutorTest(output) +{ + [Fact] + public async Task SetMultipleVariablesAsync() + { + // Arrange, Act, Assert + await this.ExecuteTestAsync( + displayName: nameof(SetMultipleVariablesAsync), + assignments: [ + new AssignmentCase("Variable1", new NumberDataValue(42), FormulaValue.New(42)), + new AssignmentCase("Variable2", new StringDataValue("Test"), FormulaValue.New("Test")), + new AssignmentCase("Variable3", new BooleanDataValue(true), FormulaValue.New(true)) + ]); + } + + [Fact] + public async Task SetMultipleVariablesWithExpressionsAsync() + { + // Arrange + this.State.Set("SourceNumber", FormulaValue.New(10)); + this.State.Set("SourceText", FormulaValue.New("Hello")); + this.State.Bind(); + + // Act, Assert + await this.ExecuteTestAsync( + displayName: nameof(SetMultipleVariablesWithExpressionsAsync), + assignments: [ + new AssignmentCase("CalcVariable", ValueExpression.Expression("Local.SourceNumber * 2"), FormulaValue.New(20)), + new AssignmentCase("ConcatVariable", ValueExpression.Expression(@"Concatenate(Local.SourceText, "" World"")"), FormulaValue.New("Hello World")), + new AssignmentCase("BoolVariable", ValueExpression.Expression("Local.SourceNumber > 5"), FormulaValue.New(true)) + ]); + } + + [Fact] + public async Task SetMultipleVariablesWithVariableReferencesAsync() + { + // Arrange + this.State.Set("Source1", FormulaValue.New(123)); + this.State.Set("Source2", FormulaValue.New("Reference")); + this.State.Bind(); + + // Act, Assert + await this.ExecuteTestAsync( + displayName: nameof(SetMultipleVariablesWithVariableReferencesAsync), + assignments: [ + new 
AssignmentCase("Target1", ValueExpression.Variable(PropertyPath.TopicVariable("Source1")), FormulaValue.New(123)), + new AssignmentCase("Target2", ValueExpression.Variable(PropertyPath.TopicVariable("Source2")), FormulaValue.New("Reference")) + ]); + } + + [Fact] + public async Task SetMultipleVariablesWithNullValuesAsync() + { + // Arrange, Act, Assert + await this.ExecuteTestAsync( + displayName: nameof(SetMultipleVariablesWithNullValuesAsync), + assignments: [ + new AssignmentCase("NullVar1", null, FormulaValue.NewBlank()), + new AssignmentCase("NormalVar", new StringDataValue("NotNull"), FormulaValue.New("NotNull")), + new AssignmentCase("NullVar2", null, FormulaValue.NewBlank()) + ]); + } + + [Fact] + public async Task SetMultipleVariablesUpdateExistingAsync() + { + // Arrange + this.State.Set("ExistingVar1", FormulaValue.New(999)); + this.State.Set("ExistingVar2", FormulaValue.New("OldValue")); + + // Act, Assert + await this.ExecuteTestAsync( + displayName: nameof(SetMultipleVariablesUpdateExistingAsync), + assignments: [ + new AssignmentCase("ExistingVar1", new NumberDataValue(111), FormulaValue.New(111)), + new AssignmentCase("ExistingVar2", new StringDataValue("NewValue"), FormulaValue.New("NewValue")), + new AssignmentCase("NewVar", new BooleanDataValue(false), FormulaValue.New(false)) + ]); + } + + [Fact] + public async Task SetMultipleVariablesEmptyAssignmentsAsync() + { + // Arrange + SetMultipleVariables model = this.CreateModel(nameof(SetMultipleVariablesEmptyAssignmentsAsync), []); + + // Arrange, Act, Assert + Assert.Throws(() => + { + // Empty variables assignment should fail RequiredProperties validation. 
+ _ = new SetMultipleVariablesExecutor(model, this.State); + }); + } + + private async Task ExecuteTestAsync(string displayName, AssignmentCase[] assignments) + { + // Arrange + SetMultipleVariables model = this.CreateModel(displayName, assignments); + + // Act + SetMultipleVariablesExecutor action = new(model, this.State); + await this.ExecuteAsync(action); + + // Assert + VerifyModel(model, action); + foreach (AssignmentCase assignment in assignments) + { + this.VerifyState(assignment.VariableName, assignment.ExpectedValue); + } + } + + private SetMultipleVariables CreateModel(string displayName, AssignmentCase[] assignments) + { + SetMultipleVariables.Builder actionBuilder = new() + { + Id = this.CreateActionId(), + DisplayName = this.FormatDisplayName(displayName), + }; + + foreach (AssignmentCase assignment in assignments) + { + ValueExpression.Builder? valueExpressionBuilder = assignment.ValueExpression switch + { + null => null, + DataValue dataValue => new ValueExpression.Builder(ValueExpression.Literal(dataValue)), + ValueExpression valueExpression => new ValueExpression.Builder(valueExpression), + _ => throw new System.ArgumentException($"Unsupported value type: {assignment.ValueExpression?.GetType().Name}") + }; + + actionBuilder.Assignments.Add(new VariableAssignment.Builder() + { + Variable = PropertyPath.Create(FormatVariablePath(assignment.VariableName)), + Value = valueExpressionBuilder, + }); + } + + return AssignParent(actionBuilder); + } + + private sealed record AssignmentCase(string VariableName, object? 
ValueExpression, FormulaValue ExpectedValue); +} diff --git a/python/CHANGELOG.md b/python/CHANGELOG.md index b4485b914c..500c0b45cd 100644 --- a/python/CHANGELOG.md +++ b/python/CHANGELOG.md @@ -7,6 +7,33 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.0.0b251111] - 2025-11-11 + +### Added + +- **agent-framework-core**: Add OpenAI Responses Image Generation Stream Support with partial images and unit tests ([#1853](https://github.com/microsoft/agent-framework/pull/1853)) +- **agent-framework-ag-ui**: Add concrete AGUIChatClient implementation ([#2072](https://github.com/microsoft/agent-framework/pull/2072)) + +### Fixed + +- **agent-framework-a2a**: Use the last entry in the task history to avoid empty responses ([#2101](https://github.com/microsoft/agent-framework/pull/2101)) +- **agent-framework-core**: Fix MCP Tool Parameter Descriptions not propagated to LLMs ([#1978](https://github.com/microsoft/agent-framework/pull/1978)) +- **agent-framework-core**: Handle agent user input request in AgentExecutor ([#2022](https://github.com/microsoft/agent-framework/pull/2022)) +- **agent-framework-core**: Fix Model ID attribute not showing up in `invoke_agent` span ([#2061](https://github.com/microsoft/agent-framework/pull/2061)) +- **agent-framework-core**: Fix underlying tool choice bug and enable return to previous Handoff subagent ([#2037](https://github.com/microsoft/agent-framework/pull/2037)) + +## [1.0.0b251108] - 2025-11-08 + +### Added + +- **agent-framework-devui**: Add OpenAI Responses API proxy support + HIL (Human-in-the-Loop) for Workflows ([#1737](https://github.com/microsoft/agent-framework/pull/1737)) +- **agent-framework-purview**: Add Caching and background processing in Python Purview Middleware ([#1844](https://github.com/microsoft/agent-framework/pull/1844)) + +### Changed + +- **agent-framework-devui**: Use metadata.entity_id instead of model field 
([#1984](https://github.com/microsoft/agent-framework/pull/1984)) +- **agent-framework-devui**: Serialize workflow input as string to maintain conformance with OpenAI Responses format ([#2021](https://github.com/microsoft/agent-framework/pull/2021)) + ## [1.0.0b251106.post1] - 2025-11-06 ### Fixed @@ -177,7 +204,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 For more information, see the [announcement blog post](https://devblogs.microsoft.com/foundry/introducing-microsoft-agent-framework-the-open-source-engine-for-agentic-ai-apps/). -[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251104...HEAD +[Unreleased]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251111...HEAD +[1.0.0b251111]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251108...python-1.0.0b251111 +[1.0.0b251108]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251106.post1...python-1.0.0b251108 +[1.0.0b251106.post1]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251106...python-1.0.0b251106.post1 +[1.0.0b251106]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251105...python-1.0.0b251106 +[1.0.0b251105]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251104...python-1.0.0b251105 [1.0.0b251104]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251028...python-1.0.0b251104 [1.0.0b251028]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251016...python-1.0.0b251028 [1.0.0b251016]: https://github.com/microsoft/agent-framework/compare/python-1.0.0b251007...python-1.0.0b251016 diff --git a/python/packages/a2a/agent_framework_a2a/_agent.py b/python/packages/a2a/agent_framework_a2a/_agent.py index 68694e3db4..7fe4649a65 100644 --- a/python/packages/a2a/agent_framework_a2a/_agent.py +++ b/python/packages/a2a/agent_framework_a2a/_agent.py @@ -388,6 +388,17 @@ def _task_to_chat_messages(self, 
task: Task) -> list[ChatMessage]: if task.artifacts is not None: for artifact in task.artifacts: messages.append(self._artifact_to_chat_message(artifact)) + elif task.history is not None and len(task.history) > 0: + # Include the last history item as the agent response + history_item = task.history[-1] + contents = self._a2a_parts_to_contents(history_item.parts) + messages.append( + ChatMessage( + role=Role.ASSISTANT if history_item.role == A2ARole.agent else Role.USER, + contents=contents, + raw_representation=history_item, + ) + ) return messages diff --git a/python/packages/a2a/pyproject.toml b/python/packages/a2a/pyproject.toml index 44e66b5858..2780bdd481 100644 --- a/python/packages/a2a/pyproject.toml +++ b/python/packages/a2a/pyproject.toml @@ -4,7 +4,7 @@ description = "A2A integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251105" +version = "1.0.0b251111" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/ag-ui/README.md b/python/packages/ag-ui/README.md index a7e24b5fbc..2b02b61090 100644 --- a/python/packages/ag-ui/README.md +++ b/python/packages/ag-ui/README.md @@ -10,6 +10,8 @@ pip install agent-framework-ag-ui ## Quick Start +### Server (Host an AI Agent) + ```python from fastapi import FastAPI from agent_framework import ChatAgent @@ -23,6 +25,7 @@ agent = ChatAgent( chat_client=AzureOpenAIChatClient( endpoint="https://your-resource.openai.azure.com/", deployment_name="gpt-4o-mini", + api_key="your-api-key", ), ) @@ -33,9 +36,38 @@ add_agent_framework_fastapi_endpoint(app, agent, "/") # Run with: uvicorn main:app --reload ``` +### Client (Connect to an AG-UI Server) + +```python +import asyncio +from agent_framework import TextContent +from agent_framework_ag_ui import AGUIChatClient + +async 
def main(): + async with AGUIChatClient(endpoint="http://localhost:8000/") as client: + # Stream responses + async for update in client.get_streaming_response("Hello!"): + for content in update.contents: + if isinstance(content, TextContent): + print(content.text, end="", flush=True) + print() + +asyncio.run(main()) +``` + +The `AGUIChatClient` supports: +- Streaming and non-streaming responses +- Hybrid tool execution (client-side + server-side tools) +- Automatic thread management for conversation continuity +- Integration with `ChatAgent` for client-side history management + ## Documentation -- **[Getting Started Tutorial](getting_started/)** - Step-by-step guide to building your first AG-UI server and client +- **[Getting Started Tutorial](getting_started/)** - Step-by-step guide to building AG-UI servers and clients + - Server setup with FastAPI + - Client examples using `AGUIChatClient` + - Hybrid tool execution (client-side + server-side) + - Thread management and conversation continuity - **[Examples](agent_framework_ag_ui_examples/)** - Complete examples for AG-UI features ## Features diff --git a/python/packages/ag-ui/agent_framework_ag_ui/__init__.py b/python/packages/ag-ui/agent_framework_ag_ui/__init__.py index 1adedb2649..143f2499a0 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/__init__.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/__init__.py @@ -5,6 +5,7 @@ import importlib.metadata from ._agent import AgentFrameworkAgent +from ._client import AGUIChatClient from ._confirmation_strategies import ( ConfirmationStrategy, DefaultConfirmationStrategy, @@ -13,6 +14,8 @@ TaskPlannerConfirmationStrategy, ) from ._endpoint import add_agent_framework_fastapi_endpoint +from ._event_converters import AGUIEventConverter +from ._http_service import AGUIHttpService try: __version__ = importlib.metadata.version(__name__) @@ -22,6 +25,9 @@ __all__ = [ "AgentFrameworkAgent", "add_agent_framework_fastapi_endpoint", + "AGUIChatClient", + 
"AGUIEventConverter", + "AGUIHttpService", "ConfirmationStrategy", "DefaultConfirmationStrategy", "TaskPlannerConfirmationStrategy", diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_client.py b/python/packages/ag-ui/agent_framework_ag_ui/_client.py new file mode 100644 index 0000000000..ab7eb53940 --- /dev/null +++ b/python/packages/ag-ui/agent_framework_ag_ui/_client.py @@ -0,0 +1,407 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""AG-UI Chat Client implementation.""" + +import json +import logging +import uuid +from collections.abc import AsyncIterable, MutableSequence +from functools import wraps +from typing import Any, TypeVar, cast + +import httpx +from agent_framework import ( + AIFunction, + BaseChatClient, + ChatMessage, + ChatOptions, + ChatResponse, + ChatResponseUpdate, + DataContent, + FunctionCallContent, +) +from agent_framework._middleware import use_chat_middleware +from agent_framework._tools import use_function_invocation +from agent_framework._types import BaseContent, Contents +from agent_framework.observability import use_observability + +from ._event_converters import AGUIEventConverter +from ._http_service import AGUIHttpService +from ._message_adapters import agent_framework_messages_to_agui +from ._utils import convert_tools_to_agui_format + +logger: logging.Logger = logging.getLogger(__name__) + + +class ServerFunctionCallContent(BaseContent): + """Wrapper for server function calls to prevent client re-execution. + + All function calls from the remote server are server-side executions. + This wrapper prevents @use_function_invocation from trying to execute them again. 
+ """ + + function_call_content: FunctionCallContent + + def __init__(self, function_call_content: FunctionCallContent) -> None: + """Initialize with the function call content.""" + super().__init__(type="server_function_call") + self.function_call_content = function_call_content + + +def _unwrap_server_function_call_contents(contents: MutableSequence[Contents | dict[str, Any]]) -> None: + """Replace ServerFunctionCallContent instances with their underlying call content.""" + for idx, content in enumerate(contents): + if isinstance(content, ServerFunctionCallContent): + contents[idx] = content.function_call_content # type: ignore[assignment] + + +TBaseChatClient = TypeVar("TBaseChatClient", bound=type[BaseChatClient]) + + +def _apply_server_function_call_unwrap(chat_client: TBaseChatClient) -> TBaseChatClient: + """Class decorator that unwraps server-side function calls after tool handling.""" + + original_get_streaming_response = chat_client.get_streaming_response + + @wraps(original_get_streaming_response) + async def streaming_wrapper(self, *args: Any, **kwargs: Any) -> AsyncIterable[ChatResponseUpdate]: + async for update in original_get_streaming_response(self, *args, **kwargs): + _unwrap_server_function_call_contents(cast(MutableSequence[Contents | dict[str, Any]], update.contents)) + yield update + + chat_client.get_streaming_response = streaming_wrapper # type: ignore[assignment] + + original_get_response = chat_client.get_response + + @wraps(original_get_response) + async def response_wrapper(self, *args: Any, **kwargs: Any) -> ChatResponse: + response = await original_get_response(self, *args, **kwargs) + if response.messages: + for message in response.messages: + _unwrap_server_function_call_contents( + cast(MutableSequence[Contents | dict[str, Any]], message.contents) + ) + return response + + chat_client.get_response = response_wrapper # type: ignore[assignment] + return chat_client + + +@_apply_server_function_call_unwrap +@use_function_invocation 
+@use_observability +@use_chat_middleware +class AGUIChatClient(BaseChatClient): + """Chat client for communicating with AG-UI compliant servers. + + This client implements the BaseChatClient interface and automatically handles: + - Thread ID management for conversation continuity + - State synchronization between client and server + - Server-Sent Events (SSE) streaming + - Event conversion to Agent Framework types + + Important: Message History Management + This client sends exactly the messages it receives to the server. It does NOT + automatically maintain conversation history. The server must handle history via thread_id. + + For stateless servers: Use ChatAgent wrapper which will send full message history on each + request. However, even with ChatAgent, the server must echo back all context for the + agent to maintain history across turns. + + Important: Tool Handling (Hybrid Execution - matches .NET) + 1. Client tool metadata sent to server - LLM knows about both client and server tools + 2. Server has its own tools that execute server-side + 3. When LLM calls a client tool, @use_function_invocation executes it locally + 4. Both client and server tools work together (hybrid pattern) + + The wrapping ChatAgent's @use_function_invocation handles client tool execution + automatically when the server's LLM decides to call them. + + Examples: + Direct usage (server manages thread history): + + .. code-block:: python + + from agent_framework.ag_ui import AGUIChatClient + + client = AGUIChatClient(endpoint="http://localhost:8888/") + + # First message - thread ID auto-generated + response = await client.get_response("Hello!") + thread_id = response.additional_properties.get("thread_id") + + # Second message - server retrieves history using thread_id + response2 = await client.get_response( + "How are you?", + metadata={"thread_id": thread_id} + ) + + Recommended usage with ChatAgent (client manages history): + + .. 
code-block:: python + + from agent_framework import ChatAgent + from agent_framework.ag_ui import AGUIChatClient + + client = AGUIChatClient(endpoint="http://localhost:8888/") + agent = ChatAgent(name="assistant", client=client) + thread = await agent.get_new_thread() + + # ChatAgent automatically maintains history and sends full context + response = await agent.run("Hello!", thread=thread) + response2 = await agent.run("How are you?", thread=thread) + + Streaming usage: + + .. code-block:: python + + async for update in client.get_streaming_response("Tell me a story"): + if update.contents: + for content in update.contents: + if hasattr(content, "text"): + print(content.text, end="", flush=True) + + Context manager: + + .. code-block:: python + + async with AGUIChatClient(endpoint="http://localhost:8888/") as client: + response = await client.get_response("Hello!") + print(response.messages[0].text) + """ + + OTEL_PROVIDER_NAME = "agui" + + def __init__( + self, + *, + endpoint: str, + http_client: httpx.AsyncClient | None = None, + timeout: float = 60.0, + additional_properties: dict[str, Any] | None = None, + **kwargs: Any, + ) -> None: + """Initialize the AG-UI chat client. + + Args: + endpoint: The AG-UI server endpoint URL (e.g., "http://localhost:8888/") + http_client: Optional httpx.AsyncClient instance. If None, one will be created. 
+ timeout: Request timeout in seconds (default: 60.0) + additional_properties: Additional properties to store + **kwargs: Additional arguments passed to BaseChatClient + """ + super().__init__(additional_properties=additional_properties, **kwargs) + self._http_service = AGUIHttpService( + endpoint=endpoint, + http_client=http_client, + timeout=timeout, + ) + + async def close(self) -> None: + """Close the HTTP client.""" + await self._http_service.close() + + async def __aenter__(self) -> "AGUIChatClient": + """Enter async context manager.""" + return self + + async def __aexit__(self, *args: Any) -> None: + """Exit async context manager.""" + await self.close() + + def _register_server_tool_placeholder(self, tool_name: str) -> None: + """Register a declaration-only placeholder so function invocation skips execution.""" + + config = getattr(self, "function_invocation_configuration", None) + if not config: + return + if any(getattr(tool, "name", None) == tool_name for tool in config.additional_tools): + return + + placeholder: AIFunction[Any, Any] = AIFunction( + name=tool_name, + description="Server-managed tool placeholder (AG-UI)", + func=None, + ) + config.additional_tools = list(config.additional_tools) + [placeholder] + registered: set[str] = getattr(self, "_registered_server_tools", set()) + registered.add(tool_name) + self._registered_server_tools = registered # type: ignore[attr-defined] + from agent_framework._logging import get_logger + + logger = get_logger() + logger.debug(f"[AGUIChatClient] Registered server placeholder: {tool_name}") + + def _extract_state_from_messages( + self, messages: MutableSequence[ChatMessage] + ) -> tuple[list[ChatMessage], dict[str, Any] | None]: + """Extract state from last message if present. 
+ + Args: + messages: List of chat messages + + Returns: + Tuple of (messages_without_state, state_dict) + """ + if not messages: + return list(messages), None + + last_message = messages[-1] + + for content in last_message.contents: + if isinstance(content, DataContent) and content.media_type == "application/json": + try: + uri = content.uri + if uri.startswith("data:application/json;base64,"): + import base64 + + encoded_data = uri.split(",", 1)[1] + decoded_bytes = base64.b64decode(encoded_data) + state = json.loads(decoded_bytes.decode("utf-8")) + + messages_without_state = list(messages[:-1]) if len(messages) > 1 else [] + return messages_without_state, state + except (json.JSONDecodeError, ValueError, KeyError) as e: + from agent_framework._logging import get_logger + + logger = get_logger() + logger.warning(f"Failed to extract state from message: {e}") + + return list(messages), None + + def _convert_messages_to_agui_format(self, messages: list[ChatMessage]) -> list[dict[str, Any]]: + """Convert Agent Framework messages to AG-UI format. + + Args: + messages: List of ChatMessage objects + + Returns: + List of AG-UI formatted message dictionaries + """ + return agent_framework_messages_to_agui(messages) + + def _get_thread_id(self, chat_options: ChatOptions) -> str: + """Get or generate thread ID from chat options. + + Args: + chat_options: Chat options containing metadata + + Returns: + Thread ID string + """ + thread_id = None + if chat_options.metadata: + thread_id = chat_options.metadata.get("thread_id") + + if not thread_id: + thread_id = f"thread_{uuid.uuid4().hex}" + + return thread_id + + async def _inner_get_response( + self, + *, + messages: MutableSequence[ChatMessage], + chat_options: ChatOptions, + **kwargs: Any, + ) -> ChatResponse: + """Internal method to get non-streaming response. 
+ + Keyword Args: + messages: List of chat messages + chat_options: Chat options for the request + **kwargs: Additional keyword arguments + + Returns: + ChatResponse object + """ + return await ChatResponse.from_chat_response_generator( + self._inner_get_streaming_response( + messages=messages, + chat_options=chat_options, + **kwargs, + ) + ) + + async def _inner_get_streaming_response( + self, + *, + messages: MutableSequence[ChatMessage], + chat_options: ChatOptions, + **kwargs: Any, + ) -> AsyncIterable[ChatResponseUpdate]: + """Internal method to get streaming response. + + Keyword Args: + messages: List of chat messages + chat_options: Chat options for the request + **kwargs: Additional keyword arguments + + Yields: + ChatResponseUpdate objects + """ + messages_to_send, state = self._extract_state_from_messages(messages) + + thread_id = self._get_thread_id(chat_options) + run_id = f"run_{uuid.uuid4().hex}" + + agui_messages = self._convert_messages_to_agui_format(messages_to_send) + + # Send client tools to server so LLM knows about them + # Client tools execute via ChatAgent's @use_function_invocation wrapper + agui_tools = convert_tools_to_agui_format(chat_options.tools) + + # Build set of client tool names (matches .NET clientToolSet) + # Used to distinguish client vs server tools in response stream + client_tool_set: set[str] = set() + if chat_options.tools: + for tool in chat_options.tools: + if hasattr(tool, "name"): + client_tool_set.add(tool.name) # type: ignore[arg-type] + self._last_client_tool_set = client_tool_set # type: ignore[attr-defined] + + logger.debug( + "[AGUIChatClient] Preparing request", + extra={ + "thread_id": thread_id, + "run_id": run_id, + "client_tools": list(client_tool_set), + "messages": [msg.text for msg in messages_to_send if msg.text], + }, + ) + logger.debug(f"[AGUIChatClient] Client tool set: {client_tool_set}") + + converter = AGUIEventConverter() + + async for event in self._http_service.post_run( + thread_id=thread_id, 
+ run_id=run_id, + messages=agui_messages, + state=state, + tools=agui_tools, + ): + logger.debug(f"[AGUIChatClient] Raw AG-UI event: {event}") + update = converter.convert_event(event) + if update is not None: + logger.debug( + "[AGUIChatClient] Converted update", + extra={"role": update.role, "contents": [type(c).__name__ for c in update.contents]}, + ) + # Distinguish client vs server tools + for i, content in enumerate(update.contents): + if isinstance(content, FunctionCallContent): + logger.debug( + f"[AGUIChatClient] Function call: {content.name}, in client_tool_set: {content.name in client_tool_set}" + ) + if content.name in client_tool_set: + # Client tool - let @use_function_invocation execute it + if not content.additional_properties: + content.additional_properties = {} + content.additional_properties["agui_thread_id"] = thread_id + else: + # Server tool - wrap so @use_function_invocation ignores it + logger.debug(f"[AGUIChatClient] Wrapping server tool: {content.name}") + self._register_server_tool_placeholder(content.name) + update.contents[i] = ServerFunctionCallContent(content) # type: ignore + + yield update diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py b/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py new file mode 100644 index 0000000000..0f485739c9 --- /dev/null +++ b/python/packages/ag-ui/agent_framework_ag_ui/_event_converters.py @@ -0,0 +1,209 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Event converter for AG-UI protocol events to Agent Framework types.""" + +from typing import Any + +from agent_framework import ( + ChatResponseUpdate, + ErrorContent, + FinishReason, + FunctionCallContent, + FunctionResultContent, + Role, + TextContent, +) + + +class AGUIEventConverter: + """Converter for AG-UI events to Agent Framework types. + + Handles conversion of AG-UI protocol events to ChatResponseUpdate objects + while maintaining state, aggregating content, and tracking metadata. 
+ """ + + def __init__(self) -> None: + """Initialize the converter with fresh state.""" + self.current_message_id: str | None = None + self.current_tool_call_id: str | None = None + self.current_tool_name: str | None = None + self.accumulated_tool_args: str = "" + self.thread_id: str | None = None + self.run_id: str | None = None + + def convert_event(self, event: dict[str, Any]) -> ChatResponseUpdate | None: + """Convert a single AG-UI event to ChatResponseUpdate. + + Args: + event: AG-UI event dictionary + + Returns: + ChatResponseUpdate if event produces content, None otherwise + + Examples: + RUN_STARTED event: + + .. code-block:: python + + converter = AGUIEventConverter() + event = {"type": "RUN_STARTED", "threadId": "t1", "runId": "r1"} + update = converter.convert_event(event) + assert update.additional_properties["thread_id"] == "t1" + + TEXT_MESSAGE_CONTENT event: + + .. code-block:: python + + event = {"type": "TEXT_MESSAGE_CONTENT", "messageId": "m1", "delta": "Hello"} + update = converter.convert_event(event) + assert update.contents[0].text == "Hello" + """ + event_type = event.get("type", "") + + if event_type == "RUN_STARTED": + return self._handle_run_started(event) + elif event_type == "TEXT_MESSAGE_START": + return self._handle_text_message_start(event) + elif event_type == "TEXT_MESSAGE_CONTENT": + return self._handle_text_message_content(event) + elif event_type == "TEXT_MESSAGE_END": + return self._handle_text_message_end(event) + elif event_type == "TOOL_CALL_START": + return self._handle_tool_call_start(event) + elif event_type == "TOOL_CALL_ARGS": + return self._handle_tool_call_args(event) + elif event_type == "TOOL_CALL_END": + return self._handle_tool_call_end(event) + elif event_type == "TOOL_CALL_RESULT": + return self._handle_tool_call_result(event) + elif event_type == "RUN_FINISHED": + return self._handle_run_finished(event) + elif event_type == "RUN_ERROR": + return self._handle_run_error(event) + + return None + + def 
_handle_run_started(self, event: dict[str, Any]) -> ChatResponseUpdate: + """Handle RUN_STARTED event.""" + self.thread_id = event.get("threadId") + self.run_id = event.get("runId") + + return ChatResponseUpdate( + role=Role.ASSISTANT, + contents=[], + additional_properties={ + "thread_id": self.thread_id, + "run_id": self.run_id, + }, + ) + + def _handle_text_message_start(self, event: dict[str, Any]) -> ChatResponseUpdate | None: + """Handle TEXT_MESSAGE_START event.""" + self.current_message_id = event.get("messageId") + return ChatResponseUpdate( + role=Role.ASSISTANT, + message_id=self.current_message_id, + contents=[], + ) + + def _handle_text_message_content(self, event: dict[str, Any]) -> ChatResponseUpdate: + """Handle TEXT_MESSAGE_CONTENT event.""" + message_id = event.get("messageId") + delta = event.get("delta", "") + + if message_id != self.current_message_id: + self.current_message_id = message_id + + return ChatResponseUpdate( + role=Role.ASSISTANT, + message_id=self.current_message_id, + contents=[TextContent(text=delta)], + ) + + def _handle_text_message_end(self, event: dict[str, Any]) -> ChatResponseUpdate | None: + """Handle TEXT_MESSAGE_END event.""" + return None + + def _handle_tool_call_start(self, event: dict[str, Any]) -> ChatResponseUpdate: + """Handle TOOL_CALL_START event.""" + self.current_tool_call_id = event.get("toolCallId") + self.current_tool_name = event.get("toolName") or event.get("toolCallName") or event.get("tool_call_name") + self.accumulated_tool_args = "" + + return ChatResponseUpdate( + role=Role.ASSISTANT, + contents=[ + FunctionCallContent( + call_id=self.current_tool_call_id or "", + name=self.current_tool_name or "", + arguments="", + ) + ], + ) + + def _handle_tool_call_args(self, event: dict[str, Any]) -> ChatResponseUpdate: + """Handle TOOL_CALL_ARGS event.""" + delta = event.get("delta", "") + self.accumulated_tool_args += delta + + return ChatResponseUpdate( + role=Role.ASSISTANT, + contents=[ + 
FunctionCallContent( + call_id=self.current_tool_call_id or "", + name=self.current_tool_name or "", + arguments=delta, + ) + ], + ) + + def _handle_tool_call_end(self, event: dict[str, Any]) -> ChatResponseUpdate | None: + """Handle TOOL_CALL_END event.""" + self.accumulated_tool_args = "" + return None + + def _handle_tool_call_result(self, event: dict[str, Any]) -> ChatResponseUpdate: + """Handle TOOL_CALL_RESULT event.""" + tool_call_id = event.get("toolCallId", "") + result = event.get("result") if event.get("result") is not None else event.get("content") + + return ChatResponseUpdate( + role=Role.TOOL, + contents=[ + FunctionResultContent( + call_id=tool_call_id, + result=result, + ) + ], + ) + + def _handle_run_finished(self, event: dict[str, Any]) -> ChatResponseUpdate: + """Handle RUN_FINISHED event.""" + return ChatResponseUpdate( + role=Role.ASSISTANT, + finish_reason=FinishReason.STOP, + contents=[], + additional_properties={ + "thread_id": self.thread_id, + "run_id": self.run_id, + }, + ) + + def _handle_run_error(self, event: dict[str, Any]) -> ChatResponseUpdate: + """Handle RUN_ERROR event.""" + error_message = event.get("message", "Unknown error") + + return ChatResponseUpdate( + role=Role.ASSISTANT, + finish_reason=FinishReason.CONTENT_FILTER, + contents=[ + ErrorContent( + message=error_message, + error_code="RUN_ERROR", + ) + ], + additional_properties={ + "thread_id": self.thread_id, + "run_id": self.run_id, + }, + ) diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_events.py b/python/packages/ag-ui/agent_framework_ag_ui/_events.py index b6b2294d45..4117fd50bb 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_events.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_events.py @@ -107,7 +107,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba # Skip text content if we're about to emit confirm_changes # The summary should only appear after user confirms if self.should_stop_after_confirm: - 
logger.debug(" >>> Skipping text content - waiting for confirm_changes response") + logger.debug("Skipping text content - waiting for confirm_changes response") # Save the summary text to show after confirmation self.suppressed_summary += content.text continue @@ -156,7 +156,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba tool_call_name=content.name, parent_message_id=self.current_message_id, ) - logger.info(f" >>> Emitting ToolCallStartEvent with name='{content.name}', id='{tool_call_id}'") + logger.info(f"Emitting ToolCallStartEvent with name='{content.name}', id='{tool_call_id}'") events.append(tool_start_event) # Track tool call for MessagesSnapshotEvent @@ -186,7 +186,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba # If it's a dict, convert to JSON delta_str = json.dumps(content.arguments) - logger.info(f" >>> Emitting ToolCallArgsEvent with delta: {delta_str!r}..., id='{tool_call_id}'") + logger.info(f"Emitting ToolCallArgsEvent with delta: {delta_str!r}..., id='{tool_call_id}'") args_event = ToolCallArgsEvent( tool_call_id=tool_call_id, delta=delta_str, @@ -211,7 +211,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba self.streaming_tool_args += json.dumps(content.arguments) logger.debug( - f" >>> Predictive state: accumulated {len(self.streaming_tool_args)} chars for tool '{self.current_tool_call_name}'" + f"Predictive state: accumulated {len(self.streaming_tool_args)} chars for tool '{self.current_tool_call_name}'" ) # Try to parse accumulated arguments (may be incomplete JSON) @@ -262,11 +262,11 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba else str(partial_value) ) logger.info( - f" >>> StateDeltaEvent #{self.state_delta_count} for '{state_key}': " + f"StateDeltaEvent #{self.state_delta_count} for '{state_key}': " f"op=replace, path=/{state_key}, value={value_preview}" ) elif self.state_delta_count % 100 == 
0: - logger.info(f" >>> StateDeltaEvent #{self.state_delta_count} emitted") + logger.info(f"StateDeltaEvent #{self.state_delta_count} emitted") events.append(state_delta_event) self.last_emitted_state[state_key] = partial_value @@ -312,11 +312,11 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba else str(state_value) ) logger.info( - f" >>> StateDeltaEvent #{self.state_delta_count} for '{state_key}': " + f"StateDeltaEvent #{self.state_delta_count} for '{state_key}': " f"op=replace, path=/{state_key}, value={value_preview}" ) elif self.state_delta_count % 100 == 0: # Also log every 100th - logger.info(f" >>> StateDeltaEvent #{self.state_delta_count} emitted") + logger.info(f"StateDeltaEvent #{self.state_delta_count} emitted") events.append(state_delta_event) @@ -360,7 +360,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba ], ) logger.info( - f" >>> Emitting StateDeltaEvent for key '{state_key}', value type: {type(state_value)}" + f"Emitting StateDeltaEvent for key '{state_key}', value type: {type(state_value)}" ) events.append(state_delta_event) @@ -376,13 +376,13 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba end_event = ToolCallEndEvent( tool_call_id=content.call_id, ) - logger.info(f" >>> Emitting ToolCallEndEvent for completed tool call '{content.call_id}'") + logger.info(f"Emitting ToolCallEndEvent for completed tool call '{content.call_id}'") events.append(end_event) # Log total StateDeltaEvent count for this tool call if self.state_delta_count > 0: logger.info( - f" >>> Tool call '{content.call_id}' complete: emitted {self.state_delta_count} StateDeltaEvents total" + f"Tool call '{content.call_id}' complete: emitted {self.state_delta_count} StateDeltaEvents total" ) # Reset streaming accumulator and counter for next tool call @@ -410,11 +410,13 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba events.append(result_event) 
# Track tool result for MessagesSnapshotEvent + # AG-UI protocol expects: { role: "tool", toolCallId: ..., content: ... } + # Use camelCase for Pydantic's alias_generator=to_camel self.tool_results.append( { "id": result_message_id, "role": "tool", - "tool_call_id": content.call_id, + "toolCallId": content.call_id, "content": result_content, } ) @@ -422,6 +424,9 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba # Emit MessagesSnapshotEvent with the complete conversation including tool calls and results # This is required for CopilotKit's useCopilotAction to detect tool result if self.pending_tool_calls and self.tool_results: + # Import message adapter + from ._message_adapters import agent_framework_messages_to_agui + # Build assistant message with tool_calls assistant_message = { "id": generate_event_id(), @@ -429,14 +434,19 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba "tool_calls": self.pending_tool_calls.copy(), # Copy the accumulated tool calls } + # Convert Agent Framework messages to AG-UI format (adds required 'id' field) + converted_input_messages = agent_framework_messages_to_agui(self.input_messages) + # Build complete messages array: input messages + assistant message + tool results - all_messages = list(self.input_messages) + [assistant_message] + self.tool_results.copy() + all_messages = converted_input_messages + [assistant_message] + self.tool_results.copy() # Emit MessagesSnapshotEvent using the proper event type + # Note: messages are dict[str, Any] but Pydantic will validate them as Message types messages_snapshot_event = MessagesSnapshotEvent( - type=EventType.MESSAGES_SNAPSHOT, messages=all_messages + type=EventType.MESSAGES_SNAPSHOT, + messages=all_messages, # type: ignore[arg-type] ) - logger.info(f" >>> Emitting MessagesSnapshotEvent with {len(all_messages)} messages") + logger.info(f"Emitting MessagesSnapshotEvent with {len(all_messages)} messages") 
events.append(messages_snapshot_event) # After tool execution, emit StateSnapshotEvent if we have pending state updates @@ -466,7 +476,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba # If so, emit a confirm_changes tool call for the UI modal tool_was_predictive = False logger.debug( - f" >>> Checking predictive state: current_tool='{self.current_tool_call_name}', " + f"Checking predictive state: current_tool='{self.current_tool_call_name}', " f"predict_config={list(self.predict_state_config.keys()) if self.predict_state_config else 'None'}" ) for state_key, config in self.predict_state_config.items(): @@ -474,7 +484,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba # We need to match against self.current_tool_call_name if self.current_tool_call_name and config["tool"] == self.current_tool_call_name: logger.info( - f" >>> Tool '{self.current_tool_call_name}' matches predictive config for state key '{state_key}'" + f"Tool '{self.current_tool_call_name}' matches predictive config for state key '{state_key}'" ) tool_was_predictive = True break @@ -483,7 +493,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba # Emit confirm_changes tool call sequence confirm_call_id = generate_event_id() - logger.info(" >>> Emitting confirm_changes tool call for predictive update") + logger.info("Emitting confirm_changes tool call for predictive update") # Track confirm_changes tool call for MessagesSnapshotEvent (so it persists after RUN_FINISHED) self.pending_tool_calls.append( @@ -518,6 +528,9 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba events.append(confirm_end) # Emit MessagesSnapshotEvent so confirm_changes persists after RUN_FINISHED + # Import message adapter + from ._message_adapters import agent_framework_messages_to_agui + # Build assistant message with pending confirm_changes tool call assistant_message = { "id": 
generate_event_id(), @@ -525,23 +538,28 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba "tool_calls": self.pending_tool_calls.copy(), # Includes confirm_changes } + # Convert Agent Framework messages to AG-UI format (adds required 'id' field) + converted_input_messages = agent_framework_messages_to_agui(self.input_messages) + # Build complete messages array: input messages + assistant message + any tool results - all_messages = list(self.input_messages) + [assistant_message] + self.tool_results.copy() + all_messages = converted_input_messages + [assistant_message] + self.tool_results.copy() # Emit MessagesSnapshotEvent + # Note: messages are dict[str, Any] but Pydantic will validate them as Message types messages_snapshot_event = MessagesSnapshotEvent( - type=EventType.MESSAGES_SNAPSHOT, messages=all_messages + type=EventType.MESSAGES_SNAPSHOT, + messages=all_messages, # type: ignore[arg-type] ) logger.info( - f" >>> Emitting MessagesSnapshotEvent for confirm_changes with {len(all_messages)} messages" + f"Emitting MessagesSnapshotEvent for confirm_changes with {len(all_messages)} messages" ) events.append(messages_snapshot_event) # Set flag to stop the run after this - we're waiting for user response self.should_stop_after_confirm = True - logger.info(" >>> Set flag to stop run after confirm_changes") + logger.info("Set flag to stop run after confirm_changes") elif tool_was_predictive: - logger.info(" >>> Skipping confirm_changes - require_confirmation is False") + logger.info("Skipping confirm_changes - require_confirmation is False") # Clear pending updates and reset tool name tracker self.pending_state_updates.clear() @@ -580,7 +598,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba # Update current state self.current_state[state_key] = state_value logger.info( - f" >>> Emitting StateSnapshotEvent for key '{state_key}', value type: {type(state_value)}" + f"Emitting StateSnapshotEvent for key 
'{state_key}', value type: {type(state_value)}" ) # Emit state snapshot @@ -596,7 +614,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba tool_call_id=content.function_call.call_id, ) logger.info( - f" >>> Emitting ToolCallEndEvent for approval-required tool '{content.function_call.call_id}'" + f"Emitting ToolCallEndEvent for approval-required tool '{content.function_call.call_id}'" ) events.append(end_event) @@ -615,7 +633,7 @@ async def from_agent_run_update(self, update: AgentRunResponseUpdate) -> list[Ba }, }, ) - logger.info(f" >>> Emitting function_approval_request custom event for '{content.function_call.name}'") + logger.info(f"Emitting function_approval_request custom event for '{content.function_call.name}'") events.append(approval_event) return events diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_http_service.py b/python/packages/ag-ui/agent_framework_ag_ui/_http_service.py new file mode 100644 index 0000000000..3c5b288454 --- /dev/null +++ b/python/packages/ag-ui/agent_framework_ag_ui/_http_service.py @@ -0,0 +1,157 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""HTTP service for AG-UI protocol communication.""" + +import json +import logging +from collections.abc import AsyncIterable +from typing import Any + +import httpx + +logger = logging.getLogger(__name__) + + +class AGUIHttpService: + """HTTP service for AG-UI protocol communication. + + Handles HTTP POST requests and Server-Sent Events (SSE) stream parsing + for the AG-UI protocol. + + Examples: + Basic usage: + + .. code-block:: python + + service = AGUIHttpService("http://localhost:8888/") + async for event in service.post_run( + thread_id="thread_123", + run_id="run_456", + messages=[{"role": "user", "content": "Hello"}] + ): + print(event["type"]) + + With context manager: + + .. 
code-block:: python + + async with AGUIHttpService("http://localhost:8888/") as service: + async for event in service.post_run(...): + print(event) + """ + + def __init__( + self, + endpoint: str, + http_client: httpx.AsyncClient | None = None, + timeout: float = 60.0, + ) -> None: + """Initialize the HTTP service. + + Args: + endpoint: AG-UI server endpoint URL (e.g., "http://localhost:8888/") + http_client: Optional httpx AsyncClient. If None, creates a new one. + timeout: Request timeout in seconds (default: 60.0) + """ + self.endpoint = endpoint.rstrip("/") + self._owns_client = http_client is None + self.http_client = http_client or httpx.AsyncClient(timeout=timeout) + + async def post_run( + self, + thread_id: str, + run_id: str, + messages: list[dict[str, Any]], + state: dict[str, Any] | None = None, + tools: list[dict[str, Any]] | None = None, + ) -> AsyncIterable[dict[str, Any]]: + """Post a run request and stream AG-UI events. + + Args: + thread_id: Thread identifier for conversation continuity + run_id: Unique run identifier + messages: List of messages in AG-UI format + state: Optional state object to send to server + tools: Optional list of tools available to the agent + + Yields: + AG-UI event dictionaries parsed from SSE stream + + Raises: + httpx.HTTPStatusError: If the HTTP request fails + ValueError: If SSE parsing encounters invalid data + + Examples: + .. 
code-block:: python + + service = AGUIHttpService("http://localhost:8888/") + async for event in service.post_run( + thread_id="thread_abc", + run_id="run_123", + messages=[{"role": "user", "content": "Hello"}], + state={"user_context": {"name": "Alice"}} + ): + if event["type"] == "TEXT_MESSAGE_CONTENT": + print(event["delta"]) + """ + # Build request payload + request_data: dict[str, Any] = { + "thread_id": thread_id, + "run_id": run_id, + "messages": messages, + } + + if state is not None: + request_data["state"] = state + + if tools is not None: + request_data["tools"] = tools + + logger.debug( + f"Posting run to {self.endpoint}: thread_id={thread_id}, run_id={run_id}, " + f"messages={len(messages)}, has_state={state is not None}, has_tools={tools is not None}" + ) + + # Stream the response using SSE + async with self.http_client.stream( + "POST", + self.endpoint, + json=request_data, + headers={"Accept": "text/event-stream"}, + ) as response: + try: + response.raise_for_status() + except httpx.HTTPStatusError as e: + logger.error(f"HTTP request failed: {e.response.status_code} - {e.response.text}") + raise + + async for line in response.aiter_lines(): + # Parse Server-Sent Events format + if line.startswith("data: "): + data = line[6:] # Remove "data: " prefix + try: + event = json.loads(data) + logger.debug(f"Received event: {event.get('type', 'UNKNOWN')}") + yield event + except json.JSONDecodeError as e: + logger.warning(f"Failed to parse SSE data: {data}. Error: {e}") + # Continue processing other events instead of failing + continue + + async def close(self) -> None: + """Close the HTTP client if owned by this service. + + Only closes the client if it was created by this service instance. + If an external client was provided, it remains the caller's + responsibility to close it. 
+ """ + if self._owns_client and self.http_client: + await self.http_client.aclose() + + async def __aenter__(self) -> "AGUIHttpService": + """Enter async context manager.""" + return self + + async def __aexit__(self, *args: Any) -> None: + """Exit async context manager and clean up resources.""" + await self.close() diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py index ebeb2dcacf..da8cb197f2 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_message_adapters.py @@ -2,12 +2,13 @@ """Message format conversion between AG-UI and Agent Framework.""" -from typing import Any +from typing import Any, cast from agent_framework import ( ChatMessage, FunctionApprovalResponseContent, FunctionCallContent, + FunctionResultContent, Role, TextContent, ) @@ -46,7 +47,7 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha result_content = msg.get("result", msg.get("content", "")) chat_msg = ChatMessage( - role=Role.ASSISTANT, # Tool results are assistant messages + role=Role.TOOL, # Tool results must be tool role contents=[FunctionResultContent(call_id=tool_call_id, result=result_content)], ) @@ -56,6 +57,42 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha result.append(chat_msg) continue + # If assistant message includes tool calls, convert to FunctionCallContent(s) + tool_calls = msg.get("tool_calls") or msg.get("toolCalls") + if tool_calls: + contents: list[Any] = [] + # Include any assistant text content if present + content_text = msg.get("content") + if isinstance(content_text, str) and content_text: + contents.append(TextContent(text=content_text)) + # Convert each tool call entry + for tc in tool_calls: + if not isinstance(tc, dict): + continue + # Cast to typed dict for proper type inference + tc_dict = cast(dict[str, Any], tc) + 
tc_type = tc_dict.get("type") + if tc_type == "function": + func_data = tc_dict.get("function", {}) + func_dict = cast(dict[str, Any], func_data) if isinstance(func_data, dict) else {} + + call_id = str(tc_dict.get("id", "")) + name = str(func_dict.get("name", "")) + arguments = func_dict.get("arguments") + + contents.append( + FunctionCallContent( + call_id=call_id, + name=name, + arguments=arguments, + ) + ) + chat_msg = ChatMessage(role=Role.ASSISTANT, contents=contents) + if "id" in msg: + chat_msg.message_id = msg["id"] + result.append(chat_msg) + continue + role_str = msg.get("role", "user") # Handle tool result messages (with role="tool") @@ -78,11 +115,11 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha # Backend tool results have non-empty content WITHOUT "accepted" field if tool_call_id and result_content and not is_approval: - # Backend tool execution - convert to FunctionResultContent + # Tool execution result - convert to FunctionResultContent with correct role from agent_framework import FunctionResultContent chat_msg = ChatMessage( - role=Role.ASSISTANT, # Tool results are assistant messages + role=Role.TOOL, contents=[FunctionResultContent(call_id=tool_call_id, result=result_content)], ) @@ -97,9 +134,8 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha chat_msg = ChatMessage( role=Role.USER, # Approval responses are user messages contents=[TextContent(text=content)], + additional_properties={"is_tool_result": True, "tool_call_id": msg.get("toolCallId", "")}, ) - # Mark this as a tool result so we can detect it later - chat_msg.metadata = {"is_tool_result": True, "tool_call_id": msg.get("toolCallId", "")} # type: ignore[attr-defined] if "id" in msg: chat_msg.message_id = msg["id"] @@ -112,7 +148,7 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha # Check if this message contains function approvals if "function_approvals" in msg and 
msg["function_approvals"]: # Convert function approvals to FunctionApprovalResponseContent - contents: list[Any] = [] + approval_contents: list[Any] = [] for approval in msg["function_approvals"]: # Create FunctionCallContent with the modified arguments func_call = FunctionCallContent( @@ -127,9 +163,9 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha id=approval.get("id", ""), function_call=func_call, ) - contents.append(approval_response) + approval_contents.append(approval_response) - chat_msg = ChatMessage(role=role, contents=contents) # type: ignore[arg-type] + chat_msg = ChatMessage(role=role, contents=approval_contents) # type: ignore[arg-type] else: # Regular text message content = msg.get("content", "") @@ -146,21 +182,44 @@ def agui_messages_to_agent_framework(messages: list[dict[str, Any]]) -> list[Cha return result -def agent_framework_messages_to_agui(messages: list[ChatMessage]) -> list[dict[str, Any]]: +def agent_framework_messages_to_agui(messages: list[ChatMessage] | list[dict[str, Any]]) -> list[dict[str, Any]]: """Convert Agent Framework messages to AG-UI format. 
Args: - messages: List of Agent Framework ChatMessage objects + messages: List of Agent Framework ChatMessage objects or AG-UI dicts (already converted) Returns: List of AG-UI message dictionaries """ + from ._utils import generate_event_id + result: list[dict[str, Any]] = [] for msg in messages: + # If already a dict (AG-UI format), ensure it has an ID and normalize keys for Pydantic + if isinstance(msg, dict): + # Always work on a copy to avoid mutating input + normalized_msg = msg.copy() + # Ensure ID exists + if "id" not in normalized_msg: + normalized_msg["id"] = generate_event_id() + # Normalize tool_call_id to toolCallId for Pydantic's alias_generator=to_camel + if normalized_msg.get("role") == "tool": + if "tool_call_id" in normalized_msg: + normalized_msg["toolCallId"] = normalized_msg["tool_call_id"] + del normalized_msg["tool_call_id"] + elif "toolCallId" not in normalized_msg: + # Tool message missing toolCallId - add empty string to satisfy schema + normalized_msg["toolCallId"] = "" + # Always append the normalized copy, not the original + result.append(normalized_msg) + continue + + # Convert ChatMessage to AG-UI format role = _FRAMEWORK_TO_AGUI_ROLE.get(msg.role, "user") content_text = "" tool_calls: list[dict[str, Any]] = [] + tool_result_call_id: str | None = None for content in msg.contents: if isinstance(content, TextContent): @@ -176,18 +235,32 @@ def agent_framework_messages_to_agui(messages: list[ChatMessage]) -> list[dict[s }, } ) + elif isinstance(content, FunctionResultContent): + # Tool result content - extract call_id and result + tool_result_call_id = content.call_id + # Serialize result to string + if isinstance(content.result, dict): + import json + + content_text = json.dumps(content.result) # type: ignore + elif content.result is not None: + content_text = str(content.result) agui_msg: dict[str, Any] = { + "id": msg.message_id if msg.message_id else generate_event_id(), # Always include id "role": role, "content": content_text, } - 
if msg.message_id: - agui_msg["id"] = msg.message_id - if tool_calls: agui_msg["tool_calls"] = tool_calls + # If this is a tool result message, add toolCallId (using camelCase for Pydantic) + if tool_result_call_id: + agui_msg["toolCallId"] = tool_result_call_id + # Tool result messages should have role="tool" + agui_msg["role"] = "tool" + result.append(agui_msg) return result diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py index 1440dddf36..b5da7998ca 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_orchestrators.py @@ -16,9 +16,9 @@ TextMessageEndEvent, TextMessageStartEvent, ) -from agent_framework import AgentProtocol, AgentThread, TextContent +from agent_framework import AgentProtocol, AgentThread, ChatAgent, TextContent -from ._utils import generate_event_id +from ._utils import convert_agui_tools_to_agent_framework, generate_event_id if TYPE_CHECKING: from ._agent import AgentConfig @@ -142,14 +142,10 @@ def can_handle(self, context: ExecutionContext) -> bool: True if last message is a tool result """ msg = context.last_message - if not msg or not hasattr(msg, "metadata"): + if not msg: return False - metadata = getattr(msg, "metadata", None) - if not metadata: - return False - - return bool(metadata.get("is_tool_result", False)) + return bool(msg.additional_properties.get("is_tool_result", False)) async def run( self, @@ -274,8 +270,10 @@ async def run( current_state: dict[str, Any] = initial_state.copy() if initial_state else {} # Check if agent uses structured outputs (response_format) - chat_options = getattr(context.agent, "chat_options", None) - response_format = getattr(chat_options, "response_format", None) if chat_options else None + # Use isinstance to narrow type for proper attribute access + response_format = None + if isinstance(context.agent, ChatAgent): + response_format = 
context.agent.chat_options.response_format skip_text_content = response_format is not None # Create event bridge @@ -334,9 +332,8 @@ async def run( if context.messages: await thread.on_new_messages(context.messages) - # Get the last message as the new input - new_message = context.last_message - if not new_message: + # Use the full incoming message batch to preserve tool-call adjacency + if not context.messages: logger.warning("No messages provided in AG-UI input") yield event_bridge.create_run_finished_event() return @@ -362,11 +359,68 @@ async def run( ) messages_to_run.append(state_context_msg) - messages_to_run.append(new_message) + # Preserve order from client to satisfy provider constraints (assistant tool_calls must + # immediately precede tool result messages). Using the full batch avoids reordering. + messages_to_run.extend(context.messages) + + # Handle client tools for hybrid execution + # Client sends tool metadata, server merges with its own tools. + # Client tools have func=None (declaration-only), so @use_function_invocation + # will return the function call without executing (passes back to client). 
+ from agent_framework import BaseChatClient + + client_tools = convert_agui_tools_to_agent_framework(context.input_data.get("tools")) + + # Extract server tools - use type narrowing when possible + server_tools: list[Any] = [] + if isinstance(context.agent, ChatAgent): + server_tools = context.agent.chat_options.tools or [] + else: + # AgentProtocol allows duck-typed implementations - fallback to attribute access + # This supports test mocks and custom agent implementations + try: + chat_options_attr = getattr(context.agent, "chat_options", None) + if chat_options_attr is not None: + server_tools = getattr(chat_options_attr, "tools", None) or [] + except AttributeError: + pass + + # Register client tools as additional (declaration-only) so they are not executed on server + if client_tools: + if isinstance(context.agent, ChatAgent): + # Type-safe path for ChatAgent + chat_client = context.agent.chat_client + if ( + isinstance(chat_client, BaseChatClient) + and chat_client.function_invocation_configuration is not None + ): + chat_client.function_invocation_configuration.additional_tools = client_tools + logger.debug( + f"[TOOLS] Registered {len(client_tools)} client tools as additional_tools (declaration-only)" + ) + else: + # Fallback for AgentProtocol implementations (test mocks, custom agents) + try: + chat_client_attr = getattr(context.agent, "chat_client", None) + if chat_client_attr is not None: + fic = getattr(chat_client_attr, "function_invocation_configuration", None) + if fic is not None: + fic.additional_tools = client_tools # type: ignore[attr-defined] + logger.debug( + f"[TOOLS] Registered {len(client_tools)} client tools as additional_tools (declaration-only)" + ) + except AttributeError: + pass + + combined_tools: list[Any] = [] + if server_tools: + combined_tools.extend(server_tools) + if client_tools: + combined_tools.extend(client_tools) # Collect all updates to get the final structured output all_updates: list[Any] = [] - async for update in 
context.agent.run_stream(messages_to_run, thread=thread): + async for update in context.agent.run_stream(messages_to_run, thread=thread, tools=combined_tools or None): all_updates.append(update) events = await event_bridge.from_agent_run_update(update) for event in events: @@ -374,7 +428,7 @@ async def run( # After agent completes, check if we should stop (waiting for user to confirm changes) if event_bridge.should_stop_after_confirm: - logger.info(" >>> Stopping run after confirm_changes - waiting for user response") + logger.info("Stopping run after confirm_changes - waiting for user response") yield event_bridge.create_run_finished_event() return diff --git a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py index e30d682fcb..8b271988dc 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui/_utils.py +++ b/python/packages/ag-ui/agent_framework_ag_ui/_utils.py @@ -4,10 +4,13 @@ import copy import uuid +from collections.abc import Callable, MutableMapping, Sequence from dataclasses import asdict, is_dataclass from datetime import date, datetime from typing import Any +from agent_framework import AIFunction, ToolProtocol + def generate_event_id() -> str: """Generate a unique event ID.""" @@ -55,3 +58,109 @@ def make_json_safe(obj: Any) -> Any: # noqa: ANN401 if isinstance(obj, dict): return {key: make_json_safe(value) for key, value in obj.items()} # type: ignore[misc] return str(obj) + + +def convert_agui_tools_to_agent_framework( + agui_tools: list[dict[str, Any]] | None, +) -> list[AIFunction[Any, Any]] | None: + """Convert AG-UI tool definitions to Agent Framework AIFunction declarations. + + Creates declaration-only AIFunction instances (no executable implementation). + These are used to tell the LLM about available tools. The actual execution + happens on the client side via @use_function_invocation. + + CRITICAL: These tools MUST have func=None so that declaration_only returns True. 
+ This prevents the server from trying to execute client-side tools. + + Args: + agui_tools: List of AG-UI tool definitions with name, description, parameters + + Returns: + List of AIFunction declarations, or None if no tools provided + """ + if not agui_tools: + return None + + result: list[AIFunction[Any, Any]] = [] + for tool_def in agui_tools: + # Create declaration-only AIFunction (func=None means no implementation) + # When func=None, the declaration_only property returns True, + # which tells @use_function_invocation to return the function call + # without executing it (so it can be sent back to the client) + func: AIFunction[Any, Any] = AIFunction( + name=tool_def.get("name", ""), + description=tool_def.get("description", ""), + func=None, # CRITICAL: Makes declaration_only=True + input_model=tool_def.get("parameters", {}), + ) + result.append(func) + + return result + + +def convert_tools_to_agui_format( + tools: ( + ToolProtocol + | Callable[..., Any] + | MutableMapping[str, Any] + | Sequence[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] + | None + ), +) -> list[dict[str, Any]] | None: + """Convert tools to AG-UI format. + + This sends only the metadata (name, description, JSON schema) to the server. + The actual executable implementation stays on the client side. + The @use_function_invocation decorator handles client-side execution when + the server requests a function. 
+ + Args: + tools: Tools to convert (single tool or sequence of tools) + + Returns: + List of tool specifications in AG-UI format, or None if no tools provided + """ + if not tools: + return None + + # Normalize to list + if not isinstance(tools, list): + tool_list: list[ToolProtocol | Callable[..., Any] | MutableMapping[str, Any]] = [tools] # type: ignore[list-item] + else: + tool_list = tools # type: ignore[assignment] + + results: list[dict[str, Any]] = [] + + for tool in tool_list: + if isinstance(tool, dict): + # Already in dict format, pass through + results.append(tool) # type: ignore[arg-type] + elif isinstance(tool, AIFunction): + # Convert AIFunction to AG-UI tool format + results.append( + { + "name": tool.name, + "description": tool.description, + "parameters": tool.parameters(), + } + ) + elif callable(tool): + # Convert callable to AIFunction first, then to AG-UI format + from agent_framework import ai_function + + ai_func = ai_function(tool) + results.append( + { + "name": ai_func.name, + "description": ai_func.description, + "parameters": ai_func.parameters(), + } + ) + elif isinstance(tool, ToolProtocol): + # Handle other ToolProtocol implementations + # For now, we'll skip non-AIFunction tools as they may not have + # the parameters() method. This matches .NET behavior which only + # converts AIFunctionDeclaration instances. 
+ continue + + return results if results else None diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md index 88887f6070..cd9c3c71c7 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/README.md @@ -14,7 +14,7 @@ pip install agent-framework-ag-ui from fastapi import FastAPI from agent_framework import ChatAgent from agent_framework.azure import AzureOpenAIChatClient -from agent_framework_ag_ui import add_agent_framework_fastapi_endpoint +from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint # Create your agent agent = ChatAgent( @@ -104,7 +104,7 @@ State is injected as system messages and updated via predictive state updates: ```python from agent_framework import ChatAgent from agent_framework.azure import AzureOpenAIChatClient -from agent_framework_ag_ui import AgentFrameworkAgent +from agent_framework.ag_ui import AgentFrameworkAgent # Create your agent agent = ChatAgent( @@ -141,7 +141,7 @@ Predictive state updates automatically stream tool arguments as optimistic state ```python from agent_framework import ChatAgent from agent_framework.azure import AzureOpenAIChatClient -from agent_framework_ag_ui import AgentFrameworkAgent +from agent_framework.ag_ui import AgentFrameworkAgent # Create your agent agent = ChatAgent( @@ -170,7 +170,7 @@ Provide domain-specific confirmation messages: from typing import Any from agent_framework import ChatAgent from agent_framework.azure import AzureOpenAIChatClient -from agent_framework_ag_ui import AgentFrameworkAgent, ConfirmationStrategy +from agent_framework.ag_ui import AgentFrameworkAgent, ConfirmationStrategy class CustomConfirmationStrategy(ConfirmationStrategy): def on_approval_accepted(self, steps: list[dict[str, Any]]) -> str: @@ -216,7 +216,7 @@ def sensitive_action(param: str) -> str: Add custom execution flows by implementing the 
Orchestrator pattern: ```python -from agent_framework_ag_ui._orchestrators import Orchestrator, ExecutionContext +from agent_framework.ag_ui._orchestrators import Orchestrator, ExecutionContext class MyCustomOrchestrator(Orchestrator): def can_handle(self, context: ExecutionContext) -> bool: diff --git a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py index ef7a438d9b..a2856dbf23 100644 --- a/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py +++ b/python/packages/ag-ui/agent_framework_ag_ui_examples/agents/task_steps_agent.py @@ -128,7 +128,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non import uuid logger = logging.getLogger(__name__) - logger.info(">>> TaskStepsAgentWithExecution.run_agent() called - wrapper is active") + logger.info("TaskStepsAgentWithExecution.run_agent() called - wrapper is active") # First, run the base agent to generate the plan - buffer text messages final_state: dict[str, Any] | None = None @@ -138,41 +138,41 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non async for event in self._base_agent.run_agent(input_data): event_type_str = str(event.type) if hasattr(event, "type") else type(event).__name__ - logger.info(f">>> Processing event: {event_type_str}") + logger.info(f"Processing event: {event_type_str}") match event: case StateSnapshotEvent(snapshot=snapshot): final_state = snapshot - logger.info(f">>> Captured STATE_SNAPSHOT event with state: {final_state}") + logger.info(f"Captured STATE_SNAPSHOT event with state: {final_state}") yield event case RunFinishedEvent(): run_finished_event = event - logger.info(">>> Captured RUN_FINISHED event - will send after step execution and summary") + logger.info("Captured RUN_FINISHED event - will send after step execution and summary") case ToolCallStartEvent(tool_call_id=call_id): 
tool_call_id = call_id - logger.info(f">>> Captured tool_call_id: {tool_call_id}") + logger.info(f"Captured tool_call_id: {tool_call_id}") yield event case TextMessageStartEvent() | TextMessageContentEvent() | TextMessageEndEvent(): buffered_text_events.append(event) - logger.info(f">>> Buffered {event_type_str} from first LLM call") + logger.info(f"Buffered {event_type_str} from first LLM call") case _: - logger.info(f">>> Yielding event immediately: {event_type_str}") + logger.info(f"Yielding event immediately: {event_type_str}") yield event - logger.info(f">>> Base agent completed. Final state: {final_state}") + logger.info(f"Base agent completed. Final state: {final_state}") # Now simulate executing the steps if final_state and "steps" in final_state: steps = final_state["steps"] - logger.info(f">>> Starting step execution simulation for {len(steps)} steps") + logger.info(f"Starting step execution simulation for {len(steps)} steps") for i in range(len(steps)): - logger.info(f">>> Simulating execution of step {i + 1}/{len(steps)}: {steps[i].get('description')}") + logger.info(f"Simulating execution of step {i + 1}/{len(steps)}: {steps[i].get('description')}") await asyncio.sleep(1.0) # Simulate work # Update step to completed steps[i]["status"] = "completed" - logger.info(f">>> Step {i + 1} marked as completed") + logger.info(f"Step {i + 1} marked as completed") # Send delta event with manual JSON patch format delta_event = StateDeltaEvent( @@ -185,7 +185,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non } ], ) - logger.info(f">>> Yielding StateDeltaEvent for step {i + 1}") + logger.info(f"Yielding StateDeltaEvent for step {i + 1}") yield delta_event # Send final snapshot @@ -193,11 +193,11 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non type=EventType.STATE_SNAPSHOT, snapshot={"steps": steps}, ) - logger.info(">>> Yielding final StateSnapshotEvent with all steps completed") + 
logger.info("Yielding final StateSnapshotEvent with all steps completed") yield final_snapshot # SECOND LLM call: Stream summary from chat client directly - logger.info(">>> Making SECOND LLM call to generate summary after step execution") + logger.info("Making SECOND LLM call to generate summary after step execution") # Get the underlying chat agent and client chat_agent = self._base_agent.agent # type: ignore @@ -236,7 +236,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non ) # Stream the LLM response and manually emit text events - logger.info(">>> Calling chat client for summary") + logger.info("Calling chat client for summary") message_id = str(uuid.uuid4()) @@ -268,7 +268,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non type=EventType.TEXT_MESSAGE_END, message_id=message_id, ) - logger.info(f">>> Summary complete: {accumulated_text}") + logger.info(f"Summary complete: {accumulated_text}") # Build complete message for persistence summary_message = { @@ -285,7 +285,7 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non messages=final_messages, ) except Exception as e: - logger.error(f">>> Error generating summary: {e}") + logger.error(f"Error generating summary: {e}") # Generate a new message ID for the error error_message_id = str(uuid.uuid4()) # Yield TEXT_MESSAGE_START for error @@ -306,11 +306,11 @@ async def run_agent(self, input_data: dict[str, Any]) -> AsyncGenerator[Any, Non message_id=error_message_id, ) else: - logger.warning(f">>> No steps found in final_state to execute. final_state={final_state}") + logger.warning(f"No steps found in final_state to execute. 
final_state={final_state}") # Finally send the original RUN_FINISHED event if run_finished_event: - logger.info(">>> Yielding original RUN_FINISHED event") + logger.info("Yielding original RUN_FINISHED event") yield run_finished_event diff --git a/python/packages/ag-ui/getting_started/README.md b/python/packages/ag-ui/getting_started/README.md index f656e49979..cb32b73197 100644 --- a/python/packages/ag-ui/getting_started/README.md +++ b/python/packages/ag-ui/getting_started/README.md @@ -2,6 +2,135 @@ The AG-UI (Agent UI) protocol provides a standardized way for client applications to interact with AI agents over HTTP. This tutorial demonstrates how to build both server and client applications using the AG-UI protocol with Python. +## Quick Start - Client Examples + +If you want to quickly try out the AG-UI client, we provide three ready-to-use examples: + +### Basic Interactive Client (`client.py`) + +A simple command-line chat client that demonstrates: +- Streaming responses in real-time +- Automatic thread management for conversation continuity +- Direct `AGUIChatClient` usage (caller manages message history) + +**Run:** +```bash +python client.py +``` + +**Note:** This example sends only the current message to the server. The server is responsible for maintaining conversation history using the thread_id. + +### Advanced Features Client (`client_advanced.py`) + +Demonstrates advanced capabilities: +- Tool/function calling +- Both streaming and non-streaming responses +- Multi-turn conversations +- Error handling patterns + +**Run:** +```bash +python client_advanced.py +``` + +**Note:** This example shows direct `AGUIChatClient` usage. Tool execution and conversation continuity depend on server-side configuration and capabilities. 
+ +### ChatAgent Integration (`client_with_agent.py`) + +Best practice example using `ChatAgent` wrapper with **AgentThread** +- **AgentThread** maintains conversation state +- Client-side conversation history management via `thread.message_store` +- **Hybrid tool execution**: client-side + server-side tools simultaneously +- Full conversation history sent on each request +- Tool calling with conversation context + +**To demonstrate hybrid tools:** + +1. **Start server with server-side tool** (Terminal 1): + ```bash + # Server has get_time_zone tool + python server.py + ``` + +2. **Run client with client-side tool** (Terminal 2): + ```bash + # Client has get_weather tool + python client_with_agent.py + ``` + +All examples require a running AG-UI server (see Step 1 below for setup). + +## Understanding AG-UI Architecture + +### Thread Management + +The AG-UI protocol supports two approaches to conversation history: + +1. **Server-Managed Threads** (client.py, client_advanced.py) + - Client sends only the current message + thread_id + - Server maintains full conversation history + - Requires server to support stateful thread storage + - Lighter network payload + +2. **Client-Managed History** (client_with_agent.py) + - Client maintains full conversation history locally + - Full message history sent with each request + - Works with any AG-UI server (stateful or stateless) + +The `ChatAgent` wrapper (used in client_with_agent.py) collects messages from local storage and sends the full history to `AGUIChatClient`, which then forwards everything to the server. + +### Tool/Function Calling + +The AG-UI protocol supports **hybrid tool execution** - both client-side AND server-side tools can coexist in the same conversation. + +**The Hybrid Pattern** (client_with_agent.py): +``` +Client defines: Server defines: +- get_weather() - get_current_time() +- read_sensors() - get_server_forecast() + +User: "What's the weather in SF and what time is it?" 
+ ↓ +ChatAgent sends: full history + tool definitions for get_weather, read_sensors + ↓ +Server LLM decides: "I need get_weather('SF') and get_current_time()" + ↓ +Server executes get_current_time() → "2025-11-11 14:30:00 UTC" +Server sends function call request → get_weather('SF') + ↓ +ChatAgent intercepts get_weather call → executes locally + ↓ +Client sends result → "Sunny, 72°F" + ↓ +Server combines both results → "It's sunny and 72°F in SF, and the current time is 2:30 PM UTC" + ↓ +Client receives final response +``` + +**How it works:** + +1. **Client-Side Tools** (`client_with_agent.py`): + - Tools defined in ChatAgent's `tools` parameter execute locally + - Tool metadata (name, description, schema) sent to server for planning + - When server requests client tool → client intercepts → executes locally → sends result + +2. **Server-Side Tools**: + - Defined in server agent's configuration + - Server executes directly without client involvement + - Results included in server's response + +3. **Hybrid Pattern (Both Together)**: + - Server LLM sees ALL tool definitions (client + server) + - Decides which to use based on task + - Server tools execute server-side + - Client tools execute client-side + +**Direct AGUIChatClient Usage** (client_advanced.py): +Even without ChatAgent wrapper, client-side tools work: +- Tools passed in ChatOptions execute locally +- Server can also have its own tools +- Hybrid execution works automatically + ## What is AG-UI? 
AG-UI is a protocol that enables: @@ -35,13 +164,13 @@ The AG-UI server hosts your AI agent and exposes it via HTTP endpoints using Fas ### Install Required Packages ```bash -pip install agent-framework-ag-ui agent-framework-core fastapi uvicorn +pip install agent-framework-ag-ui ``` Or using uv: ```bash -uv pip install agent-framework-ag-ui agent-framework-core fastapi uvicorn +uv pip install agent-framework-ag-ui ``` ### Server Code @@ -57,17 +186,20 @@ import os from agent_framework import ChatAgent from agent_framework.azure import AzureOpenAIChatClient -from agent_framework_ag_ui import add_agent_framework_fastapi_endpoint +from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint from fastapi import FastAPI # Read required configuration endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") deployment_name = os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME") +api_key = os.environ.get("AZURE_OPENAI_API_KEY") if not endpoint: raise ValueError("AZURE_OPENAI_ENDPOINT environment variable is required") if not deployment_name: raise ValueError("AZURE_OPENAI_DEPLOYMENT_NAME environment variable is required") +if not api_key: + raise ValueError("AZURE_OPENAI_API_KEY environment variable is required") # Create the AI agent agent = ChatAgent( @@ -76,6 +208,7 @@ agent = ChatAgent( chat_client=AzureOpenAIChatClient( endpoint=endpoint, deployment_name=deployment_name, + api_key=api_key, ), ) @@ -137,12 +270,14 @@ The server will start listening on `http://127.0.0.1:5100`. ## Step 2: Creating an AG-UI Client -The AG-UI client connects to the remote server and displays streaming responses. +The AG-UI client connects to the remote server and displays streaming responses. The `AGUIChatClient` is a built-in implementation that integrates with the Agent Framework's standard chat interface. ### Install Required Packages +The `AGUIChatClient` is included in the `agent-framework-ag-ui` package (already installed if you installed the server packages). 
+ ```bash -pip install httpx +pip install agent-framework-ag-ui ``` ### Client Code @@ -152,122 +287,61 @@ Create a file named `client.py`: ```python # Copyright (c) Microsoft. All rights reserved. -"""AG-UI client example.""" +"""AG-UI client example using AGUIChatClient.""" import asyncio -import json import os -from typing import AsyncIterator - -import httpx - - -class AGUIClient: - """Simple AG-UI protocol client.""" - - def __init__(self, server_url: str): - """Initialize the client. - - Args: - server_url: The AG-UI server endpoint URL - """ - self.server_url = server_url - self.thread_id: str | None = None - - async def send_message(self, message: str) -> AsyncIterator[dict]: - """Send a message and stream the response. - - Args: - message: The user message to send - - Yields: - AG-UI events from the server - """ - # Prepare the request - request_data = { - "messages": [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": message}, - ] - } - - # Include thread_id if we have one (for conversation continuity) - if self.thread_id: - request_data["thread_id"] = self.thread_id - - # Stream the response - async with httpx.AsyncClient(timeout=60.0) as client: - async with client.stream( - "POST", - self.server_url, - json=request_data, - headers={"Accept": "text/event-stream"}, - ) as response: - response.raise_for_status() - - async for line in response.aiter_lines(): - # Parse Server-Sent Events format - if line.startswith("data: "): - data = line[6:] # Remove "data: " prefix - try: - event = json.loads(data) - yield event - - # Capture thread_id from RUN_STARTED event - if event.get("type") == "RUN_STARTED" and not self.thread_id: - self.thread_id = event.get("threadId") - except json.JSONDecodeError: - continue + +from agent_framework import TextContent +from agent_framework.ag_ui import AGUIChatClient async def main(): - """Main client loop.""" + """Main client loop demonstrating AGUIChatClient usage.""" # Get server 
URL from environment or use default server_url = os.environ.get("AGUI_SERVER_URL", "http://127.0.0.1:5100/") print(f"Connecting to AG-UI server at: {server_url}\n") - client = AGUIClient(server_url) + # Create client with context manager for automatic cleanup + async with AGUIChatClient(endpoint=server_url) as client: + thread_id: str | None = None - try: - while True: - # Get user input - message = input("\nUser (:q or quit to exit): ") - if not message.strip(): - print("Request cannot be empty.") - continue + try: + while True: + # Get user input + message = input("\nUser (:q or quit to exit): ") + if not message.strip(): + print("Request cannot be empty.") + continue - if message.lower() in (":q", "quit"): - break + if message.lower() in (":q", "quit"): + break - # Send message and display streaming response - print("\n", end="") - async for event in client.send_message(message): - event_type = event.get("type", "") + # Send message and stream the response + print("\nAssistant: ", end="", flush=True) - if event_type == "RUN_STARTED": - thread_id = event.get("threadId", "") - run_id = event.get("runId", "") - print(f"\033[93m[Run Started - Thread: {thread_id}, Run: {run_id}]\033[0m") + # Use metadata to maintain conversation continuity + metadata = {"thread_id": thread_id} if thread_id else None - elif event_type == "TEXT_MESSAGE_CONTENT": - # Stream text content in cyan - print(f"\033[96m{event.get('delta', '')}\033[0m", end="", flush=True) + async for update in client.get_streaming_response(message, metadata=metadata): + # Extract thread ID from first update + if not thread_id and update.additional_properties: + thread_id = update.additional_properties.get("thread_id") + if thread_id: + print(f"\n[Thread: {thread_id}]") + print("Assistant: ", end="", flush=True) - elif event_type == "RUN_FINISHED": - thread_id = event.get("threadId", "") - run_id = event.get("runId", "") - print(f"\n\033[92m[Run Finished - Thread: {thread_id}, Run: {run_id}]\033[0m") + # Stream 
text content as it arrives + for content in update.contents: + if isinstance(content, TextContent) and content.text: + print(content.text, end="", flush=True) - elif event_type == "RUN_ERROR": - error_message = event.get("message", "Unknown error") - print(f"\n\033[91m[Run Error - Message: {error_message}]\033[0m") + print() # New line after response - print() - - except KeyboardInterrupt: - print("\n\nExiting...") - except Exception as e: - print(f"\n\033[91mAn error occurred: {e}\033[0m") + except KeyboardInterrupt: + print("\n\nExiting...") + except Exception as e: + print(f"\nAn error occurred: {e}") if __name__ == "__main__": @@ -276,17 +350,13 @@ if __name__ == "__main__": ### Key Concepts -- **Server-Sent Events (SSE)**: The protocol uses SSE format (`data: {json}\n\n`) -- **Event Types**: Different events provide metadata and content (all event types use UPPERCASE with underscores): - - `RUN_STARTED`: Signals the agent has started processing - - `TEXT_MESSAGE_START`: Signals the start of a text message from the agent - - `TEXT_MESSAGE_CONTENT`: Incremental text streamed from the agent (with `delta` field) - - `TEXT_MESSAGE_END`: Signals the end of a text message - - `RUN_FINISHED`: Signals successful completion - - `RUN_ERROR`: Error information if something goes wrong -- **Field Naming**: Event fields use camelCase (e.g., `threadId`, `runId`, `messageId`) when accessing JSON events -- **Thread Management**: The `threadId` maintains conversation context across requests -- **Client-Side Instructions**: System messages are sent from the client +- **`AGUIChatClient`**: Built-in client that implements the Agent Framework's `BaseChatClient` interface +- **Automatic Event Handling**: The client automatically converts AG-UI events to Agent Framework types +- **Thread Management**: Pass `thread_id` in metadata to maintain conversation context across requests +- **Streaming Responses**: Use `get_streaming_response()` for real-time streaming or `get_response()` for 
non-streaming +- **Context Manager**: Use `async with` for automatic cleanup of HTTP connections +- **Standard Interface**: Works with all Agent Framework patterns (ChatAgent, tools, etc.) +- **Hybrid Tool Execution**: Supports both client-side and server-side tools executing together in the same conversation ### Configure and Run the Client @@ -312,326 +382,12 @@ Connecting to AG-UI server at: http://127.0.0.1:5100/ User (:q or quit to exit): What is the capital of France? -[Run Started - Thread: abc123, Run: xyz789] -The capital of France is Paris. It is known for its rich history, culture, +[Thread: abc123] +Assistant: The capital of France is Paris. It is known for its rich history, culture, and iconic landmarks such as the Eiffel Tower and the Louvre Museum. -[Run Finished - Thread: abc123, Run: xyz789] User (:q or quit to exit): Tell me a fun fact about space - -[Run Started - Thread: abc123, Run: def456] -Here's a fun fact: A day on Venus is longer than its year! Venus takes -about 243 Earth days to rotate once on its axis, but only about 225 Earth -days to orbit the Sun. 
-[Run Finished - Thread: abc123, Run: def456] - -User (:q or quit to exit): :q -``` - -### Color-Coded Output - -The client displays different content types with distinct colors: -- **Yellow**: Run started notifications -- **Cyan**: Agent text responses (streamed in real-time) -- **Green**: Run completion notifications -- **Red**: Error messages - -## Testing with curl (Optional) - -Before running the client, you can test the server manually using curl: - -```bash -curl -N http://127.0.0.1:5100/ \ - -H "Content-Type: application/json" \ - -H "Accept: text/event-stream" \ - -d '{ - "messages": [ - {"role": "user", "content": "What is the capital of France?"} - ] - }' -``` - -You should see Server-Sent Events streaming back: - ``` -data: {"type":"RUN_STARTED","threadId":"...","runId":"..."} - -data: {"type":"TEXT_MESSAGE_START","messageId":"...","role":"assistant"} - -data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"...","delta":"The"} - -data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"...","delta":" capital"} - -... - -data: {"type":"TEXT_MESSAGE_END","messageId":"..."} - -data: {"type":"RUN_FINISHED","threadId":"...","runId":"..."} -``` - -## How It Works - -### Server-Side Flow - -1. Client sends HTTP POST request with messages -2. FastAPI endpoint receives the request -3. `AgentFrameworkAgent` wrapper orchestrates the execution -4. Agent processes the messages using Agent Framework -5. `AgentFrameworkEventBridge` converts agent updates to AG-UI events -6. Responses are streamed back as Server-Sent Events (SSE) -7. Connection closes when the run completes - -### Client-Side Flow - -1. Client sends HTTP POST request to server endpoint -2. Server responds with SSE stream -3. Client parses incoming `data:` lines as JSON events -4. Each event is displayed based on its type -5. `threadId` is captured for conversation continuity -6. 
Stream completes when `RUN_FINISHED` event arrives - -### Protocol Details - -The AG-UI protocol uses: -- **HTTP POST** for sending requests -- **Server-Sent Events (SSE)** for streaming responses -- **JSON** for event serialization -- **Thread IDs** for maintaining conversation context -- **Run IDs** for tracking individual executions -- **Event type naming**: UPPERCASE with underscores (e.g., `RUN_STARTED`, `TEXT_MESSAGE_CONTENT`) -- **Field naming**: camelCase (e.g., `threadId`, `runId`, `messageId`) - -## Advanced Features - -The Python AG-UI implementation supports all 7 AG-UI features: - -### 1. Backend Tool Rendering - -Add tools to your agent for backend execution: - -```python -from typing import Any - -from agent_framework import ChatAgent, ai_function -from agent_framework.azure import AzureOpenAIChatClient - - -@ai_function -def get_weather(location: str) -> dict[str, Any]: - """Get weather for a location.""" - return {"temperature": 72, "conditions": "sunny"} - - -agent = ChatAgent( - name="weather_agent", - instructions="Use tools to help users.", - chat_client=AzureOpenAIChatClient( - endpoint="https://your-resource.openai.azure.com/", - deployment_name="gpt-4o-mini", - ), - tools=[get_weather], -) -``` - -The client will receive `TOOL_CALL_START`, `TOOL_CALL_ARGS`, `TOOL_CALL_END`, and `TOOL_CALL_RESULT` events. - -### 2. 
Human in the Loop - -Request user confirmation before executing tools: - -```python -from fastapi import FastAPI -from agent_framework import ChatAgent -from agent_framework.azure import AzureOpenAIChatClient -from agent_framework_ag_ui import AgentFrameworkAgent, add_agent_framework_fastapi_endpoint - -agent = ChatAgent( - name="my_agent", - instructions="You are a helpful assistant.", - chat_client=AzureOpenAIChatClient( - endpoint="https://your-resource.openai.azure.com/", - deployment_name="gpt-4o-mini", - ), -) - -wrapped_agent = AgentFrameworkAgent( - agent=agent, - require_confirmation=True, # Enable human-in-the-loop -) - -app = FastAPI() -add_agent_framework_fastapi_endpoint(app, wrapped_agent, "/") -``` - -The client receives tool approval request events and can send approval responses. - -### 3. State Management - -Share state between client and server: - -```python -wrapped_agent = AgentFrameworkAgent( - agent=agent, - state_schema={ - "location": {"type": "string"}, - "preferences": {"type": "object"}, - }, -) -``` - -Events include `STATE_SNAPSHOT` and `STATE_DELTA` for bidirectional sync. - -### 4. Predictive State Updates - -Stream tool arguments as optimistic state updates: - -```python -wrapped_agent = AgentFrameworkAgent( - agent=agent, - predict_state_config={ - "location": {"tool": "get_weather", "tool_argument": "location"} - }, - require_confirmation=False, # Auto-update without confirmation -) -``` - -State updates stream in real-time as the LLM generates tool arguments. 
- -## Common Patterns - -### Custom Server Configuration - -```python -from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware - -app = FastAPI() - -# Add CORS for web clients -app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], -) - -add_agent_framework_fastapi_endpoint(app, agent, "/agent") -``` - -### Multiple Agents - -```python -app = FastAPI() - -weather_agent = ChatAgent(name="weather", ...) -finance_agent = ChatAgent(name="finance", ...) - -add_agent_framework_fastapi_endpoint(app, weather_agent, "/weather") -add_agent_framework_fastapi_endpoint(app, finance_agent, "/finance") -``` - -### Custom Client Timeout - -```python -async with httpx.AsyncClient(timeout=300.0) as client: - async with client.stream("POST", server_url, ...) as response: - async for line in response.aiter_lines(): - # Process events - pass -``` - -### Error Handling - -```python -try: - async for event in client.send_message(message): - if event.get("type") == "RUN_ERROR": - error_msg = event.get("message", "Unknown error") - print(f"Error: {error_msg}") - # Handle error appropriately -except httpx.HTTPError as e: - print(f"HTTP error: {e}") -except Exception as e: - print(f"Unexpected error: {e}") -``` - -### Conversation Continuity - -The client automatically maintains `threadId` across requests: - -```python -client = AGUIClient(server_url) - -# First message -async for event in client.send_message("Hello"): - # Client captures threadId from RUN_STARTED - pass - -# Second message - uses same threadId -async for event in client.send_message("Continue our conversation"): - # Conversation context is maintained - pass -``` - -## AG-UI Event Reference - -### Core Events - -| Event Type | Description | Key Fields | -|------------|-------------|------------| -| `RUN_STARTED` | Agent execution started | `threadId`, `runId` | -| `RUN_FINISHED` | Agent execution completed | `threadId`, 
`runId` | -| `RUN_ERROR` | Agent execution error | `message` | - -### Text Message Events - -| Event Type | Description | Key Fields | -|------------|-------------|------------| -| `TEXT_MESSAGE_START` | Start of agent text message | `messageId`, `role` | -| `TEXT_MESSAGE_CONTENT` | Streaming text content | `messageId`, `delta` | -| `TEXT_MESSAGE_END` | End of agent text message | `messageId` | - -### Tool Events - -| Event Type | Description | Key Fields | -|------------|-------------|------------| -| `TOOL_CALL_START` | Tool call initiated | `toolCallId`, `toolCallName` | -| `TOOL_CALL_ARGS` | Tool arguments streaming | `toolCallId`, `delta` | -| `TOOL_CALL_END` | Tool call complete | `toolCallId` | -| `TOOL_CALL_RESULT` | Tool execution result | `toolCallId`, `content` | - -### State Events - -| Event Type | Description | Key Fields | -|------------|-------------|------------| -| `STATE_SNAPSHOT` | Complete state | `snapshot` | -| `STATE_DELTA` | State changes (JSON Patch) | `delta` | - -### Other Events - -| Event Type | Description | Key Fields | -|------------|-------------|------------| -| `MESSAGES_SNAPSHOT` | Conversation history | `messages` | -| `CUSTOM` | Custom event data | `name`, `value` | - -## Next Steps - -Now that you understand the basics of AG-UI, you can: - -- **Add Tools**: Create custom `@ai_function` tools for your domain -- **Web Integration**: Build React/Vue frontends using the AG-UI protocol -- **State Management**: Implement shared state for generative UI applications -- **Human-in-the-Loop**: Add approval workflows for sensitive operations -- **Deployment**: Deploy to Azure Container Apps or Azure App Service -- **Multi-Agent Systems**: Coordinate multiple specialized agents -- **Monitoring**: Add logging and OpenTelemetry for observability - -## Additional Resources - -- [AG-UI Examples](../agent_framework_ag_ui_examples/README.md): Complete working examples for all 7 features -- [Agent Framework Documentation](../../core/README.md): 
Learn more about creating agents -- [AG-UI Protocol Spec](https://docs.ag-ui.com/): Official protocol documentation ## Troubleshooting diff --git a/python/packages/ag-ui/getting_started/client.py b/python/packages/ag-ui/getting_started/client.py index 82d3d1358e..621d8536cd 100644 --- a/python/packages/ag-ui/getting_started/client.py +++ b/python/packages/ag-ui/getting_started/client.py @@ -1,121 +1,71 @@ # Copyright (c) Microsoft. All rights reserved. -"""AG-UI client example.""" +"""AG-UI client example using AGUIChatClient. + +This example demonstrates how to use the AGUIChatClient to connect to +a remote AG-UI server and interact with it using the Agent Framework's +standard chat interface. +""" import asyncio -import json import os -from collections.abc import AsyncIterator - -import httpx - - -class AGUIClient: - """Simple AG-UI protocol client.""" - - def __init__(self, server_url: str): - """Initialize the client. - - Args: - server_url: The AG-UI server endpoint URL - """ - self.server_url = server_url - self.thread_id: str | None = None - - async def send_message(self, message: str) -> AsyncIterator[dict]: - """Send a message and stream the response. 
- - Args: - message: The user message to send - - Yields: - AG-UI events from the server - """ - # Prepare the request - request_data: dict[str, object] = { - "messages": [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": message}, - ] - } - - # Include thread_id if we have one (for conversation continuity) - if self.thread_id: - request_data["thread_id"] = self.thread_id - - # Stream the response - async with httpx.AsyncClient(timeout=60.0) as client: - async with client.stream( - "POST", - self.server_url, - json=request_data, - headers={"Accept": "text/event-stream"}, - ) as response: - response.raise_for_status() - - async for line in response.aiter_lines(): - # Parse Server-Sent Events format - if line.startswith("data: "): - data = line[6:] # Remove "data: " prefix - try: - event = json.loads(data) - yield event - - # Capture thread_id from RUN_STARTED event - if event.get("type") == "RUN_STARTED" and not self.thread_id: - self.thread_id = event.get("threadId") - except json.JSONDecodeError: - continue + +from agent_framework_ag_ui import AGUIChatClient async def main(): - """Main client loop.""" + """Main client loop demonstrating AGUIChatClient usage.""" # Get server URL from environment or use default server_url = os.environ.get("AGUI_SERVER_URL", "http://127.0.0.1:5100/") print(f"Connecting to AG-UI server at: {server_url}\n") - - client = AGUIClient(server_url) - - try: - while True: - # Get user input - message = input("\nUser (:q or quit to exit): ") - if not message.strip(): - print("Request cannot be empty.") - continue - - if message.lower() in (":q", "quit"): - break - - # Send message and display streaming response - print("\n", end="") - async for event in client.send_message(message): - event_type = event.get("type", "") - - if event_type == "RUN_STARTED": - thread_id = event.get("threadId", "") - run_id = event.get("runId", "") - print(f"\033[93m[Run Started - Thread: {thread_id}, Run: 
{run_id}]\033[0m") - - elif event_type == "TEXT_MESSAGE_CONTENT": - # Stream text content in cyan - print(f"\033[96m{event.get('delta', '')}\033[0m", end="", flush=True) - - elif event_type == "RUN_FINISHED": - thread_id = event.get("threadId", "") - run_id = event.get("runId", "") - print(f"\n\033[92m[Run Finished - Thread: {thread_id}, Run: {run_id}]\033[0m") - - elif event_type == "RUN_ERROR": - error_message = event.get("message", "Unknown error") - print(f"\n\033[91m[Run Error - Message: {error_message}]\033[0m") - - print() - - except KeyboardInterrupt: - print("\n\nExiting...") - except Exception as e: - print(f"\n\033[91mAn error occurred: {e}\033[0m") + print("Using AGUIChatClient with automatic thread management and Agent Framework integration.\n") + + # Create client with context manager for automatic cleanup + async with AGUIChatClient(endpoint=server_url) as client: + thread_id: str | None = None + + try: + while True: + # Get user input + message = input("\nUser (:q or quit to exit): ") + if not message.strip(): + print("Request cannot be empty.") + continue + + if message.lower() in (":q", "quit"): + break + + # Send message and stream the response + print("\nAssistant: ", end="", flush=True) + + # Use metadata to maintain conversation continuity + metadata = {"thread_id": thread_id} if thread_id else None + + async for update in client.get_streaming_response(message, metadata=metadata): + # Extract and display thread ID from first update + if not thread_id and update.additional_properties: + thread_id = update.additional_properties.get("thread_id") + if thread_id: + print(f"\n\033[93m[Thread: {thread_id}]\033[0m", end="", flush=True) + print("\nAssistant: ", end="", flush=True) + + # Display text content as it streams + from agent_framework import TextContent + + for content in update.contents: + if isinstance(content, TextContent) and content.text: + print(f"\033[96m{content.text}\033[0m", end="", flush=True) + + # Display finish reason if present 
+ if update.finish_reason: + print(f"\n\033[92m[Finished: {update.finish_reason}]\033[0m", end="", flush=True) + + print() # New line after response + + except KeyboardInterrupt: + print("\n\nExiting...") + except Exception as e: + print(f"\n\033[91mAn error occurred: {e}\033[0m") if __name__ == "__main__": diff --git a/python/packages/ag-ui/getting_started/client_advanced.py b/python/packages/ag-ui/getting_started/client_advanced.py new file mode 100644 index 0000000000..cb45a0b8da --- /dev/null +++ b/python/packages/ag-ui/getting_started/client_advanced.py @@ -0,0 +1,235 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Advanced AG-UI client example with tools and features. + +This example demonstrates advanced AGUIChatClient features including: +- Tool/function calling +- Non-streaming responses +- Multiple conversation turns +- Error handling +""" + +import asyncio +import os + +from agent_framework import ai_function + +from agent_framework_ag_ui import AGUIChatClient + + +@ai_function +def get_weather(location: str) -> str: + """Get the current weather for a location. + + Args: + location: The city or location name + """ + # Simulate weather lookup + weather_data = { + "seattle": "Rainy, 55°F", + "san francisco": "Foggy, 62°F", + "new york": "Sunny, 68°F", + "london": "Cloudy, 52°F", + } + return weather_data.get(location.lower(), f"Weather data not available for {location}") + + +@ai_function +def calculate(a: float, b: float, operation: str) -> str: + """Perform basic arithmetic operations. 
+ + Args: + a: First number + b: Second number + operation: Operation to perform (add, subtract, multiply, divide) + """ + try: + if operation == "add": + result = a + b + elif operation == "subtract": + result = a - b + elif operation == "multiply": + result = a * b + elif operation == "divide": + result = a / b + else: + return f"Unsupported operation: {operation}" + return f"The result is: {result}" + except Exception as e: + return f"Error calculating: {e}" + + +async def streaming_example(client: AGUIChatClient, thread_id: str | None = None): + """Demonstrate streaming responses.""" + print("\n" + "=" * 60) + print("STREAMING EXAMPLE") + print("=" * 60) + + metadata = {"thread_id": thread_id} if thread_id else None + + print("\nUser: Tell me a short joke\n") + print("Assistant: ", end="", flush=True) + + async for update in client.get_streaming_response("Tell me a short joke", metadata=metadata): + if not thread_id and update.additional_properties: + thread_id = update.additional_properties.get("thread_id") + + from agent_framework import TextContent + + for content in update.contents: + if isinstance(content, TextContent) and content.text: + print(content.text, end="", flush=True) + + print("\n") + return thread_id + + +async def non_streaming_example(client: AGUIChatClient, thread_id: str | None = None): + """Demonstrate non-streaming responses.""" + print("\n" + "=" * 60) + print("NON-STREAMING EXAMPLE") + print("=" * 60) + + metadata = {"thread_id": thread_id} if thread_id else None + + print("\nUser: What is 2 + 2?\n") + + response = await client.get_response("What is 2 + 2?", metadata=metadata) + + print(f"Assistant: {response.text}") + + if response.additional_properties: + thread_id = response.additional_properties.get("thread_id") + print(f"\n[Thread: {thread_id}]") + + return thread_id + + +async def tool_example(client: AGUIChatClient, thread_id: str | None = None): + """Demonstrate sending tool definitions to the server. 
+ + IMPORTANT: When using AGUIChatClient directly (without ChatAgent wrapper): + - Tools are sent as DEFINITIONS only + - No automatic client-side execution (no function invocation middleware) + - Server must have matching tool implementations to execute them + + For CLIENT-SIDE tool execution (like .NET AGUIClient sample): + - Use ChatAgent wrapper with tools + - See client_with_agent.py for the hybrid pattern + - ChatAgent middleware intercepts and executes client tools locally + - Server can have its own tools that execute server-side + - Both client and server tools work together in same conversation + + This example sends tool definitions and assumes server-side execution. + """ + print("\n" + "=" * 60) + print("TOOL DEFINITION EXAMPLE") + print("=" * 60) + + metadata = {"thread_id": thread_id} if thread_id else None + + print("\nUser: What's the weather in Seattle?\n") + print("Sending tool definitions to server...") + print("(Server must be configured with matching tools to execute them)\n") + + response = await client.get_response( + "What's the weather in Seattle?", tools=[get_weather, calculate], metadata=metadata + ) + + print(f"Assistant: {response.text}") + + # Show tool calls if any + from agent_framework import FunctionCallContent + + tool_called = False + for message in response.messages: + for content in message.contents: + if isinstance(content, FunctionCallContent): + print(f"\n[Tool Called: {content.name}]") + tool_called = True + + if not tool_called: + print("\n[Note: No tools were called - server may not be configured for tool execution]") + + if response.additional_properties: + thread_id = response.additional_properties.get("thread_id") + + return thread_id + + +async def conversation_example(client: AGUIChatClient): + """Demonstrate multi-turn conversation. + + Note: Conversation continuity depends on the server maintaining thread state. + Some servers may require explicit message history to be sent with each request. 
+ """ + print("\n" + "=" * 60) + print("MULTI-TURN CONVERSATION EXAMPLE") + print("=" * 60) + print("\nNote: This example uses thread_id for context. Server must support thread-based state.\n") + + # First turn + print("User: My name is Alice\n") + response1 = await client.get_response("My name is Alice") + print(f"Assistant: {response1.text}") + thread_id = response1.additional_properties.get("thread_id") + print(f"\n[Thread: {thread_id}]") + + # Second turn - using same thread + print("\nUser: What's my name?\n") + response2 = await client.get_response("What's my name?", metadata={"thread_id": thread_id}) + print(f"Assistant: {response2.text}") + + # Check if context was maintained + if "alice" not in response2.text.lower(): + print("\n[Note: Server may not maintain thread context - consider using ChatAgent for history management]") + + # Third turn + print("\nUser: Can you also tell me what 10 * 5 is?\n") + response3 = await client.get_response( + "Can you also tell me what 10 * 5 is?", metadata={"thread_id": thread_id}, tools=[calculate] + ) + print(f"Assistant: {response3.text}") + + +async def main(): + """Run all examples.""" + # Get server URL from environment or use default + server_url = os.environ.get("AGUI_SERVER_URL", "http://127.0.0.1:5100/") + + print("=" * 60) + print("AG-UI Chat Client Advanced Examples") + print("=" * 60) + print(f"\nServer: {server_url}") + print("\nThese examples demonstrate various AGUIChatClient features:") + print(" 1. Streaming responses") + print(" 2. Non-streaming responses") + print(" 3. Tool/function calling") + print(" 4. 
Multi-turn conversations") + + try: + async with AGUIChatClient(endpoint=server_url) as client: + # Run examples in sequence + thread_id = await streaming_example(client) + thread_id = await non_streaming_example(client, thread_id) + await tool_example(client, thread_id) + + # Separate conversation with new thread + await conversation_example(client) + + print("\n" + "=" * 60) + print("All examples completed successfully!") + print("=" * 60) + + except ConnectionError as e: + print(f"\n\033[91mConnection Error: {e}\033[0m") + print("\nMake sure an AG-UI server is running at the specified endpoint.") + except Exception as e: + print(f"\n\033[91mError: {e}\033[0m") + import traceback + + traceback.print_exc() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/packages/ag-ui/getting_started/client_with_agent.py b/python/packages/ag-ui/getting_started/client_with_agent.py new file mode 100644 index 0000000000..ac69189b53 --- /dev/null +++ b/python/packages/ag-ui/getting_started/client_with_agent.py @@ -0,0 +1,186 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Example showing ChatAgent with AGUIChatClient for hybrid tool execution. + +This demonstrates the HYBRID pattern matching .NET AGUIClient implementation: + +1. AgentThread Pattern (like .NET): + - Create thread with agent.get_new_thread() + - Pass thread to agent.run_stream() on each turn + - Thread automatically maintains conversation history via message_store + +2. Hybrid Tool Execution: + - AGUIChatClient has @use_function_invocation decorator + - Client-side tools (get_weather) can execute locally when server requests them + - Server may also have its own tools that execute server-side + - Both work together: server LLM decides which tool to call, decorator handles client execution + +This matches .NET pattern: thread maintains state, tools execute on appropriate side. 
+""" + +import asyncio +import logging +import os + +from agent_framework import ChatAgent, FunctionCallContent, FunctionResultContent, TextContent, ai_function + +from agent_framework_ag_ui import AGUIChatClient + +# Enable debug logging +logging.basicConfig( + level=logging.DEBUG, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger(__name__) + + +@ai_function(description="Get the current weather for a location.") +def get_weather(location: str) -> str: + """Get the current weather for a location. + + Args: + location: The city or location name + """ + print(f"[CLIENT] get_weather tool called with location: {location}") + weather_data = { + "seattle": "Rainy, 55°F", + "san francisco": "Foggy, 62°F", + "new york": "Sunny, 68°F", + "london": "Cloudy, 52°F", + } + result = weather_data.get(location.lower(), f"Weather data not available for {location}") + print(f"[CLIENT] get_weather returning: {result}") + return result + + +async def main(): + """Demonstrate ChatAgent + AGUIChatClient hybrid tool execution. + + This matches the .NET pattern from Program.cs where: + - AIAgent agent = chatClient.CreateAIAgent(tools: [...]) + - AgentThread thread = agent.GetNewThread() + - RunStreamingAsync(messages, thread) + + Python equivalent: + - agent = ChatAgent(chat_client=AGUIChatClient(...), tools=[...]) + - thread = agent.get_new_thread() # Creates thread with message_store + - agent.run_stream(message, thread=thread) # Thread accumulates history + """ + server_url = os.environ.get("AGUI_SERVER_URL", "http://127.0.0.1:5100/") + + print("=" * 70) + print("ChatAgent + AGUIChatClient: Hybrid Tool Execution") + print("=" * 70) + print(f"\nServer: {server_url}") + print("\nThis example demonstrates:") + print(" 1. AgentThread maintains conversation state (like .NET)") + print(" 2. Client-side tools execute locally via @use_function_invocation") + print(" 3. Server may have additional tools that execute server-side") + print(" 4. 
HYBRID: Client and server tools work together simultaneously\n") + + try: + # Create remote client in async context manager + async with AGUIChatClient(endpoint=server_url) as remote_client: + # Wrap in ChatAgent for conversation history management + agent = ChatAgent( + name="remote_assistant", + instructions="You are a helpful assistant. Remember user information across the conversation.", + chat_client=remote_client, + tools=[get_weather], + ) + + # Create a thread to maintain conversation state (like .NET AgentThread) + thread = agent.get_new_thread() + + print("=" * 70) + print("CONVERSATION WITH HISTORY") + print("=" * 70) + + # Turn 1: Introduce + print("\nUser: My name is Alice and I live in Seattle\n") + async for chunk in agent.run_stream("My name is Alice and I live in Seattle", thread=thread): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + # Turn 2: Ask about name (tests history) + print("User: What's my name?\n") + async for chunk in agent.run_stream("What's my name?", thread=thread): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + # Turn 3: Ask about location (tests history) + print("User: Where do I live?\n") + async for chunk in agent.run_stream("Where do I live?", thread=thread): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + # Turn 4: Test client-side tool (get_weather is client-side) + print("User: What's the weather forecast for today in Seattle?\n") + async for chunk in agent.run_stream("What's the weather forecast for today in Seattle?", thread=thread): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + # Turn 5: Test server-side tool (get_time_zone is server-side only) + print("User: What time zone is Seattle in?\n") + async for chunk in agent.run_stream("What time zone is Seattle in?", thread=thread): + if chunk.text: + print(chunk.text, end="", flush=True) + print("\n") + + # Show thread state + if thread.message_store: + + def 
_preview_for_message(m) -> str: + # Prefer plain text when present + if getattr(m, "text", ""): + t = m.text + return (t[:60] + "...") if len(t) > 60 else t + # Build from contents when no direct text + parts: list[str] = [] + for c in getattr(m, "contents", []) or []: + if isinstance(c, FunctionCallContent): + args = c.arguments + if isinstance(args, dict): + try: + import json as _json + + args_str = _json.dumps(args) + except Exception: + args_str = str(args) + else: + args_str = str(args or "{}") + parts.append(f"tool_call {c.name} {args_str}") + elif isinstance(c, FunctionResultContent): + parts.append(f"tool_result[{c.call_id}]: {str(c.result)[:40]}") + elif isinstance(c, TextContent): + if c.text: + parts.append(c.text) + else: + typename = getattr(c, "type", c.__class__.__name__) + parts.append(f"<{typename}>") + preview = " | ".join(parts) if parts else "" + return (preview[:60] + "...") if len(preview) > 60 else preview + + messages = await thread.message_store.list_messages() + print(f"\n[THREAD STATE] {len(messages)} messages in thread's message_store") + for i, msg in enumerate(messages[-6:], 1): # Show last 6 + role = msg.role.value if hasattr(msg.role, "value") else str(msg.role) + text_preview = _preview_for_message(msg) + print(f" {i}. [{role}]: {text_preview}") + + except ConnectionError as e: + print(f"\n\033[91mConnection Error: {e}\033[0m") + print("\nMake sure an AG-UI server is running at the specified endpoint.") + except Exception as e: + print(f"\n\033[91mError: {e}\033[0m") + import traceback + + traceback.print_exc() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/python/packages/ag-ui/getting_started/server.py b/python/packages/ag-ui/getting_started/server.py index 34e2edbd5f..e4ed669516 100644 --- a/python/packages/ag-ui/getting_started/server.py +++ b/python/packages/ag-ui/getting_started/server.py @@ -1,18 +1,26 @@ # Copyright (c) Microsoft. All rights reserved. 
-"""AG-UI server example.""" +"""AG-UI server example with server-side tools.""" +import logging import os -from agent_framework import ChatAgent +from agent_framework import ChatAgent, ai_function +from agent_framework.ag_ui import add_agent_framework_fastapi_endpoint from agent_framework.azure import AzureOpenAIChatClient from dotenv import load_dotenv from fastapi import FastAPI -from agent_framework_ag_ui import add_agent_framework_fastapi_endpoint - load_dotenv() +# Enable debug logging +logging.basicConfig( + level=logging.DEBUG, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger(__name__) + + # Read required configuration endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") deployment_name = os.environ.get("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME") @@ -22,14 +30,43 @@ if not deployment_name: raise ValueError("AZURE_OPENAI_CHAT_DEPLOYMENT_NAME environment variable is required") -# Create the AI agent + +# Server-side tool (executes on server) +@ai_function(description="Get the time zone for a location.") +def get_time_zone(location: str) -> str: + """Get the time zone for a location. + + Args: + location: The city or location name + """ + print(f"[SERVER] get_time_zone tool called with location: {location}") + timezone_data = { + "seattle": "Pacific Time (UTC-8)", + "san francisco": "Pacific Time (UTC-8)", + "new york": "Eastern Time (UTC-5)", + "london": "Greenwich Mean Time (UTC+0)", + } + result = timezone_data.get(location.lower(), f"Time zone data not available for {location}") + print(f"[SERVER] get_time_zone returning: {result}") + return result + + +# Create the AI agent with ONLY server-side tools +# IMPORTANT: Do NOT include tools that the client provides! 
+# In this example: +# - get_time_zone: SERVER-ONLY tool (only server has this) +# - get_weather: CLIENT-ONLY tool (client provides this, server should NOT include it) +# The client will send get_weather tool metadata so the LLM knows about it, +# and @use_function_invocation on AGUIChatClient will execute it client-side. +# This matches the .NET AG-UI hybrid execution pattern. agent = ChatAgent( name="AGUIAssistant", - instructions="You are a helpful assistant.", + instructions="You are a helpful assistant. Use get_weather for weather and get_time_zone for time zones.", chat_client=AzureOpenAIChatClient( endpoint=endpoint, deployment_name=deployment_name, ), + tools=[get_time_zone], # ONLY server-side tools ) # Create FastAPI app @@ -41,4 +78,4 @@ if __name__ == "__main__": import uvicorn - uvicorn.run(app, host="127.0.0.1", port=5100) + uvicorn.run(app, host="127.0.0.1", port=5100, log_level="debug", access_log=True) diff --git a/python/packages/ag-ui/pyproject.toml b/python/packages/ag-ui/pyproject.toml index 400e5e6e34..9216a17e24 100644 --- a/python/packages/ag-ui/pyproject.toml +++ b/python/packages/ag-ui/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "agent-framework-ag-ui" -version = "1.0.0b251106.post1" +version = "1.0.0b251111" description = "AG-UI protocol integration for Agent Framework" readme = "README.md" license-files = ["LICENSE"] diff --git a/python/packages/ag-ui/tests/test_client.py b/python/packages/ag-ui/tests/test_client.py new file mode 100644 index 0000000000..cfececd771 --- /dev/null +++ b/python/packages/ag-ui/tests/test_client.py @@ -0,0 +1,317 @@ +"""Tests for AGUIChatClient.""" + +import json + +from agent_framework import ChatMessage, ChatOptions, FunctionCallContent, Role, ai_function + +from agent_framework_ag_ui._client import AGUIChatClient, ServerFunctionCallContent + + +class TestAGUIChatClient: + """Test suite for AGUIChatClient.""" + + async def test_client_initialization(self) -> None: + """Test client initialization.""" + 
client = AGUIChatClient(endpoint="http://localhost:8888/") + + assert client._http_service is not None + assert client._http_service.endpoint.startswith("http://localhost:8888") + + async def test_client_context_manager(self) -> None: + """Test client as async context manager.""" + async with AGUIChatClient(endpoint="http://localhost:8888/") as client: + assert client is not None + + async def test_extract_state_from_messages_no_state(self) -> None: + """Test state extraction when no state is present.""" + client = AGUIChatClient(endpoint="http://localhost:8888/") + messages = [ + ChatMessage(role="user", text="Hello"), + ChatMessage(role="assistant", text="Hi there"), + ] + + result_messages, state = client._extract_state_from_messages(messages) + + assert result_messages == messages + assert state is None + + async def test_extract_state_from_messages_with_state(self) -> None: + """Test state extraction from last message.""" + import base64 + + client = AGUIChatClient(endpoint="http://localhost:8888/") + + state_data = {"key": "value", "count": 42} + state_json = json.dumps(state_data) + state_b64 = base64.b64encode(state_json.encode("utf-8")).decode("utf-8") + + from agent_framework import DataContent + + messages = [ + ChatMessage(role="user", text="Hello"), + ChatMessage( + role="user", + contents=[DataContent(uri=f"data:application/json;base64,{state_b64}")], + ), + ] + + result_messages, state = client._extract_state_from_messages(messages) + + assert len(result_messages) == 1 + assert result_messages[0].text == "Hello" + assert state == state_data + + async def test_extract_state_invalid_json(self) -> None: + """Test state extraction with invalid JSON.""" + import base64 + + client = AGUIChatClient(endpoint="http://localhost:8888/") + + invalid_json = "not valid json" + state_b64 = base64.b64encode(invalid_json.encode("utf-8")).decode("utf-8") + + from agent_framework import DataContent + + messages = [ + ChatMessage( + role="user", + 
contents=[DataContent(uri=f"data:application/json;base64,{state_b64}")], + ), + ] + + result_messages, state = client._extract_state_from_messages(messages) + + assert result_messages == messages + assert state is None + + async def test_convert_messages_to_agui_format(self) -> None: + """Test message conversion to AG-UI format.""" + client = AGUIChatClient(endpoint="http://localhost:8888/") + messages = [ + ChatMessage(role=Role.USER, text="What is the weather?"), + ChatMessage(role=Role.ASSISTANT, text="Let me check.", message_id="msg_123"), + ] + + agui_messages = client._convert_messages_to_agui_format(messages) + + assert len(agui_messages) == 2 + assert agui_messages[0]["role"] == "user" + assert agui_messages[0]["content"] == "What is the weather?" + assert agui_messages[1]["role"] == "assistant" + assert agui_messages[1]["content"] == "Let me check." + assert agui_messages[1]["id"] == "msg_123" + + async def test_get_thread_id_from_metadata(self) -> None: + """Test thread ID extraction from metadata.""" + client = AGUIChatClient(endpoint="http://localhost:8888/") + chat_options = ChatOptions(metadata={"thread_id": "existing_thread_123"}) + + thread_id = client._get_thread_id(chat_options) + + assert thread_id == "existing_thread_123" + + async def test_get_thread_id_generation(self) -> None: + """Test automatic thread ID generation.""" + client = AGUIChatClient(endpoint="http://localhost:8888/") + chat_options = ChatOptions() + + thread_id = client._get_thread_id(chat_options) + + assert thread_id.startswith("thread_") + assert len(thread_id) > 7 + + async def test_get_streaming_response(self, monkeypatch) -> None: + """Test streaming response method.""" + mock_events = [ + {"type": "RUN_STARTED", "threadId": "thread_1", "runId": "run_1"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": "Hello"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": " world"}, + {"type": "RUN_FINISHED", "threadId": "thread_1", "runId": 
"run_1"}, + ] + + async def mock_post_run(*args, **kwargs): + for event in mock_events: + yield event + + client = AGUIChatClient(endpoint="http://localhost:8888/") + monkeypatch.setattr(client._http_service, "post_run", mock_post_run) + + messages = [ChatMessage(role="user", text="Test message")] + chat_options = ChatOptions() + + updates = [] + async for update in client._inner_get_streaming_response(messages=messages, chat_options=chat_options): + updates.append(update) + + assert len(updates) == 4 + assert updates[0].additional_properties["thread_id"] == "thread_1" + assert updates[1].contents[0].text == "Hello" + assert updates[2].contents[0].text == " world" + + async def test_get_response_non_streaming(self, monkeypatch) -> None: + """Test non-streaming response method.""" + mock_events = [ + {"type": "RUN_STARTED", "threadId": "thread_1", "runId": "run_1"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": "Complete response"}, + {"type": "RUN_FINISHED", "threadId": "thread_1", "runId": "run_1"}, + ] + + async def mock_post_run(*args, **kwargs): + for event in mock_events: + yield event + + client = AGUIChatClient(endpoint="http://localhost:8888/") + monkeypatch.setattr(client._http_service, "post_run", mock_post_run) + + messages = [ChatMessage(role="user", text="Test message")] + chat_options = ChatOptions() + + response = await client._inner_get_response(messages=messages, chat_options=chat_options) + + assert response is not None + assert len(response.messages) > 0 + assert "Complete response" in response.text + + async def test_tool_handling(self, monkeypatch) -> None: + """Test that client tool metadata is sent to server. + + Client tool metadata (name, description, schema) is sent to server for planning. + When server requests a client function, @use_function_invocation decorator + intercepts and executes it locally. This matches .NET AG-UI implementation. 
+ """ + from agent_framework import ai_function + + @ai_function + def test_tool(param: str) -> str: + """Test tool.""" + return "result" + + mock_events = [ + {"type": "RUN_STARTED", "threadId": "thread_1", "runId": "run_1"}, + {"type": "RUN_FINISHED", "threadId": "thread_1", "runId": "run_1"}, + ] + + async def mock_post_run(*args, **kwargs): + # Client tool metadata should be sent to server + tools = kwargs.get("tools") + assert tools is not None + assert len(tools) == 1 + assert tools[0]["name"] == "test_tool" + assert tools[0]["description"] == "Test tool." + assert "parameters" in tools[0] + for event in mock_events: + yield event + + client = AGUIChatClient(endpoint="http://localhost:8888/") + monkeypatch.setattr(client._http_service, "post_run", mock_post_run) + + messages = [ChatMessage(role="user", text="Test with tools")] + chat_options = ChatOptions(tools=[test_tool]) + + response = await client._inner_get_response(messages=messages, chat_options=chat_options) + + assert response is not None + + async def test_server_tool_calls_unwrapped_after_invocation(self, monkeypatch) -> None: + """Ensure server-side tool calls are exposed as FunctionCallContent after processing.""" + + mock_events = [ + {"type": "RUN_STARTED", "threadId": "thread_1", "runId": "run_1"}, + {"type": "TOOL_CALL_START", "toolCallId": "call_1", "toolName": "get_time_zone"}, + {"type": "TOOL_CALL_ARGS", "toolCallId": "call_1", "delta": '{"location": "Seattle"}'}, + {"type": "RUN_FINISHED", "threadId": "thread_1", "runId": "run_1"}, + ] + + async def mock_post_run(*args, **kwargs): + for event in mock_events: + yield event + + client = AGUIChatClient(endpoint="http://localhost:8888/") + monkeypatch.setattr(client._http_service, "post_run", mock_post_run) + + messages = [ChatMessage(role="user", text="Test server tool execution")] + chat_options = ChatOptions() + + updates = [] + async for update in client.get_streaming_response(messages, chat_options=chat_options): + 
updates.append(update) + + function_calls = [ + content for update in updates for content in update.contents if isinstance(content, FunctionCallContent) + ] + assert function_calls + assert function_calls[0].name == "get_time_zone" + assert not any( + isinstance(content, ServerFunctionCallContent) for update in updates for content in update.contents + ) + + async def test_server_tool_calls_not_executed_locally(self, monkeypatch) -> None: + """Server tools should not trigger local function invocation even when client tools exist.""" + + @ai_function + def client_tool() -> str: + """Client tool stub.""" + return "client" + + mock_events = [ + {"type": "RUN_STARTED", "threadId": "thread_1", "runId": "run_1"}, + {"type": "TOOL_CALL_START", "toolCallId": "call_1", "toolName": "get_time_zone"}, + {"type": "TOOL_CALL_ARGS", "toolCallId": "call_1", "delta": '{"location": "Seattle"}'}, + {"type": "RUN_FINISHED", "threadId": "thread_1", "runId": "run_1"}, + ] + + async def mock_post_run(*args, **kwargs): + for event in mock_events: + yield event + + async def fake_auto_invoke(*args, **kwargs): + function_call = kwargs.get("function_call_content") or args[0] + raise AssertionError(f"Unexpected local execution of server tool: {getattr(function_call, 'name', '?')}") + + monkeypatch.setattr("agent_framework._tools._auto_invoke_function", fake_auto_invoke) + + client = AGUIChatClient(endpoint="http://localhost:8888/") + monkeypatch.setattr(client._http_service, "post_run", mock_post_run) + + messages = [ChatMessage(role="user", text="Test server tool execution")] + chat_options = ChatOptions(tool_choice="auto", tools=[client_tool]) + + async for _ in client.get_streaming_response(messages, chat_options=chat_options): + pass + + async def test_state_transmission(self, monkeypatch) -> None: + """Test state is properly transmitted to server.""" + import base64 + + state_data = {"user_id": "123", "session": "abc"} + state_json = json.dumps(state_data) + state_b64 = 
base64.b64encode(state_json.encode("utf-8")).decode("utf-8") + + from agent_framework import DataContent + + messages = [ + ChatMessage(role="user", text="Hello"), + ChatMessage( + role="user", + contents=[DataContent(uri=f"data:application/json;base64,{state_b64}")], + ), + ] + + mock_events = [ + {"type": "RUN_STARTED", "threadId": "thread_1", "runId": "run_1"}, + {"type": "RUN_FINISHED", "threadId": "thread_1", "runId": "run_1"}, + ] + + async def mock_post_run(*args, **kwargs): + assert kwargs.get("state") == state_data + for event in mock_events: + yield event + + client = AGUIChatClient(endpoint="http://localhost:8888/") + monkeypatch.setattr(client._http_service, "post_run", mock_post_run) + + chat_options = ChatOptions() + + response = await client._inner_get_response(messages=messages, chat_options=chat_options) + + assert response is not None diff --git a/python/packages/ag-ui/tests/test_event_converters.py b/python/packages/ag-ui/tests/test_event_converters.py new file mode 100644 index 0000000000..d05b1fe720 --- /dev/null +++ b/python/packages/ag-ui/tests/test_event_converters.py @@ -0,0 +1,287 @@ +"""Tests for AG-UI event converter.""" + +from agent_framework import FinishReason, Role + +from agent_framework_ag_ui._event_converters import AGUIEventConverter + + +class TestAGUIEventConverter: + """Test suite for AGUIEventConverter.""" + + def test_run_started_event(self) -> None: + """Test conversion of RUN_STARTED event.""" + converter = AGUIEventConverter() + event = { + "type": "RUN_STARTED", + "threadId": "thread_123", + "runId": "run_456", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.role == Role.ASSISTANT + assert update.additional_properties["thread_id"] == "thread_123" + assert update.additional_properties["run_id"] == "run_456" + assert converter.thread_id == "thread_123" + assert converter.run_id == "run_456" + + def test_text_message_start_event(self) -> None: + """Test conversion of 
TEXT_MESSAGE_START event.""" + converter = AGUIEventConverter() + event = { + "type": "TEXT_MESSAGE_START", + "messageId": "msg_789", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.role == Role.ASSISTANT + assert update.message_id == "msg_789" + assert converter.current_message_id == "msg_789" + + def test_text_message_content_event(self) -> None: + """Test conversion of TEXT_MESSAGE_CONTENT event.""" + converter = AGUIEventConverter() + event = { + "type": "TEXT_MESSAGE_CONTENT", + "messageId": "msg_1", + "delta": "Hello", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.role == Role.ASSISTANT + assert update.message_id == "msg_1" + assert len(update.contents) == 1 + assert update.contents[0].text == "Hello" + + def test_text_message_streaming(self) -> None: + """Test streaming text across multiple TEXT_MESSAGE_CONTENT events.""" + converter = AGUIEventConverter() + events = [ + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": "Hello"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": " world"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": "!"}, + ] + + updates = [converter.convert_event(event) for event in events] + + assert all(update is not None for update in updates) + assert all(update.message_id == "msg_1" for update in updates) + assert updates[0].contents[0].text == "Hello" + assert updates[1].contents[0].text == " world" + assert updates[2].contents[0].text == "!" 
+ + def test_text_message_end_event(self) -> None: + """Test conversion of TEXT_MESSAGE_END event.""" + converter = AGUIEventConverter() + event = { + "type": "TEXT_MESSAGE_END", + "messageId": "msg_1", + } + + update = converter.convert_event(event) + + assert update is None + + def test_tool_call_start_event(self) -> None: + """Test conversion of TOOL_CALL_START event.""" + converter = AGUIEventConverter() + event = { + "type": "TOOL_CALL_START", + "toolCallId": "call_123", + "toolName": "get_weather", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.role == Role.ASSISTANT + assert len(update.contents) == 1 + assert update.contents[0].call_id == "call_123" + assert update.contents[0].name == "get_weather" + assert update.contents[0].arguments == "" + assert converter.current_tool_call_id == "call_123" + assert converter.current_tool_name == "get_weather" + + def test_tool_call_start_with_tool_call_name(self) -> None: + """Ensure TOOL_CALL_START with toolCallName still sets the tool name.""" + converter = AGUIEventConverter() + event = { + "type": "TOOL_CALL_START", + "toolCallId": "call_abc", + "toolCallName": "get_weather", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.contents[0].name == "get_weather" + assert converter.current_tool_name == "get_weather" + + def test_tool_call_start_with_tool_call_name_snake_case(self) -> None: + """Support tool_call_name snake_case field for backwards compatibility.""" + converter = AGUIEventConverter() + event = { + "type": "TOOL_CALL_START", + "toolCallId": "call_snake", + "tool_call_name": "get_weather", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.contents[0].name == "get_weather" + assert converter.current_tool_name == "get_weather" + + def test_tool_call_args_streaming(self) -> None: + """Test streaming tool arguments across multiple TOOL_CALL_ARGS events.""" + converter = 
AGUIEventConverter() + converter.current_tool_call_id = "call_123" + converter.current_tool_name = "search" + + events = [ + {"type": "TOOL_CALL_ARGS", "delta": '{"query": "'}, + {"type": "TOOL_CALL_ARGS", "delta": 'latest news"}'}, + ] + + updates = [converter.convert_event(event) for event in events] + + assert all(update is not None for update in updates) + assert updates[0].contents[0].arguments == '{"query": "' + assert updates[1].contents[0].arguments == 'latest news"}' + assert converter.accumulated_tool_args == '{"query": "latest news"}' + + def test_tool_call_end_event(self) -> None: + """Test conversion of TOOL_CALL_END event.""" + converter = AGUIEventConverter() + converter.accumulated_tool_args = '{"location": "Seattle"}' + + event = { + "type": "TOOL_CALL_END", + "toolCallId": "call_123", + } + + update = converter.convert_event(event) + + assert update is None + assert converter.accumulated_tool_args == "" + + def test_tool_call_result_event(self) -> None: + """Test conversion of TOOL_CALL_RESULT event.""" + converter = AGUIEventConverter() + event = { + "type": "TOOL_CALL_RESULT", + "toolCallId": "call_123", + "result": {"temperature": 22, "condition": "sunny"}, + } + + update = converter.convert_event(event) + + assert update is not None + assert update.role == Role.TOOL + assert len(update.contents) == 1 + assert update.contents[0].call_id == "call_123" + assert update.contents[0].result == {"temperature": 22, "condition": "sunny"} + + def test_run_finished_event(self) -> None: + """Test conversion of RUN_FINISHED event.""" + converter = AGUIEventConverter() + converter.thread_id = "thread_123" + converter.run_id = "run_456" + + event = { + "type": "RUN_FINISHED", + "threadId": "thread_123", + "runId": "run_456", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.role == Role.ASSISTANT + assert update.finish_reason == FinishReason.STOP + assert update.additional_properties["thread_id"] == "thread_123" + 
assert update.additional_properties["run_id"] == "run_456" + + def test_run_error_event(self) -> None: + """Test conversion of RUN_ERROR event.""" + converter = AGUIEventConverter() + converter.thread_id = "thread_123" + converter.run_id = "run_456" + + event = { + "type": "RUN_ERROR", + "message": "Connection timeout", + } + + update = converter.convert_event(event) + + assert update is not None + assert update.role == Role.ASSISTANT + assert update.finish_reason == FinishReason.CONTENT_FILTER + assert len(update.contents) == 1 + assert update.contents[0].message == "Connection timeout" + assert update.contents[0].error_code == "RUN_ERROR" + + def test_unknown_event_type(self) -> None: + """Test handling of unknown event types.""" + converter = AGUIEventConverter() + event = { + "type": "UNKNOWN_EVENT", + "data": "some data", + } + + update = converter.convert_event(event) + + assert update is None + + def test_full_conversation_flow(self) -> None: + """Test complete conversation flow with multiple event types.""" + converter = AGUIEventConverter() + + events = [ + {"type": "RUN_STARTED", "threadId": "thread_1", "runId": "run_1"}, + {"type": "TEXT_MESSAGE_START", "messageId": "msg_1"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": "I'll check"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": " the weather."}, + {"type": "TEXT_MESSAGE_END", "messageId": "msg_1"}, + {"type": "TOOL_CALL_START", "toolCallId": "call_1", "toolName": "get_weather"}, + {"type": "TOOL_CALL_ARGS", "delta": '{"location": "Seattle"}'}, + {"type": "TOOL_CALL_END", "toolCallId": "call_1"}, + {"type": "TOOL_CALL_RESULT", "toolCallId": "call_1", "result": "Sunny, 72°F"}, + {"type": "TEXT_MESSAGE_START", "messageId": "msg_2"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_2", "delta": "It's sunny!"}, + {"type": "TEXT_MESSAGE_END", "messageId": "msg_2"}, + {"type": "RUN_FINISHED", "threadId": "thread_1", "runId": "run_1"}, + ] + + updates = 
[converter.convert_event(event) for event in events] + non_none_updates = [u for u in updates if u is not None] + + assert len(non_none_updates) == 10 + assert converter.thread_id == "thread_1" + assert converter.run_id == "run_1" + + def test_multiple_tool_calls(self) -> None: + """Test handling multiple tool calls in sequence.""" + converter = AGUIEventConverter() + + events = [ + {"type": "TOOL_CALL_START", "toolCallId": "call_1", "toolName": "search"}, + {"type": "TOOL_CALL_ARGS", "delta": '{"query": "weather"}'}, + {"type": "TOOL_CALL_END", "toolCallId": "call_1"}, + {"type": "TOOL_CALL_START", "toolCallId": "call_2", "toolName": "fetch"}, + {"type": "TOOL_CALL_ARGS", "delta": '{"url": "http://api.weather.com"}'}, + {"type": "TOOL_CALL_END", "toolCallId": "call_2"}, + ] + + updates = [converter.convert_event(event) for event in events] + non_none_updates = [u for u in updates if u is not None] + + assert len(non_none_updates) == 4 + assert non_none_updates[0].contents[0].name == "search" + assert non_none_updates[2].contents[0].name == "fetch" diff --git a/python/packages/ag-ui/tests/test_http_service.py b/python/packages/ag-ui/tests/test_http_service.py new file mode 100644 index 0000000000..641ae4f88b --- /dev/null +++ b/python/packages/ag-ui/tests/test_http_service.py @@ -0,0 +1,238 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +"""Tests for AGUIHttpService.""" + +import json +from unittest.mock import AsyncMock, Mock + +import httpx +import pytest + +from agent_framework_ag_ui._http_service import AGUIHttpService + + +@pytest.fixture +def mock_http_client(): + """Create a mock httpx.AsyncClient.""" + client = AsyncMock(spec=httpx.AsyncClient) + return client + + +@pytest.fixture +def sample_events(): + """Sample AG-UI events for testing.""" + return [ + {"type": "RUN_STARTED", "threadId": "thread_123", "runId": "run_456"}, + {"type": "TEXT_MESSAGE_START", "messageId": "msg_1", "role": "assistant"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": "Hello"}, + {"type": "TEXT_MESSAGE_CONTENT", "messageId": "msg_1", "delta": " world"}, + {"type": "TEXT_MESSAGE_END", "messageId": "msg_1"}, + {"type": "RUN_FINISHED", "threadId": "thread_123", "runId": "run_456"}, + ] + + +def create_sse_response(events: list[dict]) -> str: + """Create SSE formatted response from events.""" + lines = [] + for event in events: + lines.append(f"data: {json.dumps(event)}\n") + return "\n".join(lines) + + +async def test_http_service_initialization(): + """Test AGUIHttpService initialization.""" + # Test with default client + service = AGUIHttpService("http://localhost:8888/") + assert service.endpoint == "http://localhost:8888" + assert service._owns_client is True + assert isinstance(service.http_client, httpx.AsyncClient) + await service.close() + + # Test with custom client + custom_client = httpx.AsyncClient() + service = AGUIHttpService("http://localhost:8888/", http_client=custom_client) + assert service._owns_client is False + assert service.http_client is custom_client + # Shouldn't close the custom client + await service.close() + await custom_client.aclose() + + +async def test_http_service_strips_trailing_slash(): + """Test that endpoint trailing slash is stripped.""" + service = AGUIHttpService("http://localhost:8888/") + assert service.endpoint == "http://localhost:8888" + await 
service.close() + + +async def test_post_run_successful_streaming(mock_http_client, sample_events): + """Test successful streaming of events.""" + + # Create async generator for lines + async def mock_aiter_lines(): + sse_data = create_sse_response(sample_events) + for line in sse_data.split("\n"): + if line: + yield line + + # Create mock response + mock_response = AsyncMock() + mock_response.status_code = 200 + # aiter_lines is called as a method, so it should return a new generator each time + mock_response.aiter_lines = mock_aiter_lines + + # Setup mock streaming context manager + mock_stream_context = AsyncMock() + mock_stream_context.__aenter__.return_value = mock_response + mock_stream_context.__aexit__.return_value = None + mock_http_client.stream.return_value = mock_stream_context + + service = AGUIHttpService("http://localhost:8888/", http_client=mock_http_client) + + events = [] + async for event in service.post_run( + thread_id="thread_123", run_id="run_456", messages=[{"role": "user", "content": "Hello"}] + ): + events.append(event) + + assert len(events) == len(sample_events) + assert events[0]["type"] == "RUN_STARTED" + assert events[-1]["type"] == "RUN_FINISHED" + + # Verify request was made correctly + mock_http_client.stream.assert_called_once() + call_args = mock_http_client.stream.call_args + assert call_args.args[0] == "POST" + assert call_args.args[1] == "http://localhost:8888" + assert call_args.kwargs["headers"] == {"Accept": "text/event-stream"} + + +async def test_post_run_with_state_and_tools(mock_http_client): + """Test posting run with state and tools.""" + + async def mock_aiter_lines(): + return + yield # Make it an async generator + + mock_response = AsyncMock() + mock_response.status_code = 200 + mock_response.aiter_lines = mock_aiter_lines + + mock_stream_context = AsyncMock() + mock_stream_context.__aenter__.return_value = mock_response + mock_stream_context.__aexit__.return_value = None + mock_http_client.stream.return_value = 
mock_stream_context + + service = AGUIHttpService("http://localhost:8888/", http_client=mock_http_client) + + state = {"user_context": {"name": "Alice"}} + tools = [{"type": "function", "function": {"name": "test_tool"}}] + + async for _ in service.post_run(thread_id="thread_123", run_id="run_456", messages=[], state=state, tools=tools): + pass + + # Verify state and tools were included in request + call_args = mock_http_client.stream.call_args + request_data = call_args.kwargs["json"] + assert request_data["state"] == state + assert request_data["tools"] == tools + + +async def test_post_run_http_error(mock_http_client): + """Test handling of HTTP errors.""" + mock_response = Mock() + mock_response.status_code = 500 + mock_response.text = "Internal Server Error" + + def raise_http_error(): + raise httpx.HTTPStatusError("Server error", request=Mock(), response=mock_response) + + mock_response_async = AsyncMock() + mock_response_async.raise_for_status = raise_http_error + + mock_stream_context = AsyncMock() + mock_stream_context.__aenter__.return_value = mock_response_async + mock_stream_context.__aexit__.return_value = None + mock_http_client.stream.return_value = mock_stream_context + + service = AGUIHttpService("http://localhost:8888/", http_client=mock_http_client) + + with pytest.raises(httpx.HTTPStatusError): + async for _ in service.post_run(thread_id="thread_123", run_id="run_456", messages=[]): + pass + + +async def test_post_run_invalid_json(mock_http_client): + """Test handling of invalid JSON in SSE stream.""" + invalid_sse = "data: {invalid json}\n\ndata: " + json.dumps({"type": "RUN_FINISHED"}) + "\n" + + async def mock_aiter_lines(): + for line in invalid_sse.split("\n"): + if line: + yield line + + mock_response = AsyncMock() + mock_response.status_code = 200 + mock_response.aiter_lines = mock_aiter_lines + + mock_stream_context = AsyncMock() + mock_stream_context.__aenter__.return_value = mock_response + mock_stream_context.__aexit__.return_value = 
None + mock_http_client.stream.return_value = mock_stream_context + + service = AGUIHttpService("http://localhost:8888/", http_client=mock_http_client) + + events = [] + async for event in service.post_run(thread_id="thread_123", run_id="run_456", messages=[]): + events.append(event) + + # Should skip invalid JSON and continue with valid events + assert len(events) == 1 + assert events[0]["type"] == "RUN_FINISHED" + + +async def test_context_manager(): + """Test context manager functionality.""" + async with AGUIHttpService("http://localhost:8888/") as service: + assert service.http_client is not None + assert service._owns_client is True + + # Client should be closed after exiting context + + +async def test_context_manager_with_external_client(): + """Test context manager doesn't close external client.""" + external_client = httpx.AsyncClient() + + async with AGUIHttpService("http://localhost:8888/", http_client=external_client) as service: + assert service.http_client is external_client + assert service._owns_client is False + + # External client should still be open + # (caller's responsibility to close) + await external_client.aclose() + + +async def test_post_run_empty_response(mock_http_client): + """Test handling of empty response stream.""" + + async def mock_aiter_lines(): + return + yield # Make it an async generator + + mock_response = AsyncMock() + mock_response.status_code = 200 + mock_response.aiter_lines = mock_aiter_lines + + mock_stream_context = AsyncMock() + mock_stream_context.__aenter__.return_value = mock_response + mock_stream_context.__aexit__.return_value = None + mock_http_client.stream.return_value = mock_stream_context + + service = AGUIHttpService("http://localhost:8888/", http_client=mock_http_client) + + events = [] + async for event in service.post_run(thread_id="thread_123", run_id="run_456", messages=[]): + events.append(event) + + assert len(events) == 0 diff --git a/python/packages/ag-ui/tests/test_message_adapters.py 
b/python/packages/ag-ui/tests/test_message_adapters.py index 1a5bb0ccd7..a21375b87b 100644 --- a/python/packages/ag-ui/tests/test_message_adapters.py +++ b/python/packages/ag-ui/tests/test_message_adapters.py @@ -63,10 +63,9 @@ def test_agui_tool_result_to_agent_framework(): assert isinstance(message.contents[0], TextContent) assert message.contents[0].text == '{"accepted": true, "steps": []}' - assert hasattr(message, "metadata") - assert message.metadata is not None - assert message.metadata.get("is_tool_result") is True - assert message.metadata.get("tool_call_id") == "call_123" + assert message.additional_properties is not None + assert message.additional_properties.get("is_tool_result") is True + assert message.additional_properties.get("tool_call_id") == "call_123" def test_agui_multiple_messages_to_agent_framework(): @@ -159,6 +158,36 @@ def test_agui_message_without_id(): assert messages[0].message_id is None +def test_agui_with_tool_calls_to_agent_framework(): + """Assistant message with tool_calls is converted to FunctionCallContent.""" + agui_msg = { + "role": "assistant", + "content": "Calling tool", + "tool_calls": [ + { + "id": "call-123", + "type": "function", + "function": {"name": "get_weather", "arguments": {"location": "Seattle"}}, + } + ], + "id": "msg-789", + } + + messages = agui_messages_to_agent_framework([agui_msg]) + + assert len(messages) == 1 + msg = messages[0] + assert msg.role == Role.ASSISTANT + assert msg.message_id == "msg-789" + # First content is text, second is the function call + assert isinstance(msg.contents[0], TextContent) + assert msg.contents[0].text == "Calling tool" + assert isinstance(msg.contents[1], FunctionCallContent) + assert msg.contents[1].call_id == "call-123" + assert msg.contents[1].name == "get_weather" + assert msg.contents[1].arguments == {"location": "Seattle"} + + def test_agent_framework_to_agui_with_tool_calls(): """Test converting Agent Framework message with tool calls to AG-UI.""" msg = ChatMessage( 
@@ -198,13 +227,15 @@ def test_agent_framework_to_agui_multiple_text_contents(): def test_agent_framework_to_agui_no_message_id(): - """Test message without message_id.""" + """Test message without message_id - should auto-generate ID.""" msg = ChatMessage(role=Role.USER, contents=[TextContent(text="Hello")]) messages = agent_framework_messages_to_agui([msg]) assert len(messages) == 1 - assert "id" not in messages[0] + assert "id" in messages[0] # ID should be auto-generated + assert messages[0]["id"] # ID should not be empty + assert len(messages[0]["id"]) > 0 # ID should be a valid string def test_agent_framework_to_agui_system_role(): diff --git a/python/packages/ag-ui/tests/test_orchestrators.py b/python/packages/ag-ui/tests/test_orchestrators.py new file mode 100644 index 0000000000..a400e78458 --- /dev/null +++ b/python/packages/ag-ui/tests/test_orchestrators.py @@ -0,0 +1,82 @@ +"""Tests for AG-UI orchestrators.""" + +from collections.abc import AsyncGenerator +from types import SimpleNamespace +from typing import Any + +from agent_framework import AgentRunResponseUpdate, TextContent, ai_function +from agent_framework._tools import FunctionInvocationConfiguration + +from agent_framework_ag_ui._agent import AgentConfig +from agent_framework_ag_ui._orchestrators import DefaultOrchestrator, ExecutionContext + + +@ai_function +def server_tool() -> str: + """Server-executable tool.""" + return "server" + + +class DummyAgent: + """Minimal agent stub to capture run_stream parameters.""" + + def __init__(self) -> None: + self.chat_options = SimpleNamespace(tools=[server_tool], response_format=None) + self.tools = [server_tool] + self.chat_client = SimpleNamespace( + function_invocation_configuration=FunctionInvocationConfiguration(), + ) + self.seen_tools: list[Any] | None = None + + async def run_stream( + self, + messages: list[Any], + *, + thread: Any, + tools: list[Any] | None = None, + ) -> AsyncGenerator[AgentRunResponseUpdate, None]: + self.seen_tools = tools 
+ yield AgentRunResponseUpdate(contents=[TextContent(text="ok")], role="assistant") + + +async def test_default_orchestrator_merges_client_tools() -> None: + """Client tool declarations are merged with server tools before running agent.""" + + agent = DummyAgent() + orchestrator = DefaultOrchestrator() + + input_data = { + "messages": [ + { + "role": "user", + "content": [{"type": "input_text", "text": "Hello"}], + } + ], + "tools": [ + { + "name": "get_weather", + "description": "Client weather lookup.", + "parameters": { + "type": "object", + "properties": {"location": {"type": "string"}}, + "required": ["location"], + }, + } + ], + } + + context = ExecutionContext( + input_data=input_data, + agent=agent, + config=AgentConfig(), + ) + + events = [] + async for event in orchestrator.run(context): + events.append(event) + + assert agent.seen_tools is not None + tool_names = [getattr(tool, "name", "?") for tool in agent.seen_tools] + assert "server_tool" in tool_names + assert "get_weather" in tool_names + assert agent.chat_client.function_invocation_configuration.additional_tools diff --git a/python/packages/ag-ui/tests/test_utils.py b/python/packages/ag-ui/tests/test_utils.py index 9bc477310c..e4324ab187 100644 --- a/python/packages/ag-ui/tests/test_utils.py +++ b/python/packages/ag-ui/tests/test_utils.py @@ -197,3 +197,109 @@ def test_make_json_safe_fallback(): result = make_json_safe(obj) # Objects with __dict__ return their __dict__ dict assert isinstance(result, dict) + + +def test_convert_tools_to_agui_format_with_ai_function(): + """Test converting AIFunction to AG-UI format.""" + from agent_framework import ai_function + + from agent_framework_ag_ui._utils import convert_tools_to_agui_format + + @ai_function + def test_func(param: str, count: int = 5) -> str: + """Test function.""" + return f"{param} {count}" + + result = convert_tools_to_agui_format([test_func]) + + assert result is not None + assert len(result) == 1 + assert result[0]["name"] == 
"test_func" + assert result[0]["description"] == "Test function." + assert "parameters" in result[0] + assert "properties" in result[0]["parameters"] + + +def test_convert_tools_to_agui_format_with_callable(): + """Test converting plain callable to AG-UI format.""" + from agent_framework_ag_ui._utils import convert_tools_to_agui_format + + def plain_func(x: int) -> int: + """A plain function.""" + return x * 2 + + result = convert_tools_to_agui_format([plain_func]) + + assert result is not None + assert len(result) == 1 + assert result[0]["name"] == "plain_func" + assert result[0]["description"] == "A plain function." + assert "parameters" in result[0] + + +def test_convert_tools_to_agui_format_with_dict(): + """Test converting dict tool to AG-UI format.""" + from agent_framework_ag_ui._utils import convert_tools_to_agui_format + + tool_dict = { + "name": "custom_tool", + "description": "Custom tool", + "parameters": {"type": "object"}, + } + + result = convert_tools_to_agui_format([tool_dict]) + + assert result is not None + assert len(result) == 1 + assert result[0] == tool_dict + + +def test_convert_tools_to_agui_format_with_none(): + """Test converting None tools.""" + from agent_framework_ag_ui._utils import convert_tools_to_agui_format + + result = convert_tools_to_agui_format(None) + + assert result is None + + +def test_convert_tools_to_agui_format_with_single_tool(): + """Test converting single tool (not in list).""" + from agent_framework import ai_function + + from agent_framework_ag_ui._utils import convert_tools_to_agui_format + + @ai_function + def single_tool(arg: str) -> str: + """Single tool.""" + return arg + + result = convert_tools_to_agui_format(single_tool) + + assert result is not None + assert len(result) == 1 + assert result[0]["name"] == "single_tool" + + +def test_convert_tools_to_agui_format_with_multiple_tools(): + """Test converting multiple tools.""" + from agent_framework import ai_function + + from agent_framework_ag_ui._utils 
import convert_tools_to_agui_format + + @ai_function + def tool1(x: int) -> int: + """Tool 1.""" + return x + + @ai_function + def tool2(y: str) -> str: + """Tool 2.""" + return y + + result = convert_tools_to_agui_format([tool1, tool2]) + + assert result is not None + assert len(result) == 2 + assert result[0]["name"] == "tool1" + assert result[1]["name"] == "tool2" diff --git a/python/packages/anthropic/pyproject.toml b/python/packages/anthropic/pyproject.toml index 1b5a7598b6..cacd760e76 100644 --- a/python/packages/anthropic/pyproject.toml +++ b/python/packages/anthropic/pyproject.toml @@ -4,7 +4,7 @@ description = "Anthropic integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251105" +version = "1.0.0b251111" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/azure-ai/pyproject.toml b/python/packages/azure-ai/pyproject.toml index 156c0e06c1..6df8e99063 100644 --- a/python/packages/azure-ai/pyproject.toml +++ b/python/packages/azure-ai/pyproject.toml @@ -4,7 +4,7 @@ description = "Azure AI Foundry integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251105" +version = "1.0.0b251111" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/chatkit/pyproject.toml b/python/packages/chatkit/pyproject.toml index c7af5f3a4c..8c0a5047e4 100644 --- a/python/packages/chatkit/pyproject.toml +++ b/python/packages/chatkit/pyproject.toml @@ -4,7 +4,7 @@ description = "OpenAI ChatKit integration for Microsoft Agent Framework." 
authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251105" +version = "1.0.0b251111" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/copilotstudio/pyproject.toml b/python/packages/copilotstudio/pyproject.toml index 7cc8708c95..9872355b4e 100644 --- a/python/packages/copilotstudio/pyproject.toml +++ b/python/packages/copilotstudio/pyproject.toml @@ -4,7 +4,7 @@ description = "Copilot Studio integration for Microsoft Agent Framework." authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251105" +version = "1.0.0b251111" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" diff --git a/python/packages/core/agent_framework/_agents.py b/python/packages/core/agent_framework/_agents.py index 0125adb188..e3ea1bdea6 100644 --- a/python/packages/core/agent_framework/_agents.py +++ b/python/packages/core/agent_framework/_agents.py @@ -587,9 +587,11 @@ def __init__( name: str | None = None, description: str | None = None, chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] | None = None, - conversation_id: str | None = None, context_providers: ContextProvider | list[ContextProvider] | AggregateContextProvider | None = None, middleware: Middleware | list[Middleware] | None = None, + # chat option params + allow_multiple_tool_calls: bool | None = None, + conversation_id: str | None = None, frequency_penalty: float | None = None, logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, @@ -630,15 +632,17 @@ def __init__( description: A brief description of the agent's purpose. 
chat_message_store_factory: Factory function to create an instance of ChatMessageStoreProtocol. If not provided, the default in-memory store will be used. - conversation_id: The conversation ID for service-managed threads. - Cannot be used together with chat_message_store_factory. context_providers: The collection of multiple context providers to include during agent invocation. middleware: List of middleware to intercept agent and function invocations. + allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. + conversation_id: The conversation ID for service-managed threads. + Cannot be used together with chat_message_store_factory. frequency_penalty: The frequency penalty to use. logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. metadata: Additional metadata to include in the request. model_id: The model_id to use for the agent. + This overrides the model_id set in the chat client if it contains one. presence_penalty: The presence penalty to use. response_format: The format of the response. seed: The random seed to use. 
@@ -687,7 +691,8 @@ def __init__( self._local_mcp_tools = [tool for tool in normalized_tools if isinstance(tool, MCPTool)] agent_tools = [tool for tool in normalized_tools if not isinstance(tool, MCPTool)] self.chat_options = ChatOptions( - model_id=model_id, + model_id=model_id or (str(chat_client.model_id) if hasattr(chat_client, "model_id") else None), + allow_multiple_tool_calls=allow_multiple_tool_calls, conversation_id=conversation_id, frequency_penalty=frequency_penalty, instructions=instructions, @@ -758,6 +763,7 @@ async def run( messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, *, thread: AgentThread | None = None, + allow_multiple_tool_calls: bool | None = None, frequency_penalty: float | None = None, logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, @@ -793,6 +799,7 @@ async def run( Keyword Args: thread: The thread to use for the agent. + allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. frequency_penalty: The frequency penalty to use. logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. @@ -844,6 +851,7 @@ async def run( co = run_chat_options & ChatOptions( model_id=model_id, conversation_id=thread.service_thread_id, + allow_multiple_tool_calls=allow_multiple_tool_calls, frequency_penalty=frequency_penalty, logit_bias=logit_bias, max_tokens=max_tokens, @@ -887,6 +895,7 @@ async def run_stream( messages: str | ChatMessage | list[str] | list[ChatMessage] | None = None, *, thread: AgentThread | None = None, + allow_multiple_tool_calls: bool | None = None, frequency_penalty: float | None = None, logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, @@ -922,6 +931,7 @@ async def run_stream( Keyword Args: thread: The thread to use for the agent. + allow_multiple_tool_calls: Whether to allow multiple tool calls in a single response. frequency_penalty: The frequency penalty to use. 
logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. @@ -971,6 +981,7 @@ async def run_stream( co = run_chat_options & ChatOptions( conversation_id=thread.service_thread_id, + allow_multiple_tool_calls=allow_multiple_tool_calls, frequency_penalty=frequency_penalty, logit_bias=logit_bias, max_tokens=max_tokens, diff --git a/python/packages/core/agent_framework/_clients.py b/python/packages/core/agent_framework/_clients.py index 6eba0a1b56..630e7f8709 100644 --- a/python/packages/core/agent_framework/_clients.py +++ b/python/packages/core/agent_framework/_clients.py @@ -224,7 +224,7 @@ def _merge_chat_options( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: list[ToolProtocol | dict[str, Any] | Callable[..., Any]] | None = None, top_p: float | None = None, user: str | None = None, @@ -496,7 +496,7 @@ async def get_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -591,7 +591,7 @@ async def get_streaming_response( stop: str | Sequence[str] | None = None, store: bool | None = None, temperature: float | None = None, - tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = "auto", + tool_choice: ToolMode | Literal["auto", "required", "none"] | dict[str, Any] | None = None, tools: ToolProtocol | Callable[..., Any] | MutableMapping[str, Any] @@ -714,6 +714,8 @@ def create_agent( chat_message_store_factory: Callable[[], ChatMessageStoreProtocol] 
| None = None, context_providers: ContextProvider | list[ContextProvider] | AggregateContextProvider | None = None, middleware: Middleware | list[Middleware] | None = None, + allow_multiple_tool_calls: bool | None = None, + conversation_id: str | None = None, frequency_penalty: float | None = None, logit_bias: dict[str | int, float] | None = None, max_tokens: int | None = None, @@ -751,6 +753,8 @@ def create_agent( If not provided, the default in-memory store will be used. context_providers: Context providers to include during agent invocation. middleware: List of middleware to intercept agent and function invocations. + allow_multiple_tool_calls: Whether to allow multiple tool calls per agent turn. + conversation_id: The conversation ID to associate with the agent's messages. frequency_penalty: The frequency penalty to use. logit_bias: The logit bias to use. max_tokens: The maximum number of tokens to generate. @@ -801,6 +805,8 @@ def create_agent( chat_message_store_factory=chat_message_store_factory, context_providers=context_providers, middleware=middleware, + allow_multiple_tool_calls=allow_multiple_tool_calls, + conversation_id=conversation_id, frequency_penalty=frequency_penalty, logit_bias=logit_bias, max_tokens=max_tokens, diff --git a/python/packages/core/agent_framework/_mcp.py b/python/packages/core/agent_framework/_mcp.py index 50f6a91c2e..873b7f04cc 100644 --- a/python/packages/core/agent_framework/_mcp.py +++ b/python/packages/core/agent_framework/_mcp.py @@ -19,7 +19,7 @@ from mcp.shared.context import RequestContext from mcp.shared.exceptions import McpError from mcp.shared.session import RequestResponder -from pydantic import BaseModel, create_model +from pydantic import BaseModel, Field, create_model from ._tools import AIFunction, HostedMCPSpecificApproval from ._types import ChatMessage, Contents, DataContent, Role, TextContent, UriContent @@ -224,13 +224,20 @@ def resolve_type(prop_details: dict[str, Any]) -> type: prop_details = 
json.loads(prop_details) if isinstance(prop_details, str) else prop_details python_type = resolve_type(prop_details) + description = prop_details.get("description", "") # Create field definition for create_model if prop_name in required: - field_definitions[prop_name] = (python_type, ...) + field_definitions[prop_name] = ( + (python_type, Field(description=description)) if description else (python_type, ...) + ) else: default_value = prop_details.get("default", None) - field_definitions[prop_name] = (python_type, default_value) + field_definitions[prop_name] = ( + (python_type, Field(default=default_value, description=description)) + if description + else (python_type, default_value) + ) return create_model(f"{tool.name}_input", **field_definitions) diff --git a/python/packages/core/agent_framework/_tools.py b/python/packages/core/agent_framework/_tools.py index 8297f80c8e..6edd258e15 100644 --- a/python/packages/core/agent_framework/_tools.py +++ b/python/packages/core/agent_framework/_tools.py @@ -1525,6 +1525,12 @@ async def function_invocation_wrapper( prepped_messages = prepare_messages(messages) response: "ChatResponse | None" = None fcc_messages: "list[ChatMessage]" = [] + + # If tools are provided but tool_choice is not set, default to "auto" for function invocation + tools = _extract_tools(kwargs) + if tools and kwargs.get("tool_choice") is None: + kwargs["tool_choice"] = "auto" + for attempt_idx in range(config.max_iterations if config.enabled else 0): fcc_todo = _collect_approval_responses(prepped_messages) if fcc_todo: diff --git a/python/packages/core/agent_framework/_types.py b/python/packages/core/agent_framework/_types.py index 9f2ad10d85..8dc7c00655 100644 --- a/python/packages/core/agent_framework/_types.py +++ b/python/packages/core/agent_framework/_types.py @@ -1050,6 +1050,50 @@ def _validate_uri(cls, uri: str) -> str: def has_top_level_media_type(self, top_level_media_type: Literal["application", "audio", "image", "text"]) -> bool: return 
_has_top_level_media_type(self.media_type, top_level_media_type) + @staticmethod + def detect_image_format_from_base64(image_base64: str) -> str: + """Detect image format from base64 data by examining the binary header. + + Args: + image_base64: Base64 encoded image data + + Returns: + Image format as string (png, jpeg, webp, gif) with png as fallback + """ + try: + # Constants for image format detection + # ~75 bytes of binary data should be enough to detect most image formats + FORMAT_DETECTION_BASE64_CHARS = 100 + + # Decode a small portion to detect format + decoded_data = base64.b64decode(image_base64[:FORMAT_DETECTION_BASE64_CHARS]) + if decoded_data.startswith(b"\x89PNG"): + return "png" + if decoded_data.startswith(b"\xff\xd8\xff"): + return "jpeg" + if decoded_data.startswith(b"RIFF") and b"WEBP" in decoded_data[:12]: + return "webp" + if decoded_data.startswith(b"GIF87a") or decoded_data.startswith(b"GIF89a"): + return "gif" + return "png" # Default fallback + except Exception: + return "png" # Fallback if decoding fails + + @classmethod + def create_data_uri_from_base64(cls, image_base64: str) -> tuple[str, str]: + """Create a data URI and media type from base64 image data. + + Args: + image_base64: Base64 encoded image data + + Returns: + Tuple of (data_uri, media_type) + """ + format_type = cls.detect_image_format_from_base64(image_base64) + uri = f"data:image/{format_type};base64,{image_base64}" + media_type = f"image/{format_type}" + return uri, media_type + class UriContent(BaseContent): """Represents a URI content. 
diff --git a/python/packages/core/agent_framework/_workflows/_agent_executor.py b/python/packages/core/agent_framework/_workflows/_agent_executor.py index 0c05abbb69..8fa85b7f84 100644 --- a/python/packages/core/agent_framework/_workflows/_agent_executor.py +++ b/python/packages/core/agent_framework/_workflows/_agent_executor.py @@ -2,11 +2,14 @@ import logging from dataclasses import dataclass -from typing import Any +from typing import Any, cast + +from agent_framework import FunctionApprovalRequestContent, FunctionApprovalResponseContent from .._agents import AgentProtocol, ChatAgent from .._threads import AgentThread from .._types import AgentRunResponse, AgentRunResponseUpdate, ChatMessage +from ._checkpoint_encoding import decode_checkpoint_value, encode_checkpoint_value from ._conversation_state import encode_chat_messages from ._events import ( AgentRunEvent, @@ -14,6 +17,7 @@ ) from ._executor import Executor, handler from ._message_utils import normalize_messages_input +from ._request_info_mixin import response_handler from ._workflow_context import WorkflowContext logger = logging.getLogger(__name__) @@ -83,6 +87,8 @@ def __init__( super().__init__(exec_id) self._agent = agent self._agent_thread = agent_thread or self._agent.get_new_thread() + self._pending_agent_requests: dict[str, FunctionApprovalRequestContent] = {} + self._pending_responses_to_agent: list[FunctionApprovalResponseContent] = [] self._output_response = output_response self._cache: list[ChatMessage] = [] @@ -93,50 +99,6 @@ def workflow_output_types(self) -> list[type[Any]]: return [AgentRunResponse] return [] - async def _run_agent_and_emit(self, ctx: WorkflowContext[AgentExecutorResponse, AgentRunResponse]) -> None: - """Execute the underlying agent, emit events, and enqueue response. - - Checks ctx.is_streaming() to determine whether to emit incremental AgentRunUpdateEvent - events (streaming mode) or a single AgentRunEvent (non-streaming mode). 
- """ - if ctx.is_streaming(): - # Streaming mode: emit incremental updates - updates: list[AgentRunResponseUpdate] = [] - async for update in self._agent.run_stream( - self._cache, - thread=self._agent_thread, - ): - updates.append(update) - await ctx.add_event(AgentRunUpdateEvent(self.id, update)) - - if isinstance(self._agent, ChatAgent): - response_format = self._agent.chat_options.response_format - response = AgentRunResponse.from_agent_run_response_updates( - updates, - output_format_type=response_format, - ) - else: - response = AgentRunResponse.from_agent_run_response_updates(updates) - else: - # Non-streaming mode: use run() and emit single event - response = await self._agent.run( - self._cache, - thread=self._agent_thread, - ) - await ctx.add_event(AgentRunEvent(self.id, response)) - - if self._output_response: - await ctx.yield_output(response) - - # Always construct a full conversation snapshot from inputs (cache) - # plus agent outputs (agent_run_response.messages). Do not mutate - # response.messages so AgentRunEvent remains faithful to the raw output. - full_conversation: list[ChatMessage] = list(self._cache) + list(response.messages) - - agent_response = AgentExecutorResponse(self.id, response, full_conversation=full_conversation) - await ctx.send_message(agent_response) - self._cache.clear() - @handler async def run( self, request: AgentExecutorRequest, ctx: WorkflowContext[AgentExecutorResponse, AgentRunResponse] @@ -192,6 +154,31 @@ async def from_messages( self._cache = normalize_messages_input(messages) await self._run_agent_and_emit(ctx) + @response_handler + async def handle_user_input_response( + self, + original_request: FunctionApprovalRequestContent, + response: FunctionApprovalResponseContent, + ctx: WorkflowContext[AgentExecutorResponse, AgentRunResponse], + ) -> None: + """Handle user input responses for function approvals during agent execution. 
+ + This will hold the executor's execution until all pending user input requests are resolved. + + Args: + original_request: The original function approval request sent by the agent. + response: The user's response to the function approval request. + ctx: The workflow context for emitting events and outputs. + """ + self._pending_responses_to_agent.append(response) + self._pending_agent_requests.pop(original_request.id, None) + + if not self._pending_agent_requests: + # All pending requests have been resolved; resume agent execution + self._cache = normalize_messages_input(ChatMessage(role="user", contents=self._pending_responses_to_agent)) + self._pending_responses_to_agent.clear() + await self._run_agent_and_emit(ctx) + async def snapshot_state(self) -> dict[str, Any]: """Capture current executor state for checkpointing. @@ -226,6 +213,8 @@ async def snapshot_state(self) -> dict[str, Any]: return { "cache": encode_chat_messages(self._cache), "agent_thread": serialized_thread, + "pending_agent_requests": encode_checkpoint_value(self._pending_agent_requests), + "pending_responses_to_agent": encode_checkpoint_value(self._pending_responses_to_agent), } async def restore_state(self, state: dict[str, Any]) -> None: @@ -258,7 +247,109 @@ async def restore_state(self, state: dict[str, Any]) -> None: else: self._agent_thread = self._agent.get_new_thread() + pending_requests_payload = state.get("pending_agent_requests") + if pending_requests_payload: + self._pending_agent_requests = decode_checkpoint_value(pending_requests_payload) + + pending_responses_payload = state.get("pending_responses_to_agent") + if pending_responses_payload: + self._pending_responses_to_agent = decode_checkpoint_value(pending_responses_payload) + def reset(self) -> None: """Reset the internal cache of the executor.""" logger.debug("AgentExecutor %s: Resetting cache", self.id) self._cache.clear() + + async def _run_agent_and_emit(self, ctx: WorkflowContext[AgentExecutorResponse, AgentRunResponse]) 
-> None: + """Execute the underlying agent, emit events, and enqueue response. + + Checks ctx.is_streaming() to determine whether to emit incremental AgentRunUpdateEvent + events (streaming mode) or a single AgentRunEvent (non-streaming mode). + """ + if ctx.is_streaming(): + # Streaming mode: emit incremental updates + response = await self._run_agent_streaming(cast(WorkflowContext, ctx)) + else: + # Non-streaming mode: use run() and emit single event + response = await self._run_agent(cast(WorkflowContext, ctx)) + + if response is None: + # Agent did not complete (e.g., waiting for user input); do not emit response + logger.info("AgentExecutor %s: Agent did not complete, awaiting user input", self.id) + return + + if self._output_response: + await ctx.yield_output(response) + + # Always construct a full conversation snapshot from inputs (cache) + # plus agent outputs (agent_run_response.messages). Do not mutate + # response.messages so AgentRunEvent remains faithful to the raw output. + full_conversation: list[ChatMessage] = list(self._cache) + list(response.messages) + + agent_response = AgentExecutorResponse(self.id, response, full_conversation=full_conversation) + await ctx.send_message(agent_response) + self._cache.clear() + + async def _run_agent(self, ctx: WorkflowContext) -> AgentRunResponse | None: + """Execute the underlying agent in non-streaming mode. + + Args: + ctx: The workflow context for emitting events. + + Returns: + The complete AgentRunResponse, or None if waiting for user input. 
+ """ + response = await self._agent.run( + self._cache, + thread=self._agent_thread, + ) + await ctx.add_event(AgentRunEvent(self.id, response)) + + # Handle any user input requests + if response.user_input_requests: + for user_input_request in response.user_input_requests: + self._pending_agent_requests[user_input_request.id] = user_input_request + await ctx.request_info(user_input_request, FunctionApprovalResponseContent) + return None + + return response + + async def _run_agent_streaming(self, ctx: WorkflowContext) -> AgentRunResponse | None: + """Execute the underlying agent in streaming mode and collect the full response. + + Args: + ctx: The workflow context for emitting events. + + Returns: + The complete AgentRunResponse, or None if waiting for user input. + """ + updates: list[AgentRunResponseUpdate] = [] + user_input_requests: list[FunctionApprovalRequestContent] = [] + async for update in self._agent.run_stream( + self._cache, + thread=self._agent_thread, + ): + updates.append(update) + await ctx.add_event(AgentRunUpdateEvent(self.id, update)) + + if update.user_input_requests: + user_input_requests.extend(update.user_input_requests) + + # Build the final AgentRunResponse from the collected updates + if isinstance(self._agent, ChatAgent): + response_format = self._agent.chat_options.response_format + response = AgentRunResponse.from_agent_run_response_updates( + updates, + output_format_type=response_format, + ) + else: + response = AgentRunResponse.from_agent_run_response_updates(updates) + + # Handle any user input requests after the streaming completes + if user_input_requests: + for user_input_request in user_input_requests: + self._pending_agent_requests[user_input_request.id] = user_input_request + await ctx.request_info(user_input_request, FunctionApprovalResponseContent) + return None + + return response diff --git a/python/packages/core/agent_framework/_workflows/_handoff.py b/python/packages/core/agent_framework/_workflows/_handoff.py index 
3c5995aeaf..c29e3f55ad 100644 --- a/python/packages/core/agent_framework/_workflows/_handoff.py +++ b/python/packages/core/agent_framework/_workflows/_handoff.py @@ -85,8 +85,8 @@ def _clone_chat_agent(agent: ChatAgent) -> ChatAgent: # so we need to recombine them here to pass the complete tools list to the constructor. # This makes sure MCP tools are preserved when cloning agents for handoff workflows. all_tools = list(options.tools) if options.tools else [] - if agent._local_mcp_tools: - all_tools.extend(agent._local_mcp_tools) + if agent._local_mcp_tools: # type: ignore + all_tools.extend(agent._local_mcp_tools) # type: ignore return ChatAgent( chat_client=agent.chat_client, @@ -133,6 +133,14 @@ class _ConversationWithUserInput: full_conversation: list[ChatMessage] = field(default_factory=lambda: []) # type: ignore[misc] +@dataclass +class _ConversationForUserInput: + """Internal message from coordinator to gateway specifying which agent will receive the response.""" + + conversation: list[ChatMessage] + next_agent_id: str + + class _AutoHandoffMiddleware(FunctionMiddleware): """Intercept handoff tool invocations and short-circuit execution with synthetic results.""" @@ -275,6 +283,7 @@ def __init__( termination_condition: Callable[[list[ChatMessage]], bool | Awaitable[bool]], id: str, handoff_tool_targets: Mapping[str, str] | None = None, + return_to_previous: bool = False, ) -> None: """Create a coordinator that manages routing between specialists and the user.""" super().__init__(id) @@ -284,6 +293,8 @@ def __init__( self._input_gateway_id = input_gateway_id self._termination_condition = termination_condition self._handoff_tool_targets = {k.lower(): v for k, v in (handoff_tool_targets or {}).items()} + self._return_to_previous = return_to_previous + self._current_agent_id: str | None = None # Track the current agent handling conversation def _get_author_name(self) -> str: """Get the coordinator name for orchestrator-generated messages.""" @@ -293,7 +304,7 @@ 
def _get_author_name(self) -> str: async def handle_agent_response( self, response: AgentExecutorResponse, - ctx: WorkflowContext[AgentExecutorRequest | list[ChatMessage], list[ChatMessage]], + ctx: WorkflowContext[AgentExecutorRequest | list[ChatMessage], list[ChatMessage] | _ConversationForUserInput], ) -> None: """Process an agent's response and determine whether to route, request input, or terminate.""" # Hydrate coordinator state (and detect new run) using checkpointable executor state @@ -329,6 +340,9 @@ async def handle_agent_response( # Check for handoff from ANY agent (starting agent or specialist) target = self._resolve_specialist(response.agent_run_response, conversation) if target is not None: + # Update current agent when handoff occurs + self._current_agent_id = target + logger.info(f"Handoff detected: {source} -> {target}. Routing control to specialist '{target}'.") await self._persist_state(ctx) # Clean tool-related content before sending to next agent cleaned = clean_conversation_for_handoff(conversation) @@ -340,10 +354,15 @@ async def handle_agent_response( if not is_starting_agent and source not in self._specialist_ids: raise RuntimeError(f"HandoffCoordinator received response from unknown executor '{source}'.") + # Update current agent when they respond without handoff + self._current_agent_id = source + logger.info( + f"Agent '{source}' responded without handoff. " + f"Requesting user input. Return-to-previous: {self._return_to_previous}" + ) await self._persist_state(ctx) if await self._check_termination(): - logger.info("Handoff workflow termination condition met. 
Ending conversation.") # Clean the output conversation for display cleaned_output = clean_conversation_for_handoff(conversation) await ctx.yield_output(cleaned_output) @@ -352,7 +371,13 @@ async def handle_agent_response( # Clean conversation before sending to gateway for user input request # This removes tool messages that shouldn't be shown to users cleaned_for_display = clean_conversation_for_handoff(conversation) - await ctx.send_message(cleaned_for_display, target_id=self._input_gateway_id) + + # The awaiting_agent_id is the agent that just responded and is awaiting user input + # This is the source of the current response + next_agent_id = source + + message_to_gateway = _ConversationForUserInput(conversation=cleaned_for_display, next_agent_id=next_agent_id) + await ctx.send_message(message_to_gateway, target_id=self._input_gateway_id) # type: ignore[arg-type] @handler async def handle_user_input( @@ -367,14 +392,26 @@ async def handle_user_input( # Check termination before sending to agent if await self._check_termination(): - logger.info("Handoff workflow termination condition met. 
Ending conversation.") await ctx.yield_output(list(self._conversation)) return - # Clean before sending to starting agent + # Determine routing target based on return-to-previous setting + target_agent_id = self._starting_agent_id + if self._return_to_previous and self._current_agent_id: + # Route back to the current agent that's handling the conversation + target_agent_id = self._current_agent_id + logger.info( + f"Return-to-previous enabled: routing user input to current agent '{target_agent_id}' " + f"(bypassing coordinator '{self._starting_agent_id}')" + ) + else: + logger.info(f"Routing user input to coordinator '{target_agent_id}'") + # Note: Stack is only used for specialist-to-specialist handoffs, not user input routing + + # Clean before sending to target agent cleaned = clean_conversation_for_handoff(self._conversation) request = AgentExecutorRequest(messages=cleaned, should_respond=True) - await ctx.send_message(request, target_id=self._starting_agent_id) + await ctx.send_message(request, target_id=target_agent_id) def _resolve_specialist(self, agent_response: AgentRunResponse, conversation: list[ChatMessage]) -> str | None: """Resolve the specialist executor id requested by the agent response, if any.""" @@ -444,22 +481,27 @@ async def _persist_state(self, ctx: WorkflowContext[Any, Any]) -> None: def _snapshot_pattern_metadata(self) -> dict[str, Any]: """Serialize pattern-specific state. - Handoff has no additional metadata beyond base conversation state. + Includes the current agent for return-to-previous routing. Returns: - Empty dict (no pattern-specific state) + Dict containing current agent if return-to-previous is enabled """ + if self._return_to_previous: + return { + "current_agent_id": self._current_agent_id, + } return {} def _restore_pattern_metadata(self, metadata: dict[str, Any]) -> None: """Restore pattern-specific state. - Handoff has no additional metadata beyond base conversation state. 
+ Restores the current agent for return-to-previous routing. Args: - metadata: Pattern-specific state dict (ignored) + metadata: Pattern-specific state dict """ - pass + if self._return_to_previous and "current_agent_id" in metadata: + self._current_agent_id = metadata["current_agent_id"] def _restore_conversation_from_state(self, state: Mapping[str, Any]) -> list[ChatMessage]: """Rehydrate the coordinator's conversation history from checkpointed state. @@ -507,8 +549,21 @@ def __init__( self._prompt = prompt or "Provide your next input for the conversation." @handler - async def request_input(self, conversation: list[ChatMessage], ctx: WorkflowContext) -> None: + async def request_input(self, message: _ConversationForUserInput, ctx: WorkflowContext) -> None: """Emit a `HandoffUserInputRequest` capturing the conversation snapshot.""" + if not message.conversation: + raise ValueError("Handoff workflow requires non-empty conversation before requesting user input.") + request = HandoffUserInputRequest( + conversation=list(message.conversation), + awaiting_agent_id=message.next_agent_id, + prompt=self._prompt, + source_executor_id=self.id, + ) + await ctx.request_info(request, object) + + @handler + async def request_input_legacy(self, conversation: list[ChatMessage], ctx: WorkflowContext) -> None: + """Legacy handler for backward compatibility - emit user input request with starting agent.""" if not conversation: raise ValueError("Handoff workflow requires non-empty conversation before requesting user input.") request = HandoffUserInputRequest( @@ -558,7 +613,7 @@ def _as_user_messages(payload: Any) -> list[ChatMessage]: def _default_termination_condition(conversation: list[ChatMessage]) -> bool: - """Default termination: stop after 10 user messages to prevent infinite loops.""" + """Default termination: stop after 10 user messages.""" user_message_count = sum(1 for msg in conversation if msg.role == Role.USER) return user_message_count >= 10 @@ -743,6 +798,7 @@ def 
__init__( ) self._auto_register_handoff_tools: bool = True self._handoff_config: dict[str, list[str]] = {} # Maps agent_id -> [target_agent_ids] + self._return_to_previous: bool = False if participants: self.participants(participants) @@ -1198,6 +1254,77 @@ async def check_termination(conv: list[ChatMessage]) -> bool: self._termination_condition = condition return self + def enable_return_to_previous(self, enabled: bool = True) -> "HandoffBuilder": + """Enable direct return to the current agent after user input, bypassing the coordinator. + + When enabled, after a specialist responds without requesting another handoff, user input + routes directly back to that same specialist instead of always routing back to the + coordinator agent for re-evaluation. + + This is useful when a specialist needs multiple turns with the user to gather information + or resolve an issue, avoiding unnecessary coordinator involvement while maintaining context. + + Flow Comparison: + + **Default (disabled):** + User -> Coordinator -> Specialist -> User -> Coordinator -> Specialist -> ... + + **With return_to_previous (enabled):** + User -> Coordinator -> Specialist -> User -> Specialist -> ... + + Args: + enabled: Whether to enable return-to-previous routing. Default is True. + + Returns: + Self for method chaining. + + Example: + + .. code-block:: python + + workflow = ( + HandoffBuilder(participants=[triage, technical_support, billing]) + .set_coordinator("triage") + .add_handoff(triage, [technical_support, billing]) + .enable_return_to_previous() # Enable direct return routing + .build() + ) + + # Flow: User asks question + # -> Triage routes to Technical Support + # -> Technical Support asks clarifying question + # -> User provides more info + # -> Routes back to Technical Support (not Triage) + # -> Technical Support continues helping + + Multi-tier handoff example: + + .. 
code-block:: python + + workflow = ( + HandoffBuilder(participants=[triage, specialist_a, specialist_b]) + .set_coordinator("triage") + .add_handoff(triage, [specialist_a, specialist_b]) + .add_handoff(specialist_a, specialist_b) + .enable_return_to_previous() + .build() + ) + + # Flow: User asks question + # -> Triage routes to Specialist A + # -> Specialist A hands off to Specialist B + # -> Specialist B asks clarifying question + # -> User provides more info + # -> Routes back to Specialist B (who is currently handling the conversation) + + Note: + This feature routes to whichever agent most recently responded, whether that's + the coordinator or a specialist. The conversation continues with that agent until + they either hand off to another agent or the termination condition is met. + """ + self._return_to_previous = enabled + return self + def build(self) -> Workflow: """Construct the final Workflow instance from the configured builder. @@ -1326,6 +1453,7 @@ def _handoff_orchestrator_factory(_: _GroupChatConfig) -> Executor: termination_condition=self._termination_condition, id="handoff-coordinator", handoff_tool_targets=handoff_tool_targets, + return_to_previous=self._return_to_previous, ) wiring = _GroupChatConfig( diff --git a/python/packages/core/agent_framework/ag_ui/__init__.py b/python/packages/core/agent_framework/ag_ui/__init__.py new file mode 100644 index 0000000000..c5569ed7a9 --- /dev/null +++ b/python/packages/core/agent_framework/ag_ui/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +import importlib +from typing import Any + +PACKAGE_NAME = "agent_framework_ag_ui" +PACKAGE_EXTRA = "ag-ui" +_IMPORTS = [ + "__version__", + "AgentFrameworkAgent", + "add_agent_framework_fastapi_endpoint", + "AGUIChatClient", + "AGUIEventConverter", + "AGUIHttpService", + "ConfirmationStrategy", + "DefaultConfirmationStrategy", + "TaskPlannerConfirmationStrategy", + "RecipeConfirmationStrategy", + "DocumentWriterConfirmationStrategy", +] + + +def __getattr__(name: str) -> Any: + if name in _IMPORTS: + try: + return getattr(importlib.import_module(PACKAGE_NAME), name) + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + f"The '{PACKAGE_EXTRA}' extra is not installed, please do `pip install agent-framework-{PACKAGE_EXTRA}`" + ) from exc + raise AttributeError(f"Module {PACKAGE_NAME} has no attribute {name}.") + + +def __dir__() -> list[str]: + return _IMPORTS diff --git a/python/packages/core/agent_framework/ag_ui/__init__.pyi b/python/packages/core/agent_framework/ag_ui/__init__.pyi new file mode 100644 index 0000000000..201e1a0256 --- /dev/null +++ b/python/packages/core/agent_framework/ag_ui/__init__.pyi @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft. All rights reserved. 
+ +from agent_framework_ag_ui import ( + AgentFrameworkAgent, + AGUIChatClient, + AGUIEventConverter, + AGUIHttpService, + ConfirmationStrategy, + DefaultConfirmationStrategy, + DocumentWriterConfirmationStrategy, + RecipeConfirmationStrategy, + TaskPlannerConfirmationStrategy, + __version__, + add_agent_framework_fastapi_endpoint, +) + +__all__ = [ + "AGUIChatClient", + "AGUIEventConverter", + "AGUIHttpService", + "AgentFrameworkAgent", + "ConfirmationStrategy", + "DefaultConfirmationStrategy", + "DocumentWriterConfirmationStrategy", + "RecipeConfirmationStrategy", + "TaskPlannerConfirmationStrategy", + "__version__", + "add_agent_framework_fastapi_endpoint", +] diff --git a/python/packages/core/agent_framework/observability.py b/python/packages/core/agent_framework/observability.py index c17ce12666..3e44fae23c 100644 --- a/python/packages/core/agent_framework/observability.py +++ b/python/packages/core/agent_framework/observability.py @@ -846,6 +846,7 @@ async def trace_get_response( kwargs.get("model_id") or (chat_options.model_id if (chat_options := kwargs.get("chat_options")) else None) or getattr(self, "model_id", None) + or "unknown" ) service_url = str( service_url_func() @@ -933,6 +934,7 @@ async def trace_get_streaming_response( kwargs.get("model_id") or (chat_options.model_id if (chat_options := kwargs.get("chat_options")) else None) or getattr(self, "model_id", None) + or "unknown" ) service_url = str( service_url_func() @@ -1324,7 +1326,10 @@ def _get_span( attributes: dict[str, Any], span_name_attribute: str, ) -> Generator["trace.Span", Any, Any]: - """Start a span for a agent run.""" + """Start a span for a agent run. + + Note: `attributes` must contain the `span_name_attribute` key. 
+ """ span = get_tracer().start_span(f"{attributes[OtelAttr.OPERATION]} {attributes[span_name_attribute]}") span.set_attributes(attributes) with trace.use_span( @@ -1353,7 +1358,8 @@ def _get_span_attributes(**kwargs: Any) -> dict[str, Any]: attributes[SpanAttributes.LLM_SYSTEM] = system_name if provider_name := kwargs.get("provider_name"): attributes[OtelAttr.PROVIDER_NAME] = provider_name - attributes[SpanAttributes.LLM_REQUEST_MODEL] = kwargs.get("model", "unknown") + if model_id := kwargs.get("model", chat_options.model_id): + attributes[SpanAttributes.LLM_REQUEST_MODEL] = model_id if service_url := kwargs.get("service_url"): attributes[OtelAttr.ADDRESS] = service_url if conversation_id := kwargs.get("conversation_id", chat_options.conversation_id): diff --git a/python/packages/core/agent_framework/openai/_responses_client.py b/python/packages/core/agent_framework/openai/_responses_client.py index 430e61fcab..352d8c8325 100644 --- a/python/packages/core/agent_framework/openai/_responses_client.py +++ b/python/packages/core/agent_framework/openai/_responses_client.py @@ -276,6 +276,14 @@ def _tools_to_response_tools( # Map the parameter name and remove the old one mapped_tool[api_param] = mapped_tool.pop(user_param) + # Validate partial_images parameter for streaming image generation + # OpenAI API requires partial_images to be between 0-3 (inclusive) for image_generation tool + # Reference: https://platform.openai.com/docs/api-reference/responses/create#responses_create-tools-image_generation_tool-partial_images + if "partial_images" in mapped_tool: + partial_images = mapped_tool["partial_images"] + if not isinstance(partial_images, int) or partial_images < 0 or partial_images > 3: + raise ValueError("partial_images must be an integer between 0 and 3 (inclusive).") + response_tools.append(mapped_tool) else: response_tools.append(tool_dict) @@ -707,29 +715,8 @@ def _create_response_content( uri = item.result media_type = None if not uri.startswith("data:"): - # 
Raw base64 string - convert to proper data URI format - # Detect format from base64 data - import base64 - - try: - # Decode a small portion to detect format - decoded_data = base64.b64decode(uri[:100]) # First ~75 bytes should be enough - if decoded_data.startswith(b"\x89PNG"): - format_type = "png" - elif decoded_data.startswith(b"\xff\xd8\xff"): - format_type = "jpeg" - elif decoded_data.startswith(b"RIFF") and b"WEBP" in decoded_data[:12]: - format_type = "webp" - elif decoded_data.startswith(b"GIF87a") or decoded_data.startswith(b"GIF89a"): - format_type = "gif" - else: - # Default to png if format cannot be detected - format_type = "png" - except Exception: - # Fallback to png if decoding fails - format_type = "png" - uri = f"data:image/{format_type};base64,{uri}" - media_type = f"image/{format_type}" + # Raw base64 string - convert to proper data URI format using helper + uri, media_type = DataContent.create_data_uri_from_base64(uri) else: # Parse media type from existing data URI try: @@ -945,6 +932,25 @@ def _create_streaming_response_content( raw_representation=event, ) ) + case "response.image_generation_call.partial_image": + # Handle streaming partial image generation + image_base64 = event.partial_image_b64 + partial_index = event.partial_image_index + + # Use helper function to create data URI from base64 + uri, media_type = DataContent.create_data_uri_from_base64(image_base64) + + contents.append( + DataContent( + uri=uri, + media_type=media_type, + additional_properties={ + "partial_image_index": partial_index, + "is_partial_image": True, + }, + raw_representation=event, + ) + ) case _: logger.debug("Unparsed event of type: %s: %s", event.type, event) diff --git a/python/packages/core/pyproject.toml b/python/packages/core/pyproject.toml index 38eb1323db..0dc26386c2 100644 --- a/python/packages/core/pyproject.toml +++ b/python/packages/core/pyproject.toml @@ -4,7 +4,7 @@ description = "Microsoft Agent Framework for building AI Agents with Python. 
Thi authors = [{ name = "Microsoft", email = "af-support@microsoft.com"}] readme = "README.md" requires-python = ">=3.10" -version = "1.0.0b251105" +version = "1.0.0b251111" license-files = ["LICENSE"] urls.homepage = "https://aka.ms/agent-framework" urls.source = "https://github.com/microsoft/agent-framework/tree/main/python" @@ -42,13 +42,14 @@ dependencies = [ [project.optional-dependencies] all = [ "agent-framework-a2a", + "agent-framework-ag-ui", + "agent-framework-anthropic", "agent-framework-azure-ai", "agent-framework-copilotstudio", - "agent-framework-mem0", - "agent-framework-redis", "agent-framework-devui", + "agent-framework-mem0", "agent-framework-purview", - "agent-framework-anthropic", + "agent-framework-redis", ] [tool.uv] diff --git a/python/packages/core/tests/core/test_observability.py b/python/packages/core/tests/core/test_observability.py index c79f31dca4..d994867f6a 100644 --- a/python/packages/core/tests/core/test_observability.py +++ b/python/packages/core/tests/core/test_observability.py @@ -279,6 +279,45 @@ async def test_chat_client_streaming_observability( assert span.attributes[OtelAttr.OUTPUT_MESSAGES] is not None +async def test_chat_client_without_model_id_observability(mock_chat_client, span_exporter: InMemorySpanExporter): + """Test telemetry shouldn't fail when the model_id is not provided for unknown reason.""" + client = use_observability(mock_chat_client)() + messages = [ChatMessage(role=Role.USER, text="Test")] + span_exporter.clear() + response = await client.get_response(messages=messages) + + assert response is not None + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + + assert span.name == "chat unknown" + assert span.attributes[OtelAttr.OPERATION.value] == OtelAttr.CHAT_COMPLETION_OPERATION + assert span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "unknown" + + +async def test_chat_client_streaming_without_model_id_observability( + mock_chat_client, span_exporter: 
InMemorySpanExporter +): + """Test streaming telemetry shouldn't fail when the model_id is not provided for unknown reason.""" + client = use_observability(mock_chat_client)() + messages = [ChatMessage(role=Role.USER, text="Test")] + span_exporter.clear() + # Collect all yielded updates + updates = [] + async for update in client.get_streaming_response(messages=messages): + updates.append(update) + + # Verify we got the expected updates, this shouldn't be dependent on otel + assert len(updates) == 2 + spans = span_exporter.get_finished_spans() + assert len(spans) == 1 + span = spans[0] + assert span.name == "chat unknown" + assert span.attributes[OtelAttr.OPERATION.value] == OtelAttr.CHAT_COMPLETION_OPERATION + assert span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "unknown" + + def test_prepend_user_agent_with_none_value(): """Test prepend user agent with None value in headers.""" headers = {"User-Agent": None} @@ -368,6 +407,7 @@ def __init__(self): self.name = "test_agent" self.display_name = "Test Agent" self.description = "Test agent description" + self.chat_options = ChatOptions(model_id="TestModel") async def run(self, messages=None, *, thread=None, **kwargs): return AgentRunResponse( @@ -405,7 +445,7 @@ async def test_agent_instrumentation_enabled( assert span.attributes[OtelAttr.AGENT_ID] == "test_agent_id" assert span.attributes[OtelAttr.AGENT_NAME] == "Test Agent" assert span.attributes[OtelAttr.AGENT_DESCRIPTION] == "Test agent description" - assert span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "unknown" + assert span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "TestModel" assert span.attributes[OtelAttr.INPUT_TOKENS] == 15 assert span.attributes[OtelAttr.OUTPUT_TOKENS] == 25 if enable_sensitive_data: @@ -433,7 +473,7 @@ async def test_agent_streaming_response_with_diagnostics_enabled_via_decorator( assert span.attributes[OtelAttr.AGENT_ID] == "test_agent_id" assert span.attributes[OtelAttr.AGENT_NAME] == "Test Agent" assert 
span.attributes[OtelAttr.AGENT_DESCRIPTION] == "Test agent description" - assert span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "unknown" + assert span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "TestModel" if enable_sensitive_data: assert span.attributes.get(OtelAttr.OUTPUT_MESSAGES) is not None # Streaming, so no usage yet diff --git a/python/packages/core/tests/core/test_types.py b/python/packages/core/tests/core/test_types.py index 909e72a0a0..38a3fe414e 100644 --- a/python/packages/core/tests/core/test_types.py +++ b/python/packages/core/tests/core/test_types.py @@ -1,5 +1,6 @@ # Copyright (c) Microsoft. All rights reserved. +import base64 from collections.abc import AsyncIterable from typing import Any @@ -166,6 +167,57 @@ def test_data_content_empty(): DataContent(uri="") +def test_data_content_detect_image_format_from_base64(): + """Test the detect_image_format_from_base64 static method.""" + # Test each supported format + png_data = b"\x89PNG\r\n\x1a\n" + b"fake_data" + assert DataContent.detect_image_format_from_base64(base64.b64encode(png_data).decode()) == "png" + + jpeg_data = b"\xff\xd8\xff\xe0" + b"fake_data" + assert DataContent.detect_image_format_from_base64(base64.b64encode(jpeg_data).decode()) == "jpeg" + + webp_data = b"RIFF" + b"1234" + b"WEBP" + b"fake_data" + assert DataContent.detect_image_format_from_base64(base64.b64encode(webp_data).decode()) == "webp" + + gif_data = b"GIF89a" + b"fake_data" + assert DataContent.detect_image_format_from_base64(base64.b64encode(gif_data).decode()) == "gif" + + # Test fallback behavior + unknown_data = b"UNKNOWN_FORMAT" + assert DataContent.detect_image_format_from_base64(base64.b64encode(unknown_data).decode()) == "png" + + # Test error handling + assert DataContent.detect_image_format_from_base64("invalid_base64!") == "png" + assert DataContent.detect_image_format_from_base64("") == "png" + + +def test_data_content_create_data_uri_from_base64(): + """Test the create_data_uri_from_base64 class 
method.""" + # Test with PNG data + png_data = b"\x89PNG\r\n\x1a\n" + b"fake_data" + png_base64 = base64.b64encode(png_data).decode() + uri, media_type = DataContent.create_data_uri_from_base64(png_base64) + + assert uri == f"data:image/png;base64,{png_base64}" + assert media_type == "image/png" + + # Test with different format + jpeg_data = b"\xff\xd8\xff\xe0" + b"fake_data" + jpeg_base64 = base64.b64encode(jpeg_data).decode() + uri, media_type = DataContent.create_data_uri_from_base64(jpeg_base64) + + assert uri == f"data:image/jpeg;base64,{jpeg_base64}" + assert media_type == "image/jpeg" + + # Test fallback for unknown format + unknown_data = b"UNKNOWN_FORMAT" + unknown_base64 = base64.b64encode(unknown_data).decode() + uri, media_type = DataContent.create_data_uri_from_base64(unknown_base64) + + assert uri == f"data:image/png;base64,{unknown_base64}" + assert media_type == "image/png" + + # region UriContent diff --git a/python/packages/core/tests/openai/test_openai_responses_client.py b/python/packages/core/tests/openai/test_openai_responses_client.py index 1b044c99be..4700950439 100644 --- a/python/packages/core/tests/openai/test_openai_responses_client.py +++ b/python/packages/core/tests/openai/test_openai_responses_client.py @@ -36,6 +36,7 @@ HostedMCPTool, HostedVectorStoreContent, HostedWebSearchTool, + MCPStreamableHTTPTool, Role, TextContent, TextReasoningContent, @@ -946,1169 +947,1196 @@ def test_streaming_response_basic_structure() -> None: assert response.raw_representation is mock_event -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_response() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() - - assert isinstance(openai_responses_client, ChatClientProtocol) +def test_service_response_exception_includes_original_error_details() -> None: + """Test that ServiceResponseException messages include original error details in the new 
format.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + messages = [ChatMessage(role="user", text="test message")] - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) + mock_response = MagicMock() + original_error_message = "Request rate limit exceeded" + mock_error = BadRequestError( + message=original_error_message, + response=mock_response, + body={"error": {"code": "rate_limit", "message": original_error_message}}, ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) + mock_error.code = "rate_limit" - # Test that the client can be used to get a response - response = await openai_responses_client.get_response(messages=messages) + with ( + patch.object(client.client.responses, "parse", side_effect=mock_error), + pytest.raises(ServiceResponseException) as exc_info, + ): + asyncio.run(client.get_response(messages=messages, response_format=OutputStruct)) - assert response is not None - assert isinstance(response, ChatResponse) - assert "scientists" in response.text + exception_message = str(exc_info.value) + assert "service failed to complete the prompt:" in exception_message + assert original_error_message in exception_message - messages.clear() - messages.append(ChatMessage(role="user", text="The weather in Seattle is sunny")) - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - # Test that the client can be used to get a response - response = await openai_responses_client.get_response( - messages=messages, - response_format=OutputStruct, - ) +def test_get_streaming_response_with_response_format() -> None: + """Test 
get_streaming_response with response_format.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + messages = [ChatMessage(role="user", text="Test streaming with format")] - assert response is not None - assert isinstance(response, ChatResponse) - output = response.value - assert output is not None, "Response value is None" - assert "seattle" in output.location.lower() - assert output.weather is not None + # It will fail due to invalid API key, but exercises the code path + with pytest.raises(ServiceResponseException): + async def run_streaming(): + async for _ in client.get_streaming_response(messages=messages, response_format=OutputStruct): + pass -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_response_tools() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() + asyncio.run(run_streaming()) - assert isinstance(openai_responses_client, ChatClientProtocol) - messages: list[ChatMessage] = [] - messages.append(ChatMessage(role="user", text="What is the weather in New York?")) +def test_openai_content_parser_image_content() -> None: + """Test _openai_content_parser with image content variations.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - # Test that the client can be used to get a response - response = await openai_responses_client.get_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", + # Test image content with detail parameter and file_id + image_content_with_detail = UriContent( + uri="https://example.com/image.jpg", + media_type="image/jpeg", + additional_properties={"detail": "high", "file_id": "file_123"}, ) + result = client._openai_content_parser(Role.USER, image_content_with_detail, {}) # type: ignore + assert result["type"] == "input_image" + assert result["image_url"] == "https://example.com/image.jpg" + assert result["detail"] == "high" + assert 
result["file_id"] == "file_123" - assert response is not None - assert isinstance(response, ChatResponse) - assert "sunny" in response.text.lower() - - messages.clear() - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) + # Test image content without additional properties (defaults) + image_content_basic = UriContent(uri="https://example.com/basic.png", media_type="image/png") + result = client._openai_content_parser(Role.USER, image_content_basic, {}) # type: ignore + assert result["type"] == "input_image" + assert result["detail"] == "auto" + assert result["file_id"] is None - # Test that the client can be used to get a response - response = await openai_responses_client.get_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - response_format=OutputStruct, - ) - assert response is not None - assert isinstance(response, ChatResponse) - output = OutputStruct.model_validate_json(response.text) - assert "seattle" in output.location.lower() - assert "sunny" in output.weather.lower() +def test_openai_content_parser_audio_content() -> None: + """Test _openai_content_parser with audio content variations.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + # Test WAV audio content + wav_content = UriContent(uri="data:audio/wav;base64,abc123", media_type="audio/wav") + result = client._openai_content_parser(Role.USER, wav_content, {}) # type: ignore + assert result["type"] == "input_audio" + assert result["input_audio"]["data"] == "data:audio/wav;base64,abc123" + assert result["input_audio"]["format"] == "wav" -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_streaming() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() + # Test MP3 audio content + mp3_content = UriContent(uri="data:audio/mp3;base64,def456", media_type="audio/mp3") + result = 
client._openai_content_parser(Role.USER, mp3_content, {}) # type: ignore + assert result["type"] == "input_audio" + assert result["input_audio"]["format"] == "mp3" - assert isinstance(openai_responses_client, ChatClientProtocol) - messages: list[ChatMessage] = [] - messages.append( - ChatMessage( - role="user", - text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " - "Bonded by their love for the natural world and shared curiosity, they uncovered a " - "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " - "of climate change.", - ) - ) - messages.append(ChatMessage(role="user", text="who are Emily and David?")) +def test_openai_content_parser_unsupported_content() -> None: + """Test _openai_content_parser with unsupported content types.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - # Test that the client can be used to get a response - response = await ChatResponse.from_chat_response_generator( - openai_responses_client.get_streaming_response(messages=messages) - ) + # Test unsupported audio format + unsupported_audio = UriContent(uri="data:audio/ogg;base64,ghi789", media_type="audio/ogg") + result = client._openai_content_parser(Role.USER, unsupported_audio, {}) # type: ignore + assert result == {} - assert "scientists" in response.text + # Test non-media content + text_uri_content = UriContent(uri="https://example.com/document.txt", media_type="text/plain") + result = client._openai_content_parser(Role.USER, text_uri_content, {}) # type: ignore + assert result == {} - messages.clear() - messages.append(ChatMessage(role="user", text="The weather in Seattle is sunny")) - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - response = openai_responses_client.get_streaming_response( - messages=messages, - response_format=OutputStruct, - ) - chunks = [] - async for chunk in response: - assert chunk is not None - assert 
isinstance(chunk, ChatResponseUpdate) - chunks.append(chunk) - full_message = ChatResponse.from_chat_response_updates(chunks, output_format_type=OutputStruct) - output = full_message.value - assert output is not None, "Response value is None" - assert "seattle" in output.location.lower() - assert output.weather is not None +def test_create_streaming_response_content_code_interpreter() -> None: + """Test _create_streaming_response_content with code_interpreter_call.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} + mock_event_image = MagicMock() + mock_event_image.type = "response.output_item.added" + mock_item_image = MagicMock() + mock_item_image.type = "code_interpreter_call" + mock_image_output = MagicMock() + mock_image_output.type = "image" + mock_image_output.url = "https://example.com/plot.png" + mock_item_image.outputs = [mock_image_output] + mock_item_image.code = None + mock_event_image.item = mock_item_image -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_streaming_tools() -> None: - """Test OpenAI chat completion responses.""" - openai_responses_client = OpenAIResponsesClient() + result = client._create_streaming_response_content(mock_event_image, chat_options, function_call_ids) # type: ignore + assert len(result.contents) == 1 + assert isinstance(result.contents[0], UriContent) + assert result.contents[0].uri == "https://example.com/plot.png" + assert result.contents[0].media_type == "image" - assert isinstance(openai_responses_client, ChatClientProtocol) - messages: list[ChatMessage] = [ChatMessage(role="user", text="What is the weather in Seattle?")] +def test_create_streaming_response_content_reasoning() -> None: + """Test _create_streaming_response_content with reasoning content.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = 
ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} - # Test that the client can be used to get a response - response = openai_responses_client.get_streaming_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - ) - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text + mock_event_reasoning = MagicMock() + mock_event_reasoning.type = "response.output_item.added" + mock_item_reasoning = MagicMock() + mock_item_reasoning.type = "reasoning" + mock_reasoning_content = MagicMock() + mock_reasoning_content.text = "Analyzing the problem step by step..." + mock_item_reasoning.content = [mock_reasoning_content] + mock_item_reasoning.summary = ["Problem analysis summary"] + mock_event_reasoning.item = mock_item_reasoning - assert "sunny" in full_message.lower() + result = client._create_streaming_response_content(mock_event_reasoning, chat_options, function_call_ids) # type: ignore + assert len(result.contents) == 1 + assert isinstance(result.contents[0], TextReasoningContent) + assert result.contents[0].text == "Analyzing the problem step by step..." 
+ if result.contents[0].additional_properties: + assert result.contents[0].additional_properties["summary"] == "Problem analysis summary" - messages.clear() - messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - response = openai_responses_client.get_streaming_response( - messages=messages, - tools=[get_weather], - tool_choice="auto", - response_format=OutputStruct, - ) - chunks = [] - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - chunks.append(chunk) - - full_message = ChatResponse.from_chat_response_updates(chunks, output_format_type=OutputStruct) - output = full_message.value - assert output is not None, "Response value is None" - assert "seattle" in output.location.lower() - assert "sunny" in output.weather.lower() +def test_openai_content_parser_text_reasoning_comprehensive() -> None: + """Test _openai_content_parser with TextReasoningContent all additional properties.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + # Test TextReasoningContent with all additional properties + comprehensive_reasoning = TextReasoningContent( + text="Comprehensive reasoning summary", + additional_properties={ + "status": "in_progress", + "reasoning_text": "Step-by-step analysis", + "encrypted_content": "secure_data_456", + }, + ) + result = client._openai_content_parser(Role.ASSISTANT, comprehensive_reasoning, {}) # type: ignore + assert result["type"] == "reasoning" + assert result["summary"]["text"] == "Comprehensive reasoning summary" + assert result["status"] == "in_progress" + assert result["content"]["type"] == "reasoning_text" + assert result["content"]["text"] == "Step-by-step analysis" + assert result["encrypted_content"] == "secure_data_456" -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_web_search() -> None: - openai_responses_client = OpenAIResponsesClient() - assert 
isinstance(openai_responses_client, ChatClientProtocol) +def test_streaming_reasoning_text_delta_event() -> None: + """Test reasoning text delta event creates TextReasoningContent.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} - # Test that the client will use the web search tool - response = await openai_responses_client.get_response( - messages=[ - ChatMessage( - role="user", - text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", - ) - ], - tools=[HostedWebSearchTool()], - tool_choice="auto", + event = ResponseReasoningTextDeltaEvent( + type="response.reasoning_text.delta", + content_index=0, + item_id="reasoning_123", + output_index=0, + sequence_number=1, + delta="reasoning delta", ) - assert response is not None - assert isinstance(response, ChatResponse) - assert "Rumi" in response.text - assert "Mira" in response.text - assert "Zoey" in response.text - - # Test that the client will use the web search tool with location - additional_properties = { - "user_location": { - "country": "US", - "city": "Seattle", - } - } - response = await openai_responses_client.get_response( - messages=[ChatMessage(role="user", text="What is the current weather? 
Do not ask for my current location.")], - tools=[HostedWebSearchTool(additional_properties=additional_properties)], - tool_choice="auto", - ) - assert response.text is not None + with patch.object(client, "_get_metadata_from_response", return_value={}) as mock_metadata: + response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + assert len(response.contents) == 1 + assert isinstance(response.contents[0], TextReasoningContent) + assert response.contents[0].text == "reasoning delta" + assert response.contents[0].raw_representation == event + mock_metadata.assert_called_once_with(event) -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_web_search_streaming() -> None: - openai_responses_client = OpenAIResponsesClient() - assert isinstance(openai_responses_client, ChatClientProtocol) +def test_streaming_reasoning_text_done_event() -> None: + """Test reasoning text done event creates TextReasoningContent with complete text.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} - # Test that the client will use the web search tool - response = openai_responses_client.get_streaming_response( - messages=[ - ChatMessage( - role="user", - text="Who are the main characters of Kpop Demon Hunters? 
Do a web search to find the answer.", - ) - ], - tools=[HostedWebSearchTool()], - tool_choice="auto", + event = ResponseReasoningTextDoneEvent( + type="response.reasoning_text.done", + content_index=0, + item_id="reasoning_456", + output_index=0, + sequence_number=2, + text="complete reasoning", ) - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - assert "Rumi" in full_message - assert "Mira" in full_message - assert "Zoey" in full_message - - # Test that the client will use the web search tool with location - additional_properties = { - "user_location": { - "country": "US", - "city": "Seattle", - } - } - response = openai_responses_client.get_streaming_response( - messages=[ChatMessage(role="user", text="What is the current weather? Do not ask for my current location.")], - tools=[HostedWebSearchTool(additional_properties=additional_properties)], - tool_choice="auto", - ) - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text - assert full_message is not None + with patch.object(client, "_get_metadata_from_response", return_value={"test": "data"}) as mock_metadata: + response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + assert len(response.contents) == 1 + assert isinstance(response.contents[0], TextReasoningContent) + assert response.contents[0].text == "complete reasoning" + assert response.contents[0].raw_representation == event + mock_metadata.assert_called_once_with(event) + assert response.additional_properties == {"test": "data"} -@pytest.mark.skip( - 
reason="Unreliable due to OpenAI vector store indexing potential " - "race condition. See https://github.com/microsoft/agent-framework/issues/1669" -) -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_file_search() -> None: - openai_responses_client = OpenAIResponsesClient() - assert isinstance(openai_responses_client, ChatClientProtocol) +def test_streaming_reasoning_summary_text_delta_event() -> None: + """Test reasoning summary text delta event creates TextReasoningContent.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} - file_id, vector_store = await create_vector_store(openai_responses_client) - # Test that the client will use the web search tool - response = await openai_responses_client.get_response( - messages=[ - ChatMessage( - role="user", - text="What is the weather today? Do a file search to find the answer.", - ) - ], - tools=[HostedFileSearchTool(inputs=vector_store)], - tool_choice="auto", + event = ResponseReasoningSummaryTextDeltaEvent( + type="response.reasoning_summary_text.delta", + item_id="summary_789", + output_index=0, + sequence_number=3, + summary_index=0, + delta="summary delta", ) - await delete_vector_store(openai_responses_client, file_id, vector_store.vector_store_id) - assert "sunny" in response.text.lower() - assert "75" in response.text + with patch.object(client, "_get_metadata_from_response", return_value={}) as mock_metadata: + response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + assert len(response.contents) == 1 + assert isinstance(response.contents[0], TextReasoningContent) + assert response.contents[0].text == "summary delta" + assert response.contents[0].raw_representation == event + mock_metadata.assert_called_once_with(event) -@pytest.mark.skip( - reason="Unreliable due to OpenAI vector store 
indexing " - "potential race condition. See https://github.com/microsoft/agent-framework/issues/1669" -) -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_streaming_file_search() -> None: - openai_responses_client = OpenAIResponsesClient() - assert isinstance(openai_responses_client, ChatClientProtocol) +def test_streaming_reasoning_summary_text_done_event() -> None: + """Test reasoning summary text done event creates TextReasoningContent with complete text.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} - file_id, vector_store = await create_vector_store(openai_responses_client) - # Test that the client will use the web search tool - response = openai_responses_client.get_streaming_response( - messages=[ - ChatMessage( - role="user", - text="What is the weather today? Do a file search to find the answer.", - ) - ], - tools=[HostedFileSearchTool(inputs=vector_store)], - tool_choice="auto", + event = ResponseReasoningSummaryTextDoneEvent( + type="response.reasoning_summary_text.done", + item_id="summary_012", + output_index=0, + sequence_number=4, + summary_index=0, + text="complete summary", ) - assert response is not None - full_message: str = "" - async for chunk in response: - assert chunk is not None - assert isinstance(chunk, ChatResponseUpdate) - for content in chunk.contents: - if isinstance(content, TextContent) and content.text: - full_message += content.text + with patch.object(client, "_get_metadata_from_response", return_value={"custom": "meta"}) as mock_metadata: + response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore - await delete_vector_store(openai_responses_client, file_id, vector_store.vector_store_id) + assert len(response.contents) == 1 + assert isinstance(response.contents[0], TextReasoningContent) + assert 
response.contents[0].text == "complete summary" + assert response.contents[0].raw_representation == event + mock_metadata.assert_called_once_with(event) + assert response.additional_properties == {"custom": "meta"} - assert "sunny" in full_message.lower() - assert "75" in full_message +def test_streaming_reasoning_events_preserve_metadata() -> None: + """Test that reasoning events preserve metadata like regular text events.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + chat_options = ChatOptions() + function_call_ids: dict[int, tuple[str, str]] = {} -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_basic_run(): - """Test OpenAI Responses Client agent basic run functionality with OpenAIResponsesClient.""" - agent = OpenAIResponsesClient().create_agent( - instructions="You are a helpful assistant.", + text_event = ResponseTextDeltaEvent( + type="response.output_text.delta", + content_index=0, + item_id="text_item", + output_index=0, + sequence_number=1, + logprobs=[], + delta="text", ) - # Test basic run - response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") + reasoning_event = ResponseReasoningTextDeltaEvent( + type="response.reasoning_text.delta", + content_index=0, + item_id="reasoning_item", + output_index=0, + sequence_number=2, + delta="reasoning", + ) - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - assert "hello world" in response.text.lower() + with patch.object(client, "_get_metadata_from_response", return_value={"test": "metadata"}): + text_response = client._create_streaming_response_content(text_event, chat_options, function_call_ids) # type: ignore + reasoning_response = client._create_streaming_response_content(reasoning_event, chat_options, function_call_ids) # type: ignore + # Both should preserve metadata + assert text_response.additional_properties == {"test": "metadata"} + assert reasoning_response.additional_properties == {"test": "metadata"} -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_basic_run_streaming(): - """Test OpenAI Responses Client agent basic streaming functionality with OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - ) as agent: - # Test streaming run - full_text = "" - async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): - assert isinstance(chunk, AgentRunResponseUpdate) - if chunk.text: - full_text += chunk.text + # Content types should be different + assert isinstance(text_response.contents[0], TextContent) + assert isinstance(reasoning_response.contents[0], TextReasoningContent) - assert len(full_text) > 0 - assert "streaming response test" in full_text.lower() +def test_create_response_content_image_generation_raw_base64(): + """Test image generation response parsing with raw base64 string.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") -@pytest.mark.flaky 
-@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_thread_persistence(): - """Test OpenAI Responses Client agent thread persistence across runs with OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as agent: - # Create a new thread that will be reused - thread = agent.get_new_thread() + # Create a mock response with raw base64 image data (PNG signature) + mock_response = MagicMock() + mock_response.output_parsed = None + mock_response.metadata = {} + mock_response.usage = None + mock_response.id = "test-response-id" + mock_response.model = "test-model" + mock_response.created_at = 1234567890 - # First interaction - first_response = await agent.run("My favorite programming language is Python. Remember this.", thread=thread) + # Mock image generation output item with raw base64 (PNG format) + png_signature = b"\x89PNG\r\n\x1a\n" + mock_base64 = base64.b64encode(png_signature + b"fake_png_data_here").decode() - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None + mock_item = MagicMock() + mock_item.type = "image_generation_call" + mock_item.result = mock_base64 - # Second interaction - test memory - second_response = await agent.run("What is my favorite programming language?", thread=thread) + mock_response.output = [mock_item] - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None + with patch.object(client, "_get_metadata_from_response", return_value={}): + response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + # Verify the response contains DataContent with proper URI and media_type + assert len(response.messages[0].contents) == 1 + content = response.messages[0].contents[0] + assert isinstance(content, DataContent) + assert content.uri.startswith("data:image/png;base64,") + assert 
content.media_type == "image/png" -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_thread_storage_with_store_true(): - """Test OpenAI Responses Client agent with store=True to verify service_thread_id is returned.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - # Create a new thread - thread = AgentThread() - # Initially, service_thread_id should be None - assert thread.service_thread_id is None +def test_create_response_content_image_generation_existing_data_uri(): + """Test image generation response parsing with existing data URI.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - # Run with store=True to store messages on OpenAI side - response = await agent.run( - "Hello! Please remember that my name is Alex.", - thread=thread, - store=True, - ) + # Create a mock response with existing data URI + mock_response = MagicMock() + mock_response.output_parsed = None + mock_response.metadata = {} + mock_response.usage = None + mock_response.id = "test-response-id" + mock_response.model = "test-model" + mock_response.created_at = 1234567890 - # Validate response - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 + # Mock image generation output item with existing data URI (valid WEBP header) + webp_signature = b"RIFF" + b"\x12\x00\x00\x00" + b"WEBP" + valid_webp_base64 = base64.b64encode(webp_signature + b"VP8 fake_data").decode() + mock_item = MagicMock() + mock_item.type = "image_generation_call" + mock_item.result = f"data:image/webp;base64,{valid_webp_base64}" - # After store=True, service_thread_id should be populated - assert thread.service_thread_id is not None - assert isinstance(thread.service_thread_id, str) - assert len(thread.service_thread_id) > 0 + mock_response.output = [mock_item] + with patch.object(client, 
"_get_metadata_from_response", return_value={}): + response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_existing_thread(): - """Test OpenAI Responses Client agent with existing thread to continue conversations across agent instances.""" - # First conversation - capture the thread - preserved_thread = None + # Verify the response contains DataContent with proper media_type parsed from URI + assert len(response.messages[0].contents) == 1 + content = response.messages[0].contents[0] + assert isinstance(content, DataContent) + assert content.uri == f"data:image/webp;base64,{valid_webp_base64}" + assert content.media_type == "image/webp" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as first_agent: - # Start a conversation and capture the thread - thread = first_agent.get_new_thread() - first_response = await first_agent.run("My hobby is photography. 
Remember this.", thread=thread) - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None +def test_create_response_content_image_generation_format_detection(): + """Test different image format detection from base64 data.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - # Preserve the thread for reuse - preserved_thread = thread + # Test JPEG detection + jpeg_signature = b"\xff\xd8\xff" + mock_base64_jpeg = base64.b64encode(jpeg_signature + b"fake_jpeg_data").decode() - # Second conversation - reuse the thread in a new agent instance - if preserved_thread: - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant with good memory.", - ) as second_agent: - # Reuse the preserved thread - second_response = await second_agent.run("What is my hobby?", thread=preserved_thread) + mock_response_jpeg = MagicMock() + mock_response_jpeg.output_parsed = None + mock_response_jpeg.metadata = {} + mock_response_jpeg.usage = None + mock_response_jpeg.id = "test-id" + mock_response_jpeg.model = "test-model" + mock_response_jpeg.created_at = 1234567890 - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - assert "photography" in second_response.text.lower() + mock_item_jpeg = MagicMock() + mock_item_jpeg.type = "image_generation_call" + mock_item_jpeg.result = mock_base64_jpeg + mock_response_jpeg.output = [mock_item_jpeg] + with patch.object(client, "_get_metadata_from_response", return_value={}): + response_jpeg = client._create_response_content(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore + content_jpeg = response_jpeg.messages[0].contents[0] + assert isinstance(content_jpeg, DataContent) + assert content_jpeg.media_type == "image/jpeg" + assert "data:image/jpeg;base64," in content_jpeg.uri -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def 
test_openai_responses_client_agent_hosted_code_interpreter_tool(): - """Test OpenAI Responses Client agent with HostedCodeInterpreterTool through OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can execute Python code.", - tools=[HostedCodeInterpreterTool()], - ) as agent: - # Test code interpreter functionality - response = await agent.run("Calculate the sum of numbers from 1 to 10 using Python code.") + # Test WEBP detection + webp_signature = b"RIFF" + b"\x00\x00\x00\x00" + b"WEBP" + mock_base64_webp = base64.b64encode(webp_signature + b"fake_webp_data").decode() - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - # Should contain calculation result (sum of 1-10 = 55) or code execution content - contains_relevant_content = any( - term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] - ) - assert contains_relevant_content or len(response.text.strip()) > 10 + mock_response_webp = MagicMock() + mock_response_webp.output_parsed = None + mock_response_webp.metadata = {} + mock_response_webp.usage = None + mock_response_webp.id = "test-id" + mock_response_webp.model = "test-model" + mock_response_webp.created_at = 1234567890 + mock_item_webp = MagicMock() + mock_item_webp.type = "image_generation_call" + mock_item_webp.result = mock_base64_webp + mock_response_webp.output = [mock_item_webp] -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_raw_image_generation_tool(): - """Test OpenAI Responses Client agent with raw image_generation tool through OpenAIResponsesClient.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can generate images.", - tools=[{"type": "image_generation", "size": "1024x1024", "quality": "low", "format": "png"}], - ) as agent: - # 
Test image generation functionality - response = await agent.run("Generate an image of a cute red panda sitting on a tree branch in a forest.") + with patch.object(client, "_get_metadata_from_response", return_value={}): + response_webp = client._create_response_content(mock_response_webp, chat_options=ChatOptions()) # type: ignore + content_webp = response_webp.messages[0].contents[0] + assert isinstance(content_webp, DataContent) + assert content_webp.media_type == "image/webp" + assert "data:image/webp;base64," in content_webp.uri - assert isinstance(response, AgentRunResponse) - # For image generation, we expect to get some response content - # This could be DataContent with image data, UriContent - assert response.messages is not None and len(response.messages) > 0 +def test_create_response_content_image_generation_fallback(): + """Test image generation with invalid base64 falls back to PNG.""" + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - # Check that we have some kind of content in the response - total_contents = sum(len(message.contents) for message in response.messages) - assert total_contents > 0, f"Expected some content in response messages, got {total_contents} contents" + # Create a mock response with invalid base64 + mock_response = MagicMock() + mock_response.output_parsed = None + mock_response.metadata = {} + mock_response.usage = None + mock_response.id = "test-response-id" + mock_response.model = "test-model" + mock_response.created_at = 1234567890 - # Verify we got image content - look for DataContent with URI starting with "data:image" - image_content_found = False - for message in response.messages: - for content in message.contents: - uri = getattr(content, "uri", None) - if uri and uri.startswith("data:image"): - image_content_found = True - break - if image_content_found: - break + # Mock image generation output item with unrecognized format (should fall back to PNG) + unrecognized_data = b"UNKNOWN_FORMAT" + 
b"some_binary_data" + unrecognized_base64 = base64.b64encode(unrecognized_data).decode() + mock_item = MagicMock() + mock_item.type = "image_generation_call" + mock_item.result = unrecognized_base64 - # The test passes if we got image content (which we did based on the visible base64 output) - assert image_content_found, "Expected to find image content in response" + mock_response.output = [mock_item] + with patch.object(client, "_get_metadata_from_response", return_value={}): + response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_level_tool_persistence(): - """Test that agent-level tools persist across multiple runs with OpenAI Responses Client.""" + # Verify it falls back to PNG format for unrecognized binary data + assert len(response.messages[0].contents) == 1 + content = response.messages[0].contents[0] + assert isinstance(content, DataContent) + assert content.media_type == "image/png" + assert f"data:image/png;base64,{unrecognized_base64}" == content.uri - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that uses available tools.", - tools=[get_weather], # Agent-level tool - ) as agent: - # First run - agent-level tool should be available - first_response = await agent.run("What's the weather like in Chicago?") - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - # Should use the agent-level weather tool - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) +async def test_prepare_options_store_parameter_handling() -> None: + client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + messages = [ChatMessage(role="user", text="Test message")] - # Second run - agent-level tool should still be available (persistence test) - second_response = await 
agent.run("What's the weather in Miami?") + test_conversation_id = "test-conversation-123" + chat_options = ChatOptions(store=True, conversation_id=test_conversation_id) + options = await client.prepare_options(messages, chat_options) + assert options["store"] is True + assert options["previous_response_id"] == test_conversation_id - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - # Should use the agent-level weather tool again - assert any(term in second_response.text.lower() for term in ["miami", "sunny", "72"]) + chat_options = ChatOptions(store=False, conversation_id="") + options = await client.prepare_options(messages, chat_options) + assert options["store"] is False + chat_options = ChatOptions(store=None, conversation_id=None) + options = await client.prepare_options(messages, chat_options) + assert options["store"] is False + assert "previous_response_id" not in options -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_run_level_tool_isolation(): - """Test that run-level tools are isolated to specific runs and don't persist with OpenAI Responses Client.""" - # Counter to track how many times the weather tool is called - call_count = 0 + chat_options = ChatOptions() + options = await client.prepare_options(messages, chat_options) + assert options["store"] is False + assert "previous_response_id" not in options - @ai_function - async def get_weather_with_counter(location: Annotated[str, "The location as a city name"]) -> str: - """Get the current weather in a given location.""" - nonlocal call_count - call_count += 1 - return f"The weather in {location} is sunny and 72°F." 
- async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - # First run - use run-level tool - first_response = await agent.run( - "What's the weather like in Chicago?", - tools=[get_weather_with_counter], # Run-level tool - ) +def test_openai_responses_client_with_callable_api_key() -> None: + """Test OpenAIResponsesClient initialization with callable API key.""" - assert isinstance(first_response, AgentRunResponse) - assert first_response.text is not None - # Should use the run-level weather tool (call count should be 1) - assert call_count == 1 - assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) + async def get_api_key() -> str: + return "test-api-key-123" - # Second run - run-level tool should NOT persist (key isolation test) - second_response = await agent.run("What's the weather like in Miami?") + client = OpenAIResponsesClient(model_id="gpt-4o", api_key=get_api_key) - assert isinstance(second_response, AgentRunResponse) - assert second_response.text is not None - # Should NOT use the weather tool since it was only run-level in previous call - # Call count should still be 1 (no additional calls) - assert call_count == 1 + # Verify client was created successfully + assert client.model_id == "gpt-4o" + # OpenAI SDK now manages callable API keys internally + assert client.client is not None @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_chat_options_run_level() -> None: - """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - ) as agent: - response = await agent.run( - "Provide a brief, helpful response about why the sky blue is.", - max_tokens=600, - model_id="gpt-4o", - user="comprehensive-test-user", - tools=[get_weather], - tool_choice="auto", 
+async def test_openai_responses_client_response() -> None: + """Test OpenAI chat completion responses.""" + openai_responses_client = OpenAIResponsesClient() + + assert isinstance(openai_responses_client, ChatClientProtocol) + + messages: list[ChatMessage] = [] + messages.append( + ChatMessage( + role="user", + text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " + "Bonded by their love for the natural world and shared curiosity, they uncovered a " + "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " + "of climate change.", ) + ) + messages.append(ChatMessage(role="user", text="who are Emily and David?")) - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 + # Test that the client can be used to get a response + response = await openai_responses_client.get_response(messages=messages) + assert response is not None + assert isinstance(response, ChatResponse) + assert "scientists" in response.text -@pytest.mark.flaky -@skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_chat_options_agent_level() -> None: - """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant.", - max_tokens=100, - temperature=0.7, - top_p=0.9, - seed=123, - user="comprehensive-test-user", - tools=[get_weather], - tool_choice="auto", - ) as agent: - response = await agent.run( - "Provide a brief, helpful response.", - ) + messages.clear() + messages.append(ChatMessage(role="user", text="The weather in Seattle is sunny")) + messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 + # Test that the client can be used to get a 
response + response = await openai_responses_client.get_response( + messages=messages, + response_format=OutputStruct, + ) + + assert response is not None + assert isinstance(response, ChatResponse) + output = response.value + assert output is not None, "Response value is None" + assert "seattle" in output.location.lower() + assert output.weather is not None @pytest.mark.flaky @skip_if_openai_integration_tests_disabled -async def test_openai_responses_client_agent_hosted_mcp_tool() -> None: - """Integration test for HostedMCPTool with OpenAI Response Agent using Microsoft Learn MCP.""" +async def test_openai_responses_client_response_tools() -> None: + """Test OpenAI chat completion responses.""" + openai_responses_client = OpenAIResponsesClient() - mcp_tool = HostedMCPTool( - name="Microsoft Learn MCP", - url="https://learn.microsoft.com/api/mcp", - description="A Microsoft Learn MCP server for documentation questions", - approval_mode="never_require", - ) + assert isinstance(openai_responses_client, ChatClientProtocol) - async with ChatAgent( - chat_client=OpenAIResponsesClient(), - instructions="You are a helpful assistant that can help with microsoft documentation questions.", - tools=[mcp_tool], - ) as agent: - response = await agent.run( - "How to create an Azure storage account using az cli?", - max_tokens=200, - ) + messages: list[ChatMessage] = [] + messages.append(ChatMessage(role="user", text="What is the weather in New York?")) - assert isinstance(response, AgentRunResponse) - assert response.text is not None - assert len(response.text) > 0 - # Should contain Azure-related content since it's asking about Azure CLI - assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) + # Test that the client can be used to get a response + response = await openai_responses_client.get_response( + messages=messages, + tools=[get_weather], + tool_choice="auto", + ) + assert response is not None + assert isinstance(response, 
ChatResponse) + assert "sunny" in response.text.lower() -def test_service_response_exception_includes_original_error_details() -> None: - """Test that ServiceResponseException messages include original error details in the new format.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", text="test message")] + messages.clear() + messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - mock_response = MagicMock() - original_error_message = "Request rate limit exceeded" - mock_error = BadRequestError( - message=original_error_message, - response=mock_response, - body={"error": {"code": "rate_limit", "message": original_error_message}}, + # Test that the client can be used to get a response + response = await openai_responses_client.get_response( + messages=messages, + tools=[get_weather], + tool_choice="auto", + response_format=OutputStruct, ) - mock_error.code = "rate_limit" - - with ( - patch.object(client.client.responses, "parse", side_effect=mock_error), - pytest.raises(ServiceResponseException) as exc_info, - ): - asyncio.run(client.get_response(messages=messages, response_format=OutputStruct)) - exception_message = str(exc_info.value) - assert "service failed to complete the prompt:" in exception_message - assert original_error_message in exception_message + assert response is not None + assert isinstance(response, ChatResponse) + output = OutputStruct.model_validate_json(response.text) + assert "seattle" in output.location.lower() + assert "sunny" in output.weather.lower() -def test_get_streaming_response_with_response_format() -> None: - """Test get_streaming_response with response_format.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", text="Test streaming with format")] +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_streaming() -> None: + 
"""Test OpenAI chat completion responses.""" + openai_responses_client = OpenAIResponsesClient() - # It will fail due to invalid API key, but exercises the code path - with pytest.raises(ServiceResponseException): + assert isinstance(openai_responses_client, ChatClientProtocol) - async def run_streaming(): - async for _ in client.get_streaming_response(messages=messages, response_format=OutputStruct): - pass + messages: list[ChatMessage] = [] + messages.append( + ChatMessage( + role="user", + text="Emily and David, two passionate scientists, met during a research expedition to Antarctica. " + "Bonded by their love for the natural world and shared curiosity, they uncovered a " + "groundbreaking phenomenon in glaciology that could potentially reshape our understanding " + "of climate change.", + ) + ) + messages.append(ChatMessage(role="user", text="who are Emily and David?")) - asyncio.run(run_streaming()) + # Test that the client can be used to get a response + response = await ChatResponse.from_chat_response_generator( + openai_responses_client.get_streaming_response(messages=messages) + ) + assert "scientists" in response.text -def test_openai_content_parser_image_content() -> None: - """Test _openai_content_parser with image content variations.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + messages.clear() + messages.append(ChatMessage(role="user", text="The weather in Seattle is sunny")) + messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - # Test image content with detail parameter and file_id - image_content_with_detail = UriContent( - uri="https://example.com/image.jpg", - media_type="image/jpeg", - additional_properties={"detail": "high", "file_id": "file_123"}, + response = openai_responses_client.get_streaming_response( + messages=messages, + response_format=OutputStruct, ) - result = client._openai_content_parser(Role.USER, image_content_with_detail, {}) # type: ignore - assert 
result["type"] == "input_image" - assert result["image_url"] == "https://example.com/image.jpg" - assert result["detail"] == "high" - assert result["file_id"] == "file_123" + chunks = [] + async for chunk in response: + assert chunk is not None + assert isinstance(chunk, ChatResponseUpdate) + chunks.append(chunk) + full_message = ChatResponse.from_chat_response_updates(chunks, output_format_type=OutputStruct) + output = full_message.value + assert output is not None, "Response value is None" + assert "seattle" in output.location.lower() + assert output.weather is not None - # Test image content without additional properties (defaults) - image_content_basic = UriContent(uri="https://example.com/basic.png", media_type="image/png") - result = client._openai_content_parser(Role.USER, image_content_basic, {}) # type: ignore - assert result["type"] == "input_image" - assert result["detail"] == "auto" - assert result["file_id"] is None +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_streaming_tools() -> None: + """Test OpenAI chat completion responses.""" + openai_responses_client = OpenAIResponsesClient() -def test_openai_content_parser_audio_content() -> None: - """Test _openai_content_parser with audio content variations.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + assert isinstance(openai_responses_client, ChatClientProtocol) - # Test WAV audio content - wav_content = UriContent(uri="data:audio/wav;base64,abc123", media_type="audio/wav") - result = client._openai_content_parser(Role.USER, wav_content, {}) # type: ignore - assert result["type"] == "input_audio" - assert result["input_audio"]["data"] == "data:audio/wav;base64,abc123" - assert result["input_audio"]["format"] == "wav" + messages: list[ChatMessage] = [ChatMessage(role="user", text="What is the weather in Seattle?")] - # Test MP3 audio content - mp3_content = UriContent(uri="data:audio/mp3;base64,def456", 
media_type="audio/mp3") - result = client._openai_content_parser(Role.USER, mp3_content, {}) # type: ignore - assert result["type"] == "input_audio" - assert result["input_audio"]["format"] == "mp3" + # Test that the client can be used to get a response + response = openai_responses_client.get_streaming_response( + messages=messages, + tools=[get_weather], + tool_choice="auto", + ) + full_message: str = "" + async for chunk in response: + assert chunk is not None + assert isinstance(chunk, ChatResponseUpdate) + for content in chunk.contents: + if isinstance(content, TextContent) and content.text: + full_message += content.text + assert "sunny" in full_message.lower() -def test_openai_content_parser_unsupported_content() -> None: - """Test _openai_content_parser with unsupported content types.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + messages.clear() + messages.append(ChatMessage(role="user", text="What is the weather in Seattle?")) - # Test unsupported audio format - unsupported_audio = UriContent(uri="data:audio/ogg;base64,ghi789", media_type="audio/ogg") - result = client._openai_content_parser(Role.USER, unsupported_audio, {}) # type: ignore - assert result == {} + response = openai_responses_client.get_streaming_response( + messages=messages, + tools=[get_weather], + tool_choice="auto", + response_format=OutputStruct, + ) + chunks = [] + async for chunk in response: + assert chunk is not None + assert isinstance(chunk, ChatResponseUpdate) + chunks.append(chunk) - # Test non-media content - text_uri_content = UriContent(uri="https://example.com/document.txt", media_type="text/plain") - result = client._openai_content_parser(Role.USER, text_uri_content, {}) # type: ignore - assert result == {} + full_message = ChatResponse.from_chat_response_updates(chunks, output_format_type=OutputStruct) + output = full_message.value + assert output is not None, "Response value is None" + assert "seattle" in output.location.lower() + 
assert "sunny" in output.weather.lower() -def test_create_streaming_response_content_code_interpreter() -> None: - """Test _create_streaming_response_content with code_interpreter_call.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - chat_options = ChatOptions() - function_call_ids: dict[int, tuple[str, str]] = {} +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_web_search() -> None: + openai_responses_client = OpenAIResponsesClient() - mock_event_image = MagicMock() - mock_event_image.type = "response.output_item.added" - mock_item_image = MagicMock() - mock_item_image.type = "code_interpreter_call" - mock_image_output = MagicMock() - mock_image_output.type = "image" - mock_image_output.url = "https://example.com/plot.png" - mock_item_image.outputs = [mock_image_output] - mock_item_image.code = None - mock_event_image.item = mock_item_image + assert isinstance(openai_responses_client, ChatClientProtocol) - result = client._create_streaming_response_content(mock_event_image, chat_options, function_call_ids) # type: ignore - assert len(result.contents) == 1 - assert isinstance(result.contents[0], UriContent) - assert result.contents[0].uri == "https://example.com/plot.png" - assert result.contents[0].media_type == "image" + # Test that the client will use the web search tool + response = await openai_responses_client.get_response( + messages=[ + ChatMessage( + role="user", + text="Who are the main characters of Kpop Demon Hunters? 
Do a web search to find the answer.", + ) + ], + tools=[HostedWebSearchTool()], + tool_choice="auto", + ) + assert response is not None + assert isinstance(response, ChatResponse) + assert "Rumi" in response.text + assert "Mira" in response.text + assert "Zoey" in response.text -def test_create_streaming_response_content_reasoning() -> None: - """Test _create_streaming_response_content with reasoning content.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - chat_options = ChatOptions() - function_call_ids: dict[int, tuple[str, str]] = {} + # Test that the client will use the web search tool with location + additional_properties = { + "user_location": { + "country": "US", + "city": "Seattle", + } + } + response = await openai_responses_client.get_response( + messages=[ChatMessage(role="user", text="What is the current weather? Do not ask for my current location.")], + tools=[HostedWebSearchTool(additional_properties=additional_properties)], + tool_choice="auto", + ) + assert response.text is not None - mock_event_reasoning = MagicMock() - mock_event_reasoning.type = "response.output_item.added" - mock_item_reasoning = MagicMock() - mock_item_reasoning.type = "reasoning" - mock_reasoning_content = MagicMock() - mock_reasoning_content.text = "Analyzing the problem step by step..." - mock_item_reasoning.content = [mock_reasoning_content] - mock_item_reasoning.summary = ["Problem analysis summary"] - mock_event_reasoning.item = mock_item_reasoning - result = client._create_streaming_response_content(mock_event_reasoning, chat_options, function_call_ids) # type: ignore - assert len(result.contents) == 1 - assert isinstance(result.contents[0], TextReasoningContent) - assert result.contents[0].text == "Analyzing the problem step by step..." 
- if result.contents[0].additional_properties: - assert result.contents[0].additional_properties["summary"] == "Problem analysis summary" +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_web_search_streaming() -> None: + openai_responses_client = OpenAIResponsesClient() + assert isinstance(openai_responses_client, ChatClientProtocol) -def test_openai_content_parser_text_reasoning_comprehensive() -> None: - """Test _openai_content_parser with TextReasoningContent all additional properties.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + # Test that the client will use the web search tool + response = openai_responses_client.get_streaming_response( + messages=[ + ChatMessage( + role="user", + text="Who are the main characters of Kpop Demon Hunters? Do a web search to find the answer.", + ) + ], + tools=[HostedWebSearchTool()], + tool_choice="auto", + ) - # Test TextReasoningContent with all additional properties - comprehensive_reasoning = TextReasoningContent( - text="Comprehensive reasoning summary", - additional_properties={ - "status": "in_progress", - "reasoning_text": "Step-by-step analysis", - "encrypted_content": "secure_data_456", - }, + assert response is not None + full_message: str = "" + async for chunk in response: + assert chunk is not None + assert isinstance(chunk, ChatResponseUpdate) + for content in chunk.contents: + if isinstance(content, TextContent) and content.text: + full_message += content.text + assert "Rumi" in full_message + assert "Mira" in full_message + assert "Zoey" in full_message + + # Test that the client will use the web search tool with location + additional_properties = { + "user_location": { + "country": "US", + "city": "Seattle", + } + } + response = openai_responses_client.get_streaming_response( + messages=[ChatMessage(role="user", text="What is the current weather? 
Do not ask for my current location.")], + tools=[HostedWebSearchTool(additional_properties=additional_properties)], + tool_choice="auto", ) - result = client._openai_content_parser(Role.ASSISTANT, comprehensive_reasoning, {}) # type: ignore - assert result["type"] == "reasoning" - assert result["summary"]["text"] == "Comprehensive reasoning summary" - assert result["status"] == "in_progress" - assert result["content"]["type"] == "reasoning_text" - assert result["content"]["text"] == "Step-by-step analysis" - assert result["encrypted_content"] == "secure_data_456" + assert response is not None + full_message: str = "" + async for chunk in response: + assert chunk is not None + assert isinstance(chunk, ChatResponseUpdate) + for content in chunk.contents: + if isinstance(content, TextContent) and content.text: + full_message += content.text + assert full_message is not None -def test_streaming_reasoning_text_delta_event() -> None: - """Test reasoning text delta event creates TextReasoningContent.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - chat_options = ChatOptions() - function_call_ids: dict[int, tuple[str, str]] = {} +@pytest.mark.skip( + reason="Unreliable due to OpenAI vector store indexing potential " + "race condition. 
See https://github.com/microsoft/agent-framework/issues/1669" +) +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_file_search() -> None: + openai_responses_client = OpenAIResponsesClient() - event = ResponseReasoningTextDeltaEvent( - type="response.reasoning_text.delta", - content_index=0, - item_id="reasoning_123", - output_index=0, - sequence_number=1, - delta="reasoning delta", + assert isinstance(openai_responses_client, ChatClientProtocol) + + file_id, vector_store = await create_vector_store(openai_responses_client) + # Test that the client will use the web search tool + response = await openai_responses_client.get_response( + messages=[ + ChatMessage( + role="user", + text="What is the weather today? Do a file search to find the answer.", + ) + ], + tools=[HostedFileSearchTool(inputs=vector_store)], + tool_choice="auto", ) - with patch.object(client, "_get_metadata_from_response", return_value={}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + await delete_vector_store(openai_responses_client, file_id, vector_store.vector_store_id) + assert "sunny" in response.text.lower() + assert "75" in response.text - assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) - assert response.contents[0].text == "reasoning delta" - assert response.contents[0].raw_representation == event - mock_metadata.assert_called_once_with(event) +@pytest.mark.skip( + reason="Unreliable due to OpenAI vector store indexing " + "potential race condition. 
See https://github.com/microsoft/agent-framework/issues/1669" +) +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_streaming_file_search() -> None: + openai_responses_client = OpenAIResponsesClient() -def test_streaming_reasoning_text_done_event() -> None: - """Test reasoning text done event creates TextReasoningContent with complete text.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - chat_options = ChatOptions() - function_call_ids: dict[int, tuple[str, str]] = {} + assert isinstance(openai_responses_client, ChatClientProtocol) - event = ResponseReasoningTextDoneEvent( - type="response.reasoning_text.done", - content_index=0, - item_id="reasoning_456", - output_index=0, - sequence_number=2, - text="complete reasoning", + file_id, vector_store = await create_vector_store(openai_responses_client) + # Test that the client will use the web search tool + response = openai_responses_client.get_streaming_response( + messages=[ + ChatMessage( + role="user", + text="What is the weather today? 
Do a file search to find the answer.", + ) + ], + tools=[HostedFileSearchTool(inputs=vector_store)], + tool_choice="auto", + ) + + assert response is not None + full_message: str = "" + async for chunk in response: + assert chunk is not None + assert isinstance(chunk, ChatResponseUpdate) + for content in chunk.contents: + if isinstance(content, TextContent) and content.text: + full_message += content.text + + await delete_vector_store(openai_responses_client, file_id, vector_store.vector_store_id) + + assert "sunny" in full_message.lower() + assert "75" in full_message + + +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_basic_run(): + """Test OpenAI Responses Client agent basic run functionality with OpenAIResponsesClient.""" + agent = OpenAIResponsesClient().create_agent( + instructions="You are a helpful assistant.", ) - with patch.object(client, "_get_metadata_from_response", return_value={"test": "data"}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + # Test basic run + response = await agent.run("Hello! 
Please respond with 'Hello World' exactly.") - assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) - assert response.contents[0].text == "complete reasoning" - assert response.contents[0].raw_representation == event - mock_metadata.assert_called_once_with(event) - assert response.additional_properties == {"test": "data"} + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + assert "hello world" in response.text.lower() -def test_streaming_reasoning_summary_text_delta_event() -> None: - """Test reasoning summary text delta event creates TextReasoningContent.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - chat_options = ChatOptions() - function_call_ids: dict[int, tuple[str, str]] = {} +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_basic_run_streaming(): + """Test OpenAI Responses Client agent basic streaming functionality with OpenAIResponsesClient.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + ) as agent: + # Test streaming run + full_text = "" + async for chunk in agent.run_stream("Please respond with exactly: 'This is a streaming response test.'"): + assert isinstance(chunk, AgentRunResponseUpdate) + if chunk.text: + full_text += chunk.text - event = ResponseReasoningSummaryTextDeltaEvent( - type="response.reasoning_summary_text.delta", - item_id="summary_789", - output_index=0, - sequence_number=3, - summary_index=0, - delta="summary delta", - ) + assert len(full_text) > 0 + assert "streaming response test" in full_text.lower() - with patch.object(client, "_get_metadata_from_response", return_value={}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore - assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) - assert 
response.contents[0].text == "summary delta" - assert response.contents[0].raw_representation == event - mock_metadata.assert_called_once_with(event) +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_thread_persistence(): + """Test OpenAI Responses Client agent thread persistence across runs with OpenAIResponsesClient.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant with good memory.", + ) as agent: + # Create a new thread that will be reused + thread = agent.get_new_thread() + # First interaction + first_response = await agent.run("My favorite programming language is Python. Remember this.", thread=thread) -def test_streaming_reasoning_summary_text_done_event() -> None: - """Test reasoning summary text done event creates TextReasoningContent with complete text.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - chat_options = ChatOptions() - function_call_ids: dict[int, tuple[str, str]] = {} + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None - event = ResponseReasoningSummaryTextDoneEvent( - type="response.reasoning_summary_text.done", - item_id="summary_012", - output_index=0, - sequence_number=4, - summary_index=0, - text="complete summary", - ) + # Second interaction - test memory + second_response = await agent.run("What is my favorite programming language?", thread=thread) - with patch.object(client, "_get_metadata_from_response", return_value={"custom": "meta"}) as mock_metadata: - response = client._create_streaming_response_content(event, chat_options, function_call_ids) # type: ignore + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None - assert len(response.contents) == 1 - assert isinstance(response.contents[0], TextReasoningContent) - assert response.contents[0].text == "complete summary" - assert 
response.contents[0].raw_representation == event - mock_metadata.assert_called_once_with(event) - assert response.additional_properties == {"custom": "meta"} +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_thread_storage_with_store_true(): + """Test OpenAI Responses Client agent with store=True to verify service_thread_id is returned.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant.", + ) as agent: + # Create a new thread + thread = AgentThread() -def test_streaming_reasoning_events_preserve_metadata() -> None: - """Test that reasoning events preserve metadata like regular text events.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - chat_options = ChatOptions() - function_call_ids: dict[int, tuple[str, str]] = {} + # Initially, service_thread_id should be None + assert thread.service_thread_id is None - text_event = ResponseTextDeltaEvent( - type="response.output_text.delta", - content_index=0, - item_id="text_item", - output_index=0, - sequence_number=1, - logprobs=[], - delta="text", - ) + # Run with store=True to store messages on OpenAI side + response = await agent.run( + "Hello! 
Please remember that my name is Alex.", + thread=thread, + store=True, + ) - reasoning_event = ResponseReasoningTextDeltaEvent( - type="response.reasoning_text.delta", - content_index=0, - item_id="reasoning_item", - output_index=0, - sequence_number=2, - delta="reasoning", - ) + # Validate response + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 - with patch.object(client, "_get_metadata_from_response", return_value={"test": "metadata"}): - text_response = client._create_streaming_response_content(text_event, chat_options, function_call_ids) # type: ignore - reasoning_response = client._create_streaming_response_content(reasoning_event, chat_options, function_call_ids) # type: ignore + # After store=True, service_thread_id should be populated + assert thread.service_thread_id is not None + assert isinstance(thread.service_thread_id, str) + assert len(thread.service_thread_id) > 0 - # Both should preserve metadata - assert text_response.additional_properties == {"test": "metadata"} - assert reasoning_response.additional_properties == {"test": "metadata"} - # Content types should be different - assert isinstance(text_response.contents[0], TextContent) - assert isinstance(reasoning_response.contents[0], TextReasoningContent) +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_existing_thread(): + """Test OpenAI Responses Client agent with existing thread to continue conversations across agent instances.""" + # First conversation - capture the thread + preserved_thread = None + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant with good memory.", + ) as first_agent: + # Start a conversation and capture the thread + thread = first_agent.get_new_thread() + first_response = await first_agent.run("My hobby is photography. 
Remember this.", thread=thread) -def test_create_response_content_image_generation_raw_base64(): - """Test image generation response parsing with raw base64 string.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None - # Create a mock response with raw base64 image data (PNG signature) - mock_response = MagicMock() - mock_response.output_parsed = None - mock_response.metadata = {} - mock_response.usage = None - mock_response.id = "test-response-id" - mock_response.model = "test-model" - mock_response.created_at = 1234567890 + # Preserve the thread for reuse + preserved_thread = thread - # Mock image generation output item with raw base64 (PNG format) - png_signature = b"\x89PNG\r\n\x1a\n" - mock_base64 = base64.b64encode(png_signature + b"fake_png_data_here").decode() + # Second conversation - reuse the thread in a new agent instance + if preserved_thread: + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant with good memory.", + ) as second_agent: + # Reuse the preserved thread + second_response = await second_agent.run("What is my hobby?", thread=preserved_thread) - mock_item = MagicMock() - mock_item.type = "image_generation_call" - mock_item.result = mock_base64 + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + assert "photography" in second_response.text.lower() - mock_response.output = [mock_item] - with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_hosted_code_interpreter_tool(): + """Test OpenAI Responses Client agent with HostedCodeInterpreterTool through OpenAIResponsesClient.""" + async with 
ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can execute Python code.", + tools=[HostedCodeInterpreterTool()], + ) as agent: + # Test code interpreter functionality + response = await agent.run("Calculate the sum of numbers from 1 to 10 using Python code.") - # Verify the response contains DataContent with proper URI and media_type - assert len(response.messages[0].contents) == 1 - content = response.messages[0].contents[0] - assert isinstance(content, DataContent) - assert content.uri.startswith("data:image/png;base64,") - assert content.media_type == "image/png" + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + # Should contain calculation result (sum of 1-10 = 55) or code execution content + contains_relevant_content = any( + term in response.text.lower() for term in ["55", "sum", "code", "python", "calculate", "10"] + ) + assert contains_relevant_content or len(response.text.strip()) > 10 -def test_create_response_content_image_generation_existing_data_uri(): - """Test image generation response parsing with existing data URI.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_raw_image_generation_tool(): + """Test OpenAI Responses Client agent with raw image_generation tool through OpenAIResponsesClient.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can generate images.", + tools=[{"type": "image_generation", "size": "1024x1024", "quality": "low", "format": "png"}], + ) as agent: + # Test image generation functionality + response = await agent.run("Generate an image of a cute red panda sitting on a tree branch in a forest.") - # Create a mock response with existing data URI - mock_response = MagicMock() - mock_response.output_parsed = None 
- mock_response.metadata = {} - mock_response.usage = None - mock_response.id = "test-response-id" - mock_response.model = "test-model" - mock_response.created_at = 1234567890 + assert isinstance(response, AgentRunResponse) - # Mock image generation output item with existing data URI (valid WEBP header) - webp_signature = b"RIFF" + b"\x12\x00\x00\x00" + b"WEBP" - valid_webp_base64 = base64.b64encode(webp_signature + b"VP8 fake_data").decode() - mock_item = MagicMock() - mock_item.type = "image_generation_call" - mock_item.result = f"data:image/webp;base64,{valid_webp_base64}" + # For image generation, we expect to get some response content + # This could be DataContent with image data, UriContent + assert response.messages is not None and len(response.messages) > 0 - mock_response.output = [mock_item] + # Check that we have some kind of content in the response + total_contents = sum(len(message.contents) for message in response.messages) + assert total_contents > 0, f"Expected some content in response messages, got {total_contents} contents" - with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + # Verify we got image content - look for DataContent with URI starting with "data:image" + image_content_found = False + for message in response.messages: + for content in message.contents: + uri = getattr(content, "uri", None) + if uri and uri.startswith("data:image"): + image_content_found = True + break + if image_content_found: + break - # Verify the response contains DataContent with proper media_type parsed from URI - assert len(response.messages[0].contents) == 1 - content = response.messages[0].contents[0] - assert isinstance(content, DataContent) - assert content.uri == f"data:image/webp;base64,{valid_webp_base64}" - assert content.media_type == "image/webp" + # The test passes if we got image content (which we did based on the visible 
base64 output) + assert image_content_found, "Expected to find image content in response" -def test_create_response_content_image_generation_format_detection(): - """Test different image format detection from base64 data.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_level_tool_persistence(): + """Test that agent-level tools persist across multiple runs with OpenAI Responses Client.""" - # Test JPEG detection - jpeg_signature = b"\xff\xd8\xff" - mock_base64_jpeg = base64.b64encode(jpeg_signature + b"fake_jpeg_data").decode() + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that uses available tools.", + tools=[get_weather], # Agent-level tool + ) as agent: + # First run - agent-level tool should be available + first_response = await agent.run("What's the weather like in Chicago?") - mock_response_jpeg = MagicMock() - mock_response_jpeg.output_parsed = None - mock_response_jpeg.metadata = {} - mock_response_jpeg.usage = None - mock_response_jpeg.id = "test-id" - mock_response_jpeg.model = "test-model" - mock_response_jpeg.created_at = 1234567890 + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None + # Should use the agent-level weather tool + assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) - mock_item_jpeg = MagicMock() - mock_item_jpeg.type = "image_generation_call" - mock_item_jpeg.result = mock_base64_jpeg - mock_response_jpeg.output = [mock_item_jpeg] + # Second run - agent-level tool should still be available (persistence test) + second_response = await agent.run("What's the weather in Miami?") - with patch.object(client, "_get_metadata_from_response", return_value={}): - response_jpeg = client._create_response_content(mock_response_jpeg, chat_options=ChatOptions()) # type: ignore - 
content_jpeg = response_jpeg.messages[0].contents[0] - assert isinstance(content_jpeg, DataContent) - assert content_jpeg.media_type == "image/jpeg" - assert "data:image/jpeg;base64," in content_jpeg.uri + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + # Should use the agent-level weather tool again + assert any(term in second_response.text.lower() for term in ["miami", "sunny", "72"]) - # Test WEBP detection - webp_signature = b"RIFF" + b"\x00\x00\x00\x00" + b"WEBP" - mock_base64_webp = base64.b64encode(webp_signature + b"fake_webp_data").decode() - mock_response_webp = MagicMock() - mock_response_webp.output_parsed = None - mock_response_webp.metadata = {} - mock_response_webp.usage = None - mock_response_webp.id = "test-id" - mock_response_webp.model = "test-model" - mock_response_webp.created_at = 1234567890 +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_run_level_tool_isolation(): + """Test that run-level tools are isolated to specific runs and don't persist with OpenAI Responses Client.""" + # Counter to track how many times the weather tool is called + call_count = 0 - mock_item_webp = MagicMock() - mock_item_webp.type = "image_generation_call" - mock_item_webp.result = mock_base64_webp - mock_response_webp.output = [mock_item_webp] + @ai_function + async def get_weather_with_counter(location: Annotated[str, "The location as a city name"]) -> str: + """Get the current weather in a given location.""" + nonlocal call_count + call_count += 1 + return f"The weather in {location} is sunny and 72°F." 
- with patch.object(client, "_get_metadata_from_response", return_value={}): - response_webp = client._create_response_content(mock_response_webp, chat_options=ChatOptions()) # type: ignore - content_webp = response_webp.messages[0].contents[0] - assert isinstance(content_webp, DataContent) - assert content_webp.media_type == "image/webp" - assert "data:image/webp;base64," in content_webp.uri + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant.", + ) as agent: + # First run - use run-level tool + first_response = await agent.run( + "What's the weather like in Chicago?", + tools=[get_weather_with_counter], # Run-level tool + ) + assert isinstance(first_response, AgentRunResponse) + assert first_response.text is not None + # Should use the run-level weather tool (call count should be 1) + assert call_count == 1 + assert any(term in first_response.text.lower() for term in ["chicago", "sunny", "72"]) -def test_create_response_content_image_generation_fallback(): - """Test image generation with invalid base64 falls back to PNG.""" - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") + # Second run - run-level tool should NOT persist (key isolation test) + second_response = await agent.run("What's the weather like in Miami?") - # Create a mock response with invalid base64 - mock_response = MagicMock() - mock_response.output_parsed = None - mock_response.metadata = {} - mock_response.usage = None - mock_response.id = "test-response-id" - mock_response.model = "test-model" - mock_response.created_at = 1234567890 + assert isinstance(second_response, AgentRunResponse) + assert second_response.text is not None + # Should NOT use the weather tool since it was only run-level in previous call + # Call count should still be 1 (no additional calls) + assert call_count == 1 - # Mock image generation output item with unrecognized format (should fall back to PNG) - unrecognized_data = b"UNKNOWN_FORMAT" + 
b"some_binary_data" - unrecognized_base64 = base64.b64encode(unrecognized_data).decode() - mock_item = MagicMock() - mock_item.type = "image_generation_call" - mock_item.result = unrecognized_base64 - mock_response.output = [mock_item] +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_chat_options_run_level() -> None: + """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant.", + ) as agent: + response = await agent.run( + "Provide a brief, helpful response about why the sky blue is.", + max_tokens=600, + model_id="gpt-4o", + user="comprehensive-test-user", + tools=[get_weather], + tool_choice="auto", + ) - with patch.object(client, "_get_metadata_from_response", return_value={}): - response = client._create_response_content(mock_response, chat_options=ChatOptions()) # type: ignore + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 - # Verify it falls back to PNG format for unrecognized binary data - assert len(response.messages[0].contents) == 1 - content = response.messages[0].contents[0] - assert isinstance(content, DataContent) - assert content.media_type == "image/png" - assert f"data:image/png;base64,{unrecognized_base64}" == content.uri +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_chat_options_agent_level() -> None: + """Integration test for comprehensive ChatOptions parameter coverage with OpenAI Response Agent.""" + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant.", + max_tokens=100, + temperature=0.7, + top_p=0.9, + seed=123, + user="comprehensive-test-user", + tools=[get_weather], + tool_choice="auto", + ) as agent: + response = await agent.run( + "Provide a brief, 
helpful response.", + ) -async def test_prepare_options_store_parameter_handling() -> None: - client = OpenAIResponsesClient(model_id="test-model", api_key="test-key") - messages = [ChatMessage(role="user", text="Test message")] + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 - test_conversation_id = "test-conversation-123" - chat_options = ChatOptions(store=True, conversation_id=test_conversation_id) - options = await client.prepare_options(messages, chat_options) # type: ignore - assert options["store"] is True - assert options["previous_response_id"] == test_conversation_id - chat_options = ChatOptions(store=False, conversation_id="") - options = await client.prepare_options(messages, chat_options) # type: ignore - assert options["store"] is False +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_hosted_mcp_tool() -> None: + """Integration test for HostedMCPTool with OpenAI Response Agent using Microsoft Learn MCP.""" - chat_options = ChatOptions(store=None, conversation_id=None) - options = await client.prepare_options(messages, chat_options) # type: ignore - assert options["store"] is False - assert "previous_response_id" not in options + mcp_tool = HostedMCPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + description="A Microsoft Learn MCP server for documentation questions", + approval_mode="never_require", + ) - chat_options = ChatOptions() - options = await client.prepare_options(messages, chat_options) # type: ignore - assert options["store"] is False - assert "previous_response_id" not in options + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can help with microsoft documentation questions.", + tools=[mcp_tool], + ) as agent: + response = await agent.run( + "How to create an Azure storage account using az cli?", + max_tokens=200, + 
) + + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + # Should contain Azure-related content since it's asking about Azure CLI + assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) -def test_openai_responses_client_with_callable_api_key() -> None: - """Test OpenAIResponsesClient initialization with callable API key.""" +@pytest.mark.flaky +@skip_if_openai_integration_tests_disabled +async def test_openai_responses_client_agent_local_mcp_tool() -> None: + """Integration test for MCPStreamableHTTPTool with OpenAI Response Agent using Microsoft Learn MCP.""" - async def get_api_key() -> str: - return "test-api-key-123" + mcp_tool = MCPStreamableHTTPTool( + name="Microsoft Learn MCP", + url="https://learn.microsoft.com/api/mcp", + ) - client = OpenAIResponsesClient(model_id="gpt-4o", api_key=get_api_key) + async with ChatAgent( + chat_client=OpenAIResponsesClient(), + instructions="You are a helpful assistant that can help with microsoft documentation questions.", + tools=[mcp_tool], + ) as agent: + response = await agent.run( + "How to create an Azure storage account using az cli?", + max_tokens=200, + ) - # Verify client was created successfully - assert client.model_id == "gpt-4o" - # OpenAI SDK now manages callable API keys internally - assert client.client is not None + assert isinstance(response, AgentRunResponse) + assert response.text is not None + assert len(response.text) > 0 + # Should contain Azure-related content since it's asking about Azure CLI + assert any(term in response.text.lower() for term in ["azure", "storage", "account", "cli"]) diff --git a/python/packages/core/tests/workflow/test_agent_executor.py b/python/packages/core/tests/workflow/test_agent_executor.py index 3bda2fcaad..77fd969f12 100644 --- a/python/packages/core/tests/workflow/test_agent_executor.py +++ b/python/packages/core/tests/workflow/test_agent_executor.py @@ -111,6 +111,10 
@@ async def test_agent_executor_checkpoint_stores_and_restores_state() -> None: chat_store_state = thread_state["chat_message_store_state"] # type: ignore[index] assert "messages" in chat_store_state, "Message store state should include messages" + # Verify checkpoint contains pending requests from agents and responses to be sent + assert "pending_agent_requests" in executor_state + assert "pending_responses_to_agent" in executor_state + # Create a new agent and executor for restoration # This simulates starting from a fresh state and restoring from checkpoint restored_agent = _CountingAgent(id="test_agent", name="TestAgent") diff --git a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py index 8124f6253d..a7849120b0 100644 --- a/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py +++ b/python/packages/core/tests/workflow/test_agent_executor_tool_calls.py @@ -5,19 +5,32 @@ from collections.abc import AsyncIterable from typing import Any +from typing_extensions import Never + from agent_framework import ( AgentExecutor, + AgentExecutorResponse, AgentRunResponse, AgentRunResponseUpdate, AgentRunUpdateEvent, AgentThread, BaseAgent, + ChatAgent, ChatMessage, + ChatResponse, + ChatResponseUpdate, + FunctionApprovalRequestContent, FunctionCallContent, FunctionResultContent, + RequestInfoEvent, Role, TextContent, WorkflowBuilder, + WorkflowContext, + WorkflowOutputEvent, + ai_function, + executor, + use_function_invocation, ) @@ -120,3 +133,235 @@ async def test_agent_executor_emits_tool_calls_in_streaming_mode() -> None: assert events[3].data is not None assert isinstance(events[3].data.contents[0], TextContent) assert "sunny" in events[3].data.contents[0].text + + +@ai_function(approval_mode="always_require") +def mock_tool_requiring_approval(query: str) -> str: + """Mock tool that requires approval before execution.""" + return f"Executed tool with query: 
{query}" + + +@use_function_invocation +class MockChatClient: + """Simple implementation of a chat client.""" + + def __init__(self, parallel_request: bool = False) -> None: + self.additional_properties: dict[str, Any] = {} + self._iteration: int = 0 + self._parallel_request: bool = parallel_request + + async def get_response( + self, + messages: str | ChatMessage | list[str] | list[ChatMessage], + **kwargs: Any, + ) -> ChatResponse: + if self._iteration == 0: + if self._parallel_request: + response = ChatResponse( + messages=ChatMessage( + role="assistant", + contents=[ + FunctionCallContent( + call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' + ), + FunctionCallContent( + call_id="2", name="mock_tool_requiring_approval", arguments='{"query": "test"}' + ), + ], + ) + ) + else: + response = ChatResponse( + messages=ChatMessage( + role="assistant", + contents=[ + FunctionCallContent( + call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' + ) + ], + ) + ) + else: + response = ChatResponse(messages=ChatMessage(role="assistant", text="Tool executed successfully.")) + + self._iteration += 1 + return response + + async def get_streaming_response( + self, + messages: str | ChatMessage | list[str] | list[ChatMessage], + **kwargs: Any, + ) -> AsyncIterable[ChatResponseUpdate]: + if self._iteration == 0: + if self._parallel_request: + yield ChatResponseUpdate( + contents=[ + FunctionCallContent( + call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' + ), + FunctionCallContent( + call_id="2", name="mock_tool_requiring_approval", arguments='{"query": "test"}' + ), + ], + role="assistant", + ) + else: + yield ChatResponseUpdate( + contents=[ + FunctionCallContent( + call_id="1", name="mock_tool_requiring_approval", arguments='{"query": "test"}' + ) + ], + role="assistant", + ) + else: + yield ChatResponseUpdate(text=TextContent(text="Tool executed "), role="assistant") + yield 
ChatResponseUpdate(contents=[TextContent(text="successfully.")], role="assistant") + + self._iteration += 1 + + +@executor(id="test_executor") +async def test_executor(agent_executor_response: AgentExecutorResponse, ctx: WorkflowContext[Never, str]) -> None: + await ctx.yield_output(agent_executor_response.agent_run_response.text) + + +async def test_agent_executor_tool_call_with_approval() -> None: + """Test that AgentExecutor handles tool calls requiring approval.""" + # Arrange + agent = ChatAgent( + chat_client=MockChatClient(), + name="ApprovalAgent", + tools=[mock_tool_requiring_approval], + ) + + workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() + + # Act + events = await workflow.run("Invoke tool requiring approval") + + # Assert + assert len(events.get_request_info_events()) == 1 + approval_request = events.get_request_info_events()[0] + assert isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.function_call.name == "mock_tool_requiring_approval" + assert approval_request.data.function_call.arguments == '{"query": "test"}' + + # Act + events = await workflow.send_responses({approval_request.request_id: approval_request.data.create_response(True)}) + + # Assert + final_response = events.get_outputs() + assert len(final_response) == 1 + assert final_response[0] == "Tool executed successfully." 
+ + +async def test_agent_executor_tool_call_with_approval_streaming() -> None: + """Test that AgentExecutor handles tool calls requiring approval in streaming mode.""" + # Arrange + agent = ChatAgent( + chat_client=MockChatClient(), + name="ApprovalAgent", + tools=[mock_tool_requiring_approval], + ) + + workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() + + # Act + request_info_events: list[RequestInfoEvent] = [] + async for event in workflow.run_stream("Invoke tool requiring approval"): + if isinstance(event, RequestInfoEvent): + request_info_events.append(event) + + # Assert + assert len(request_info_events) == 1 + approval_request = request_info_events[0] + assert isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.function_call.name == "mock_tool_requiring_approval" + assert approval_request.data.function_call.arguments == '{"query": "test"}' + + # Act + output: str | None = None + async for event in workflow.send_responses_streaming({ + approval_request.request_id: approval_request.data.create_response(True) + }): + if isinstance(event, WorkflowOutputEvent): + output = event.data + + # Assert + assert output is not None + assert output == "Tool executed successfully." 
+ + +async def test_agent_executor_parallel_tool_call_with_approval() -> None: + """Test that AgentExecutor handles parallel tool calls requiring approval.""" + # Arrange + agent = ChatAgent( + chat_client=MockChatClient(parallel_request=True), + name="ApprovalAgent", + tools=[mock_tool_requiring_approval], + ) + + workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() + + # Act + events = await workflow.run("Invoke tool requiring approval") + + # Assert + assert len(events.get_request_info_events()) == 2 + for approval_request in events.get_request_info_events(): + assert isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.function_call.name == "mock_tool_requiring_approval" + assert approval_request.data.function_call.arguments == '{"query": "test"}' + + # Act + responses = { + approval_request.request_id: approval_request.data.create_response(True) # type: ignore + for approval_request in events.get_request_info_events() + } + events = await workflow.send_responses(responses) + + # Assert + final_response = events.get_outputs() + assert len(final_response) == 1 + assert final_response[0] == "Tool executed successfully." 
+ + +async def test_agent_executor_parallel_tool_call_with_approval_streaming() -> None: + """Test that AgentExecutor handles parallel tool calls requiring approval in streaming mode.""" + # Arrange + agent = ChatAgent( + chat_client=MockChatClient(parallel_request=True), + name="ApprovalAgent", + tools=[mock_tool_requiring_approval], + ) + + workflow = WorkflowBuilder().set_start_executor(agent).add_edge(agent, test_executor).build() + + # Act + request_info_events: list[RequestInfoEvent] = [] + async for event in workflow.run_stream("Invoke tool requiring approval"): + if isinstance(event, RequestInfoEvent): + request_info_events.append(event) + + # Assert + assert len(request_info_events) == 2 + for approval_request in request_info_events: + assert isinstance(approval_request.data, FunctionApprovalRequestContent) + assert approval_request.data.function_call.name == "mock_tool_requiring_approval" + assert approval_request.data.function_call.arguments == '{"query": "test"}' + + # Act + responses = { + approval_request.request_id: approval_request.data.create_response(True) # type: ignore + for approval_request in request_info_events + } + + output: str | None = None + async for event in workflow.send_responses_streaming(responses): + if isinstance(event, WorkflowOutputEvent): + output = event.data + + # Assert + assert output is not None + assert output == "Tool executed successfully." 
diff --git a/python/packages/core/tests/workflow/test_handoff.py b/python/packages/core/tests/workflow/test_handoff.py index 44a6403c6f..a799fb6f73 100644 --- a/python/packages/core/tests/workflow/test_handoff.py +++ b/python/packages/core/tests/workflow/test_handoff.py @@ -23,7 +23,7 @@ WorkflowOutputEvent, ) from agent_framework._mcp import MCPTool -from agent_framework._workflows._handoff import _clone_chat_agent +from agent_framework._workflows._handoff import _clone_chat_agent # type: ignore[reportPrivateUsage] @dataclass @@ -392,12 +392,218 @@ def sample_function() -> str: ) assert hasattr(original_agent, "_local_mcp_tools") - assert len(original_agent._local_mcp_tools) == 1 - assert original_agent._local_mcp_tools[0] == mock_mcp_tool + assert len(original_agent._local_mcp_tools) == 1 # type: ignore[reportPrivateUsage] + assert original_agent._local_mcp_tools[0] == mock_mcp_tool # type: ignore[reportPrivateUsage] cloned_agent = _clone_chat_agent(original_agent) assert hasattr(cloned_agent, "_local_mcp_tools") - assert len(cloned_agent._local_mcp_tools) == 1 - assert cloned_agent._local_mcp_tools[0] == mock_mcp_tool + assert len(cloned_agent._local_mcp_tools) == 1 # type: ignore[reportPrivateUsage] + assert cloned_agent._local_mcp_tools[0] == mock_mcp_tool # type: ignore[reportPrivateUsage] + assert cloned_agent.chat_options.tools is not None assert len(cloned_agent.chat_options.tools) == 1 + + +async def test_return_to_previous_routing(): + """Test that return-to-previous routes back to the current specialist handling the conversation.""" + triage = _RecordingAgent(name="triage", handoff_to="specialist_a") + specialist_a = _RecordingAgent(name="specialist_a", handoff_to="specialist_b") + specialist_b = _RecordingAgent(name="specialist_b") + + workflow = ( + HandoffBuilder(participants=[triage, specialist_a, specialist_b]) + .set_coordinator(triage) + .add_handoff(triage, [specialist_a, specialist_b]) + .add_handoff(specialist_a, specialist_b) + 
.enable_return_to_previous(True) + .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == Role.USER) >= 4) + .build() + ) + + # Start conversation - triage hands off to specialist_a + events = await _drain(workflow.run_stream("Initial request")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + assert len(specialist_a.calls) > 0 + + # Specialist_a should have been called with initial request + initial_specialist_a_calls = len(specialist_a.calls) + + # Second user message - specialist_a hands off to specialist_b + events = await _drain(workflow.send_responses_streaming({requests[-1].request_id: "Need more help"})) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + + # Specialist_b should have been called + assert len(specialist_b.calls) > 0 + initial_specialist_b_calls = len(specialist_b.calls) + + # Third user message - with return_to_previous, should route back to specialist_b (current agent) + events = await _drain(workflow.send_responses_streaming({requests[-1].request_id: "Follow up question"})) + third_requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + + # Specialist_b should have been called again (return-to-previous routes to current agent) + assert len(specialist_b.calls) > initial_specialist_b_calls, ( + "Specialist B should be called again due to return-to-previous routing to current agent" + ) + + # Specialist_a should NOT be called again (it's no longer the current agent) + assert len(specialist_a.calls) == initial_specialist_a_calls, ( + "Specialist A should not be called again - specialist_b is the current agent" + ) + + # Triage should only have been called once at the start + assert len(triage.calls) == 1, "Triage should only be called once (initial routing)" + + # Verify awaiting_agent_id is set to specialist_b (the agent that just responded) + if third_requests: + user_input_req = third_requests[-1].data + assert 
isinstance(user_input_req, HandoffUserInputRequest) + assert user_input_req.awaiting_agent_id == "specialist_b", ( + f"Expected awaiting_agent_id 'specialist_b' but got '{user_input_req.awaiting_agent_id}'" + ) + + +async def test_return_to_previous_disabled_routes_to_coordinator(): + """Test that with return-to-previous disabled, routing goes back to coordinator.""" + triage = _RecordingAgent(name="triage", handoff_to="specialist_a") + specialist_a = _RecordingAgent(name="specialist_a", handoff_to="specialist_b") + specialist_b = _RecordingAgent(name="specialist_b") + + workflow = ( + HandoffBuilder(participants=[triage, specialist_a, specialist_b]) + .set_coordinator(triage) + .add_handoff(triage, [specialist_a, specialist_b]) + .add_handoff(specialist_a, specialist_b) + .enable_return_to_previous(False) + .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == Role.USER) >= 3) + .build() + ) + + # Start conversation - triage hands off to specialist_a + events = await _drain(workflow.run_stream("Initial request")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + assert len(triage.calls) == 1 + + # Second user message - specialist_a hands off to specialist_b + events = await _drain(workflow.send_responses_streaming({requests[-1].request_id: "Need more help"})) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + + # Third user message - without return_to_previous, should route back to triage + await _drain(workflow.send_responses_streaming({requests[-1].request_id: "Follow up question"})) + + # Triage should have been called twice total: initial + after specialist_b responds + assert len(triage.calls) == 2, "Triage should be called twice (initial + default routing to coordinator)" + + +async def test_return_to_previous_enabled(): + """Verify that enable_return_to_previous() keeps control with the current specialist.""" + triage = _RecordingAgent(name="triage", 
handoff_to="specialist_a") + specialist_a = _RecordingAgent(name="specialist_a") + specialist_b = _RecordingAgent(name="specialist_b") + + workflow = ( + HandoffBuilder(participants=[triage, specialist_a, specialist_b]) + .set_coordinator("triage") + .enable_return_to_previous(True) + .with_termination_condition(lambda conv: sum(1 for m in conv if m.role == Role.USER) >= 3) + .build() + ) + + # Start conversation - triage hands off to specialist_a + events = await _drain(workflow.run_stream("Initial request")) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + assert len(triage.calls) == 1 + assert len(specialist_a.calls) == 1 + + # Second user message - with return_to_previous, should route to specialist_a (not triage) + events = await _drain(workflow.send_responses_streaming({requests[-1].request_id: "Follow up question"})) + requests = [ev for ev in events if isinstance(ev, RequestInfoEvent)] + assert requests + + # Triage should only have been called once (initial) - specialist_a handles follow-up + assert len(triage.calls) == 1, "Triage should only be called once (initial)" + assert len(specialist_a.calls) == 2, "Specialist A should handle follow-up with return_to_previous enabled" + + +async def test_tool_choice_preserved_from_agent_config(): + """Verify that agent-level tool_choice configuration is preserved and not overridden.""" + from unittest.mock import AsyncMock + + from agent_framework import ChatResponse, ToolMode + + # Create a mock chat client that records the tool_choice used + recorded_tool_choices: list[Any] = [] + + async def mock_get_response(messages: Any, **kwargs: Any) -> ChatResponse: + chat_options = kwargs.get("chat_options") + if chat_options: + recorded_tool_choices.append(chat_options.tool_choice) + return ChatResponse( + messages=[ChatMessage(role=Role.ASSISTANT, text="Response")], + response_id="test_response", + ) + + mock_client = MagicMock() + mock_client.get_response = 
AsyncMock(side_effect=mock_get_response) + + # Create agent with specific tool_choice configuration + agent = ChatAgent( + chat_client=mock_client, + name="test_agent", + tool_choice=ToolMode(mode="required"), # type: ignore[arg-type] + ) + + # Run the agent + await agent.run("Test message") + + # Verify tool_choice was preserved + assert len(recorded_tool_choices) > 0, "No tool_choice recorded" + last_tool_choice = recorded_tool_choices[-1] + assert last_tool_choice is not None, "tool_choice should not be None" + assert str(last_tool_choice) == "required", f"Expected 'required', got {last_tool_choice}" + + +async def test_return_to_previous_state_serialization(): + """Test that return_to_previous state is properly serialized/deserialized for checkpointing.""" + from agent_framework._workflows._handoff import _HandoffCoordinator # type: ignore[reportPrivateUsage] + + # Create a coordinator with return_to_previous enabled + coordinator = _HandoffCoordinator( + starting_agent_id="triage", + specialist_ids={"specialist_a": "specialist_a", "specialist_b": "specialist_b"}, + input_gateway_id="gateway", + termination_condition=lambda conv: False, + id="test-coordinator", + return_to_previous=True, + ) + + # Set the current agent (simulating a handoff scenario) + coordinator._current_agent_id = "specialist_a" # type: ignore[reportPrivateUsage] + + # Snapshot the state + state = coordinator.snapshot_state() + + # Verify pattern metadata includes current_agent_id + assert "metadata" in state + assert "current_agent_id" in state["metadata"] + assert state["metadata"]["current_agent_id"] == "specialist_a" + + # Create a new coordinator and restore state + coordinator2 = _HandoffCoordinator( + starting_agent_id="triage", + specialist_ids={"specialist_a": "specialist_a", "specialist_b": "specialist_b"}, + input_gateway_id="gateway", + termination_condition=lambda conv: False, + id="test-coordinator", + return_to_previous=True, + ) + + # Restore state + 
coordinator2.restore_state(state) + + # Verify current_agent_id was restored + assert coordinator2._current_agent_id == "specialist_a", "Current agent should be restored from checkpoint" # type: ignore[reportPrivateUsage] diff --git a/python/packages/devui/README.md b/python/packages/devui/README.md index 30e807e341..05fe813276 100644 --- a/python/packages/devui/README.md +++ b/python/packages/devui/README.md @@ -62,6 +62,27 @@ serve(entities=[agent]) MCP tools use lazy initialization and connect automatically on first use. DevUI attempts to clean up connections on shutdown +## Resource Cleanup + +Register cleanup hooks to properly close credentials and resources on shutdown: + +```python +from azure.identity.aio import DefaultAzureCredential +from agent_framework import ChatAgent +from agent_framework.azure import AzureOpenAIChatClient +from agent_framework_devui import register_cleanup, serve + +credential = DefaultAzureCredential() +client = AzureOpenAIChatClient() +agent = ChatAgent(name="MyAgent", chat_client=client) + +# Register cleanup hook - credential will be closed on shutdown +register_cleanup(agent, credential.close) +serve(entities=[agent]) +``` + +Works with multiple resources and file-based discovery. See tests for more examples. + ## Directory Structure For your agents to be discovered by the DevUI, they must be organized in a directory structure like below. Each agent/workflow must have an `__init__.py` that exports the required variable (`agent` or `workflow`). @@ -91,15 +112,15 @@ devui ./agents --tracing framework ## OpenAI-Compatible API -For convenience, DevUI provides an OpenAI Responses backend API. This means you can run the backend and also use the OpenAI client sdk to connect to it. Use **agent/workflow name as the model**, and set streaming to `True` as needed. +For convenience, DevUI provides an OpenAI Responses backend API. This means you can run the backend and also use the OpenAI client sdk to connect to it. 
Use **agent/workflow name as the entity_id in metadata**, and set streaming to `True` as needed. ```bash -# Simple - use your entity name as the model +# Simple - use your entity name as the entity_id in metadata curl -X POST http://localhost:8080/v1/responses \ -H "Content-Type: application/json" \ -d @- << 'EOF' { - "model": "weather_agent", + "metadata": {"entity_id": "weather_agent"}, "input": "Hello world" } ``` @@ -115,7 +136,7 @@ client = OpenAI( ) response = client.responses.create( - model="weather_agent", # Your agent/workflow name + metadata={"entity_id": "weather_agent"}, # Your agent/workflow name input="What's the weather in Seattle?" ) @@ -136,13 +157,13 @@ conversation = client.conversations.create( # Use it across multiple turns response1 = client.responses.create( - model="weather_agent", + metadata={"entity_id": "weather_agent"}, input="What's the weather in Seattle?", conversation=conversation.id ) response2 = client.responses.create( - model="weather_agent", + metadata={"entity_id": "weather_agent"}, input="How about tomorrow?", conversation=conversation.id # Continues the conversation! ) @@ -150,6 +171,22 @@ response2 = client.responses.create( **How it works:** DevUI automatically retrieves the conversation's message history from the stored thread and passes it to the agent. You don't need to manually manage message history - just provide the same `conversation` ID for follow-up requests. +### OpenAI Proxy Mode + +DevUI provides an **OpenAI Proxy** feature for testing OpenAI models directly through the interface without creating custom agents. Enable via Settings → OpenAI Proxy tab. + +**How it works:** The UI sends requests to the DevUI backend (with `X-Proxy-Backend: openai` header), which then proxies them to OpenAI's Responses API (and Conversations API for multi-turn chats). This proxy approach keeps your `OPENAI_API_KEY` secure on the server—never exposed in the browser or client-side code. 
+ +**Example:** + +```bash +curl -X POST http://localhost:8080/v1/responses \ + -H "X-Proxy-Backend: openai" \ + -d '{"model": "gpt-4.1-mini", "input": "Hello"}' +``` + +**Note:** Requires `OPENAI_API_KEY` environment variable configured on the backend. + ## CLI Options ```bash @@ -162,6 +199,21 @@ Options: --config YAML config file --tracing none|framework|workflow|all --reload Enable auto-reload + --mode developer|user (default: developer) + --auth Enable Bearer token authentication +``` + +### UI Modes + +- **developer** (default): Full access - debug panel, entity details, hot reload, deployment +- **user**: Simplified UI with restricted APIs - only chat and conversation management + +```bash +# Development +devui ./agents + +# Production (user-facing) +devui ./agents --mode user --auth ``` ## Key Endpoints @@ -187,18 +239,23 @@ Given that DevUI offers an OpenAI Responses API, it internally maps messages and | `response.function_result.complete` | `FunctionResultContent` | DevUI | | `response.function_approval.requested` | `FunctionApprovalRequestContent` | DevUI | | `response.function_approval.responded` | `FunctionApprovalResponseContent` | DevUI | +| `response.output_item.added` (ResponseOutputImage) | `DataContent` (images) | DevUI | +| `response.output_item.added` (ResponseOutputFile) | `DataContent` (files) | DevUI | +| `response.output_item.added` (ResponseOutputData) | `DataContent` (other) | DevUI | +| `response.output_item.added` (ResponseOutputImage/File) | `UriContent` (images/files) | DevUI | | `error` | `ErrorContent` | OpenAI | | Final `Response.usage` field (not streamed) | `UsageContent` | OpenAI | | | **Workflow Events** | | | `response.output_item.added` (ExecutorActionItem)* | `ExecutorInvokedEvent` | OpenAI | | `response.output_item.done` (ExecutorActionItem)* | `ExecutorCompletedEvent` | OpenAI | | `response.output_item.done` (ExecutorActionItem with error)* | `ExecutorFailedEvent` | OpenAI | +| `response.output_item.added` 
(ResponseOutputMessage) | `WorkflowOutputEvent` | OpenAI | | `response.workflow_event.complete` | `WorkflowEvent` (other) | DevUI | | `response.trace.complete` | `WorkflowStatusEvent` | DevUI | | `response.trace.complete` | `WorkflowWarningEvent` | DevUI | | | **Trace Content** | | -| `response.trace.complete` | `DataContent` | DevUI | -| `response.trace.complete` | `UriContent` | DevUI | +| `response.trace.complete` | `DataContent` (no data/errors) | DevUI | +| `response.trace.complete` | `UriContent` (unsupported MIME) | DevUI | | `response.trace.complete` | `HostedFileContent` | DevUI | | `response.trace.complete` | `HostedVectorStoreContent` | DevUI | @@ -213,15 +270,19 @@ DevUI follows the OpenAI Responses API specification for maximum compatibility: **OpenAI Standard Event Types Used:** -- `ResponseOutputItemAddedEvent` - Output item notifications (function calls and results) +- `ResponseOutputItemAddedEvent` - Output item notifications (function calls, images, files, data) - `ResponseOutputItemDoneEvent` - Output item completion notifications - `Response.usage` - Token usage (in final response, not streamed) -- All standard text, reasoning, and function call events **Custom DevUI Extensions:** +- `response.output_item.added` with custom item types: + - `ResponseOutputImage` - Agent-generated images (inline display) + - `ResponseOutputFile` - Agent-generated files (inline display) + - `ResponseOutputData` - Agent-generated structured data (inline display) - `response.function_approval.requested` - Function approval requests (for interactive approval workflows) - `response.function_approval.responded` - Function approval responses (user approval/rejection) +- `response.function_result.complete` - Server-side function execution results - `response.workflow_event.complete` - Agent Framework workflow events - `response.trace.complete` - Execution traces and internal content (DataContent, UriContent, hosted files/stores) @@ -254,18 +315,28 @@ These custom 
extensions are clearly namespaced and can be safely ignored by stan ## Security -DevUI is designed as a **sample application for local development** and should not be exposed to untrusted networks or used in production environments. +DevUI is designed as a **sample application for local development** and should not be exposed to untrusted networks without proper authentication. + +**For production deployments:** + +```bash +# User mode with authentication (recommended) +devui ./agents --mode user --auth --host 0.0.0.0 +``` + +This restricts developer APIs (reload, deployment, entity details) and requires Bearer token authentication. **Security features:** +- User mode restricts developer-facing APIs +- Optional Bearer token authentication via `--auth` - Only loads entities from local directories or in-memory registration - No remote code execution capabilities - Binds to localhost (127.0.0.1) by default -- All samples must be manually downloaded and reviewed before running **Best practices:** -- Never expose DevUI to the internet +- Use `--mode user --auth` for any deployment exposed to end users - Review all agent/workflow code before running - Only load entities from trusted sources - Use `.env` files for sensitive credentials (never commit them) diff --git a/python/packages/devui/agent_framework_devui/__init__.py b/python/packages/devui/agent_framework_devui/__init__.py index c259b33ae8..45d1ea8c2d 100644 --- a/python/packages/devui/agent_framework_devui/__init__.py +++ b/python/packages/devui/agent_framework_devui/__init__.py @@ -5,20 +5,87 @@ import importlib.metadata import logging import webbrowser +from collections.abc import Callable from typing import Any +from ._conversations import CheckpointConversationManager from ._server import DevServer from .models import AgentFrameworkRequest, OpenAIError, OpenAIResponse, ResponseStreamEvent from .models._discovery_models import DiscoveryResponse, EntityInfo, EnvVarRequirement logger = logging.getLogger(__name__) 
+# Module-level cleanup registry (before serve() is called) +_cleanup_registry: dict[int, list[Callable[[], Any]]] = {} + try: __version__ = importlib.metadata.version(__name__) except importlib.metadata.PackageNotFoundError: __version__ = "0.0.0" # Fallback for development mode +def register_cleanup(entity: Any, *hooks: Callable[[], Any]) -> None: + """Register cleanup hook(s) for an entity. + + Cleanup hooks execute during DevUI server shutdown, before entity + clients are closed. Supports both synchronous and asynchronous callables. + + Args: + entity: Agent, workflow, or other entity object + *hooks: One or more cleanup callables (sync or async) + + Raises: + ValueError: If no hooks provided + + Examples: + Single cleanup hook: + >>> from agent_framework.devui import serve, register_cleanup + >>> credential = DefaultAzureCredential() + >>> agent = ChatAgent(...) + >>> register_cleanup(agent, credential.close) + >>> serve(entities=[agent]) + + Multiple cleanup hooks: + >>> register_cleanup(agent, credential.close, session.close, db_pool.close) + + Works with file-based discovery: + >>> # In agents/my_agent/agent.py + >>> from agent_framework.devui import register_cleanup + >>> credential = DefaultAzureCredential() + >>> agent = ChatAgent(...) + >>> register_cleanup(agent, credential.close) + >>> # Run: devui ./agents + """ + if not hooks: + raise ValueError("At least one cleanup hook required") + + # Use id() to track entity identity (works across modules) + entity_id = id(entity) + + if entity_id not in _cleanup_registry: + _cleanup_registry[entity_id] = [] + + _cleanup_registry[entity_id].extend(hooks) + + logger.debug( + f"Registered {len(hooks)} cleanup hook(s) for {type(entity).__name__} " + f"(id: {entity_id}, total: {len(_cleanup_registry[entity_id])})" + ) + + +def _get_registered_cleanup_hooks(entity: Any) -> list[Callable[[], Any]]: + """Get cleanup hooks registered for an entity (internal use). 
+ + Args: + entity: Entity object to get hooks for + + Returns: + List of cleanup hooks registered for the entity + """ + entity_id = id(entity) + return _cleanup_registry.get(entity_id, []) + + def serve( entities: list[Any] | None = None, entities_dir: str | None = None, @@ -28,6 +95,9 @@ def serve( cors_origins: list[str] | None = None, ui_enabled: bool = True, tracing_enabled: bool = False, + mode: str = "developer", + auth_enabled: bool = False, + auth_token: str | None = None, ) -> None: """Launch Agent Framework DevUI with simple API. @@ -40,6 +110,9 @@ def serve( cors_origins: List of allowed CORS origins ui_enabled: Whether to enable the UI tracing_enabled: Whether to enable OpenTelemetry tracing + mode: Server mode - 'developer' (full access, verbose errors) or 'user' (restricted APIs, generic errors) + auth_enabled: Whether to enable Bearer token authentication + auth_token: Custom authentication token (auto-generated if not provided with auth_enabled=True) """ import re @@ -53,6 +126,52 @@ def serve( if not isinstance(port, int) or not (1 <= port <= 65535): raise ValueError(f"Invalid port: {port}. 
Must be integer between 1 and 65535") + # Security check: Warn if network-exposed without authentication + if host not in ("127.0.0.1", "localhost") and not auth_enabled: + logger.warning("⚠️ WARNING: Exposing DevUI to network without authentication!") + logger.warning("⚠️ This is INSECURE - anyone on your network can access your agents") + logger.warning("💡 For network exposure, add --auth flag: devui --host 0.0.0.0 --auth") + + # Handle authentication configuration + if auth_enabled: + import os + import secrets + + # Check if token is in environment variable first + if not auth_token: + auth_token = os.environ.get("DEVUI_AUTH_TOKEN") + + # Auto-generate token if STILL not provided + if not auth_token: + # Check if we're in a production-like environment + is_production = ( + host not in ("127.0.0.1", "localhost") # Exposed to network + or os.environ.get("CI") == "true" # Running in CI + or os.environ.get("KUBERNETES_SERVICE_HOST") # Running in k8s + ) + + if is_production: + # REFUSE to start without explicit token + logger.error("❌ Authentication enabled but no token provided") + logger.error("❌ Auto-generated tokens are NOT secure for network-exposed deployments") + logger.error("💡 Set token: export DEVUI_AUTH_TOKEN=") + logger.error("💡 Or pass: serve(entities=[...], auth_token='your-token')") + raise ValueError("DEVUI_AUTH_TOKEN required when host is not localhost") + + # Development mode: auto-generate and show + auth_token = secrets.token_urlsafe(32) + logger.info("🔒 Authentication enabled with auto-generated token") + logger.info("\n" + "=" * 70) + logger.info("🔑 DEV TOKEN (localhost only, shown once):") + logger.info(f" {auth_token}") + logger.info("=" * 70 + "\n") + else: + logger.info("🔒 Authentication enabled with provided token") + + # Set environment variable for server to use + os.environ["AUTH_REQUIRED"] = "true" + os.environ["DEVUI_AUTH_TOKEN"] = auth_token + # Configure tracing environment variables if enabled if tracing_enabled: import os @@ 
-72,7 +191,12 @@ def serve( # Create server with direct parameters server = DevServer( - entities_dir=entities_dir, port=port, host=host, cors_origins=cors_origins, ui_enabled=ui_enabled + entities_dir=entities_dir, + port=port, + host=host, + cors_origins=cors_origins, + ui_enabled=ui_enabled, + mode=mode, ) # Register in-memory entities if provided @@ -139,6 +263,7 @@ def main() -> None: # Export main public API __all__ = [ "AgentFrameworkRequest", + "CheckpointConversationManager", "DevServer", "DiscoveryResponse", "EntityInfo", @@ -147,5 +272,6 @@ def main() -> None: "OpenAIResponse", "ResponseStreamEvent", "main", + "register_cleanup", "serve", ] diff --git a/python/packages/devui/agent_framework_devui/_cli.py b/python/packages/devui/agent_framework_devui/_cli.py index 2a36d0aa98..5bc06ac3c8 100644 --- a/python/packages/devui/agent_framework_devui/_cli.py +++ b/python/packages/devui/agent_framework_devui/_cli.py @@ -55,6 +55,41 @@ def create_cli_parser() -> argparse.ArgumentParser: parser.add_argument("--tracing", action="store_true", help="Enable OpenTelemetry tracing for Agent Framework") + parser.add_argument( + "--mode", + choices=["developer", "user"], + default=None, + help="Server mode - 'developer' (full access, verbose errors) or 'user' (restricted APIs, generic errors)", + ) + + # Add --dev/--no-dev as a convenient alternative to --mode + parser.add_argument( + "--dev", + dest="dev_mode", + action="store_true", + default=None, + help="Enable developer mode (shorthand for --mode developer)", + ) + + parser.add_argument( + "--no-dev", + dest="dev_mode", + action="store_false", + help="Disable developer mode (shorthand for --mode user)", + ) + + parser.add_argument( + "--auth", + action="store_true", + help="Enable authentication via Bearer token (required for deployed environments)", + ) + + parser.add_argument( + "--auth-token", + type=str, + help="Custom authentication token (auto-generated if not provided with --auth)", + ) + 
parser.add_argument("--version", action="version", version=f"Agent Framework DevUI {get_version()}") return parser @@ -78,26 +113,35 @@ def validate_directory(directory: str) -> str: abs_dir = os.path.abspath(directory) if not os.path.exists(abs_dir): - print(f"❌ Error: Directory '{directory}' does not exist", file=sys.stderr) # noqa: T201 + print(f"Error: Directory '{directory}' does not exist", file=sys.stderr) # noqa: T201 sys.exit(1) if not os.path.isdir(abs_dir): - print(f"❌ Error: '{directory}' is not a directory", file=sys.stderr) # noqa: T201 + print(f"Error: '{directory}' is not a directory", file=sys.stderr) # noqa: T201 sys.exit(1) return abs_dir -def print_startup_info(entities_dir: str, host: str, port: int, ui_enabled: bool, reload: bool) -> None: +def print_startup_info( + entities_dir: str, host: str, port: int, ui_enabled: bool, reload: bool, auth_token: str | None = None +) -> None: """Print startup information.""" - print("🤖 Agent Framework DevUI") # noqa: T201 + print("Agent Framework DevUI") # noqa: T201 print("=" * 50) # noqa: T201 - print(f"📁 Entities directory: {entities_dir}") # noqa: T201 - print(f"🌐 Server URL: http://{host}:{port}") # noqa: T201 - print(f"🎨 UI enabled: {'Yes' if ui_enabled else 'No'}") # noqa: T201 - print(f"🔄 Auto-reload: {'Yes' if reload else 'No'}") # noqa: T201 + print(f"Entities directory: {entities_dir}") # noqa: T201 + print(f"Server URL: http://{host}:{port}") # noqa: T201 + print(f"UI enabled: {'Yes' if ui_enabled else 'No'}") # noqa: T201 + print(f"Auto-reload: {'Yes' if reload else 'No'}") # noqa: T201 + + # Display auth token if authentication is enabled + if auth_token: + print("Authentication: Enabled") # noqa: T201 + print(f"Auth token: {auth_token}") # noqa: T201 + print("💡 Use this token in Authorization: Bearer header") # noqa: T201 + print("=" * 50) # noqa: T201 - print("🔍 Scanning for entities...") # noqa: T201 + print("Scanning for entities...") # noqa: T201 def main() -> None: @@ -114,8 +158,19 @@ 
def main() -> None: # Extract parameters directly from args ui_enabled = not args.headless - # Print startup info - print_startup_info(entities_dir, args.host, args.port, ui_enabled, args.reload) + # Determine mode from --mode or --dev/--no-dev flags + if args.dev_mode is not None: + # --dev or --no-dev was specified + mode = "developer" if args.dev_mode else "user" + elif args.mode is not None: + # --mode was specified + mode = args.mode + else: + # Default to developer mode + mode = "developer" + + # Print startup info (don't show token - serve() will handle it) + print_startup_info(entities_dir, args.host, args.port, ui_enabled, args.reload, None) # Import and start server try: @@ -128,14 +183,17 @@ def main() -> None: auto_open=not args.no_open, ui_enabled=ui_enabled, tracing_enabled=args.tracing, + mode=mode, + auth_enabled=args.auth, + auth_token=args.auth_token, # Pass through explicit token only ) except KeyboardInterrupt: - print("\n👋 Shutting down Agent Framework DevUI...") # noqa: T201 + print("\nShutting down Agent Framework DevUI...") # noqa: T201 sys.exit(0) except Exception as e: logger.exception("Failed to start server") - print(f"❌ Error: {e}", file=sys.stderr) # noqa: T201 + print(f"Error: {e}", file=sys.stderr) # noqa: T201 sys.exit(1) diff --git a/python/packages/devui/agent_framework_devui/_conversations.py b/python/packages/devui/agent_framework_devui/_conversations.py index 5b892c8f35..9762b55d0e 100644 --- a/python/packages/devui/agent_framework_devui/_conversations.py +++ b/python/packages/devui/agent_framework_devui/_conversations.py @@ -12,6 +12,7 @@ from typing import Any, Literal, cast from agent_framework import AgentThread, ChatMessage +from agent_framework._workflows._checkpoint import InMemoryCheckpointStorage from openai.types.conversations import Conversation, ConversationDeletedResource from openai.types.conversations.conversation_item import ConversationItem from openai.types.conversations.message import Message @@ -26,6 +27,10 
@@ # Type alias for OpenAI Message role literals MessageRole = Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"] +# Checkpoint item type constants +CONVERSATION_ITEM_TYPE_CHECKPOINT = "checkpoint" +CONVERSATION_TYPE_CHECKPOINT_CONTAINER = "checkpoint_container" + class ConversationStore(ABC): """Abstract base class for conversation storage. @@ -35,14 +40,17 @@ class ConversationStore(ABC): """ @abstractmethod - def create_conversation(self, metadata: dict[str, str] | None = None) -> Conversation: + def create_conversation( + self, metadata: dict[str, str] | None = None, conversation_id: str | None = None + ) -> Conversation: """Create a new conversation (wraps AgentThread creation). Args: metadata: Optional metadata dict (e.g., {"agent_id": "weather_agent"}) + conversation_id: Optional conversation ID (if None, generates one) Returns: - Conversation object with generated ID + Conversation object with generated or provided ID """ pass @@ -127,7 +135,7 @@ async def list_items( @abstractmethod def get_item(self, conversation_id: str, item_id: str) -> ConversationItem | None: - """Get specific conversation item. + """Get a specific conversation item by ID. 
Args: conversation_id: Conversation ID @@ -184,17 +192,23 @@ def __init__(self) -> None: # Item index for O(1) lookup: {conversation_id: {item_id: ConversationItem}} self._item_index: dict[str, dict[str, ConversationItem]] = {} - def create_conversation(self, metadata: dict[str, str] | None = None) -> Conversation: - """Create a new conversation with underlying AgentThread.""" - conv_id = f"conv_{uuid.uuid4().hex}" + def create_conversation( + self, metadata: dict[str, str] | None = None, conversation_id: str | None = None + ) -> Conversation: + """Create a new conversation with underlying AgentThread and checkpoint storage.""" + conv_id = conversation_id or f"conv_{uuid.uuid4().hex}" created_at = int(time.time()) # Create AgentThread with default ChatMessageStore thread = AgentThread() + # Create session-scoped checkpoint storage (one per conversation) + checkpoint_storage = InMemoryCheckpointStorage() + self._conversations[conv_id] = { "id": conv_id, "thread": thread, + "checkpoint_storage": checkpoint_storage, # Stored alongside thread "metadata": metadata or {}, "created_at": created_at, "items": [], @@ -424,6 +438,23 @@ async def list_items( # Add function result items items.extend(function_results) + # Include checkpoints from checkpoint storage as conversation items + checkpoint_storage = conv_data.get("checkpoint_storage") + if checkpoint_storage: + # Get all checkpoints for this conversation + checkpoints = await checkpoint_storage.list_checkpoints() + for checkpoint in checkpoints: + # Create a conversation item for each checkpoint + checkpoint_item = { + "id": f"checkpoint_{checkpoint.checkpoint_id}", + "type": "checkpoint", + "checkpoint_id": checkpoint.checkpoint_id, + "workflow_id": checkpoint.workflow_id, + "timestamp": checkpoint.timestamp, + "status": "completed", + } + items.append(cast(ConversationItem, checkpoint_item)) + # Apply pagination if order == "desc": items = items[::-1] @@ -442,12 +473,9 @@ async def list_items( return paginated_items, 
has_more def get_item(self, conversation_id: str, item_id: str) -> ConversationItem | None: - """Get specific conversation item - O(1) lookup via index.""" - # Use index for O(1) lookup instead of linear search - conv_items = self._item_index.get(conversation_id) - if not conv_items: - return None - + """Get a specific conversation item by ID.""" + # Use the item index for O(1) lookup + conv_items = self._item_index.get(conversation_id, {}) return conv_items.get(item_id) def get_thread(self, conversation_id: str) -> AgentThread | None: @@ -471,3 +499,42 @@ def list_conversations_by_metadata(self, metadata_filter: dict[str, str]) -> lis ) ) return results + + +class CheckpointConversationManager: + """Manages checkpoint storage for workflow sessions - SESSION-SCOPED. + + Simplified architecture: Each conversation has its own InMemoryCheckpointStorage + stored in conv_data["checkpoint_storage"]. This manager just retrieves it. + Session isolation comes from each conversation having a separate storage instance. + """ + + def __init__(self, conversation_store: ConversationStore): + # Runtime validation since we need specific implementation details + if not isinstance(conversation_store, InMemoryConversationStore): + raise TypeError("CheckpointConversationManager currently requires InMemoryConversationStore") + self._store: InMemoryConversationStore = conversation_store + # Keep public reference for backward compatibility with tests + self.conversation_store = conversation_store + + def get_checkpoint_storage(self, conversation_id: str) -> InMemoryCheckpointStorage: + """Get the checkpoint storage for a specific conversation. 
+ + Args: + conversation_id: Conversation ID + + Returns: + InMemoryCheckpointStorage instance for this conversation + + Raises: + ValueError: If conversation not found + """ + # Access internal conversations dict (we know it's InMemoryConversationStore) + conv_data = self._store._conversations.get(conversation_id) + if not conv_data: + raise ValueError(f"Conversation {conversation_id} not found") + + checkpoint_storage = conv_data["checkpoint_storage"] + if not isinstance(checkpoint_storage, InMemoryCheckpointStorage): + raise TypeError(f"Expected InMemoryCheckpointStorage but got {type(checkpoint_storage)}") + return checkpoint_storage diff --git a/python/packages/devui/agent_framework_devui/_deployment.py b/python/packages/devui/agent_framework_devui/_deployment.py new file mode 100644 index 0000000000..e1cf1d5c3d --- /dev/null +++ b/python/packages/devui/agent_framework_devui/_deployment.py @@ -0,0 +1,588 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""Azure Container Apps deployment manager for DevUI entities.""" + +import asyncio +import logging +import re +import secrets +import uuid +from collections.abc import AsyncGenerator +from datetime import datetime, timezone +from pathlib import Path + +from .models._discovery_models import Deployment, DeploymentConfig, DeploymentEvent + +logger = logging.getLogger(__name__) + + +class DeploymentManager: + """Manages entity deployments to Azure Container Apps.""" + + def __init__(self) -> None: + """Initialize deployment manager.""" + self._deployments: dict[str, Deployment] = {} + + async def deploy(self, config: DeploymentConfig, entity_path: Path) -> AsyncGenerator[DeploymentEvent, None]: + """Deploy entity to Azure Container Apps with streaming events. 
+ + Args: + config: Deployment configuration + entity_path: Path to entity directory + + Yields: + DeploymentEvent objects for real-time progress updates + + Raises: + ValueError: If prerequisites not met or deployment fails + """ + deployment_id = str(uuid.uuid4()) + + try: + # Step 1: Validate prerequisites + yield DeploymentEvent( + type="deploy.validating", + message="Checking prerequisites (Azure CLI, Docker, authentication)...", + ) + + await self._validate_prerequisites() + + # Step 2: Generate Dockerfile + yield DeploymentEvent( + type="deploy.dockerfile", + message="Generating Dockerfile with authentication enabled...", + ) + + _ = await self._generate_dockerfile(entity_path, config) + + # Step 3: Generate auth token + yield DeploymentEvent( + type="deploy.token", + message="Generating secure authentication token...", + ) + + auth_token = secrets.token_urlsafe(32) + + # Step 4: Discover existing Container App Environment + yield DeploymentEvent( + type="deploy.environment", + message="Checking for existing Container App Environment...", + ) + + # Step 5: Build and deploy with Azure CLI + yield DeploymentEvent( + type="deploy.building", + message=f"Deploying to Azure Container Apps ({config.region})...", + ) + + # Create a queue for streaming events from subprocess + event_queue: asyncio.Queue[DeploymentEvent] = asyncio.Queue() + + # Run deployment in background task with event queue + deployment_task = asyncio.create_task(self._deploy_to_azure(config, entity_path, auth_token, event_queue)) + + # Stream events from queue while deployment runs + while True: + try: + # Check if deployment task is done + if deployment_task.done(): + # Get the result or exception + deployment_url = await deployment_task + break + + # Get event from queue with short timeout + event = await asyncio.wait_for(event_queue.get(), timeout=0.1) + yield event + except asyncio.TimeoutError: + # No event in queue, continue waiting + continue + + # Step 5: Store deployment record + 
deployment = Deployment( + id=deployment_id, + entity_id=config.entity_id, + resource_group=config.resource_group, + app_name=config.app_name, + region=config.region, + url=deployment_url, + status="deployed", + created_at=datetime.now(timezone.utc).isoformat(), + ) + self._deployments[deployment_id] = deployment + + # Step 6: Success - return URL and token + yield DeploymentEvent( + type="deploy.completed", + message=f"Deployment successful! URL: {deployment_url}", + url=deployment_url, + auth_token=auth_token, # Shown once to user + ) + + except Exception as e: + error_msg = f"Deployment failed: {e!s}" + logger.exception(error_msg) + + # Store failed deployment + deployment = Deployment( + id=deployment_id, + entity_id=config.entity_id, + resource_group=config.resource_group, + app_name=config.app_name, + region=config.region, + url="", + status="failed", + created_at=datetime.now(timezone.utc).isoformat(), + error=str(e), + ) + self._deployments[deployment_id] = deployment + + yield DeploymentEvent( + type="deploy.failed", + message=error_msg, + ) + + async def _validate_prerequisites(self) -> None: + """Validate that Azure CLI, Docker, authentication, and resource providers are available. + + Raises: + ValueError: If prerequisites not met + """ + # Check Azure CLI + az_check = await asyncio.create_subprocess_exec( + "az", "--version", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) + await az_check.communicate() + if az_check.returncode != 0: + raise ValueError( + "Azure CLI not found. Install from: https://learn.microsoft.com/cli/azure/install-azure-cli" + ) + + # Check Docker + docker_check = await asyncio.create_subprocess_exec( + "docker", "--version", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) + await docker_check.communicate() + if docker_check.returncode != 0: + raise ValueError("Docker not found. 
Install from: https://www.docker.com/get-started") + + # Check Azure authentication + az_account_check = await asyncio.create_subprocess_exec( + "az", "account", "show", stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) + stdout, _ = await az_account_check.communicate() + if az_account_check.returncode != 0: + raise ValueError("Not authenticated with Azure. Run: az login") + + # Check required resource providers are registered + required_providers = ["Microsoft.App", "Microsoft.ContainerRegistry", "Microsoft.OperationalInsights"] + unregistered_providers = [] + + # Get list of registered providers + provider_check = await asyncio.create_subprocess_exec( + "az", + "provider", + "list", + "--query", + "[?registrationState=='Registered'].namespace", + "--output", + "json", + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, _stderr = await provider_check.communicate() + + if provider_check.returncode == 0: + import json + + try: + registered = json.loads(stdout.decode()) + for provider in required_providers: + if provider not in registered: + unregistered_providers.append(provider) + except json.JSONDecodeError: + logger.warning("Could not parse provider list, skipping provider validation") + else: + logger.warning("Could not check provider registration status") + + if unregistered_providers: + commands = [f"az provider register -n {p} --wait" for p in unregistered_providers] + raise ValueError( + f"Required Azure resource providers not registered: {', '.join(unregistered_providers)}\n\n" + f"Register them by running:\n" + "\n".join(commands) + "\n\n" + "This is a one-time setup per Azure subscription." + ) + + logger.info("All prerequisites validated successfully") + + async def _generate_dockerfile(self, entity_path: Path, config: DeploymentConfig) -> Path: + """Generate Dockerfile for entity deployment. 
+ + Args: + entity_path: Path to entity directory + config: Deployment configuration + + Returns: + Path to generated Dockerfile + """ + # Validate ui_mode + if config.ui_mode not in ["user", "developer"]: + raise ValueError(f"Invalid ui_mode: {config.ui_mode}. Must be 'user' or 'developer'.") + + # Check if requirements.txt exists in the entity directory + has_requirements = (entity_path / "requirements.txt").exists() + + requirements_section = "" + if has_requirements: + logger.info(f"Found requirements.txt in {entity_path}, will include in Dockerfile") + requirements_section = """# Install entity dependencies +COPY requirements.txt ./ +RUN pip install -r requirements.txt +""" + else: + logger.info(f"No requirements.txt found in {entity_path}, skipping dependency installation") + + dockerfile_content = f"""FROM python:3.11-slim +WORKDIR /app + +{requirements_section}# Install DevUI from PyPI +RUN pip install agent-framework-devui --pre + +# Copy entity code +COPY . /app/entity/ + +ENV PORT=8080 +EXPOSE 8080 + +# Launch DevUI with auth enabled (token from environment variable) +CMD ["devui", "/app/entity", "--mode", "{config.ui_mode}", "--host", "0.0.0.0", "--port", "8080", "--auth"] +""" + + dockerfile_path = entity_path / "Dockerfile" + + # Warn if Dockerfile already exists + if dockerfile_path.exists(): + logger.warning(f"Dockerfile already exists at {dockerfile_path}, overwriting...") + + dockerfile_path.write_text(dockerfile_content) + logger.info(f"Generated Dockerfile at {dockerfile_path}") + + return dockerfile_path + + async def _discover_container_app_environment(self, resource_group: str, region: str) -> str | None: + """Discover existing Container App Environment in resource group. 
+ + Args: + resource_group: Resource group name + region: Azure region (for filtering if needed) + + Returns: + Environment name if found, None otherwise + """ + cmd = [ + "az", + "containerapp", + "env", + "list", + "--resource-group", + resource_group, + "--query", + "[0].name", + "--output", + "tsv", + ] + + logger.info(f"Discovering existing Container App Environments in {resource_group}...") + + process = await asyncio.create_subprocess_exec( + *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) + + stdout, stderr = await process.communicate() + + if process.returncode == 0: + env_name = stdout.decode().strip() + if env_name: + logger.info(f"Found existing environment: {env_name}") + return env_name + logger.info("No existing environments found in resource group") + return None + logger.warning(f"Failed to query environments: {stderr.decode()}") + return None + + async def _deploy_to_azure( + self, config: DeploymentConfig, entity_path: Path, auth_token: str, event_queue: asyncio.Queue[DeploymentEvent] + ) -> str: + """Deploy to Azure Container Apps, reusing existing environments. 
+ + Args: + config: Deployment configuration + entity_path: Path to entity directory + auth_token: Authentication token to inject + event_queue: Queue for streaming progress events + + Returns: + Deployment URL + + Raises: + ValueError: If deployment fails + """ + # Step 1: Try to discover existing Container App Environment + existing_env = await self._discover_container_app_environment(config.resource_group, config.region) + + if existing_env: + # Use existing environment - avoids needing environment creation permissions + logger.info(f"Reusing existing Container App Environment: {existing_env} (cost efficient, no side effects)") + cmd = [ + "az", + "containerapp", + "up", + "--name", + config.app_name, + "--resource-group", + config.resource_group, + "--environment", + existing_env, + "--source", + str(entity_path), + "--env-vars", + f"DEVUI_AUTH_TOKEN={auth_token}", + "--ingress", + "external", + "--target-port", + "8080", + ] + logger.info(f"Creating new Container App '{config.app_name}' in environment '{existing_env}'...") + else: + # No existing environment - try to create one (may fail if no permissions) + logger.warning( + "No existing Container App Environment found. " + "Attempting to create new environment (requires Microsoft.App/managedEnvironments/write permission)..." 
+ ) + cmd = [ + "az", + "containerapp", + "up", + "--name", + config.app_name, + "--resource-group", + config.resource_group, + "--location", + config.region, + "--source", + str(entity_path), + "--env-vars", + f"DEVUI_AUTH_TOKEN={auth_token}", + "--ingress", + "external", + "--target-port", + "8080", + ] + + logger.info(f"Running: {' '.join(cmd)}") + + process = await asyncio.create_subprocess_exec( + *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT + ) + + # Stream output line by line + output_lines = [] + try: + if not process.stdout: + raise ValueError("Failed to capture process output") + + while True: + # Read with timeout + line = await asyncio.wait_for(process.stdout.readline(), timeout=600) + if not line: + break + + line_text = line.decode().strip() + if line_text: + output_lines.append(line_text) + + # Stream meaningful updates to user + if "WARNING:" in line_text: + # Parse and send user-friendly warnings + if "Creating resource group" in line_text: + await event_queue.put( + DeploymentEvent( + type="deploy.progress", + message=f"Creating resource group '{config.resource_group}'...", + ) + ) + elif "Creating ContainerAppEnvironment" in line_text: + await event_queue.put( + DeploymentEvent( + type="deploy.progress", + message="Setting up Container App Environment (this may take 2-3 minutes)...", + ) + ) + elif "Registering resource provider" in line_text: + provider = line_text.split("provider")[-1].strip() + if provider.endswith("..."): + provider = provider[:-3] + await event_queue.put( + DeploymentEvent( + type="deploy.progress", message=f"Registering Azure provider{provider}..." + ) + ) + elif "Creating Azure Container Registry" in line_text: + await event_queue.put( + DeploymentEvent( + type="deploy.progress", message="Creating Container Registry for your images..." 
+ ) + ) + elif "No Log Analytics workspace" in line_text: + await event_queue.put( + DeploymentEvent( + type="deploy.progress", message="Creating Log Analytics workspace for monitoring..." + ) + ) + elif "Building image" in line_text: + await event_queue.put( + DeploymentEvent( + type="deploy.progress", + message="Building Docker image (this may take several minutes)...", + ) + ) + elif "Pushing image" in line_text: + await event_queue.put( + DeploymentEvent( + type="deploy.progress", message="Pushing image to Azure Container Registry..." + ) + ) + elif "Creating Container App" in line_text: + await event_queue.put( + DeploymentEvent(type="deploy.progress", message="Creating your Container App...") + ) + elif "Container app created" in line_text: + await event_queue.put( + DeploymentEvent(type="deploy.progress", message="Container app created successfully!") + ) + elif "ERROR:" in line_text: + # Stream errors immediately + await event_queue.put(DeploymentEvent(type="deploy.error", message=line_text)) + elif "Step" in line_text and "/" in line_text: + # Docker build steps + await event_queue.put( + DeploymentEvent(type="deploy.progress", message=f"Docker build: {line_text}") + ) + elif "https://" in line_text and ".azurecontainerapps.io" in line_text: + # Deployment URL detected + await event_queue.put( + DeploymentEvent(type="deploy.progress", message="Deployment URL generated!") + ) + + # Wait for process to complete + return_code = await process.wait() + + if return_code != 0: + error_output = "\n".join(output_lines[-10:]) # Last 10 lines for context + raise ValueError(f"Azure deployment failed:\n{error_output}") + + except asyncio.TimeoutError as e: + process.kill() + raise ValueError( + "Azure deployment timed out after 10 minutes. Please check Azure portal for status." 
+ ) from e + + # Parse output to extract FQDN + output = "\n".join(output_lines) + logger.debug(f"Azure CLI output: {output}") + + # Extract FQDN from output (az containerapp up returns it) + # Format: https://...azurecontainerapps.io + deployment_url = self._extract_fqdn_from_output(output, config.app_name) + + logger.info(f"Deployment successful: {deployment_url}") + return deployment_url + + def _extract_fqdn_from_output(self, output: str, app_name: str) -> str: + """Extract FQDN from Azure CLI output. + + Args: + output: Azure CLI command output + app_name: Container app name + + Returns: + Full HTTPS URL to deployed app + """ + # Try to find FQDN in output + for line in output.split("\n"): + if "fqdn" in line.lower() or app_name in line: + # Extract URL-like string + match = re.search(r"https?://[\w\-\.]+\.azurecontainerapps\.io", line) + if match: + return match.group(0) + + # If we can't extract FQDN, fail explicitly rather than return a broken URL + logger.error(f"Could not extract FQDN from Azure CLI output. Output:\n{output}") + raise ValueError( + "Could not extract deployment URL from Azure CLI output. " + "The deployment may have succeeded - check the Azure portal for your container app URL." + ) + + async def list_deployments(self, entity_id: str | None = None) -> list[Deployment]: + """List all deployments, optionally filtered by entity. + + Args: + entity_id: Optional entity ID to filter by + + Returns: + List of deployment records + """ + if entity_id: + return [d for d in self._deployments.values() if d.entity_id == entity_id] + return list(self._deployments.values()) + + async def get_deployment(self, deployment_id: str) -> Deployment | None: + """Get deployment by ID. + + Args: + deployment_id: Deployment ID + + Returns: + Deployment record or None if not found + """ + return self._deployments.get(deployment_id) + + async def delete_deployment(self, deployment_id: str) -> None: + """Delete deployment from Azure Container Apps. 
+ + Args: + deployment_id: Deployment ID to delete + + Raises: + ValueError: If deployment not found or deletion fails + """ + deployment = self._deployments.get(deployment_id) + if not deployment: + raise ValueError(f"Deployment {deployment_id} not found") + + # Execute: az containerapp delete + cmd = [ + "az", + "containerapp", + "delete", + "--name", + deployment.app_name, + "--resource-group", + deployment.resource_group, + "--yes", # Skip confirmation + ] + + logger.info(f"Deleting deployment: {' '.join(cmd)}") + + process = await asyncio.create_subprocess_exec( + *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE + ) + + stdout, stderr = await process.communicate() + + if process.returncode != 0: + error_output = stderr.decode() if stderr else stdout.decode() + raise ValueError(f"Deployment deletion failed: {error_output}") + + # Remove from store + del self._deployments[deployment_id] + logger.info(f"Deployment {deployment_id} deleted successfully") diff --git a/python/packages/devui/agent_framework_devui/_discovery.py b/python/packages/devui/agent_framework_devui/_discovery.py index 213af1f0e1..99265b5d52 100644 --- a/python/packages/devui/agent_framework_devui/_discovery.py +++ b/python/packages/devui/agent_framework_devui/_discovery.py @@ -4,6 +4,7 @@ from __future__ import annotations +import ast import importlib import importlib.util import logging @@ -31,6 +32,7 @@ def __init__(self, entities_dir: str | None = None): self.entities_dir = entities_dir self._entities: dict[str, EntityInfo] = {} self._loaded_objects: dict[str, Any] = {} + self._cleanup_hooks: dict[str, list[Any]] = {} async def discover_entities(self) -> list[EntityInfo]: """Scan for Agent Framework entities. @@ -70,14 +72,15 @@ def get_entity_object(self, entity_id: str) -> Any | None: """ return self._loaded_objects.get(entity_id) - async def load_entity(self, entity_id: str) -> Any: - """Load entity on-demand (lazy loading). 
+ async def load_entity(self, entity_id: str, checkpoint_manager: Any = None) -> Any: + """Load entity on-demand and inject checkpoint storage for workflows. This method implements lazy loading by importing the entity module only when needed. In-memory entities are returned from cache immediately. Args: entity_id: Entity identifier + checkpoint_manager: Optional checkpoint manager for workflow storage injection Returns: Loaded entity object @@ -107,9 +110,13 @@ async def load_entity(self, entity_id: str) -> Any: else: raise ValueError( f"Unsupported entity source: {entity_info.source}. " - f"Only 'directory' and 'in_memory' sources are supported." + f"Only 'directory' and 'in-memory' sources are supported." ) + # Note: Checkpoint storage is now injected at runtime via run_stream() parameter, + # not at load time. This provides cleaner architecture and explicit control flow. + # See _executor.py _execute_workflow() for runtime checkpoint storage injection. + # Enrich metadata with actual entity data # Don't pass entity_type if it's "unknown" - let inference determine the real type enriched_info = await self.create_entity_info_from_object( @@ -122,11 +129,27 @@ async def load_entity(self, entity_id: str) -> Any: # Preserve the original path from sparse metadata if "path" in entity_info.metadata: enriched_info.metadata["path"] = entity_info.metadata["path"] + # Now that we have the path, properly check deployment support + entity_path = Path(entity_info.metadata["path"]) + deployment_supported, deployment_reason = self._check_deployment_support(entity_path, entity_info.source) + enriched_info.deployment_supported = deployment_supported + enriched_info.deployment_reason = deployment_reason enriched_info.metadata["lazy_loaded"] = True self._entities[entity_id] = enriched_info # Cache the loaded object self._loaded_objects[entity_id] = entity_obj + + # Check module-level registry for cleanup hooks + from . 
import _get_registered_cleanup_hooks + + registered_hooks = _get_registered_cleanup_hooks(entity_obj) + if registered_hooks: + if entity_id not in self._cleanup_hooks: + self._cleanup_hooks[entity_id] = [] + self._cleanup_hooks[entity_id].extend(registered_hooks) + logger.debug(f"Discovered {len(registered_hooks)} registered cleanup hook(s) for: {entity_id}") + logger.info(f"Successfully loaded entity: {entity_id} (type: {enriched_info.type})") return entity_obj @@ -187,6 +210,17 @@ def list_entities(self) -> list[EntityInfo]: """ return list(self._entities.values()) + def get_cleanup_hooks(self, entity_id: str) -> list[Any]: + """Get cleanup hooks registered for an entity. + + Args: + entity_id: Entity identifier + + Returns: + List of cleanup hooks for the entity + """ + return self._cleanup_hooks.get(entity_id, []) + def invalidate_entity(self, entity_id: str) -> None: """Invalidate (clear cache for) an entity to enable hot reload. @@ -239,6 +273,17 @@ def register_entity(self, entity_id: str, entity_info: EntityInfo, entity_object """ self._entities[entity_id] = entity_info self._loaded_objects[entity_id] = entity_object + + # Check module-level registry for cleanup hooks + from . import _get_registered_cleanup_hooks + + registered_hooks = _get_registered_cleanup_hooks(entity_object) + if registered_hooks: + if entity_id not in self._cleanup_hooks: + self._cleanup_hooks[entity_id] = [] + self._cleanup_hooks[entity_id].extend(registered_hooks) + logger.debug(f"Discovered {len(registered_hooks)} registered cleanup hook(s) for: {entity_id}") + logger.debug(f"Registered entity: {entity_id} ({entity_info.type})") async def create_entity_info_from_object( @@ -305,6 +350,17 @@ async def create_entity_info_from_object( elif not has_run_stream and not has_run: logger.warning(f"Agent '{entity_id}' lacks both run() and run_stream() methods. 
May not work.") + # Check deployment support based on source + # For directory-based entities, we need the path to verify deployment support + deployment_supported = False + deployment_reason = "In-memory entities cannot be deployed (no source directory)" + + if source == "directory": + # Directory-based entity - will be checked properly after enrichment when path is available + # For now, mark as potentially deployable - will be re-evaluated after enrichment + deployment_supported = True + deployment_reason = "Ready for deployment (pending path verification)" + # Create EntityInfo with Agent Framework specifics return EntityInfo( id=entity_id, @@ -321,6 +377,8 @@ async def create_entity_info_from_object( executors=tools_list if entity_type == "workflow" else [], input_schema={"type": "string"}, # Default schema start_executor_id=tools_list[0] if tools_list and entity_type == "workflow" else None, + deployment_supported=deployment_supported, + deployment_reason=deployment_reason, metadata={ "source": "agent_framework_object", "class_name": entity_object.__class__.__name__ @@ -404,6 +462,31 @@ def _detect_entity_type(self, dir_path: Path) -> str: # Has __init__.py but no specific file return "unknown" + def _check_deployment_support(self, entity_path: Path, source: str) -> tuple[bool, str | None]: + """Check if entity can be deployed to Azure Container Apps. 
+ + Args: + entity_path: Path to entity directory or file + source: Entity source ("directory" or "in_memory") + + Returns: + Tuple of (supported, reason) explaining deployment eligibility + """ + # In-memory entities cannot be deployed + if source == "in_memory": + return False, "In-memory entities cannot be deployed (no source directory)" + + # File-based entities need a directory structure for deployment + if not entity_path.is_dir(): + return False, "Only directory-based entities can be deployed" + + # Must have __init__.py + if not (entity_path / "__init__.py").exists(): + return False, "Missing __init__.py file" + + # Passed all checks + return True, "Ready for deployment" + def _register_sparse_entity(self, dir_path: Path) -> None: """Register entity with sparse metadata (no import). @@ -413,6 +496,9 @@ def _register_sparse_entity(self, dir_path: Path) -> None: entity_id = dir_path.name entity_type = self._detect_entity_type(dir_path) + # Check deployment support + deployment_supported, deployment_reason = self._check_deployment_support(dir_path, "directory") + entity_info = EntityInfo( id=entity_id, name=entity_id.replace("_", " ").title(), @@ -421,6 +507,8 @@ def _register_sparse_entity(self, dir_path: Path) -> None: tools=[], # Sparse - will be populated on load description="", # Sparse - will be populated on load source="directory", + deployment_supported=deployment_supported, + deployment_reason=deployment_reason, metadata={ "path": str(dir_path), "discovered": True, @@ -431,14 +519,52 @@ def _register_sparse_entity(self, dir_path: Path) -> None: self._entities[entity_id] = entity_info logger.debug(f"Registered sparse entity: {entity_id} (type: {entity_type})") + def _has_entity_exports(self, file_path: Path) -> bool: + """Check if a Python file has entity exports (agent or workflow) using AST parsing. + + This safely checks for module-level assignments like: + - agent = ChatAgent(...) + - workflow = WorkflowBuilder()... 
+ + Args: + file_path: Python file to check + + Returns: + True if file has 'agent' or 'workflow' exports + """ + try: + # Read and parse the file's AST + source = file_path.read_text(encoding="utf-8") + tree = ast.parse(source, filename=str(file_path)) + + # Look for module-level assignments of 'agent' or 'workflow' + for node in ast.walk(tree): + if isinstance(node, ast.Assign): + for target in node.targets: + if isinstance(target, ast.Name) and target.id in ("agent", "workflow"): + return True + except Exception as e: + logger.debug(f"Could not parse {file_path} for entity exports: {e}") + return False + + return False + def _register_sparse_file_entity(self, file_path: Path) -> None: """Register file-based entity with sparse metadata (no import). Args: file_path: Entity Python file """ + # Check if file has valid entity exports using AST parsing + if not self._has_entity_exports(file_path): + logger.debug(f"Skipping {file_path.name} - no 'agent' or 'workflow' exports found") + return + entity_id = file_path.stem + # Check deployment support (file-based entities cannot be deployed) + deployment_supported, deployment_reason = self._check_deployment_support(file_path, "directory") + # File-based entities are typically agents, but we can't know for sure without importing entity_info = EntityInfo( id=entity_id, @@ -448,6 +574,8 @@ def _register_sparse_file_entity(self, file_path: Path) -> None: tools=[], description="", source="directory", + deployment_supported=deployment_supported, + deployment_reason=deployment_reason, metadata={ "path": str(file_path), "discovered": True, diff --git a/python/packages/devui/agent_framework_devui/_executor.py b/python/packages/devui/agent_framework_devui/_executor.py index 16d57c94b5..f1ca1c6a62 100644 --- a/python/packages/devui/agent_framework_devui/_executor.py +++ b/python/packages/devui/agent_framework_devui/_executor.py @@ -9,6 +9,7 @@ from typing import Any from agent_framework import AgentProtocol +from 
agent_framework._workflows._events import RequestInfoEvent from ._conversations import ConversationStore, InMemoryConversationStore from ._discovery import EntityDiscovery @@ -50,6 +51,11 @@ def __init__( # Use provided conversation store or default to in-memory self.conversation_store = conversation_store or InMemoryConversationStore() + # Create checkpoint manager (wraps conversation store) + from ._conversations import CheckpointConversationManager + + self.checkpoint_manager = CheckpointConversationManager(self.conversation_store) + def _setup_tracing_provider(self) -> None: """Set up our own TracerProvider so we can add processors.""" try: @@ -79,10 +85,20 @@ def _setup_agent_framework_tracing(self) -> None: # Configure Agent Framework tracing only if ENABLE_OTEL is set if os.environ.get("ENABLE_OTEL"): try: - from agent_framework.observability import setup_observability - - setup_observability(enable_sensitive_data=True) - logger.info("Enabled Agent Framework observability") + from agent_framework.observability import OBSERVABILITY_SETTINGS, setup_observability + + # Only configure if not already executed + if not OBSERVABILITY_SETTINGS._executed_setup: + # Get OTLP endpoint from either custom or standard env var + # This handles the case where env vars are set after ObservabilitySettings was imported + otlp_endpoint = os.environ.get("OTLP_ENDPOINT") or os.environ.get("OTEL_EXPORTER_OTLP_ENDPOINT") + + # Pass the endpoint explicitly to setup_observability + # This ensures OTLP exporters are created even if env vars were set late + setup_observability(enable_sensitive_data=True, otlp_endpoint=otlp_endpoint) + logger.info("Enabled Agent Framework observability") + else: + logger.debug("Agent Framework observability already configured") except Exception as e: logger.warning(f"Failed to enable Agent Framework observability: {e}") else: @@ -173,7 +189,7 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - entity_info = 
self.get_entity_info(entity_id) # Trigger lazy loading (will return from cache if already loaded) - entity_obj = await self.entity_discovery.load_entity(entity_id) + entity_obj = await self.entity_discovery.load_entity(entity_id, checkpoint_manager=self.checkpoint_manager) if not entity_obj: raise EntityNotFoundError(f"Entity object for '{entity_id}' not found") @@ -190,6 +206,15 @@ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) - yield event elif entity_info.type == "workflow": async for event in self._execute_workflow(entity_obj, request, trace_collector): + # Log RequestInfoEvent for debugging HIL flow + event_class = event.__class__.__name__ if hasattr(event, "__class__") else type(event).__name__ + if event_class == "RequestInfoEvent": + logger.info("🔔 [EXECUTOR] RequestInfoEvent detected from workflow!") + logger.info(f" request_id: {getattr(event, 'request_id', 'N/A')}") + logger.info(f" source_executor_id: {getattr(event, 'source_executor_id', 'N/A')}") + logger.info(f" request_type: {getattr(event, 'request_type', 'N/A')}") + data = getattr(event, "data", None) + logger.info(f" data type: {type(data).__name__ if data else 'None'}") yield event else: raise ValueError(f"Unsupported entity type: {entity_info.type}") @@ -289,7 +314,7 @@ async def _execute_agent( async def _execute_workflow( self, workflow: Any, request: AgentFrameworkRequest, trace_collector: Any ) -> AsyncGenerator[Any, None]: - """Execute Agent Framework workflow with trace collection. + """Execute Agent Framework workflow with checkpoint support via conversation items. 
Args: workflow: Workflow object to execute @@ -300,23 +325,199 @@ async def _execute_workflow( Workflow events and trace events """ try: - # Get input data directly from request.input field - input_data = request.input - logger.debug(f"Using input field: {type(input_data)}") + entity_id = request.get_entity_id() or "unknown" + + # Get or create session conversation for checkpoint storage + conversation_id = request.get_conversation_id() + if not conversation_id: + # Create default session if not provided + import time + import uuid + + conversation_id = f"session_{entity_id}_{uuid.uuid4().hex[:8]}" + logger.info(f"Created new workflow session: {conversation_id}") + + # Create conversation in store + self.conversation_store.create_conversation( + metadata={ + "entity_id": entity_id, + "type": "workflow_session", + "created_at": str(int(time.time())), + }, + conversation_id=conversation_id, + ) + else: + # Validate conversation exists, create if missing (handles deleted conversations) + import time + + existing = self.conversation_store.get_conversation(conversation_id) + if not existing: + logger.warning(f"Conversation {conversation_id} not found (may have been deleted), recreating") + self.conversation_store.create_conversation( + metadata={ + "entity_id": entity_id, + "type": "workflow_session", + "created_at": str(int(time.time())), + }, + conversation_id=conversation_id, + ) + + # Get session-scoped checkpoint storage (InMemoryCheckpointStorage from conv_data) + # Each conversation has its own storage instance, providing automatic session isolation. + # This storage is passed to workflow.run_stream() which sets it as runtime override, + # ensuring all checkpoint operations (save/load) use THIS conversation's storage. + # The framework guarantees runtime storage takes precedence over build-time storage. 
+ checkpoint_storage = self.checkpoint_manager.get_checkpoint_storage(conversation_id) + + # Check for HIL responses first + hil_responses = self._extract_workflow_hil_responses(request.input) + + # Determine checkpoint_id (explicit or auto-latest for HIL responses) + checkpoint_id = None + if request.extra_body and "checkpoint_id" in request.extra_body: + checkpoint_id = request.extra_body["checkpoint_id"] + logger.debug(f"Using explicit checkpoint_id from request: {checkpoint_id}") + elif hil_responses: + # Only auto-resume from latest checkpoint when we have HIL responses + # Regular "Run" clicks should start fresh, not resume from checkpoints + checkpoints = await checkpoint_storage.list_checkpoints() # No workflow_id filter needed! + if checkpoints: + latest = max(checkpoints, key=lambda cp: cp.timestamp) + checkpoint_id = latest.checkpoint_id + logger.info(f"Auto-resuming from latest checkpoint in session {conversation_id}: {checkpoint_id}") + else: + logger.warning(f"HIL responses received but no checkpoints in session {conversation_id}") + + if hil_responses: + # HIL continuation mode requires checkpointing + if not checkpoint_id: + error_msg = ( + "Cannot process HIL responses without a checkpoint. " + "Workflows using HIL must be configured with .with_checkpointing() " + "and a checkpoint must exist before sending responses." 
+ ) + logger.error(error_msg) + yield {"type": "error", "message": error_msg} + return + + logger.info(f"Resuming workflow with HIL responses for {len(hil_responses)} request(s)") + + # Unwrap primitive responses if they're wrapped in {response: value} format + from ._utils import parse_input_for_type + + unwrapped_responses = {} + for request_id, response_value in hil_responses.items(): + if isinstance(response_value, dict) and "response" in response_value: + response_value = response_value["response"] + unwrapped_responses[request_id] = response_value + + hil_responses = unwrapped_responses + + # NOTE: Two-step approach for stateless HTTP (framework limitation): + # 1. Restore checkpoint to load pending requests into workflow's in-memory state + # 2. Then send responses using send_responses_streaming + # Future: Framework should support run_stream(checkpoint_id, responses) in single call + # (checkpoint_id is guaranteed to exist due to earlier validation) + logger.debug(f"Restoring checkpoint {checkpoint_id} then sending HIL responses") + + try: + # Step 1: Restore checkpoint to populate workflow's in-memory pending requests + restored = False + async for _event in workflow.run_stream( + checkpoint_id=checkpoint_id, checkpoint_storage=checkpoint_storage + ): + restored = True + break # Stop immediately after restoration, don't process events + + if not restored: + raise RuntimeError("Checkpoint restoration did not yield any events") + + # Reset running flags so we can call send_responses_streaming + if hasattr(workflow, "_is_running"): + workflow._is_running = False + if hasattr(workflow, "_runner") and hasattr(workflow._runner, "_running"): + workflow._runner._running = False + + # Extract response types from restored workflow and convert responses to proper types + try: + if hasattr(workflow, "_runner") and hasattr(workflow._runner, "context"): + runner_context = workflow._runner.context + pending_requests_dict = await 
runner_context.get_pending_request_info_events() + + converted_responses = {} + for request_id, response_value in hil_responses.items(): + if request_id in pending_requests_dict: + pending_request = pending_requests_dict[request_id] + if hasattr(pending_request, "response_type"): + response_type = pending_request.response_type + try: + response_value = parse_input_for_type(response_value, response_type) + logger.debug( + f"Converted HIL response for {request_id} to {type(response_value)}" + ) + except Exception as e: + logger.warning(f"Failed to convert HIL response for {request_id}: {e}") + + converted_responses[request_id] = response_value + + hil_responses = converted_responses + except Exception as e: + logger.warning(f"Could not convert HIL responses to proper types: {e}") + + # Step 2: Now send responses to the in-memory workflow + async for event in workflow.send_responses_streaming(hil_responses): + for trace_event in trace_collector.get_pending_events(): + yield trace_event + yield event - # Parse input based on workflow's expected input type - parsed_input = await self._parse_workflow_input(workflow, input_data) + except (AttributeError, ValueError, RuntimeError) as e: + error_msg = f"Failed to send HIL responses: {e}" + logger.error(error_msg) + yield {"type": "error", "message": error_msg} - logger.debug(f"Executing workflow with parsed input type: {type(parsed_input)}") + elif checkpoint_id: + # Resume from checkpoint (explicit or auto-latest) using unified API + logger.info(f"Resuming workflow from checkpoint {checkpoint_id} in session {conversation_id}") - # Use Agent Framework workflow's native streaming - async for event in workflow.run_stream(parsed_input): - # Yield any pending trace events first - for trace_event in trace_collector.get_pending_events(): - yield trace_event + try: + async for event in workflow.run_stream( + checkpoint_id=checkpoint_id, checkpoint_storage=checkpoint_storage + ): + if isinstance(event, RequestInfoEvent): + 
self._enrich_request_info_event_with_response_schema(event, workflow) + + for trace_event in trace_collector.get_pending_events(): + yield trace_event - # Then yield the workflow event - yield event + yield event + + # Note: Removed break on RequestInfoEvent - continue yielding all events + # The workflow is already paused by ctx.request_info() in the framework + # DevUI should continue yielding events even during HIL pause + + except ValueError as e: + error_msg = f"Cannot resume from checkpoint: {e}" + logger.error(error_msg) + yield {"type": "error", "message": error_msg} + + else: + # First run - pass DevUI's checkpoint storage to enable checkpointing + logger.info(f"Starting fresh workflow in session {conversation_id}") + + parsed_input = await self._parse_workflow_input(workflow, request.input) + + async for event in workflow.run_stream(parsed_input, checkpoint_storage=checkpoint_storage): + if isinstance(event, RequestInfoEvent): + self._enrich_request_info_event_with_response_schema(event, workflow) + + for trace_event in trace_collector.get_pending_events(): + yield trace_event + + yield event + + # Note: Removed break on RequestInfoEvent - continue yielding all events + # The workflow is already paused by ctx.request_info() in the framework + # DevUI should continue yielding events even during HIL pause except Exception as e: logger.error(f"Error in workflow execution: {e}") @@ -569,6 +770,59 @@ def _get_start_executor_message_types(self, workflow: Any) -> tuple[Any | None, return start_executor, message_types + def _extract_workflow_hil_responses(self, input_data: Any) -> dict[str, Any] | None: + """Extract workflow HIL responses from OpenAI input format. 
+ + Looks for special content type: workflow_hil_response + + Args: + input_data: OpenAI ResponseInputParam + + Returns: + Dict of {request_id: response_value} if found, None otherwise + """ + if not isinstance(input_data, list): + return None + + for item in input_data: + if isinstance(item, dict) and item.get("type") == "message": + message_content = item.get("content", []) + + if isinstance(message_content, list): + for content_item in message_content: + if isinstance(content_item, dict): + content_type = content_item.get("type") + + if content_type == "workflow_hil_response": + # Extract responses dict + # dict.get() returns Any, so we explicitly type it + responses: dict[str, Any] = content_item.get("responses", {}) # type: ignore[assignment] + logger.info(f"Found workflow HIL responses: {list(responses.keys())}") + return responses + + return None + + def _get_or_create_conversation(self, conversation_id: str, entity_id: str) -> Any: + """Get existing conversation or create a new one. + + Args: + conversation_id: Conversation ID from frontend + entity_id: Entity ID (e.g., "spam_workflow") for metadata filtering + + Returns: + Conversation object + """ + conversation = self.conversation_store.get_conversation(conversation_id) + if not conversation: + # Create conversation with frontend's ID + # Use agent_id in metadata so it can be filtered by list_conversations(agent_id=...) + conversation = self.conversation_store.create_conversation( + metadata={"agent_id": entity_id}, conversation_id=conversation_id + ) + logger.info(f"Created conversation {conversation_id} for entity {entity_id}") + + return conversation + def _parse_structured_workflow_input(self, workflow: Any, input_data: dict[str, Any]) -> Any: """Parse structured input data for workflow execution. 
@@ -644,3 +898,53 @@ def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any: except Exception as e: logger.debug(f"Error parsing workflow input: {e}") return raw_input + + def _enrich_request_info_event_with_response_schema(self, event: Any, workflow: Any) -> None: + """Extract response type from workflow executor and attach response schema to RequestInfoEvent. + + Args: + event: RequestInfoEvent to enrich + workflow: Workflow object containing executors + """ + try: + from agent_framework_devui._utils import extract_response_type_from_executor, generate_input_schema + + # Get source executor ID and request type from event + source_executor_id = getattr(event, "source_executor_id", None) + request_type = getattr(event, "request_type", None) + + if not source_executor_id or not request_type: + logger.debug("RequestInfoEvent missing source_executor_id or request_type") + return + + # Find the source executor in the workflow + if not hasattr(workflow, "executors") or not isinstance(workflow.executors, dict): + logger.debug("Workflow doesn't have executors dict") + return + + source_executor = workflow.executors.get(source_executor_id) + if not source_executor: + logger.debug(f"Could not find executor '{source_executor_id}' in workflow") + return + + # Extract response type from the executor's handler signature + response_type = extract_response_type_from_executor(source_executor, request_type) + + if response_type: + # Generate JSON schema for response type + response_schema = generate_input_schema(response_type) + + # Attach response_schema to event for mapper to include in output + event._response_schema = response_schema + + logger.debug(f"Extracted response schema for {request_type.__name__}: {response_schema}") + else: + # Even if extraction fails, provide a reasonable default to avoid warnings + logger.debug( + f"Could not extract response type for {request_type.__name__}, using default string schema" + ) + response_schema = {"type": "string"} 
+ event._response_schema = response_schema + + except Exception as e: + logger.warning(f"Failed to enrich RequestInfoEvent with response schema: {e}") diff --git a/python/packages/devui/agent_framework_devui/_mapper.py b/python/packages/devui/agent_framework_devui/_mapper.py index af127e6d3d..b8d5bf4526 100644 --- a/python/packages/devui/agent_framework_devui/_mapper.py +++ b/python/packages/devui/agent_framework_devui/_mapper.py @@ -34,6 +34,9 @@ ResponseFunctionCallArgumentsDeltaEvent, ResponseFunctionResultComplete, ResponseFunctionToolCall, + ResponseOutputData, + ResponseOutputFile, + ResponseOutputImage, ResponseOutputItemAddedEvent, ResponseOutputMessage, ResponseOutputText, @@ -160,7 +163,7 @@ async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> if isinstance(raw_event, ResponseTraceEvent): return [ ResponseTraceEventComplete( - type="response.trace.complete", + type="response.trace.completed", data=raw_event.data, item_id=context["item_id"], sequence_number=self._next_sequence(context), @@ -273,7 +276,7 @@ async def aggregate_to_response(self, events: Sequence[Any], request: AgentFrame id=f"resp_{uuid.uuid4().hex[:12]}", object="response", created_at=datetime.now().timestamp(), - model=request.model, + model=request.model or "devui", output=[response_output_message], usage=usage, parallel_tool_calls=False, @@ -338,6 +341,147 @@ def _next_sequence(self, context: dict[str, Any]) -> int: context["sequence_counter"] += 1 return int(context["sequence_counter"]) + def _serialize_value(self, value: Any) -> Any: + """Recursively serialize a value, handling complex nested objects. + + Handles: + - Primitives (str, int, float, bool, None) + - Collections (list, tuple, set, dict) + - SerializationMixin objects (ChatMessage, etc.) 
- calls to_dict() + - Pydantic models - calls model_dump() + - Dataclasses - recursively serializes with asdict() + - Enums - extracts value + - datetime/date/UUID - converts to ISO string + + Args: + value: Value to serialize + + Returns: + JSON-serializable representation + """ + from dataclasses import is_dataclass + from datetime import date, datetime + from enum import Enum + from uuid import UUID + + # Handle None + if value is None: + return None + + # Handle primitives + if isinstance(value, (str, int, float, bool)): + return value + + # Handle datetime/date - convert to ISO format + if isinstance(value, datetime): + return value.isoformat() + if isinstance(value, date): + return value.isoformat() + + # Handle UUID - convert to string + if isinstance(value, UUID): + return str(value) + + # Handle Enums - extract value + if isinstance(value, Enum): + return value.value + + # Handle lists/tuples/sets - recursively serialize elements + if isinstance(value, (list, tuple)): + return [self._serialize_value(item) for item in value] + if isinstance(value, set): + return [self._serialize_value(item) for item in value] + + # Handle dicts - recursively serialize values + if isinstance(value, dict): + return {k: self._serialize_value(v) for k, v in value.items()} + + # Handle SerializationMixin (like ChatMessage) - call to_dict() + if hasattr(value, "to_dict") and callable(getattr(value, "to_dict", None)): + try: + return value.to_dict() # type: ignore[attr-defined, no-any-return] + except Exception as e: + logger.debug(f"Failed to serialize with to_dict(): {e}") + return str(value) + + # Handle Pydantic models - call model_dump() + if hasattr(value, "model_dump") and callable(getattr(value, "model_dump", None)): + try: + return value.model_dump() # type: ignore[attr-defined, no-any-return] + except Exception as e: + logger.debug(f"Failed to serialize Pydantic model: {e}") + return str(value) + + # Handle dataclasses - recursively serialize with asdict + if 
is_dataclass(value) and not isinstance(value, type): + try: + from dataclasses import asdict + + # Use our custom serializer as dict_factory + return asdict(value, dict_factory=lambda items: {k: self._serialize_value(v) for k, v in items}) + except Exception as e: + logger.debug(f"Failed to serialize nested dataclass: {e}") + return str(value) + + # Fallback: convert to string (for unknown types) + logger.debug(f"Serializing unknown type {type(value).__name__} as string") + return str(value) + + def _serialize_request_data(self, request_data: Any) -> dict[str, Any]: + """Serialize RequestInfoMessage to dict for JSON transmission. + + Handles nested SerializationMixin objects (like ChatMessage) within dataclasses. + + Args: + request_data: The RequestInfoMessage instance + + Returns: + Serialized dict representation + """ + from dataclasses import asdict, fields, is_dataclass + + if request_data is None: + return {} + + # Handle dict first (most common) + if isinstance(request_data, dict): + return {k: self._serialize_value(v) for k, v in request_data.items()} + + # Handle dataclasses with nested SerializationMixin objects + # We can't use asdict() directly because it doesn't handle ChatMessage + if is_dataclass(request_data) and not isinstance(request_data, type): + try: + # Manually serialize each field to handle nested SerializationMixin + result = {} + for field in fields(request_data): + field_value = getattr(request_data, field.name) + result[field.name] = self._serialize_value(field_value) + return result + except Exception as e: + logger.debug(f"Failed to serialize dataclass fields: {e}") + # Fallback to asdict() if our custom serialization fails + try: + return asdict(request_data) # type: ignore[arg-type] + except Exception as e2: + logger.debug(f"Failed to serialize dataclass with asdict(): {e2}") + + # Handle Pydantic models (have model_dump method) + if hasattr(request_data, "model_dump") and callable(getattr(request_data, "model_dump", None)): + try: + 
return request_data.model_dump() # type: ignore[attr-defined, no-any-return] + except Exception as e: + logger.debug(f"Failed to serialize Pydantic model: {e}") + + # Handle SerializationMixin (have to_dict method) + if hasattr(request_data, "to_dict") and callable(getattr(request_data, "to_dict", None)): + try: + return request_data.to_dict() # type: ignore[attr-defined, no-any-return] + except Exception as e: + logger.debug(f"Failed to serialize with to_dict(): {e}") + + # Fallback: string representation + return {"raw": str(request_data)} + async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> Sequence[Any]: """Convert agent text updates to proper content part events. @@ -495,8 +639,9 @@ async def _convert_agent_lifecycle_event(self, event: Any, context: dict[str, An from .models._openai_custom import AgentCompletedEvent, AgentFailedEvent, AgentStartedEvent try: - # Get model name from context (the agent name) - model_name = context.get("request", {}).model if context.get("request") else "agent" + # Get model name from request or use 'devui' as default + request_obj = context.get("request") + model_name = request_obj.model if request_obj and request_obj.model else "devui" if isinstance(event, AgentStartedEvent): execution_id = f"agent_{uuid4().hex[:12]}" @@ -603,16 +748,16 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Return proper OpenAI event objects events: list[Any] = [] - # Determine the model name - use request model or default to "workflow" - # The request model will be the agent name for agents, workflow name for workflows - model_name = context.get("request", {}).model if context.get("request") else "workflow" + # Get model name from request or use 'devui' as default + request_obj = context.get("request") + model_name = request_obj.model if request_obj and request_obj.model else "devui" # Create a full Response object with all required fields response_obj = Response( 
id=f"resp_{workflow_id}", object="response", created_at=float(time.time()), - model=model_name, # Use the actual model/agent name + model=model_name, output=[], # Empty output list initially status="in_progress", # Required fields with safe defaults @@ -637,14 +782,73 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> return events - if event_class in ["WorkflowCompletedEvent", "WorkflowOutputEvent"]: + # Handle WorkflowOutputEvent separately to preserve output data + if event_class == "WorkflowOutputEvent": + output_data = getattr(event, "data", None) + source_executor_id = getattr(event, "source_executor_id", "unknown") + + if output_data is not None: + # Import required types + from openai.types.responses import ResponseOutputMessage, ResponseOutputText + from openai.types.responses.response_output_item_added_event import ResponseOutputItemAddedEvent + + # Increment output index for each yield_output + context["output_index"] = context.get("output_index", -1) + 1 + + # Extract text from output data based on type + text = None + if hasattr(output_data, "__class__") and output_data.__class__.__name__ == "ChatMessage": + # Handle ChatMessage (from Magentic and AgentExecutor with output_response=True) + text = getattr(output_data, "text", None) + if not text: + # Fallback to string representation + text = str(output_data) + elif isinstance(output_data, str): + # String output + text = output_data + else: + # Object/dict/list → JSON string + try: + text = json.dumps(output_data, indent=2) + except (TypeError, ValueError): + # Fallback to string representation if not JSON serializable + text = str(output_data) + + # Create output message with text content + text_content = ResponseOutputText(type="output_text", text=text, annotations=[]) + + output_message = ResponseOutputMessage( + type="message", + id=f"msg_{uuid4().hex[:8]}", + role="assistant", + content=[text_content], + status="completed", + ) + + # Emit output_item.added for each 
yield_output + logger.debug( + f"WorkflowOutputEvent converted to output_item.added " + f"(executor: {source_executor_id}, length: {len(text)})" + ) + return [ + ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=output_message, + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + ] + + # Handle WorkflowCompletedEvent - emit response.completed + if event_class == "WorkflowCompletedEvent": workflow_id = context.get("workflow_id", str(uuid4())) # Import Response type for proper construction from openai.types.responses import Response - # Get model name from context - model_name = context.get("request", {}).model if context.get("request") else "workflow" + # Get model name from request or use 'devui' as default + request_obj = context.get("request") + model_name = request_obj.model if request_obj and request_obj.model else "devui" # Create a full Response object for completed state response_obj = Response( @@ -652,7 +856,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> object="response", created_at=float(time.time()), model=model_name, - output=[], # Output should be populated by this point from text streaming + output=[], # Output items already sent via output_item.added events status="completed", parallel_tool_calls=False, tool_choice="none", @@ -672,8 +876,9 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # Import Response and ResponseError types from openai.types.responses import Response, ResponseError - # Get model name from context - model_name = context.get("request", {}).model if context.get("request") else "workflow" + # Get model name from request or use 'devui' as default + request_obj = context.get("request") + model_name = request_obj.model if request_obj and request_obj.model else "devui" # Create error object error_message = str(error_info) if error_info else "Unknown error" @@ -778,8 +983,77 @@ async def 
_convert_workflow_event(self, event: Any, context: dict[str, Any]) -> ) ] - # Handle informational workflow events (status, warnings, errors) - if event_class in ["WorkflowStatusEvent", "WorkflowWarningEvent", "WorkflowErrorEvent", "RequestInfoEvent"]: + # Handle RequestInfoEvent specially - emit as HIL event with schema + if event_class == "RequestInfoEvent": + from .models._openai_custom import ResponseRequestInfoEvent + + request_id = getattr(event, "request_id", "") + source_executor_id = getattr(event, "source_executor_id", "") + request_type_class = getattr(event, "request_type", None) + request_data = getattr(event, "data", None) + + logger.info("📨 [MAPPER] Processing RequestInfoEvent") + logger.info(f" request_id: {request_id}") + logger.info(f" source_executor_id: {source_executor_id}") + logger.info(f" request_type_class: {request_type_class}") + logger.info(f" request_data: {request_data}") + + # Serialize request data + serialized_data = self._serialize_request_data(request_data) + logger.info(f" serialized_data: {serialized_data}") + + # Get request type name for debugging + request_type_name = "Unknown" + if request_type_class: + request_type_name = f"{request_type_class.__module__}:{request_type_class.__name__}" + + # Get response schema that was attached by executor + # This tells the UI what format to collect from the user + response_schema = getattr(event, "_response_schema", None) + if not response_schema: + # Fallback to string if somehow not set (shouldn't happen with current executor enrichment) + logger.warning(f"⚠️ Response schema not found for {request_type_name}, using default") + response_schema = {"type": "string"} + else: + logger.info(f" response_schema: {response_schema}") + + # Wrap primitive schemas in object for form rendering + # The UI's SchemaFormRenderer expects an object with properties + if response_schema.get("type") in ["string", "integer", "number", "boolean"]: + # Wrap primitive type in object with "response" field + 
wrapped_schema = { + "type": "object", + "properties": {"response": response_schema}, + "required": ["response"], + } + logger.info(" wrapped primitive schema in object") + else: + wrapped_schema = response_schema + + # Create HIL request event with response schema + hil_event = ResponseRequestInfoEvent( + type="response.request_info.requested", + request_id=request_id, + source_executor_id=source_executor_id, + request_type=request_type_name, + request_data=serialized_data, + request_schema=wrapped_schema, # Send wrapped schema for form rendering + response_schema=response_schema, # Keep original for reference + item_id=context["item_id"], + output_index=context.get("output_index", 0), + sequence_number=self._next_sequence(context), + timestamp=datetime.now().isoformat(), + ) + + logger.info("✅ [MAPPER] Created ResponseRequestInfoEvent:") + logger.info(f" type: {hil_event.type}") + logger.info(f" request_id: {hil_event.request_id}") + logger.info(f" sequence_number: {hil_event.sequence_number}") + + return [hil_event] + + # Handle other informational workflow events (status, warnings, errors) + if event_class in ["WorkflowStatusEvent", "WorkflowWarningEvent", "WorkflowErrorEvent"]: # These are informational events that don't map to OpenAI lifecycle events # Convert them to trace events for debugging visibility event_data: dict[str, Any] = {} @@ -792,13 +1066,10 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> elif event_class == "WorkflowErrorEvent": event_data["message"] = str(getattr(event, "message", "")) event_data["error"] = str(getattr(event, "error", "")) - elif event_class == "RequestInfoEvent": - request_info = getattr(event, "data", {}) - event_data["request_info"] = request_info if isinstance(request_info, dict) else str(request_info) # Create a trace event for debugging trace_event = ResponseTraceEventComplete( - type="response.trace.complete", + type="response.trace.completed", data={ "trace_type": "workflow_info", 
"event_type": event_class, @@ -813,6 +1084,237 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> return [trace_event] + # Handle Magentic-specific events + if event_class == "MagenticAgentDeltaEvent": + agent_id = getattr(event, "agent_id", "unknown_agent") + text = getattr(event, "text", None) + + if text: + events = [] + + # Track Magentic agent messages separately from regular messages + # Use timestamp to ensure uniqueness for multiple runs of same agent + magentic_key = f"magentic_message_{agent_id}" + + # Check if this is the first delta from this agent (need to create message container) + if magentic_key not in context: + # Create a unique message ID for this agent's streaming session + message_id = f"msg_{agent_id}_{uuid4().hex[:8]}" + context[magentic_key] = message_id + context["output_index"] = context.get("output_index", -1) + 1 + + # Import required types + from openai.types.responses import ResponseOutputMessage, ResponseOutputText + from openai.types.responses.response_content_part_added_event import ( + ResponseContentPartAddedEvent, + ) + from openai.types.responses.response_output_item_added_event import ResponseOutputItemAddedEvent + + # Emit message output item (container for the agent's message) + # This matches what _convert_agent_update does for regular agents + events.append( + ResponseOutputItemAddedEvent( + type="response.output_item.added", + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + item=ResponseOutputMessage( + type="message", + id=message_id, + role="assistant", + content=[], + status="in_progress", + # Add metadata to identify this as a Magentic agent message + metadata={"agent_id": agent_id, "source": "magentic"}, # type: ignore[call-arg] + ), + ) + ) + + # Add content part for text (establishes the text container) + events.append( + ResponseContentPartAddedEvent( + type="response.content_part.added", + output_index=context["output_index"], + 
content_index=0, + item_id=message_id, + sequence_number=self._next_sequence(context), + part=ResponseOutputText(type="output_text", text="", annotations=[]), + ) + ) + + # Get the message ID for this agent + message_id = context[magentic_key] + + # Emit text delta event using the message ID (matches regular agent behavior) + events.append( + ResponseTextDeltaEvent( + type="response.output_text.delta", + output_index=context["output_index"], + content_index=0, # Always 0 for single text content + item_id=message_id, + delta=text, + logprobs=[], + sequence_number=self._next_sequence(context), + ) + ) + return events + + # Handle function calls from Magentic agents + if getattr(event, "function_call_id", None) and getattr(event, "function_call_name", None): + # Handle function call initiation + function_call_id = getattr(event, "function_call_id", None) + function_call_name = getattr(event, "function_call_name", None) + function_call_arguments = getattr(event, "function_call_arguments", None) + + # Track function call for accumulating arguments + context["active_function_calls"][function_call_id] = { + "item_id": function_call_id, + "name": function_call_name, + "arguments_chunks": [], + } + + # Emit function call output item + return [ + ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=ResponseFunctionToolCall( + id=function_call_id, + call_id=function_call_id, + name=function_call_name, + arguments=json.dumps(function_call_arguments) if function_call_arguments else "", + type="function_call", + status="in_progress", + ), + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + ] + + # For other non-text deltas, emit as trace for debugging + return [ + ResponseTraceEventComplete( + type="response.trace.completed", + data={ + "trace_type": "magentic_delta", + "agent_id": agent_id, + "function_call_id": getattr(event, "function_call_id", None), + "function_call_name": getattr(event, "function_call_name", 
None), + "function_result_id": getattr(event, "function_result_id", None), + "timestamp": datetime.now().isoformat(), + }, + span_id=f"magentic_delta_{uuid4().hex[:8]}", + item_id=context["item_id"], + output_index=context.get("output_index", 0), + sequence_number=self._next_sequence(context), + ) + ] + + if event_class == "MagenticAgentMessageEvent": + agent_id = getattr(event, "agent_id", "unknown_agent") + message = getattr(event, "message", None) + + # Track Magentic agent messages + magentic_key = f"magentic_message_{agent_id}" + + # Check if we were streaming for this agent + if magentic_key in context: + # Mark the streaming message as complete + message_id = context[magentic_key] + + # Import required types + from openai.types.responses import ResponseOutputMessage + from openai.types.responses.response_output_item_done_event import ResponseOutputItemDoneEvent + + # Extract text from ChatMessage for the completed message + text = None + if message and hasattr(message, "text"): + text = message.text + + # Emit output_item.done to mark message as complete + events = [ + ResponseOutputItemDoneEvent( + type="response.output_item.done", + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + item=ResponseOutputMessage( + type="message", + id=message_id, + role="assistant", + content=[], # Content already streamed via deltas + status="completed", + metadata={"agent_id": agent_id, "source": "magentic"}, # type: ignore[call-arg] + ), + ) + ] + + # Clean up context for this agent + del context[magentic_key] + + logger.debug(f"MagenticAgentMessageEvent from {agent_id} marked streaming message as complete") + return events + # No streaming occurred, create a complete message (shouldn't happen normally) + # Extract text from ChatMessage + text = None + if message and hasattr(message, "text"): + text = message.text + + if text: + # Emit as output item for this agent + from openai.types.responses import ResponseOutputMessage, 
ResponseOutputText + from openai.types.responses.response_output_item_added_event import ResponseOutputItemAddedEvent + + context["output_index"] = context.get("output_index", -1) + 1 + + text_content = ResponseOutputText(type="output_text", text=text, annotations=[]) + + output_message = ResponseOutputMessage( + type="message", + id=f"msg_{agent_id}_{uuid4().hex[:8]}", + role="assistant", + content=[text_content], + status="completed", + metadata={"agent_id": agent_id, "source": "magentic"}, # type: ignore[call-arg] + ) + + logger.debug( + f"MagenticAgentMessageEvent from {agent_id} converted to output_item.added (non-streaming)" + ) + return [ + ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=output_message, + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + ] + + if event_class == "MagenticOrchestratorMessageEvent": + orchestrator_id = getattr(event, "orchestrator_id", "orchestrator") + message = getattr(event, "message", None) + kind = getattr(event, "kind", "unknown") + + # Extract text from ChatMessage + text = None + if message and hasattr(message, "text"): + text = message.text + + # Emit as trace event for orchestrator messages (typically task ledger, instructions) + return [ + ResponseTraceEventComplete( + type="response.trace.completed", + data={ + "trace_type": "magentic_orchestrator", + "orchestrator_id": orchestrator_id, + "kind": kind, + "text": text or str(message), + "timestamp": datetime.now().isoformat(), + }, + span_id=f"magentic_orch_{uuid4().hex[:8]}", + item_id=context["item_id"], + output_index=context.get("output_index", 0), + sequence_number=self._next_sequence(context), + ) + ] + # For unknown/legacy events, still emit as workflow event for backward compatibility # Get event data and serialize if it's a SerializationMixin raw_event_data = getattr(event, "data", None) @@ -827,7 +1329,7 @@ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> # 
Create structured workflow event (keeping for backward compatibility) workflow_event = ResponseWorkflowEventComplete( - type="response.workflow_event.complete", + type="response.workflow_event.completed", data={ "event_type": event.__class__.__name__, "data": serialized_event_data, @@ -1053,30 +1555,227 @@ async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> Non # NO EVENT RETURNED - usage goes in final Response only return - async def _map_data_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete: - """Map DataContent to structured trace event.""" - return ResponseTraceEventComplete( - type="response.trace.complete", - data={ - "content_type": "data", - "data": getattr(content, "data", None), - "mime_type": getattr(content, "mime_type", "application/octet-stream"), - "size_bytes": len(str(getattr(content, "data", ""))) if getattr(content, "data", None) else 0, - "timestamp": datetime.now().isoformat(), - }, - item_id=context["item_id"], + async def _map_data_content( + self, content: Any, context: dict[str, Any] + ) -> ResponseOutputItemAddedEvent | ResponseTraceEventComplete: + """Map DataContent to proper output item (image/file/data) or fallback to trace. 
+ + Maps Agent Framework DataContent to appropriate output types: + - Images (image/*) → ResponseOutputImage + - Common files (pdf, audio, video) → ResponseOutputFile + - Generic data → ResponseOutputData + - Unknown/debugging content → ResponseTraceEventComplete (fallback) + """ + mime_type = getattr(content, "mime_type", "application/octet-stream") + item_id = f"item_{uuid.uuid4().hex[:16]}" + + # Extract data/uri + data_value = getattr(content, "data", None) + uri_value = getattr(content, "uri", None) + + # Handle images + if mime_type.startswith("image/"): + # Prefer URI, but create data URI from data if needed + if uri_value: + image_url = uri_value + elif data_value: + # Convert bytes to base64 data URI + import base64 + + if isinstance(data_value, bytes): + b64_data = base64.b64encode(data_value).decode("utf-8") + else: + b64_data = str(data_value) + image_url = f"data:{mime_type};base64,{b64_data}" + else: + # No data available, fallback to trace + logger.warning(f"DataContent with {mime_type} has no data or uri, falling back to trace") + return ResponseTraceEventComplete( + type="response.trace.completed", + data={"content_type": "data", "mime_type": mime_type, "error": "No data or uri"}, + item_id=context["item_id"], + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + + return ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=ResponseOutputImage( # type: ignore[arg-type] + id=item_id, + type="output_image", + image_url=image_url, + mime_type=mime_type, + alt_text=None, + ), + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + + # Handle common file types + if mime_type in [ + "application/pdf", + "audio/mp3", + "audio/wav", + "audio/m4a", + "audio/ogg", + "audio/flac", + "audio/aac", + "audio/mpeg", + "video/mp4", + "video/webm", + ]: + # Determine filename from mime type + ext = mime_type.split("/")[-1] + if ext == "mpeg": + ext = "mp3" # audio/mpeg 
→ .mp3 + filename = f"output.{ext}" + + # Prefer URI + if uri_value: + file_url = uri_value + file_data = None + elif data_value: + # Convert bytes to base64 + import base64 + + if isinstance(data_value, bytes): + b64_data = base64.b64encode(data_value).decode("utf-8") + else: + b64_data = str(data_value) + file_url = f"data:{mime_type};base64,{b64_data}" + file_data = b64_data + else: + # No data available, fallback to trace + logger.warning(f"DataContent with {mime_type} has no data or uri, falling back to trace") + return ResponseTraceEventComplete( + type="response.trace.completed", + data={"content_type": "data", "mime_type": mime_type, "error": "No data or uri"}, + item_id=context["item_id"], + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + + return ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=ResponseOutputFile( # type: ignore[arg-type] + id=item_id, + type="output_file", + filename=filename, + file_url=file_url, + file_data=file_data, + mime_type=mime_type, + ), + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + + # Handle generic data (structured data, JSON, etc.) 
+ data_str = "" + if uri_value: + data_str = uri_value + elif data_value: + if isinstance(data_value, bytes): + try: + data_str = data_value.decode("utf-8") + except UnicodeDecodeError: + # Binary data, encode as base64 for display + import base64 + + data_str = base64.b64encode(data_value).decode("utf-8") + else: + data_str = str(data_value) + + return ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=ResponseOutputData( # type: ignore[arg-type] + id=item_id, + type="output_data", + data=data_str, + mime_type=mime_type, + description=None, + ), output_index=context["output_index"], sequence_number=self._next_sequence(context), ) - async def _map_uri_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete: - """Map UriContent to structured trace event.""" + async def _map_uri_content( + self, content: Any, context: dict[str, Any] + ) -> ResponseOutputItemAddedEvent | ResponseTraceEventComplete: + """Map UriContent to proper output item (image/file) based on MIME type. 
+ + UriContent has a URI and MIME type, so we can create appropriate output items: + - Images → ResponseOutputImage + - Common files → ResponseOutputFile + - Other URIs → ResponseTraceEventComplete (fallback for debugging) + """ + mime_type = getattr(content, "mime_type", "text/plain") + uri = getattr(content, "uri", "") + item_id = f"item_{uuid.uuid4().hex[:16]}" + + if not uri: + # No URI available, fallback to trace + logger.warning("UriContent has no uri, falling back to trace") + return ResponseTraceEventComplete( + type="response.trace.completed", + data={"content_type": "uri", "mime_type": mime_type, "error": "No uri"}, + item_id=context["item_id"], + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + + # Handle images + if mime_type.startswith("image/"): + return ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=ResponseOutputImage( # type: ignore[arg-type] + id=item_id, + type="output_image", + image_url=uri, + mime_type=mime_type, + alt_text=None, + ), + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + + # Handle common file types + if mime_type in [ + "application/pdf", + "audio/mp3", + "audio/wav", + "audio/m4a", + "audio/ogg", + "audio/flac", + "audio/aac", + "audio/mpeg", + "video/mp4", + "video/webm", + ]: + # Extract filename from URI or use generic name + filename = uri.split("/")[-1] if "/" in uri else f"output.{mime_type.split('/')[-1]}" + + return ResponseOutputItemAddedEvent( + type="response.output_item.added", + item=ResponseOutputFile( # type: ignore[arg-type] + id=item_id, + type="output_file", + filename=filename, + file_url=uri, + file_data=None, + mime_type=mime_type, + ), + output_index=context["output_index"], + sequence_number=self._next_sequence(context), + ) + + # For other URI types (text/plain, application/json, etc.), use trace for now + logger.debug(f"UriContent with unsupported MIME type {mime_type}, using trace event") 
return ResponseTraceEventComplete( - type="response.trace.complete", + type="response.trace.completed", data={ "content_type": "uri", - "uri": getattr(content, "uri", ""), - "mime_type": getattr(content, "mime_type", "text/plain"), + "uri": uri, + "mime_type": mime_type, "timestamp": datetime.now().isoformat(), }, item_id=context["item_id"], @@ -1085,9 +1784,15 @@ async def _map_uri_content(self, content: Any, context: dict[str, Any]) -> Respo ) async def _map_hosted_file_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete: - """Map HostedFileContent to structured trace event.""" + """Map HostedFileContent to trace event. + + HostedFileContent references external file IDs (like OpenAI file IDs). + These remain as traces since they're metadata about hosted resources, + not direct content to display. To display them, agents should return + DataContent or UriContent with the actual file data/URL. + """ return ResponseTraceEventComplete( - type="response.trace.complete", + type="response.trace.completed", data={ "content_type": "hosted_file", "file_id": getattr(content, "file_id", "unknown"), @@ -1101,9 +1806,14 @@ async def _map_hosted_file_content(self, content: Any, context: dict[str, Any]) async def _map_hosted_vector_store_content( self, content: Any, context: dict[str, Any] ) -> ResponseTraceEventComplete: - """Map HostedVectorStoreContent to structured trace event.""" + """Map HostedVectorStoreContent to trace event. + + HostedVectorStoreContent references external vector store IDs. + These remain as traces since they're metadata about hosted resources, + not direct content to display. 
+ """ return ResponseTraceEventComplete( - type="response.trace.complete", + type="response.trace.completed", data={ "content_type": "hosted_vector_store", "vector_store_id": getattr(content, "vector_store_id", "unknown"), @@ -1208,7 +1918,7 @@ async def _create_error_response(self, error_message: str, request: AgentFramewo id=f"resp_{uuid.uuid4().hex[:12]}", object="response", created_at=datetime.now().timestamp(), - model=request.model, + model=request.model or "devui", output=[response_output_message], usage=usage, parallel_tool_calls=False, diff --git a/python/packages/devui/agent_framework_devui/_openai/__init__.py b/python/packages/devui/agent_framework_devui/_openai/__init__.py new file mode 100644 index 0000000000..9eadb747c4 --- /dev/null +++ b/python/packages/devui/agent_framework_devui/_openai/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""OpenAI integration for DevUI - proxy support for OpenAI Responses API.""" + +from ._executor import OpenAIExecutor + +__all__ = [ + "OpenAIExecutor", +] diff --git a/python/packages/devui/agent_framework_devui/_openai/_executor.py b/python/packages/devui/agent_framework_devui/_openai/_executor.py new file mode 100644 index 0000000000..1de05bfc2d --- /dev/null +++ b/python/packages/devui/agent_framework_devui/_openai/_executor.py @@ -0,0 +1,270 @@ +# Copyright (c) Microsoft. All rights reserved. + +"""OpenAI Executor - proxies requests to OpenAI Responses API. + +This executor mirrors the AgentFrameworkExecutor interface but routes +requests to OpenAI's API instead of executing local entities. 
+""" + +import logging +import os +from collections.abc import AsyncGenerator +from typing import Any + +from openai import APIStatusError, AsyncOpenAI, AsyncStream, AuthenticationError, PermissionDeniedError, RateLimitError +from openai.types.responses import Response, ResponseStreamEvent + +from .._conversations import ConversationStore +from ..models import AgentFrameworkRequest, OpenAIResponse + +logger = logging.getLogger(__name__) + + +class OpenAIExecutor: + """Executor for OpenAI Responses API - mirrors AgentFrameworkExecutor interface. + + This executor provides the same interface as AgentFrameworkExecutor but proxies + requests to OpenAI's Responses API instead of executing local entities. + + Key features: + - Same execute_streaming() and execute_sync() interface + - Shares ConversationStore with local executor + - Configured via OPENAI_API_KEY environment variable + - Supports all OpenAI Responses API parameters + """ + + def __init__(self, conversation_store: ConversationStore): + """Initialize OpenAI executor. + + Args: + conversation_store: Shared conversation store (works for both local and OpenAI) + """ + self.conversation_store = conversation_store + + # Load configuration from environment + self.api_key = os.getenv("OPENAI_API_KEY") + self.base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1") + self._client: AsyncOpenAI | None = None + + @property + def is_configured(self) -> bool: + """Check if OpenAI executor is properly configured. + + Returns: + True if OPENAI_API_KEY is set + """ + return self.api_key is not None + + def _get_client(self) -> AsyncOpenAI: + """Get or create OpenAI async client. 
+ + Returns: + AsyncOpenAI client instance + + Raises: + ValueError: If OPENAI_API_KEY not configured + """ + if self._client is None: + if not self.api_key: + raise ValueError("OPENAI_API_KEY environment variable not set") + + self._client = AsyncOpenAI( + api_key=self.api_key, + base_url=self.base_url, + ) + logger.debug(f"Created OpenAI client with base_url: {self.base_url}") + + return self._client + + async def execute_streaming(self, request: AgentFrameworkRequest) -> AsyncGenerator[Any, None]: + """Execute request via OpenAI and stream results in OpenAI format. + + This mirrors AgentFrameworkExecutor.execute_streaming() interface. + + Args: + request: Request to execute + + Yields: + OpenAI ResponseStreamEvent objects (already in correct format!) + """ + if not self.is_configured: + logger.error("OpenAI executor not configured (missing OPENAI_API_KEY)") + # Emit proper response.failed event + yield { + "type": "response.failed", + "response": { + "id": f"resp_{os.urandom(16).hex()}", + "status": "failed", + "error": { + "message": "OpenAI not configured on server. Set OPENAI_API_KEY environment variable.", + "type": "configuration_error", + "code": "openai_not_configured", + }, + }, + } + return + + try: + client = self._get_client() + + # Convert AgentFrameworkRequest to OpenAI params + params = request.to_openai_params() + + # Remove DevUI-specific fields that OpenAI doesn't recognize + params.pop("extra_body", None) + + # Conversation ID is now from OpenAI (created via /v1/conversations proxy) + # so we can pass it through! 
+ + # Force streaming mode (remove if already present to avoid duplicate) + params.pop("stream", None) + + logger.info(f"🔀 Proxying to OpenAI Responses API: model={params.get('model')}") + logger.debug(f"Request params: {params}") + + # Call OpenAI Responses API - returns AsyncStream[ResponseStreamEvent] + stream: AsyncStream[ResponseStreamEvent] = await client.responses.create( + **params, + stream=True, # Force streaming + ) + + # Yield events directly - they're already ResponseStreamEvent objects! + # No conversion needed - OpenAI SDK returns proper typed objects + async for event in stream: + yield event + + except AuthenticationError as e: + # 401 - Invalid API key or authentication issue + logger.error(f"OpenAI authentication error: {e}", exc_info=True) + error_body = e.body if hasattr(e, "body") else {} + error_data = error_body.get("error", {}) if isinstance(error_body, dict) else {} + yield { + "type": "response.failed", + "response": { + "id": f"resp_{os.urandom(16).hex()}", + "status": "failed", + "error": { + "message": error_data.get("message", str(e)), + "type": error_data.get("type", "authentication_error"), + "code": error_data.get("code", "invalid_api_key"), + }, + }, + } + except PermissionDeniedError as e: + # 403 - Permission denied + logger.error(f"OpenAI permission denied: {e}", exc_info=True) + error_body = e.body if hasattr(e, "body") else {} + error_data = error_body.get("error", {}) if isinstance(error_body, dict) else {} + yield { + "type": "response.failed", + "response": { + "id": f"resp_{os.urandom(16).hex()}", + "status": "failed", + "error": { + "message": error_data.get("message", str(e)), + "type": error_data.get("type", "permission_denied"), + "code": error_data.get("code", "insufficient_permissions"), + }, + }, + } + except RateLimitError as e: + # 429 - Rate limit exceeded + logger.error(f"OpenAI rate limit exceeded: {e}", exc_info=True) + error_body = e.body if hasattr(e, "body") else {} + error_data = error_body.get("error", 
{}) if isinstance(error_body, dict) else {} + yield { + "type": "response.failed", + "response": { + "id": f"resp_{os.urandom(16).hex()}", + "status": "failed", + "error": { + "message": error_data.get("message", str(e)), + "type": error_data.get("type", "rate_limit_error"), + "code": error_data.get("code", "rate_limit_exceeded"), + }, + }, + } + except APIStatusError as e: + # Other OpenAI API errors + logger.error(f"OpenAI API error: {e}", exc_info=True) + error_body = e.body if hasattr(e, "body") else {} + error_data = error_body.get("error", {}) if isinstance(error_body, dict) else {} + yield { + "type": "response.failed", + "response": { + "id": f"resp_{os.urandom(16).hex()}", + "status": "failed", + "error": { + "message": error_data.get("message", str(e)), + "type": error_data.get("type", "api_error"), + "code": error_data.get("code", "unknown_error"), + }, + }, + } + except Exception as e: + # Catch-all for unexpected errors + logger.error(f"Unexpected error in OpenAI proxy: {e}", exc_info=True) + yield { + "type": "response.failed", + "response": { + "id": f"resp_{os.urandom(16).hex()}", + "status": "failed", + "error": { + "message": f"Unexpected error: {e!s}", + "type": "internal_error", + "code": "unexpected_error", + }, + }, + } + + async def execute_sync(self, request: AgentFrameworkRequest) -> OpenAIResponse: + """Execute request via OpenAI and return complete response. + + This mirrors AgentFrameworkExecutor.execute_sync() interface. + + Args: + request: Request to execute + + Returns: + Final OpenAI Response object + + Raises: + ValueError: If OpenAI not configured + Exception: If OpenAI API call fails + """ + if not self.is_configured: + raise ValueError("OpenAI not configured on server. 
Set OPENAI_API_KEY environment variable.") + + try: + client = self._get_client() + + # Convert AgentFrameworkRequest to OpenAI params + params = request.to_openai_params() + + # Remove DevUI-specific fields + params.pop("extra_body", None) + + # Force non-streaming mode (remove if already present to avoid duplicate) + params.pop("stream", None) + + logger.info(f"🔀 Proxying to OpenAI Responses API (non-streaming): model={params.get('model')}") + logger.debug(f"Request params: {params}") + + # Call OpenAI Responses API - returns Response object + response: Response = await client.responses.create( + **params, + stream=False, # Force non-streaming + ) + + return response + + except Exception as e: + logger.error(f"OpenAI proxy error: {e}", exc_info=True) + raise + + async def close(self) -> None: + """Close the OpenAI client and release resources.""" + if self._client: + await self._client.close() + self._client = None + logger.debug("Closed OpenAI client") diff --git a/python/packages/devui/agent_framework_devui/_server.py b/python/packages/devui/agent_framework_devui/_server.py index 3e3c538098..d8f01d9527 100644 --- a/python/packages/devui/agent_framework_devui/_server.py +++ b/python/packages/devui/agent_framework_devui/_server.py @@ -5,7 +5,9 @@ import inspect import json import logging -from collections.abc import AsyncGenerator +import os +import secrets +from collections.abc import AsyncGenerator, Awaitable, Callable from contextlib import asynccontextmanager from typing import Any @@ -14,15 +16,20 @@ from fastapi.responses import JSONResponse, StreamingResponse from fastapi.staticfiles import StaticFiles +from ._deployment import DeploymentManager from ._discovery import EntityDiscovery from ._executor import AgentFrameworkExecutor from ._mapper import MessageMapper -from .models import AgentFrameworkRequest, OpenAIError -from .models._discovery_models import DiscoveryResponse, EntityInfo +from ._openai import OpenAIExecutor +from .models import 
AgentFrameworkRequest, MetaResponse, OpenAIError +from .models._discovery_models import Deployment, DeploymentConfig, DiscoveryResponse, EntityInfo logger = logging.getLogger(__name__) +# No AuthMiddleware class needed - we'll use the decorator pattern instead + + class DevServer: """Development Server - OpenAI compatible API server for debugging agents.""" @@ -33,6 +40,7 @@ def __init__( host: str = "127.0.0.1", cors_origins: list[str] | None = None, ui_enabled: bool = True, + mode: str = "developer", ) -> None: """Initialize the development server. @@ -42,16 +50,79 @@ def __init__( host: Host to bind server to cors_origins: List of allowed CORS origins ui_enabled: Whether to enable the UI + mode: Server mode - 'developer' (full access, verbose errors) or 'user' (restricted APIs, generic errors) """ self.entities_dir = entities_dir self.port = port self.host = host - self.cors_origins = cors_origins or ["*"] + + # Smart CORS defaults: permissive for localhost, restrictive for network-exposed deployments + if cors_origins is None: + # Localhost development: allow cross-origin for dev tools (e.g., frontend dev server) + # Network-exposed: empty list (same-origin only, no CORS) + cors_origins = ["*"] if host in ("127.0.0.1", "localhost") else [] + + self.cors_origins = cors_origins self.ui_enabled = ui_enabled + self.mode = mode self.executor: AgentFrameworkExecutor | None = None + self.openai_executor: OpenAIExecutor | None = None + self.deployment_manager = DeploymentManager() self._app: FastAPI | None = None self._pending_entities: list[Any] | None = None + def _is_dev_mode(self) -> bool: + """Check if running in developer mode. + + Returns: + True if in developer mode, False if in user mode + """ + return self.mode == "developer" + + def _format_error(self, error: Exception, context: str = "Operation") -> str: + """Format error message based on server mode. + + In developer mode: Returns detailed error message for debugging. 
+ In user mode: Returns generic message and logs details internally. + + Args: + error: The exception that occurred + context: Description of the operation that failed (e.g., "Request execution") + + Returns: + Formatted error message appropriate for the current mode + """ + if self._is_dev_mode(): + # Developer mode: Show full error details for debugging + return f"{context} failed: {error!s}" + + # User mode: Generic message to user, detailed logging internally + logger.error(f"{context} failed: {error}", exc_info=True) + return f"{context} failed" + + def _require_developer_mode(self, feature: str = "operation") -> None: + """Check if current mode allows developer operations. + + Args: + feature: Name of the feature being accessed (for error message) + + Raises: + HTTPException: If in user mode + """ + if self.mode == "user": + logger.warning(f"Blocked {feature} access in user mode") + raise HTTPException( + status_code=403, + detail={ + "error": { + "message": f"Access denied: {feature} requires developer mode", + "type": "permission_denied", + "code": "developer_mode_required", + "current_mode": self.mode, + } + }, + ) + async def _ensure_executor(self) -> AgentFrameworkExecutor: """Ensure executor is initialized.""" if self.executor is None: @@ -84,6 +155,29 @@ async def _ensure_executor(self) -> AgentFrameworkExecutor: return self.executor + async def _ensure_openai_executor(self) -> OpenAIExecutor: + """Ensure OpenAI executor is initialized. 
+ + Returns: + OpenAI executor instance + + Raises: + ValueError: If OpenAI executor cannot be initialized + """ + if self.openai_executor is None: + # Initialize local executor first to get conversation_store + local_executor = await self._ensure_executor() + + # Create OpenAI executor with shared conversation store + self.openai_executor = OpenAIExecutor(local_executor.conversation_store) + + if self.openai_executor.is_configured: + logger.info("OpenAI proxy mode available (OPENAI_API_KEY configured)") + else: + logger.info("OpenAI proxy mode disabled (OPENAI_API_KEY not set)") + + return self.openai_executor + async def _cleanup_entities(self) -> None: """Cleanup entity resources (close clients, MCP tools, credentials, etc.).""" if not self.executor: @@ -94,12 +188,28 @@ async def _cleanup_entities(self) -> None: closed_count = 0 mcp_tools_closed = 0 credentials_closed = 0 + hook_count = 0 for entity_info in entities: + entity_id = entity_info.id + try: - entity_obj = self.executor.entity_discovery.get_entity_object(entity_info.id) + # Step 1: Execute registered cleanup hooks (NEW) + cleanup_hooks = self.executor.entity_discovery.get_cleanup_hooks(entity_id) + for hook in cleanup_hooks: + try: + if inspect.iscoroutinefunction(hook): + await hook() + else: + hook() + hook_count += 1 + logger.debug(f"✓ Executed cleanup hook for: {entity_id}") + except Exception as e: + logger.warning(f"⚠ Cleanup hook failed for {entity_id}: {e}") + + # Step 2: Close chat clients and their credentials (EXISTING) + entity_obj = self.executor.entity_discovery.get_entity_object(entity_id) - # Close chat clients and their credentials if entity_obj and hasattr(entity_obj, "chat_client"): client = entity_obj.chat_client @@ -144,14 +254,24 @@ async def _cleanup_entities(self) -> None: logger.warning(f"Error closing MCP tool for {entity_info.id}: {e}") except Exception as e: - logger.warning(f"Error closing entity {entity_info.id}: {e}") + logger.warning(f"Error cleaning up entity 
{entity_id}: {e}") + if hook_count > 0: + logger.info(f"✓ Executed {hook_count} cleanup hook(s)") if closed_count > 0: - logger.info(f"Closed {closed_count} entity client(s)") + logger.info(f"✓ Closed {closed_count} entity client(s)") if credentials_closed > 0: - logger.info(f"Closed {credentials_closed} credential(s)") + logger.info(f"✓ Closed {credentials_closed} credential(s)") if mcp_tools_closed > 0: - logger.info(f"Closed {mcp_tools_closed} MCP tool(s)") + logger.info(f"✓ Closed {mcp_tools_closed} MCP tool(s)") + + # Close OpenAI executor if it exists + if self.openai_executor: + try: + await self.openai_executor.close() + logger.info("Closed OpenAI executor") + except Exception as e: + logger.warning(f"Error closing OpenAI executor: {e}") def create_app(self) -> FastAPI: """Create the FastAPI application.""" @@ -161,6 +281,7 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: # Startup logger.info("Starting Agent Framework Server") await self._ensure_executor() + await self._ensure_openai_executor() # Initialize OpenAI executor yield # Shutdown logger.info("Shutting down Agent Framework Server") @@ -177,14 +298,74 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: ) # Add CORS middleware + # Note: allow_credentials cannot be True when allow_origins is ["*"] + # For localhost dev with wildcard origins, credentials are disabled + # For network deployments with specific origins or empty list, credentials can be enabled + allow_credentials = self.cors_origins != ["*"] + app.add_middleware( CORSMiddleware, allow_origins=self.cors_origins, - allow_credentials=True, + allow_credentials=allow_credentials, allow_methods=["*"], allow_headers=["*"], ) + # Add authentication middleware using decorator pattern + # Auth is enabled by presence of DEVUI_AUTH_TOKEN + auth_token = os.getenv("DEVUI_AUTH_TOKEN", "") + auth_required = bool(auth_token) + + if auth_required: + logger.info("Authentication middleware enabled") + + 
@app.middleware("http") + async def auth_middleware(request: Request, call_next: Callable[[Request], Awaitable[Any]]) -> Any: + """Validate Bearer token authentication. + + Skips authentication for health, meta, static UI endpoints, and OPTIONS requests. + """ + # Skip auth for OPTIONS (CORS preflight) requests + if request.method == "OPTIONS": + return await call_next(request) + + # Skip auth for health checks, meta endpoint, and static files + if request.url.path in ["/health", "/meta", "/"] or request.url.path.startswith("/assets"): + return await call_next(request) + + # Check Authorization header + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + return JSONResponse( + status_code=401, + content={ + "error": { + "message": ( + "Missing or invalid Authorization header. Expected: Authorization: Bearer " + ), + "type": "authentication_error", + "code": "missing_token", + } + }, + ) + + # Extract and validate token + token = auth_header.replace("Bearer ", "", 1).strip() + if not secrets.compare_digest(token, auth_token): + return JSONResponse( + status_code=401, + content={ + "error": { + "message": "Invalid authentication token", + "type": "authentication_error", + "code": "invalid_token", + } + }, + ) + + # Token valid, proceed + return await call_next(request) + self._register_routes(app) self._mount_ui(app) @@ -202,6 +383,29 @@ async def health_check() -> dict[str, Any]: return {"status": "healthy", "entities_count": len(entities), "framework": "agent_framework"} + @app.get("/meta", response_model=MetaResponse) + async def get_meta() -> MetaResponse: + """Get server metadata and configuration.""" + import os + + from . 
import __version__ + + # Ensure executors are initialized to check capabilities + openai_executor = await self._ensure_openai_executor() + + return MetaResponse( + ui_mode=self.mode, # type: ignore[arg-type] + version=__version__, + framework="agent_framework", + runtime="python", # Python DevUI backend + capabilities={ + "tracing": os.getenv("ENABLE_OTEL") == "true", + "openai_proxy": openai_executor.is_configured, + "deployment": True, # Deployment feature is available + }, + auth_required=bool(os.getenv("DEVUI_AUTH_TOKEN")), + ) + @app.get("/v1/entities", response_model=DiscoveryResponse) async def discover_entities() -> DiscoveryResponse: """List all registered entities.""" @@ -226,7 +430,10 @@ async def get_entity_info(entity_id: str) -> EntityInfo: # Trigger lazy loading if entity not yet loaded # This will import the module and enrich metadata - entity_obj = await executor.entity_discovery.load_entity(entity_id) + # Pass checkpoint_manager to ensure workflows get checkpoint storage injected + entity_obj = await executor.entity_discovery.load_entity( + entity_id, checkpoint_manager=executor.checkpoint_manager + ) # Get updated entity info (may have been enriched during load) entity_info = executor.get_entity_info(entity_id) or entity_info @@ -305,6 +512,7 @@ async def get_entity_info(entity_id: str) -> EntityInfo: executor_list = [getattr(ex, "executor_id", str(ex)) for ex in entity_obj.executors] # Create copy of entity info and populate workflow-specific fields + # Note: DevUI provides runtime checkpoint storage for ALL workflows via conversations update_payload: dict[str, Any] = { "workflow_dump": workflow_dump, "input_schema": input_schema, @@ -320,9 +528,13 @@ async def get_entity_info(entity_id: str) -> EntityInfo: except HTTPException: raise + except ValueError as e: + # ValueError from load_entity indicates entity not found or invalid + error_msg = self._format_error(e, "Entity loading") + raise HTTPException(status_code=404, detail=error_msg) from e 
except Exception as e: - logger.error(f"Error getting entity info for {entity_id}: {e}") - raise HTTPException(status_code=500, detail=f"Failed to get entity info: {e!s}") from e + error_msg = self._format_error(e, "Entity info retrieval") + raise HTTPException(status_code=500, detail=error_msg) from e @app.post("/v1/entities/{entity_id}/reload") async def reload_entity(entity_id: str) -> dict[str, Any]: @@ -331,6 +543,7 @@ async def reload_entity(entity_id: str) -> dict[str, Any]: This enables hot reload during development - edit entity code, call this endpoint, and the next execution will use the updated code without server restart. """ + self._require_developer_mode("entity hot reload") try: executor = await self._ensure_executor() @@ -353,20 +566,150 @@ async def reload_entity(entity_id: str) -> dict[str, Any]: logger.error(f"Error reloading entity {entity_id}: {e}") raise HTTPException(status_code=500, detail=f"Failed to reload entity: {e!s}") from e + # ============================================================================ + # Deployment Endpoints + # ============================================================================ + + @app.post("/v1/deployments") + async def create_deployment(config: DeploymentConfig) -> StreamingResponse: + """Deploy entity to Azure Container Apps with streaming events. + + Returns SSE stream of deployment progress events. 
+ """ + self._require_developer_mode("deployment") + try: + executor = await self._ensure_executor() + + # Validate entity exists and supports deployment + entity_info = executor.get_entity_info(config.entity_id) + if not entity_info: + raise HTTPException(status_code=404, detail=f"Entity {config.entity_id} not found") + + if not entity_info.deployment_supported: + reason = entity_info.deployment_reason or "Deployment not supported for this entity" + raise HTTPException(status_code=400, detail=reason) + + # Get entity path from metadata + from pathlib import Path + + entity_path_str = entity_info.metadata.get("path") + if not entity_path_str: + raise HTTPException( + status_code=400, + detail="Entity path not found in metadata (in-memory entities cannot be deployed)", + ) + + entity_path = Path(entity_path_str) + + # Stream deployment events + async def event_generator() -> AsyncGenerator[str, None]: + async for event in self.deployment_manager.deploy(config, entity_path): + # Format as SSE + import json + + yield f"data: {json.dumps(event.model_dump())}\n\n" + + return StreamingResponse(event_generator(), media_type="text/event-stream") + + except HTTPException: + raise + except Exception as e: + error_msg = self._format_error(e, "Deployment creation") + raise HTTPException(status_code=500, detail=error_msg) from e + + @app.get("/v1/deployments") + async def list_deployments(entity_id: str | None = None) -> list[Deployment]: + """List all deployments, optionally filtered by entity.""" + self._require_developer_mode("deployment listing") + try: + return await self.deployment_manager.list_deployments(entity_id) + except Exception as e: + error_msg = self._format_error(e, "Deployment listing") + raise HTTPException(status_code=500, detail=error_msg) from e + + @app.get("/v1/deployments/{deployment_id}") + async def get_deployment(deployment_id: str) -> Deployment: + """Get deployment by ID.""" + self._require_developer_mode("deployment details") + try: + deployment = 
await self.deployment_manager.get_deployment(deployment_id) + if not deployment: + raise HTTPException(status_code=404, detail=f"Deployment {deployment_id} not found") + return deployment + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting deployment: {e}") + raise HTTPException(status_code=500, detail=f"Failed to get deployment: {e!s}") from e + + @app.delete("/v1/deployments/{deployment_id}") + async def delete_deployment(deployment_id: str) -> dict[str, Any]: + """Delete deployment from Azure Container Apps.""" + self._require_developer_mode("deployment deletion") + try: + await self.deployment_manager.delete_deployment(deployment_id) + return {"success": True, "message": f"Deployment {deployment_id} deleted successfully"} + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) from e + except Exception as e: + logger.error(f"Error deleting deployment: {e}") + raise HTTPException(status_code=500, detail=f"Failed to delete deployment: {e!s}") from e + + # Convenience endpoint: deploy specific entity + @app.post("/v1/entities/{entity_id}/deploy") + async def deploy_entity(entity_id: str, config: DeploymentConfig) -> StreamingResponse: + """Convenience endpoint to deploy entity (shortcuts to /v1/deployments).""" + self._require_developer_mode("deployment") + # Override entity_id from path parameter + config.entity_id = entity_id + return await create_deployment(config) + + # ============================================================================ + # Response/Conversation Endpoints + # ============================================================================ + @app.post("/v1/responses") async def create_response(request: AgentFrameworkRequest, raw_request: Request) -> Any: - """OpenAI Responses API endpoint.""" + """OpenAI Responses API endpoint - routes to local or OpenAI executor.""" try: + # Check if frontend requested OpenAI proxy mode + proxy_mode = raw_request.headers.get("X-Proxy-Backend") + 
+ if proxy_mode == "openai": + # Route to OpenAI executor + logger.info("🔀 Routing to OpenAI proxy mode") + openai_executor = await self._ensure_openai_executor() + + if not openai_executor.is_configured: + error = OpenAIError.create( + "OpenAI proxy mode not configured. Set OPENAI_API_KEY environment variable." + ) + return JSONResponse(status_code=503, content=error.to_dict()) + + # Execute via OpenAI with dedicated streaming method + if request.stream: + return StreamingResponse( + self._stream_openai_execution(openai_executor, request), + media_type="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "Access-Control-Allow-Origin": "*", + }, + ) + return await openai_executor.execute_sync(request) + + # Route to local Agent Framework executor (original behavior) raw_body = await raw_request.body() logger.info(f"Raw request body: {raw_body.decode()}") - logger.info(f"Parsed request: model={request.model}, extra_body={request.extra_body}") + logger.info(f"Parsed request: metadata={request.metadata}") - # Get entity_id using the new method + # Get entity_id from metadata entity_id = request.get_entity_id() logger.info(f"Extracted entity_id: {entity_id}") if not entity_id: - error = OpenAIError.create(f"Missing entity_id. Request extra_body: {request.extra_body}") + error = OpenAIError.create("Missing entity_id in metadata. 
Provide metadata.entity_id in request.") return JSONResponse(status_code=400, content=error.to_dict()) # Get executor and validate entity exists @@ -392,18 +735,86 @@ async def create_response(request: AgentFrameworkRequest, raw_request: Request) return await executor.execute_sync(request) except Exception as e: - logger.error(f"Error executing request: {e}") - error = OpenAIError.create(f"Execution failed: {e!s}") + error_msg = self._format_error(e, "Request execution") + error = OpenAIError.create(error_msg) return JSONResponse(status_code=500, content=error.to_dict()) # ======================================== # OpenAI Conversations API (Standard) # ======================================== - @app.post("/v1/conversations") - async def create_conversation(request_data: dict[str, Any]) -> dict[str, Any]: - """Create a new conversation - OpenAI standard.""" + @app.post("/v1/conversations", response_model=None) + async def create_conversation(raw_request: Request) -> dict[str, Any] | JSONResponse: + """Create a new conversation - routes to OpenAI or local based on mode.""" try: + # Parse request body + request_data = await raw_request.json() + + # Check if frontend requested OpenAI proxy mode + proxy_mode = raw_request.headers.get("X-Proxy-Backend") + + if proxy_mode == "openai": + # Create conversation in OpenAI + openai_executor = await self._ensure_openai_executor() + if not openai_executor.is_configured: + error = OpenAIError.create( + "OpenAI proxy mode not configured. 
Set OPENAI_API_KEY environment variable.", + type="configuration_error", + code="openai_not_configured", + ) + return JSONResponse(status_code=503, content=error.to_dict()) + + # Use OpenAI client to create conversation + from openai import APIStatusError, AsyncOpenAI, AuthenticationError, PermissionDeniedError + + client = AsyncOpenAI( + api_key=openai_executor.api_key, + base_url=openai_executor.base_url, + ) + + try: + metadata = request_data.get("metadata") + logger.debug(f"Creating OpenAI conversation with metadata: {metadata}") + conversation = await client.conversations.create(metadata=metadata) + logger.info(f"Created OpenAI conversation: {conversation.id}") + return conversation.model_dump() + except AuthenticationError as e: + # 401 - Invalid API key or authentication issue + logger.error(f"OpenAI authentication error creating conversation: {e}") + error_body = e.body if hasattr(e, "body") else {} + error_data = error_body.get("error", {}) if isinstance(error_body, dict) else {} + error = OpenAIError.create( + message=error_data.get("message", str(e)), + type=error_data.get("type", "authentication_error"), + code=error_data.get("code", "invalid_api_key"), + ) + return JSONResponse(status_code=401, content=error.to_dict()) + except PermissionDeniedError as e: + # 403 - Permission denied + logger.error(f"OpenAI permission denied creating conversation: {e}") + error_body = e.body if hasattr(e, "body") else {} + error_data = error_body.get("error", {}) if isinstance(error_body, dict) else {} + error = OpenAIError.create( + message=error_data.get("message", str(e)), + type=error_data.get("type", "permission_denied"), + code=error_data.get("code", "insufficient_permissions"), + ) + return JSONResponse(status_code=403, content=error.to_dict()) + except APIStatusError as e: + # Other OpenAI API errors (rate limit, etc.) 
+ logger.error(f"OpenAI API error creating conversation: {e}") + error_body = e.body if hasattr(e, "body") else {} + error_data = error_body.get("error", {}) if isinstance(error_body, dict) else {} + error = OpenAIError.create( + message=error_data.get("message", str(e)), + type=error_data.get("type", "api_error"), + code=error_data.get("code", "unknown_error"), + ) + return JSONResponse( + status_code=e.status_code if hasattr(e, "status_code") else 500, content=error.to_dict() + ) + + # Local mode - use DevUI conversation store metadata = request_data.get("metadata") executor = await self._ensure_executor() conversation = executor.conversation_store.create_conversation(metadata=metadata) @@ -411,22 +822,39 @@ async def create_conversation(request_data: dict[str, Any]) -> dict[str, Any]: except HTTPException: raise except Exception as e: - logger.error(f"Error creating conversation: {e}") - raise HTTPException(status_code=500, detail=f"Failed to create conversation: {e!s}") from e + logger.error(f"Error creating conversation: {e}", exc_info=True) + error = OpenAIError.create(f"Failed to create conversation: {e!s}") + return JSONResponse(status_code=500, content=error.to_dict()) @app.get("/v1/conversations") - async def list_conversations(agent_id: str | None = None) -> dict[str, Any]: - """List conversations, optionally filtered by agent_id.""" + async def list_conversations( + agent_id: str | None = None, + entity_id: str | None = None, + type: str | None = None, + ) -> dict[str, Any]: + """List conversations, optionally filtered by agent_id, entity_id, and/or type. + + Query Parameters: + - agent_id: Filter by agent_id (for agent conversations) + - entity_id: Filter by entity_id (for workflow sessions or other entities) + - type: Filter by conversation type (e.g., "workflow_session") + + Multiple filters can be combined (AND logic). 
+ """ try: executor = await self._ensure_executor() + # Build filter criteria + filters = {} if agent_id: - # Filter by agent_id metadata - conversations = executor.conversation_store.list_conversations_by_metadata({"agent_id": agent_id}) - else: - # Return all conversations (for InMemoryStore, list all) - # Note: This assumes list_conversations_by_metadata({}) returns all - conversations = executor.conversation_store.list_conversations_by_metadata({}) + filters["agent_id"] = agent_id + if entity_id: + filters["entity_id"] = entity_id + if type: + filters["type"] = type + + # Apply filters + conversations = executor.conversation_store.list_conversations_by_metadata(filters) return { "object": "list", @@ -511,9 +939,20 @@ async def list_conversation_items( items, has_more = await executor.conversation_store.list_items( conversation_id, limit=limit, after=after, order=order ) + # Handle both Pydantic models and dicts (some stores return raw dicts) + serialized_items = [] + for item in items: + if hasattr(item, "model_dump"): + serialized_items.append(item.model_dump()) + elif isinstance(item, dict): + serialized_items.append(item) + else: + logger.warning(f"Unexpected item type: {type(item)}, converting to dict") + serialized_items.append(dict(item)) + return { "object": "list", - "data": [item.model_dump() for item in items], + "data": serialized_items, "has_more": has_more, } except ValueError as e: @@ -532,13 +971,51 @@ async def retrieve_conversation_item(conversation_id: str, item_id: str) -> dict item = executor.conversation_store.get_item(conversation_id, item_id) if not item: raise HTTPException(status_code=404, detail="Item not found") - return item.model_dump() + result: dict[str, Any] = item.model_dump() + return result except HTTPException: raise except Exception as e: logger.error(f"Error getting item {item_id} from conversation {conversation_id}: {e}") raise HTTPException(status_code=500, detail=f"Failed to get item: {e!s}") from e + 
@app.delete("/v1/conversations/{conversation_id}/items/{item_id}") + async def delete_conversation_item(conversation_id: str, item_id: str) -> dict[str, Any]: + """Delete conversation item - supports checkpoint deletion.""" + try: + executor = await self._ensure_executor() + + # Check if this is a checkpoint item + if item_id.startswith("checkpoint_"): + # Extract checkpoint_id from item_id (format: "checkpoint_{checkpoint_id}") + checkpoint_id = item_id[len("checkpoint_") :] + storage = executor.checkpoint_manager.get_checkpoint_storage(conversation_id) + deleted = await storage.delete_checkpoint(checkpoint_id) + + if not deleted: + raise HTTPException(status_code=404, detail="Checkpoint not found") + + return { + "id": item_id, + "object": "item.deleted", + "deleted": True, + } + # For other items, delegate to conversation store (if it supports deletion) + raise HTTPException(status_code=501, detail="Deletion of non-checkpoint items not implemented") + + except HTTPException: + raise + except ValueError as e: + raise HTTPException(status_code=404, detail=str(e)) from e + except Exception as e: + logger.error(f"Error deleting item {item_id} from conversation {conversation_id}: {e}") + raise HTTPException(status_code=500, detail=f"Failed to delete item: {e!s}") from e + + # ============================================================================ + # Checkpoint Management - Now handled through conversation items API + # Checkpoints are exposed as conversation items with type="checkpoint" + # ============================================================================ + async def _stream_execution( self, executor: AgentFrameworkExecutor, request: AgentFrameworkRequest ) -> AsyncGenerator[str, None]: @@ -587,6 +1064,63 @@ async def _stream_execution( error_event = {"id": "error", "object": "error", "error": {"message": str(e), "type": "execution_error"}} yield f"data: {json.dumps(error_event)}\n\n" + async def _stream_openai_execution( + self, executor: 
OpenAIExecutor, request: AgentFrameworkRequest + ) -> AsyncGenerator[str, None]: + """Stream execution through OpenAI executor. + + OpenAI events are already in final format - no conversion or aggregation needed. + Just serialize and stream them as SSE. + + Args: + executor: OpenAI executor instance + request: Request to execute + + Yields: + SSE-formatted event strings + """ + try: + # Stream events from OpenAI - they're already ResponseStreamEvent objects + async for event in executor.execute_streaming(request): + # Handle error dicts from executor + if isinstance(event, dict): + payload = json.dumps(event) + yield f"data: {payload}\n\n" + continue + + # OpenAI SDK events have model_dump_json() - use it for single-line JSON + if hasattr(event, "model_dump_json"): + payload = event.model_dump_json() # type: ignore[attr-defined] + yield f"data: {payload}\n\n" + else: + # Fallback (shouldn't happen with OpenAI SDK) + logger.warning(f"Unexpected event type from OpenAI: {type(event)}") + payload = json.dumps(str(event)) + yield f"data: {payload}\n\n" + + # OpenAI already sends response.completed event - no aggregation needed! 
+ # Just send [DONE] marker + yield "data: [DONE]\n\n" + + except Exception as e: + logger.error(f"Error in OpenAI streaming execution: {e}", exc_info=True) + # Emit proper response.failed event + import os + + error_event = { + "type": "response.failed", + "response": { + "id": f"resp_{os.urandom(16).hex()}", + "status": "failed", + "error": { + "message": str(e), + "type": "internal_error", + "code": "streaming_error", + }, + }, + } + yield f"data: {json.dumps(error_event)}\n\n" + def _mount_ui(self, app: FastAPI) -> None: """Mount the UI as static files.""" from pathlib import Path diff --git a/python/packages/devui/agent_framework_devui/_utils.py b/python/packages/devui/agent_framework_devui/_utils.py index 19be9d5f35..3c17c072f7 100644 --- a/python/packages/devui/agent_framework_devui/_utils.py +++ b/python/packages/devui/agent_framework_devui/_utils.py @@ -324,6 +324,71 @@ def generate_schema_from_dataclass(cls: type[Any]) -> dict[str, Any]: return schema +def extract_response_type_from_executor(executor: Any, request_type: type) -> type | None: + """Extract the expected response type from an executor's response handler. 
+ + Looks for methods decorated with @response_handler that have signature: + async def handler(self, original_request: RequestType, response: ResponseType, ctx) + + Args: + executor: Executor object that should have a handler for the request type + request_type: The request message type + + Returns: + The response type class, or None if not found + """ + try: + from typing import get_type_hints + + # Introspect handler methods for @response_handler pattern + for attr_name in dir(executor): + if attr_name.startswith("_"): + continue + attr = getattr(executor, attr_name, None) + if not callable(attr): + continue + + # Get type hints for this method + try: + type_hints = get_type_hints(attr) + + # Check for @response_handler pattern: + # async def handler(self, original_request: RequestType, response: ResponseType, ctx) + type_hint_params = {k: v for k, v in type_hints.items() if k not in ("self", "return")} + + # Look for at least 2 parameters: original_request, response (ctx is optional) + if len(type_hint_params) >= 2: + param_items = list(type_hint_params.items()) + # First param should be original_request matching request_type + _, first_param_type = param_items[0] + _, second_param_type = param_items[1] if len(param_items) > 1 else (None, None) + + # Check if first param matches request_type + first_matches_request = first_param_type == request_type or ( + hasattr(first_param_type, "__name__") + and hasattr(request_type, "__name__") + and first_param_type.__name__ == request_type.__name__ + ) + + # Verify we have a matching request type and valid response type (must be a type class) + if first_matches_request and second_param_type is not None and isinstance(second_param_type, type): + response_type_class: type = second_param_type + logger.debug( + f"Found response type {response_type_class} for request {request_type} " + f"via @response_handler" + ) + return response_type_class + + except Exception as e: + logger.debug(f"Failed to get type hints for 
{attr_name}: {e}") + continue + + except Exception as e: + logger.debug(f"Failed to extract response type from executor: {e}") + + return None + + def generate_input_schema(input_type: type) -> dict[str, Any]: """Generate JSON schema for workflow input type. diff --git a/python/packages/devui/agent_framework_devui/models/__init__.py b/python/packages/devui/agent_framework_devui/models/__init__.py index 254bb4e4af..5dc3ba59b9 100644 --- a/python/packages/devui/agent_framework_devui/models/__init__.py +++ b/python/packages/devui/agent_framework_devui/models/__init__.py @@ -27,14 +27,18 @@ from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails from openai.types.shared import Metadata, ResponsesModel -from ._discovery_models import DiscoveryResponse, EntityInfo +from ._discovery_models import Deployment, DeploymentConfig, DeploymentEvent, DiscoveryResponse, EntityInfo from ._openai_custom import ( AgentFrameworkRequest, CustomResponseOutputItemAddedEvent, CustomResponseOutputItemDoneEvent, ExecutorActionItem, + MetaResponse, OpenAIError, ResponseFunctionResultComplete, + ResponseOutputData, + ResponseOutputFile, + ResponseOutputImage, ResponseTraceEvent, ResponseTraceEventComplete, ResponseWorkflowEventComplete, @@ -51,10 +55,14 @@ "ConversationItem", "CustomResponseOutputItemAddedEvent", "CustomResponseOutputItemDoneEvent", + "Deployment", + "DeploymentConfig", + "DeploymentEvent", "DiscoveryResponse", "EntityInfo", "ExecutorActionItem", "InputTokensDetails", + "MetaResponse", "Metadata", "OpenAIError", "OpenAIResponse", @@ -67,6 +75,9 @@ "ResponseFunctionToolCall", "ResponseFunctionToolCallOutputItem", "ResponseInputParam", + "ResponseOutputData", + "ResponseOutputFile", + "ResponseOutputImage", "ResponseOutputItemAddedEvent", "ResponseOutputItemDoneEvent", "ResponseOutputMessage", diff --git a/python/packages/devui/agent_framework_devui/models/_discovery_models.py 
b/python/packages/devui/agent_framework_devui/models/_discovery_models.py index 690efa7f9f..cdb5d0619c 100644 --- a/python/packages/devui/agent_framework_devui/models/_discovery_models.py +++ b/python/packages/devui/agent_framework_devui/models/_discovery_models.py @@ -4,9 +4,10 @@ from __future__ import annotations +import re from typing import Any -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, field_validator class EnvVarRequirement(BaseModel): @@ -36,6 +37,10 @@ class EntityInfo(BaseModel): # Environment variable requirements required_env_vars: list[EnvVarRequirement] | None = None + # Deployment support + deployment_supported: bool = False # Whether entity can be deployed + deployment_reason: str | None = None # Explanation of why/why not entity can be deployed + # Agent-specific fields (optional, populated when available) instructions: str | None = None model_id: str | None = None @@ -55,3 +60,144 @@ class DiscoveryResponse(BaseModel): """Response model for entity discovery.""" entities: list[EntityInfo] = Field(default_factory=list) + + +# ============================================================================ +# Deployment Models +# ============================================================================ + + +class DeploymentConfig(BaseModel): + """Configuration for deploying an entity.""" + + entity_id: str = Field(description="Entity ID to deploy") + resource_group: str = Field(description="Azure resource group name") + app_name: str = Field(description="Azure Container App name") + region: str = Field(default="eastus", description="Azure region") + ui_mode: str = Field(default="user", description="UI mode (user or developer)") + ui_enabled: bool = Field(default=True, description="Whether to enable web interface") + stream: bool = Field(default=True, description="Stream deployment events") + + @field_validator("app_name") + @classmethod + def validate_app_name(cls, v: str) -> str: + """Validate Azure Container App 
name format. + + Azure Container App names must: + - Be 3-32 characters long + - Contain only lowercase letters, numbers, and hyphens + - Start with a lowercase letter + - End with a lowercase letter or number + - Not contain consecutive hyphens + """ + if not v: + raise ValueError("app_name cannot be empty") + + if len(v) < 3 or len(v) > 32: + raise ValueError("app_name must be between 3 and 32 characters") + + if not re.match(r"^[a-z][a-z0-9-]*[a-z0-9]$", v): + raise ValueError( + "app_name must start with a lowercase letter, " + "end with a letter or number, and contain only lowercase letters, numbers, and hyphens" + ) + + if "--" in v: + raise ValueError("app_name cannot contain consecutive hyphens") + + return v + + @field_validator("resource_group") + @classmethod + def validate_resource_group(cls, v: str) -> str: + """Validate Azure resource group name format. + + Azure resource group names must: + - Be 1-90 characters long + - Contain only alphanumeric, underscore, parentheses, hyphen, period (except at end) + - Not end with a period + """ + if not v: + raise ValueError("resource_group cannot be empty") + + if len(v) > 90: + raise ValueError("resource_group must be 90 characters or less") + + if not re.match(r"^[a-zA-Z0-9._()-]+$", v): + raise ValueError( + "resource_group can only contain alphanumeric characters, " + "underscores, hyphens, periods, and parentheses" + ) + + if v.endswith("."): + raise ValueError("resource_group cannot end with a period") + + return v + + @field_validator("region") + @classmethod + def validate_region(cls, v: str) -> str: + """Validate Azure region format. + + Validates that the region string is a reasonable format. + Does not validate against the full list of Azure regions (which changes). 
+ """ + if not v: + raise ValueError("region cannot be empty") + + if len(v) > 50: + raise ValueError("region name too long") + + # Azure regions are typically lowercase with no spaces (e.g., eastus, westeurope) + if not re.match(r"^[a-z0-9]+$", v): + raise ValueError("region must contain only lowercase letters and numbers (e.g., eastus, westeurope)") + + return v + + @field_validator("entity_id") + @classmethod + def validate_entity_id(cls, v: str) -> str: + """Validate entity_id format to prevent injection attacks.""" + if not v: + raise ValueError("entity_id cannot be empty") + + if len(v) > 256: + raise ValueError("entity_id too long") + + # Allow alphanumeric, hyphens, underscores, and periods + if not re.match(r"^[a-zA-Z0-9._-]+$", v): + raise ValueError("entity_id contains invalid characters") + + return v + + @field_validator("ui_mode") + @classmethod + def validate_ui_mode(cls, v: str) -> str: + """Validate ui_mode is one of the allowed values.""" + if v not in ("user", "developer"): + raise ValueError("ui_mode must be 'user' or 'developer'") + + return v + + +class DeploymentEvent(BaseModel): + """Real-time deployment event (SSE).""" + + type: str = Field(description="Event type (e.g., deploy.validating, deploy.building)") + message: str = Field(description="Human-readable message") + url: str | None = Field(default=None, description="Deployment URL (on completion)") + auth_token: str | None = Field(default=None, description="Auth token (on completion, shown once)") + + +class Deployment(BaseModel): + """Deployment record.""" + + id: str = Field(description="Deployment ID (UUID)") + entity_id: str = Field(description="Entity ID that was deployed") + resource_group: str = Field(description="Azure resource group") + app_name: str = Field(description="Azure Container App name") + region: str = Field(description="Azure region") + url: str = Field(description="Deployment URL") + status: str = Field(description="Deployment status (deploying, deployed, failed)") 
+ created_at: str = Field(description="ISO 8601 timestamp") + error: str | None = Field(default=None, description="Error message if failed") diff --git a/python/packages/devui/agent_framework_devui/models/_openai_custom.py b/python/packages/devui/agent_framework_devui/models/_openai_custom.py index d4506c7b4c..f82ef90b72 100644 --- a/python/packages/devui/agent_framework_devui/models/_openai_custom.py +++ b/python/packages/devui/agent_framework_devui/models/_openai_custom.py @@ -80,9 +80,16 @@ class CustomResponseOutputItemDoneEvent(BaseModel): class ResponseWorkflowEventComplete(BaseModel): - """Complete workflow event data.""" + """Complete workflow event data. - type: Literal["response.workflow_event.complete"] = "response.workflow_event.complete" + DevUI extension for workflow execution events (debugging/observability). + Uses past-tense 'completed' to follow OpenAI's event naming pattern. + + Workflow events are shown in the debug panel for monitoring execution flow, + not in main chat. Use response.output_item.added for user-facing content. + """ + + type: Literal["response.workflow_event.completed"] = "response.workflow_event.completed" data: dict[str, Any] # Complete event data, not delta executor_id: str | None = None item_id: str @@ -91,9 +98,17 @@ class ResponseWorkflowEventComplete(BaseModel): class ResponseTraceEventComplete(BaseModel): - """Complete trace event data.""" + """Complete trace event data. + + DevUI extension for non-displayable debugging/metadata events. + Uses past-tense 'completed' to follow OpenAI's event naming pattern + (e.g., response.completed, response.output_item.added). + + Trace events are shown in the Traces debug panel, not in main chat. + Use response.output_item.added for user-facing content. 
+ """ - type: Literal["response.trace.complete"] = "response.trace.complete" + type: Literal["response.trace.completed"] = "response.trace.completed" data: dict[str, Any] # Complete trace data, not delta span_id: str | None = None item_id: str @@ -124,6 +139,139 @@ class ResponseFunctionResultComplete(BaseModel): timestamp: str | None = None # Optional timestamp for UI display +class ResponseRequestInfoEvent(BaseModel): + """DevUI extension: Workflow requests human input. + + This is a DevUI extension because: + - OpenAI Responses API doesn't have a concept of workflow human-in-the-loop pausing + - Agent Framework workflows can pause via RequestInfoExecutor to collect external information + - Clients need to render forms and submit responses to continue workflow execution + + When a workflow emits this event, it enters IDLE_WITH_PENDING_REQUESTS state. + Client should render a form based on request_schema and submit responses via + a new request with workflow_hil_response content type. + """ + + type: Literal["response.request_info.requested"] = "response.request_info.requested" + request_id: str + """Unique identifier for correlating this request with the response.""" + + source_executor_id: str + """ID of the executor that is waiting for this response.""" + + request_type: str + """Fully qualified type name of the request (e.g., 'module.path:ClassName').""" + + request_data: dict[str, Any] + """Current data from the RequestInfoMessage (may contain defaults/context).""" + + request_schema: dict[str, Any] + """JSON schema describing the request data structure (what the workflow is asking about).""" + + response_schema: dict[str, Any] | None = None + """JSON schema describing the expected response structure for form rendering (what user should provide).""" + + item_id: str + """OpenAI item ID for correlation.""" + + output_index: int = 0 + """Output index for OpenAI compatibility.""" + + sequence_number: int + """Sequence number for ordering events.""" + + 
timestamp: str + """ISO timestamp when the request was made.""" + + +# DevUI Output Content Types - for agent-generated media/data +# These extend ResponseOutputItem to support rich content outputs that OpenAI's API doesn't natively support + + +class ResponseOutputImage(BaseModel): + """DevUI extension: Agent-generated image output. + + This is a DevUI extension because: + - OpenAI Responses API only supports text output in ResponseOutputMessage.content + - ImageGenerationCall exists but is for tool calls (generating images), not returning existing images + - Agent Framework agents can return images via DataContent/UriContent that need proper display + + This type allows images to be displayed inline in chat rather than hidden in trace logs. + """ + + id: str + """The unique ID of the image output.""" + + image_url: str + """The URL or data URI of the image (e.g., data:image/png;base64,...)""" + + type: Literal["output_image"] = "output_image" + """The type of the output. Always `output_image`.""" + + alt_text: str | None = None + """Optional alt text for accessibility.""" + + mime_type: str = "image/png" + """The MIME type of the image (e.g., image/png, image/jpeg).""" + + +class ResponseOutputFile(BaseModel): + """DevUI extension: Agent-generated file output. + + This is a DevUI extension because: + - OpenAI Responses API only supports text output in ResponseOutputMessage.content + - Agent Framework agents can return files via DataContent/UriContent that need proper display + - Supports PDFs, audio files, and other media types + + This type allows files to be displayed inline in chat with appropriate renderers. + """ + + id: str + """The unique ID of the file output.""" + + filename: str + """The filename (used to determine rendering and download).""" + + type: Literal["output_file"] = "output_file" + """The type of the output. 
Always `output_file`.""" + + file_url: str | None = None + """Optional URL to the file.""" + + file_data: str | None = None + """Optional base64-encoded file data.""" + + mime_type: str = "application/octet-stream" + """The MIME type of the file (e.g., application/pdf, audio/mp3).""" + + +class ResponseOutputData(BaseModel): + """DevUI extension: Agent-generated generic data output. + + This is a DevUI extension because: + - OpenAI Responses API only supports text output in ResponseOutputMessage.content + - Agent Framework agents can return arbitrary structured data that needs display + - Useful for debugging and displaying non-text content + + This type allows generic data to be displayed inline in chat. + """ + + id: str + """The unique ID of the data output.""" + + data: str + """The data payload (string representation).""" + + type: Literal["output_data"] = "output_data" + """The type of the output. Always `output_data`.""" + + mime_type: str + """The MIME type of the data.""" + + description: str | None = None + """Optional description of the data.""" + + # Agent Framework extension fields class AgentFrameworkExtraBody(BaseModel): """Agent Framework specific routing fields for OpenAI requests.""" @@ -144,7 +292,7 @@ class AgentFrameworkRequest(BaseModel): """ # All OpenAI fields from ResponseCreateParams - model: str # Used as entity_id in DevUI! 
+ model: str | None = None input: str | list[Any] | dict[str, Any] # ResponseInputParam + dict for workflow structured input stream: bool | None = False @@ -156,20 +304,25 @@ class AgentFrameworkRequest(BaseModel): metadata: dict[str, Any] | None = None temperature: float | None = None max_output_tokens: int | None = None + top_p: float | None = None tools: list[dict[str, Any]] | None = None + # Reasoning parameters (for o-series models) + reasoning: dict[str, Any] | None = None # {"effort": "low" | "medium" | "high" | "minimal"} + # Optional extra_body for advanced use cases extra_body: dict[str, Any] | None = None model_config = ConfigDict(extra="allow") - def get_entity_id(self) -> str: - """Get entity_id from model field. + def get_entity_id(self) -> str | None: + """Get entity_id from metadata.entity_id. - In DevUI, model IS the entity_id (agent/workflow name). - Simple and clean! + In DevUI, entity_id is specified in metadata for routing. """ - return self.model + if self.metadata: + return self.metadata.get("entity_id") + return None def get_conversation_id(self) -> str | None: """Extract conversation_id from conversation parameter. @@ -218,11 +371,40 @@ def to_json(self) -> str: return self.model_dump_json() +class MetaResponse(BaseModel): + """Server metadata response for /meta endpoint. + + Provides information about the DevUI server configuration and capabilities. 
+ """ + + ui_mode: Literal["developer", "user"] = "developer" + """UI interface mode - 'developer' shows debug tools, 'user' shows simplified interface.""" + + version: str + """DevUI version string.""" + + framework: str = "agent_framework" + """Backend framework identifier.""" + + runtime: Literal["python", "dotnet"] = "python" + """Backend runtime/language - 'python' or 'dotnet' for deployment guides and feature availability.""" + + capabilities: dict[str, bool] = {} + """Server capabilities (e.g., tracing, openai_proxy).""" + + auth_required: bool = False + """Whether the server requires Bearer token authentication.""" + + # Export all custom types __all__ = [ "AgentFrameworkRequest", + "MetaResponse", "OpenAIError", "ResponseFunctionResultComplete", + "ResponseOutputData", + "ResponseOutputFile", + "ResponseOutputImage", "ResponseTraceEvent", "ResponseTraceEventComplete", "ResponseWorkflowEventComplete", diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.css b/python/packages/devui/agent_framework_devui/ui/assets/index.css index df5d66375c..d44bb61519 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.css +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.css @@ -1 +1 @@ -/*! 
tailwindcss v4.1.12 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-translate-x:0;--tw-translate-y:0;--tw-translate-z:0;--tw-scale-x:1;--tw-scale-y:1;--tw-scale-z:1;--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-border-style:solid;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 #0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial;--tw-duration:initial;--tw-ease:initial;--tw-animation-delay:0s;--tw-animation-direction:normal;--tw-animation-duration:initial;--tw-animation-fill-mode:none;--tw-animation-iteration-count:1;--tw-enter-blur:0;--tw-enter-opacity:1;--tw-enter-rotate:0;--tw-enter-scale:1;--tw-enter-translate-x:0;--tw-enter-translate-y:0;--tw-exit-blur:0;--tw-exit-opacity:1;--tw-exit-rotate:0;--tw-exit-scale:1;--tw-exit-translate-x:0;--tw
-exit-translate-y:0}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-50:oklch(97.1% .013 17.38);--color-red-100:oklch(93.6% .032 17.717);--color-red-200:oklch(88.5% .062 18.334);--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-red-600:oklch(57.7% .245 27.325);--color-red-700:oklch(50.5% .213 27.518);--color-red-800:oklch(44.4% .177 26.899);--color-red-900:oklch(39.6% .141 25.723);--color-red-950:oklch(25.8% .092 26.042);--color-orange-50:oklch(98% .016 73.684);--color-orange-100:oklch(95.4% .038 75.164);--color-orange-200:oklch(90.1% .076 70.697);--color-orange-400:oklch(75% .183 55.934);--color-orange-500:oklch(70.5% .213 47.604);--color-orange-600:oklch(64.6% .222 41.116);--color-orange-800:oklch(47% .157 37.304);--color-orange-900:oklch(40.8% .123 38.172);--color-orange-950:oklch(26.6% .079 36.259);--color-amber-50:oklch(98.7% .022 95.277);--color-amber-100:oklch(96.2% .059 95.617);--color-amber-200:oklch(92.4% .12 95.746);--color-amber-300:oklch(87.9% .169 91.605);--color-amber-400:oklch(82.8% .189 84.429);--color-amber-500:oklch(76.9% .188 70.08);--color-amber-600:oklch(66.6% .179 58.318);--color-amber-700:oklch(55.5% .163 48.998);--color-amber-800:oklch(47.3% .137 46.201);--color-amber-900:oklch(41.4% .112 45.904);--color-amber-950:oklch(27.9% .077 45.635);--color-yellow-100:oklch(97.3% .071 103.193);--color-yellow-200:oklch(94.5% .129 101.54);--color-yellow-400:oklch(85.2% .199 91.936);--color-yellow-600:oklch(68.1% .162 75.834);--color-yellow-700:oklch(55.4% .135 66.442);--color-green-50:oklch(98.2% .018 155.826);--color-green-100:oklch(96.2% .044 156.743);--color-green-200:oklch(92.5% .084 155.995);--color-green-300:oklch(87.1% .15 154.449);--color-green-400:oklch(79.2% .209 
151.711);--color-green-500:oklch(72.3% .219 149.579);--color-green-600:oklch(62.7% .194 149.214);--color-green-700:oklch(52.7% .154 150.069);--color-green-800:oklch(44.8% .119 151.328);--color-green-900:oklch(39.3% .095 152.535);--color-green-950:oklch(26.6% .065 152.934);--color-emerald-50:oklch(97.9% .021 166.113);--color-emerald-100:oklch(95% .052 163.051);--color-emerald-200:oklch(90.5% .093 164.15);--color-emerald-300:oklch(84.5% .143 164.978);--color-emerald-400:oklch(76.5% .177 163.223);--color-emerald-600:oklch(59.6% .145 163.225);--color-emerald-700:oklch(50.8% .118 165.612);--color-emerald-800:oklch(43.2% .095 166.913);--color-emerald-900:oklch(37.8% .077 168.94);--color-emerald-950:oklch(26.2% .051 172.552);--color-blue-50:oklch(97% .014 254.604);--color-blue-100:oklch(93.2% .032 255.585);--color-blue-200:oklch(88.2% .059 254.128);--color-blue-300:oklch(80.9% .105 251.813);--color-blue-400:oklch(70.7% .165 254.624);--color-blue-500:oklch(62.3% .214 259.815);--color-blue-600:oklch(54.6% .245 262.881);--color-blue-700:oklch(48.8% .243 264.376);--color-blue-800:oklch(42.4% .199 265.638);--color-blue-900:oklch(37.9% .146 265.522);--color-blue-950:oklch(28.2% .091 267.935);--color-purple-50:oklch(97.7% .014 308.299);--color-purple-100:oklch(94.6% .033 307.174);--color-purple-400:oklch(71.4% .203 305.504);--color-purple-500:oklch(62.7% .265 303.9);--color-purple-600:oklch(55.8% .288 302.321);--color-purple-900:oklch(38.1% .176 304.987);--color-gray-50:oklch(98.5% .002 247.839);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-400:oklch(70.7% .022 261.325);--color-gray-500:oklch(55.1% .027 264.364);--color-gray-600:oklch(44.6% .03 256.802);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-800:oklch(27.8% .033 256.848);--color-gray-900:oklch(21% .034 
264.665);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-lg:32rem;--container-2xl:42rem;--container-3xl:48rem;--container-4xl:56rem;--container-5xl:64rem;--container-7xl:80rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-widest:.1em;--leading-tight:1.25;--leading-relaxed:1.625;--drop-shadow-lg:0 4px 4px #00000026;--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s cubic-bezier(.4,0,.6,1)infinite;--animate-bounce:bounce 1s infinite;--blur-sm:8px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline 
dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 
50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){*{outline-color:color-mix(in oklab,var(--ring)50%,transparent)}}body{background-color:var(--background);color:var(--foreground)}}@layer components;@layer 
utilities{.\@container\/card-header{container:card-header/inline-size}.pointer-events-auto{pointer-events:auto}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.static{position:static}.inset-0{inset:calc(var(--spacing)*0)}.inset-2{inset:calc(var(--spacing)*2)}.inset-x-0{inset-inline:calc(var(--spacing)*0)}.inset-y-0{inset-block:calc(var(--spacing)*0)}.-top-1{top:calc(var(--spacing)*-1)}.-top-2{top:calc(var(--spacing)*-2)}.top-1{top:calc(var(--spacing)*1)}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.-right-1{right:calc(var(--spacing)*-1)}.-right-2{right:calc(var(--spacing)*-2)}.right-0{right:calc(var(--spacing)*0)}.right-1{right:calc(var(--spacing)*1)}.right-2{right:calc(var(--spacing)*2)}.right-3{right:calc(var(--spacing)*3)}.right-4{right:calc(var(--spacing)*4)}.-bottom-2{bottom:calc(var(--spacing)*-2)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-3{bottom:calc(var(--spacing)*3)}.bottom-14{bottom:calc(var(--spacing)*14)}.bottom-24{bottom:calc(var(--spacing)*24)}.-left-2{left:calc(var(--spacing)*-2)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.z-10{z-index:10}.z-20{z-index:20}.z-50{z-index:50}.col-start-2{grid-column-start:2}.row-span-2{grid-row:span 2/span 2}.row-start-1{grid-row-start:1}.container{width:100%}@media (min-width:40rem){.container{max-width:40rem}}@media (min-width:48rem){.container{max-width:48rem}}@media (min-width:64rem){.container{max-width:64rem}}@media (min-width:80rem){.container{max-width:80rem}}@media (min-width:96rem){.container{max-width:96rem}}.container\!{width:100%!important}@media (min-width:40rem){.container\!{max-width:40rem!important}}@media 
(min-width:48rem){.container\!{max-width:48rem!important}}@media (min-width:64rem){.container\!{max-width:64rem!important}}@media (min-width:80rem){.container\!{max-width:80rem!important}}@media (min-width:96rem){.container\!{max-width:96rem!important}}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-auto{margin-inline:auto}.my-1{margin-block:calc(var(--spacing)*1)}.my-2{margin-block:calc(var(--spacing)*2)}.my-3{margin-block:calc(var(--spacing)*3)}.my-4{margin-block:calc(var(--spacing)*4)}.mt-0{margin-top:calc(var(--spacing)*0)}.mt-0\.5{margin-top:calc(var(--spacing)*.5)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-3{margin-top:calc(var(--spacing)*3)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-12{margin-top:calc(var(--spacing)*12)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-1\.5{margin-right:calc(var(--spacing)*1.5)}.mr-2{margin-right:calc(var(--spacing)*2)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-1\.5{margin-left:calc(var(--spacing)*1.5)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-3{margin-left:calc(var(--spacing)*3)}.ml-4{margin-left:calc(var(--spacing)*4)}.ml-5{margin-left:calc(var(--spacing)*5)}.ml-auto{margin-left:auto}.line-clamp-2{-webkit-line-clamp:2;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.line-clamp-3{-webkit-line-clamp:3;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.block{display:block}.contents{display:contents}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.table{display:table}.field-sizing-content{field-sizing:content}.size-2{width:calc(var(--spacing)*2);height:calc(var(--spacing)*2)}.size-3\.5{width:calc(var(-
-spacing)*3.5);height:calc(var(--spacing)*3.5)}.size-4{width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.size-9{width:calc(var(--spacing)*9);height:calc(var(--spacing)*9)}.\!h-2{height:calc(var(--spacing)*2)!important}.h-0{height:calc(var(--spacing)*0)}.h-0\.5{height:calc(var(--spacing)*.5)}.h-1{height:calc(var(--spacing)*1)}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-16{height:calc(var(--spacing)*16)}.h-32{height:calc(var(--spacing)*32)}.h-96{height:calc(var(--spacing)*96)}.h-\[1\.2rem\]{height:1.2rem}.h-\[500px\]{height:500px}.h-\[calc\(100vh-3\.5rem\)\]{height:calc(100vh - 3.5rem)}.h-\[calc\(100vh-3\.7rem\)\]{height:calc(100vh - 
3.7rem)}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-\(--radix-dropdown-menu-content-available-height\){max-height:var(--radix-dropdown-menu-content-available-height)}.max-h-\(--radix-select-content-available-height\){max-height:var(--radix-select-content-available-height)}.max-h-20{max-height:calc(var(--spacing)*20)}.max-h-24{max-height:calc(var(--spacing)*24)}.max-h-32{max-height:calc(var(--spacing)*32)}.max-h-64{max-height:calc(var(--spacing)*64)}.max-h-\[90vh\]{max-height:90vh}.max-h-\[200px\]{max-height:200px}.max-h-none{max-height:none}.max-h-screen{max-height:100vh}.\!min-h-0{min-height:calc(var(--spacing)*0)!important}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-16{min-height:calc(var(--spacing)*16)}.min-h-\[36px\]{min-height:36px}.min-h-\[40px\]{min-height:40px}.min-h-\[50vh\]{min-height:50vh}.min-h-\[240px\]{min-height:240px}.min-h-\[400px\]{min-height:400px}.min-h-screen{min-height:100vh}.\!w-2{width:calc(var(--spacing)*2)!important}.w-1{width:calc(var(--spacing)*1)}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-8{width:calc(var(--spacing)*8)}.w-10{width:calc(var(--spacing)*10)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-56{width:calc(var(--spacing)*56)}.w-64{width:calc(var(--spacing)*64)}.w-80{width:calc(var(--spacing)*80)}.w-\[1\.2rem\]{width:1.2rem}.w-\[600px\]{width:600px}.w-\[800px\]{width:800px}.w-fit{width:fit-content}.w-full{width:100%}.max-w-2xl{max-width:var(--container-2xl)}.max-w-3xl{max-width:var(--container-3xl)}.max-w-4xl{max-width:var(--container-4xl)}.max-w-7xl{max-width:var(--container-7xl)}.max-w-\[80\%\]{max-width:80%}.max-w-\[90vw\]{max-width:90vw}.max-w-full{max-width:100%}.max-w-lg{max-width
:var(--container-lg)}.max-w-md{max-width:var(--container-md)}.max-w-none{max-width:none}.\!min-w-0{min-width:calc(var(--spacing)*0)!important}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-\[8rem\]{min-width:8rem}.min-w-\[300px\]{min-width:300px}.min-w-\[400px\]{min-width:400px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.min-w-full{min-width:100%}.flex-1{flex:1}.flex-shrink-0,.shrink-0{flex-shrink:0}.origin-\(--radix-dropdown-menu-content-transform-origin\){transform-origin:var(--radix-dropdown-menu-content-transform-origin)}.origin-\(--radix-select-content-transform-origin\){transform-origin:var(--radix-select-content-transform-origin)}.-translate-x-1\/2{--tw-translate-x: -50% ;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-0{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-4{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-0{--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-75{--tw-scale-x:75%;--tw-scale-y:75%;--tw-scale-z:75%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-100{--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.rotate-0{rotate:none}.rotate-90{rotate:90deg}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-bounce{animation:var(--animate-bounce)}.animate-in{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-col-resize{cursor:col-resize}.cursor-default{cursor:default}.cursor-pointer{cursor:pointer}.cursor-row-resize{cursor:row-resize}.touch-none{touch-action:none}.resize{resize:both}.resize-none{resize:none}.scroll-my-1{scroll-margin-block:calc(var(--spacing)*1)}.list-inside{list-style-position:inside}.list-decimal{list-style-type:decimal}.list-disc{list-style-type:disc}.list-none{list-style-type:none}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-rows-\[auto_auto\]{grid-template-rows:auto auto}.flex-col{flex-direction:column}.flex-row-reverse{flex-direction:row-reverse}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-end{align-items:flex-end}.items-start{align-items:flex-start}.items-stretch{align-items:stretch}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - 
var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-x-1>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*1)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-x-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-x-reverse)))}.self-start{align-self:flex-start}.justify-self-end{justify-self:flex-end}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-full{border-radius:3.40282e38px!important}.rounded{border-radius:.25rem}.rounded-\[4px\]{border-radius:4px}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-none{border-radius:0}.rounded-sm{border-radius:calc(var(--radius) - 
4px)}.rounded-t{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-r-none{border-top-right-radius:0;border-bottom-right-radius:0}.\!border{border-style:var(--tw-border-style)!important;border-width:1px!important}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-r{border-right-style:var(--tw-border-style);border-right-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-l-0{border-left-style:var(--tw-border-style);border-left-width:0}.border-l-2{border-left-style:var(--tw-border-style);border-left-width:2px}.border-l-4{border-left-style:var(--tw-border-style);border-left-width:4px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-gray-600{border-color:var(--color-gray-600)!important}.border-\[\#643FB2\]{border-color:#643fb2}.border-\[\#643FB2\]\/30{border-color:#643fb24d}.border-\[\#643FB2\]\/40{border-color:#643fb266}.border-amber-200{border-color:var(--color-amber-200)}.border-blue-200{border-color:var(--color-blue-200)}.border-blue-300{border-color:var(--color-blue-300)}.border-blue-400{border-color:var(--color-blue-400)}.border-blue-500\/30{border-color:#3080ff4d}@supports (color:color-mix(in lab,red,red)){.border-blue-500\/30{border-color:color-mix(in oklab,var(--color-blue-500)30%,transparent)}}.border-blue-500\/40{border-color:#3080ff66}@supports (color:color-mix(in lab,red,red)){.border-blue-500\/40{border-color:color-mix(in oklab,var(--color-blue-500)40%,transparent)}}.border-border,.border-border\/50{border-color:var(--border)}@supports (color:color-mix(in lab,red,red)){.border-border\/50{border-color:color-mix(in 
oklab,var(--border)50%,transparent)}}.border-current\/30{border-color:currentColor}@supports (color:color-mix(in lab,red,red)){.border-current\/30{border-color:color-mix(in oklab,currentcolor 30%,transparent)}}.border-destructive\/30{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/30{border-color:color-mix(in oklab,var(--destructive)30%,transparent)}}.border-destructive\/50{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/50{border-color:color-mix(in oklab,var(--destructive)50%,transparent)}}.border-destructive\/70{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/70{border-color:color-mix(in oklab,var(--destructive)70%,transparent)}}.border-emerald-300{border-color:var(--color-emerald-300)}.border-foreground\/5{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/5{border-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.border-foreground\/10{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/10{border-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.border-foreground\/20{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/20{border-color:color-mix(in oklab,var(--foreground)20%,transparent)}}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-300{border-color:var(--color-gray-300)}.border-green-200{border-color:var(--color-green-200)}.border-green-500{border-color:var(--color-green-500)}.border-green-500\/30{border-color:#00c7584d}@supports (color:color-mix(in lab,red,red)){.border-green-500\/30{border-color:color-mix(in oklab,var(--color-green-500)30%,transparent)}}.border-green-500\/40{border-color:#00c75866}@supports (color:color-mix(in lab,red,red)){.border-green-500\/40{border-color:color-mix(in 
oklab,var(--color-green-500)40%,transparent)}}.border-input{border-color:var(--input)}.border-muted{border-color:var(--muted)}.border-orange-200{border-color:var(--color-orange-200)}.border-orange-500{border-color:var(--color-orange-500)}.border-primary,.border-primary\/20{border-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.border-primary\/20{border-color:color-mix(in oklab,var(--primary)20%,transparent)}}.border-red-200{border-color:var(--color-red-200)}.border-red-500{border-color:var(--color-red-500)}.border-transparent{border-color:#0000}.border-yellow-200{border-color:var(--color-yellow-200)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.bg-\[\#643FB2\]{background-color:#643fb2}.bg-\[\#643FB2\]\/5{background-color:#643fb20d}.bg-\[\#643FB2\]\/10{background-color:#643fb21a}.bg-accent\/10{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in oklab,var(--accent)10%,transparent)}}.bg-amber-50{background-color:var(--color-amber-50)}.bg-background{background-color:var(--background)}.bg-black{background-color:var(--color-black)}.bg-black\/50{background-color:#00000080}@supports (color:color-mix(in lab,red,red)){.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}}.bg-black\/60{background-color:#0009}@supports (color:color-mix(in lab,red,red)){.bg-black\/60{background-color:color-mix(in oklab,var(--color-black)60%,transparent)}}.bg-blue-50{background-color:var(--color-blue-50)}.bg-blue-50\/80{background-color:#eff6ffcc}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/80{background-color:color-mix(in oklab,var(--color-blue-50)80%,transparent)}}.bg-blue-100{background-color:var(--color-blue-100)}.bg-blue-500{background-color:var(--color-blue-500)}.bg-blue-500\/5{background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/5{background-color:color-mix(in 
oklab,var(--color-blue-500)5%,transparent)}}.bg-blue-500\/10{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/10{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-current{background-color:currentColor}.bg-destructive,.bg-destructive\/5{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/5{background-color:color-mix(in oklab,var(--destructive)5%,transparent)}}.bg-destructive\/10{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/10{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.bg-destructive\/80{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/80{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}}.bg-emerald-50{background-color:var(--color-emerald-50)}.bg-emerald-100{background-color:var(--color-emerald-100)}.bg-foreground\/5{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/5{background-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.bg-foreground\/10{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.bg-gray-50{background-color:var(--color-gray-50)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-200{background-color:var(--color-gray-200)}.bg-gray-400{background-color:var(--color-gray-400)}.bg-gray-900\/90{background-color:#101828e6}@supports (color:color-mix(in lab,red,red)){.bg-gray-900\/90{background-color:color-mix(in 
oklab,var(--color-gray-900)90%,transparent)}}.bg-green-50{background-color:var(--color-green-50)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-green-500\/5{background-color:#00c7580d}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/5{background-color:color-mix(in oklab,var(--color-green-500)5%,transparent)}}.bg-green-500\/10{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/10{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.bg-muted,.bg-muted\/30{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/30{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.bg-muted\/50{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.bg-orange-50{background-color:var(--color-orange-50)}.bg-orange-100{background-color:var(--color-orange-100)}.bg-orange-500{background-color:var(--color-orange-500)}.bg-popover{background-color:var(--popover)}.bg-primary,.bg-primary\/10{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/10{background-color:color-mix(in oklab,var(--primary)10%,transparent)}}.bg-primary\/30{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/30{background-color:color-mix(in oklab,var(--primary)30%,transparent)}}.bg-primary\/40{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/40{background-color:color-mix(in 
oklab,var(--primary)40%,transparent)}}.bg-purple-50{background-color:var(--color-purple-50)}.bg-purple-100{background-color:var(--color-purple-100)}.bg-red-50{background-color:var(--color-red-50)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white{background-color:var(--color-white)}.bg-white\/90{background-color:#ffffffe6}@supports (color:color-mix(in lab,red,red)){.bg-white\/90{background-color:color-mix(in oklab,var(--color-white)90%,transparent)}}.bg-yellow-100{background-color:var(--color-yellow-100)}.fill-current{fill:currentColor}.object-cover{object-fit:cover}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-1\.5{padding:calc(var(--spacing)*1.5)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-8{padding:calc(var(--spacing)*8)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-1\.5{padding-inline:calc(var(--spacing)*1.5)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0{padding-block:calc(var(--spacing)*0)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:calc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.py-8{padding-block:calc(var(--spacing)*8)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-3{padding-top:calc(var(--spacing
)*3)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pt-8{padding-top:calc(var(--spacing)*8)}.pr-2{padding-right:calc(var(--spacing)*2)}.pr-4{padding-right:calc(var(--spacing)*4)}.pr-8{padding-right:calc(var(--spacing)*8)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-3{padding-bottom:calc(var(--spacing)*3)}.pb-4{padding-bottom:calc(var(--spacing)*4)}.pb-6{padding-bottom:calc(var(--spacing)*6)}.pb-12{padding-bottom:calc(var(--spacing)*12)}.pl-2{padding-left:calc(var(--spacing)*2)}.pl-3{padding-left:calc(var(--spacing)*3)}.pl-4{padding-left:calc(var(--spacing)*4)}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[10px\]{font-size:10px}.leading-none{--tw-leading:1;line-height:1}.leading-relaxed{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.leading-tight{--tw-leading:var(--leading-tight);line-height:var(--leading-tight)}.font-bold{--tw-font-weight:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--track
ing-widest)}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.text-\[\#643FB2\]{color:#643fb2}.text-amber-500{color:var(--color-amber-500)}.text-amber-600{color:var(--color-amber-600)}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-amber-900{color:var(--color-amber-900)}.text-blue-500{color:var(--color-blue-500)}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-blue-800{color:var(--color-blue-800)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive{color:var(--destructive)}.text-emerald-600{color:var(--color-emerald-600)}.text-emerald-700{color:var(--color-emerald-700)}.text-emerald-800{color:var(--color-emerald-800)}.text-foreground{color:var(--foreground)}.text-gray-300{color:var(--color-gray-300)}.text-gray-400{color:var(--color-gray-400)}.text-gray-500{color:var(--color-gray-500)}.text-gray-600{color:var(--color-gray-600)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-green-600{color:var(--color-green-600)}.text-green-700{color:var(--color-green-700)}.text-green-800{color:var(--color-green-800)}.text-muted-foreground,.text-muted-foreground\/80{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/80{color:color-mix(in 
oklab,var(--muted-foreground)80%,transparent)}}.text-orange-500{color:var(--color-orange-500)}.text-orange-600{color:var(--color-orange-600)}.text-orange-800{color:var(--color-orange-800)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-purple-500{color:var(--color-purple-500)}.text-purple-600{color:var(--color-purple-600)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-red-700{color:var(--color-red-700)}.text-red-800{color:var(--color-red-800)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.text-yellow-700{color:var(--color-yellow-700)}.capitalize{text-transform:capitalize}.uppercase{text-transform:uppercase}.italic{font-style:italic}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-xs{--tw-shadow:0 1px 2px 0 var(--tw-shadow-color,#0000000d);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-2{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[\#643FB2\]\/20{--tw-shadow-color:#643fb233}@supports (color:color-mix(in lab,red,red)){.shadow-\[\#643FB2\]\/20{--tw-shadow-color:color-mix(in oklab,oklab(47.4316% .069152 -.159147/.2) var(--tw-shadow-alpha),transparent)}}.shadow-green-500\/20{--tw-shadow-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.shadow-green-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-green-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-orange-500\/20{--tw-shadow-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.shadow-orange-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-orange-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-primary\/25{--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.shadow-primary\/25{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)25%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-red-500\/20{--tw-shadow-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.shadow-red-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in 
oklab,var(--color-red-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.ring-blue-500{--tw-ring-color:var(--color-blue-500)}.ring-offset-2{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline-hidden{--tw-outline-style:none;outline-style:none}@media (forced-colors:active){.outline-hidden{outline-offset:2px;outline:2px solid #0000}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.drop-shadow-lg{--tw-drop-shadow-size:drop-shadow(0 4px 4px var(--tw-drop-shadow-color,#00000026));--tw-drop-shadow:drop-shadow(var(--drop-shadow-lg));filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter,display,visibility,content-visibility,overlay,pointer-events;tr
ansition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-\[color\,box-shadow\]{transition-property:color,box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-shadow{transition-property:box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-transform{transition-property:transform,translate,scale,rotate;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-none{transition-property:none}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-in-out{--tw-ease:var(--ease-in-out);transition-timing-function:var(--ease-in-out)}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;user-select:none}.\[animation-delay\:-0\.3s\]{animation-delay:-.3s}.\[animation-delay\:-0\.15s\]{a
nimation-delay:-.15s}.fade-in{--tw-enter-opacity:0}.running{animation-play-state:running}.slide-in-from-bottom-2{--tw-enter-translate-y:calc(2*var(--spacing))}.group-open\:rotate-180:is(:where(.group):is([open],:popover-open,:open) *){rotate:180deg}@media (hover:hover){.group-hover\:bg-primary:is(:where(.group):hover *){background-color:var(--primary)}.group-hover\:opacity-100:is(:where(.group):hover *){opacity:1}.group-hover\:shadow-md:is(:where(.group):hover *){--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)20%,transparent)var(--tw-shadow-alpha),transparent)}}}.group-data-\[disabled\=true\]\:pointer-events-none:is(:where(.group)[data-disabled=true] *){pointer-events:none}.group-data-\[disabled\=true\]\:opacity-50:is(:where(.group)[data-disabled=true] *){opacity:.5}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-50:is(:where(.peer):disabled~*){opacity:.5}.selection\:bg-primary ::selection{background-color:var(--primary)}.selection\:bg-primary::selection{background-color:var(--primary)}.selection\:text-primary-foreground 
::selection{color:var(--primary-foreground)}.selection\:text-primary-foreground::selection{color:var(--primary-foreground)}.file\:inline-flex::file-selector-button{display:inline-flex}.file\:h-7::file-selector-button{height:calc(var(--spacing)*7)}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}.first\:mt-0:first-child{margin-top:calc(var(--spacing)*0)}.last\:border-r-0:last-child{border-right-style:var(--tw-border-style);border-right-width:0}.last\:border-b-0:last-child{border-bottom-style:var(--tw-border-style);border-bottom-width:0}@media (hover:hover){.hover\:bg-\[\#643FB2\]\/10:hover{background-color:#643fb21a}.hover\:bg-accent:hover{background-color:var(--accent)}.hover\:bg-amber-100:hover{background-color:var(--color-amber-100)}.hover\:bg-blue-500\/10:hover{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.hover\:bg-blue-500\/10:hover{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.hover\:bg-destructive\/20:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/20:hover{background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.hover\:bg-destructive\/80:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/80:hover{background-color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:bg-destructive\/90:hover{background-color:var(--destructive)}@supports (color:color-mix(in 
lab,red,red)){.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}}.hover\:bg-green-500\/10:hover{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.hover\:bg-green-500\/10:hover{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.hover\:bg-muted:hover,.hover\:bg-muted\/30:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/30:hover{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.hover\:bg-muted\/50:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.hover\:bg-primary\/20:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/20:hover{background-color:color-mix(in oklab,var(--primary)20%,transparent)}}.hover\:bg-primary\/80:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}}.hover\:bg-primary\/90:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}}.hover\:bg-secondary\/80:hover{background-color:var(--secondary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-secondary\/80:hover{background-color:color-mix(in oklab,var(--secondary)80%,transparent)}}.hover\:bg-white:hover{background-color:var(--color-white)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-70:hover{opacity:.7}.hover\:opacity-100:hover{opacity:1}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:border-ring:focus-visible{border-color:var(--ring)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-\[3px\]:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.focus-visible\:ring-ring:focus-visible,.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:color-mix(in 
oklab,var(--ring)50%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.has-data-\[slot\=card-action\]\:grid-cols-\[1fr_auto\]:has([data-slot=card-action]){grid-template-columns:1fr auto}.has-\[\>svg\]\:px-2\.5:has(>svg){padding-inline:calc(var(--spacing)*2.5)}.has-\[\>svg\]\:px-3:has(>svg){padding-inline:calc(var(--spacing)*3)}.has-\[\>svg\]\:px-4:has(>svg){padding-inline:calc(var(--spacing)*4)}.aria-invalid\:border-destructive[aria-invalid=true]{border-color:var(--destructive)}.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:color-mix(in 
oklab,var(--destructive)20%,transparent)}}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[inset\]\:pl-8[data-inset]{padding-left:calc(var(--spacing)*8)}.data-\[placeholder\]\:text-muted-foreground[data-placeholder]{color:var(--muted-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:calc(2*var(--spacing)*-1)}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:calc(2*var(--spacing))}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:calc(2*var(--spacing)*-1)}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:calc(2*var(--spacing))}.data-\[size\=default\]\:h-9[data-size=default]{height:calc(var(--spacing)*9)}.data-\[size\=sm\]\:h-8[data-size=sm]{height:calc(var(--spacing)*8)}:is(.\*\:data-\[slot\=select-value\]\:line-clamp-1>*)[data-slot=select-value]{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}:is(.\*\:data-\[slot\=select-value\]\:flex>*)[data-slot=select-value]{display:flex}:is(.\*\:data-\[slot\=select-value\]\:items-center>*)[data-slot=select-value]{align-items:center}:is(.\*\:data-\[slot\=select-value\]\:gap-2>*)[data-slot=select-value]{gap:calc(var(--spacing)*2)}.data-\[state\=active\]\:bg-background[data-state=active
]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:border-primary[data-state=checked]{border-color:var(--primary)}.data-\[state\=checked\]\:bg-primary[data-state=checked]{background-color:var(--primary)}.data-\[state\=checked\]\:text-primary-foreground[data-state=checked]{color:var(--primary-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{animation:exit var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=open\]\:animate-in[data-state=open]{animation:enter var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-accent-foreground[data-state=open]{color:var(--accent-foreground)}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[variant\=destructive\]\:text-destructive[data-variant=destructive]{color:var(--destructive)}.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in 
lab,red,red)){.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.data-\[variant\=destructive\]\:focus\:text-destructive[data-variant=destructive]:focus{color:var(--destructive)}@media (min-width:40rem){.sm\:col-span-2{grid-column:span 2/span 2}.sm\:w-64{width:calc(var(--spacing)*64)}.sm\:max-w-lg{max-width:var(--container-lg)}.sm\:flex-none{flex:none}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}}@media (min-width:48rem){.md\:col-span-2{grid-column:span 2/span 2}.md\:col-start-2{grid-column-start:2}.md\:inline{display:inline}.md\:max-w-2xl{max-width:var(--container-2xl)}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:gap-8{gap:calc(var(--spacing)*8)}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}@media (min-width:64rem){.lg\:col-span-3{grid-column:span 3/span 3}.lg\:max-w-4xl{max-width:var(--container-4xl)}.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:items-center{align-items:center}.lg\:justify-between{justify-content:space-between}}@media (min-width:80rem){.xl\:col-span-2{grid-column:span 2/span 2}.xl\:col-span-4{grid-column:span 4/span 4}.xl\:max-w-5xl{max-width:var(--container-5xl)}.xl\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}}.dark\:scale-0:is(.dark *){--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:scale-100:is(.dark *){--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:-rotate-90:is(.dark *){rotate:-90deg}.dark\:rotate-0:is(.dark *){rotate:none}.dark\:\!border-gray-500:is(.dark *){border-color:var(--color-gray-500)!important}.dark\:\!border-gray-600:is(.dark 
*){border-color:var(--color-gray-600)!important}.dark\:border-\[\#8B5CF6\]:is(.dark *){border-color:#8b5cf6}.dark\:border-\[\#8B5CF6\]\/30:is(.dark *){border-color:#8b5cf64d}.dark\:border-\[\#8B5CF6\]\/40:is(.dark *){border-color:#8b5cf666}.dark\:border-amber-800:is(.dark *){border-color:var(--color-amber-800)}.dark\:border-amber-900:is(.dark *){border-color:var(--color-amber-900)}.dark\:border-blue-500:is(.dark *){border-color:var(--color-blue-500)}.dark\:border-blue-500\/30:is(.dark *){border-color:#3080ff4d}@supports (color:color-mix(in lab,red,red)){.dark\:border-blue-500\/30:is(.dark *){border-color:color-mix(in oklab,var(--color-blue-500)30%,transparent)}}.dark\:border-blue-500\/40:is(.dark *){border-color:#3080ff66}@supports (color:color-mix(in lab,red,red)){.dark\:border-blue-500\/40:is(.dark *){border-color:color-mix(in oklab,var(--color-blue-500)40%,transparent)}}.dark\:border-blue-600:is(.dark *){border-color:var(--color-blue-600)}.dark\:border-blue-800:is(.dark *){border-color:var(--color-blue-800)}.dark\:border-emerald-600:is(.dark *){border-color:var(--color-emerald-600)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-400:is(.dark *){border-color:var(--color-green-400)}.dark\:border-green-400\/30:is(.dark *){border-color:#05df724d}@supports (color:color-mix(in lab,red,red)){.dark\:border-green-400\/30:is(.dark *){border-color:color-mix(in oklab,var(--color-green-400)30%,transparent)}}.dark\:border-green-400\/40:is(.dark *){border-color:#05df7266}@supports (color:color-mix(in lab,red,red)){.dark\:border-green-400\/40:is(.dark *){border-color:color-mix(in oklab,var(--color-green-400)40%,transparent)}}.dark\:border-green-800:is(.dark *){border-color:var(--color-green-800)}.dark\:border-input:is(.dark *){border-color:var(--input)}.dark\:border-orange-400:is(.dark *){border-color:var(--color-orange-400)}.dark\:border-orange-800:is(.dark 
*){border-color:var(--color-orange-800)}.dark\:border-red-400:is(.dark *){border-color:var(--color-red-400)}.dark\:border-red-800:is(.dark *){border-color:var(--color-red-800)}.dark\:\!bg-gray-800\/90:is(.dark *){background-color:#1e2939e6!important}@supports (color:color-mix(in lab,red,red)){.dark\:\!bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)!important}}.dark\:bg-\[\#8B5CF6\]:is(.dark *){background-color:#8b5cf6}.dark\:bg-\[\#8B5CF6\]\/5:is(.dark *){background-color:#8b5cf60d}.dark\:bg-\[\#8B5CF6\]\/10:is(.dark *){background-color:#8b5cf61a}.dark\:bg-amber-950\/20:is(.dark *){background-color:#46190133}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)20%,transparent)}}.dark\:bg-amber-950\/50:is(.dark *){background-color:#46190180}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)50%,transparent)}}.dark\:bg-background:is(.dark *){background-color:var(--background)}.dark\:bg-blue-500\/5:is(.dark *){background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/5:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)5%,transparent)}}.dark\:bg-blue-500\/10:is(.dark *){background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:bg-blue-900:is(.dark *){background-color:var(--color-blue-900)}.dark\:bg-blue-900\/50:is(.dark *){background-color:#1c398e80}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-900\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-900)50%,transparent)}}.dark\:bg-blue-950\/20:is(.dark *){background-color:#16245633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/20:is(.dark *){background-color:color-mix(in 
oklab,var(--color-blue-950)20%,transparent)}}.dark\:bg-blue-950\/40:is(.dark *){background-color:#16245666}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/40:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)40%,transparent)}}.dark\:bg-blue-950\/50:is(.dark *){background-color:#16245680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)50%,transparent)}}.dark\:bg-card:is(.dark *){background-color:var(--card)}.dark\:bg-destructive\/20:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/20:is(.dark *){background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.dark\:bg-destructive\/60:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/60:is(.dark *){background-color:color-mix(in oklab,var(--destructive)60%,transparent)}}.dark\:bg-emerald-900\/50:is(.dark *){background-color:#004e3b80}@supports (color:color-mix(in lab,red,red)){.dark\:bg-emerald-900\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-emerald-900)50%,transparent)}}.dark\:bg-emerald-950\/50:is(.dark *){background-color:#002c2280}@supports (color:color-mix(in lab,red,red)){.dark\:bg-emerald-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-emerald-950)50%,transparent)}}.dark\:bg-foreground\/10:is(.dark *){background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-foreground\/10:is(.dark *){background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.dark\:bg-gray-500:is(.dark *){background-color:var(--color-gray-500)}.dark\:bg-gray-800:is(.dark *){background-color:var(--color-gray-800)}.dark\:bg-gray-800\/90:is(.dark *){background-color:#1e2939e6}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-800\/90:is(.dark *){background-color:color-mix(in 
oklab,var(--color-gray-800)90%,transparent)}}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-green-400:is(.dark *){background-color:var(--color-green-400)}.dark\:bg-green-400\/5:is(.dark *){background-color:#05df720d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-400\/5:is(.dark *){background-color:color-mix(in oklab,var(--color-green-400)5%,transparent)}}.dark\:bg-green-400\/10:is(.dark *){background-color:#05df721a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-400\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-green-400)10%,transparent)}}.dark\:bg-green-900:is(.dark *){background-color:var(--color-green-900)}.dark\:bg-green-950:is(.dark *){background-color:var(--color-green-950)}.dark\:bg-green-950\/20:is(.dark *){background-color:#032e1533}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)20%,transparent)}}.dark\:bg-green-950\/50:is(.dark *){background-color:#032e1580}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)50%,transparent)}}.dark\:bg-input\/30:is(.dark *){background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-input\/30:is(.dark *){background-color:color-mix(in oklab,var(--input)30%,transparent)}}.dark\:bg-orange-400:is(.dark *){background-color:var(--color-orange-400)}.dark\:bg-orange-900:is(.dark *){background-color:var(--color-orange-900)}.dark\:bg-orange-950:is(.dark *){background-color:var(--color-orange-950)}.dark\:bg-orange-950\/50:is(.dark *){background-color:#44130680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)50%,transparent)}}.dark\:bg-purple-900:is(.dark *){background-color:var(--color-purple-900)}.dark\:bg-red-400:is(.dark 
*){background-color:var(--color-red-400)}.dark\:bg-red-900:is(.dark *){background-color:var(--color-red-900)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-red-950\/20:is(.dark *){background-color:#46080933}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)20%,transparent)}}.dark\:text-\[\#8B5CF6\]:is(.dark *){color:#8b5cf6}.dark\:text-amber-100:is(.dark *){color:var(--color-amber-100)}.dark\:text-amber-200:is(.dark *){color:var(--color-amber-200)}.dark\:text-amber-300:is(.dark *){color:var(--color-amber-300)}.dark\:text-amber-400:is(.dark *){color:var(--color-amber-400)}.dark\:text-amber-500:is(.dark *){color:var(--color-amber-500)}.dark\:text-blue-200:is(.dark *){color:var(--color-blue-200)}.dark\:text-blue-300:is(.dark *){color:var(--color-blue-300)}.dark\:text-blue-400:is(.dark *){color:var(--color-blue-400)}.dark\:text-blue-500:is(.dark *){color:var(--color-blue-500)}.dark\:text-emerald-200:is(.dark *){color:var(--color-emerald-200)}.dark\:text-emerald-300:is(.dark *){color:var(--color-emerald-300)}.dark\:text-emerald-400:is(.dark *){color:var(--color-emerald-400)}.dark\:text-gray-100:is(.dark *){color:var(--color-gray-100)}.dark\:text-gray-300:is(.dark *){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-green-200:is(.dark *){color:var(--color-green-200)}.dark\:text-green-300:is(.dark *){color:var(--color-green-300)}.dark\:text-green-400:is(.dark *){color:var(--color-green-400)}.dark\:text-orange-200:is(.dark *){color:var(--color-orange-200)}.dark\:text-orange-400:is(.dark *){color:var(--color-orange-400)}.dark\:text-purple-400:is(.dark *){color:var(--color-purple-400)}.dark\:text-red-200:is(.dark *){color:var(--color-red-200)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-yellow-400:is(.dark *){color:var(--color-yellow-400)}.dark\:opacity-30:is(.dark *){opacity:.3}@media 
(hover:hover){.dark\:hover\:bg-\[\#8B5CF6\]\/10:is(.dark *):hover{background-color:#8b5cf61a}.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:#4619014d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-amber-950)30%,transparent)}}.dark\:hover\:bg-blue-500\/10:is(.dark *):hover{background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-blue-500\/10:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:hover\:bg-destructive\/30:is(.dark *):hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-destructive\/30:is(.dark *):hover{background-color:color-mix(in oklab,var(--destructive)30%,transparent)}}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-green-400\/10:is(.dark *):hover{background-color:#05df721a}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-green-400\/10:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-green-400)10%,transparent)}}.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--input)50%,transparent)}}}.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:aria-invalid\:ring-destructive\/40:is(.dark 
*)[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:data-\[state\=checked\]\:bg-primary:is(.dark *)[data-state=checked]{background-color:var(--primary)}.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.\[\&_p\]\:leading-relaxed p{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_svg\:not\(\[class\*\=\'size-\'\]\)\]\:size-4 svg:not([class*=size-]){width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\:not\(\[class\*\=\'text-\'\]\)\]\:text-muted-foreground 
svg:not([class*=text-]){color:var(--muted-foreground)}.\[\.border-b\]\:pb-6.border-b{padding-bottom:calc(var(--spacing)*6)}.\[\.border-t\]\:pt-6.border-t{padding-top:calc(var(--spacing)*6)}:is(.\*\:\[span\]\:last\:flex>*):is(span):last-child{display:flex}:is(.\*\:\[span\]\:last\:items-center>*):is(span):last-child{align-items:center}:is(.\*\:\[span\]\:last\:gap-2>*):is(span):last-child{gap:calc(var(--spacing)*2)}:is(.data-\[variant\=destructive\]\:\*\:\[svg\]\:\!text-destructive[data-variant=destructive]>*):is(svg){color:var(--destructive)!important}.\[\&\>svg\]\:absolute>svg{position:absolute}.\[\&\>svg\]\:top-4>svg{top:calc(var(--spacing)*4)}.\[\&\>svg\]\:left-4>svg{left:calc(var(--spacing)*4)}.\[\&\>svg\]\:text-foreground>svg{color:var(--foreground)}.\[\&\>svg\+div\]\:translate-y-\[-3px\]>svg+div{--tw-translate-y:-3px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>svg\~\*\]\:pl-7>svg~*{padding-left:calc(var(--spacing)*7)}}@property --tw-animation-delay{syntax:"*";inherits:false;initial-value:0s}@property --tw-animation-direction{syntax:"*";inherits:false;initial-value:normal}@property --tw-animation-duration{syntax:"*";inherits:false}@property --tw-animation-fill-mode{syntax:"*";inherits:false;initial-value:none}@property --tw-animation-iteration-count{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-rotate{syntax:"*";inherits:false;initial-value:0}@property 
--tw-exit-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-translate-y{syntax:"*";inherits:false;initial-value:0}:root{--radius:.625rem;--background:oklch(100% 0 0);--foreground:oklch(14.5% 0 0);--card:oklch(100% 0 0);--card-foreground:oklch(14.5% 0 0);--popover:oklch(100% 0 0);--popover-foreground:oklch(14.5% 0 0);--primary:oklch(48% .18 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(97% 0 0);--secondary-foreground:oklch(20.5% 0 0);--muted:oklch(97% 0 0);--muted-foreground:oklch(55.6% 0 0);--accent:oklch(97% 0 0);--accent-foreground:oklch(20.5% 0 0);--destructive:oklch(57.7% .245 27.325);--border:oklch(92.2% 0 0);--input:oklch(92.2% 0 0);--ring:oklch(70.8% 0 0);--chart-1:oklch(64.6% .222 41.116);--chart-2:oklch(60% .118 184.704);--chart-3:oklch(39.8% .07 227.392);--chart-4:oklch(82.8% .189 84.429);--chart-5:oklch(76.9% .188 70.08);--sidebar:oklch(98.5% 0 0);--sidebar-foreground:oklch(14.5% 0 0);--sidebar-primary:oklch(20.5% 0 0);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(97% 0 0);--sidebar-accent-foreground:oklch(20.5% 0 0);--sidebar-border:oklch(92.2% 0 0);--sidebar-ring:oklch(70.8% 0 0)}.dark{--background:oklch(14.5% 0 0);--foreground:oklch(98.5% 0 0);--card:oklch(20.5% 0 0);--card-foreground:oklch(98.5% 0 0);--popover:oklch(20.5% 0 0);--popover-foreground:oklch(98.5% 0 0);--primary:oklch(62% .2 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(26.9% 0 0);--secondary-foreground:oklch(98.5% 0 0);--muted:oklch(26.9% 0 0);--muted-foreground:oklch(70.8% 0 0);--accent:oklch(26.9% 0 0);--accent-foreground:oklch(98.5% 0 0);--destructive:oklch(70.4% .191 22.216);--border:oklch(100% 0 0/.1);--input:oklch(100% 0 0/.15);--ring:oklch(55.6% 0 0);--chart-1:oklch(48.8% .243 264.376);--chart-2:oklch(69.6% .17 162.48);--chart-3:oklch(76.9% .188 70.08);--chart-4:oklch(62.7% .265 303.9);--chart-5:oklch(64.5% .246 
16.439);--sidebar:oklch(20.5% 0 0);--sidebar-foreground:oklch(98.5% 0 0);--sidebar-primary:oklch(48.8% .243 264.376);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(26.9% 0 0);--sidebar-accent-foreground:oklch(98.5% 0 0);--sidebar-border:oklch(100% 0 0/.1);--sidebar-ring:oklch(55.6% 0 0)}.workflow-chat-view .border-green-200{border-color:var(--color-emerald-200)}.workflow-chat-view .bg-green-50{background-color:var(--color-emerald-50)}.workflow-chat-view .bg-green-100{background-color:var(--color-emerald-100)}.workflow-chat-view .text-green-600{color:var(--color-emerald-600)}.workflow-chat-view .text-green-700{color:var(--color-emerald-700)}.workflow-chat-view .text-green-800{color:var(--color-emerald-800)}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property --tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property 
--tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property --tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property --tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property --tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property 
--tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@keyframes bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}@keyframes enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0));filter:blur(var(--tw-enter-blur,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0));filter:blur(var(--tw-exit-blur,0))}}.react-flow{direction:ltr;--xy-edge-stroke-default: #b1b1b7;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #555;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(255, 255, 255, .5);--xy-minimap-background-color-default: #fff;--xy-minimap-mask-background-color-default: rgba(240, 240, 240, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #e2e2e2;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: transparent;--xy-background-pattern-dots-color-default: #91919a;--xy-background-pattern-lines-color-default: #eee;--xy-background-pattern-cross-color-default: #e2e2e2;background-color:var(--xy-background-color, var(--xy-background-color-default));--xy-node-color-default: inherit;--xy-node-border-default: 1px 
solid #1a192b;--xy-node-background-color-default: #fff;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(0, 0, 0, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #1a192b;--xy-node-border-radius-default: 3px;--xy-handle-background-color-default: #1a192b;--xy-handle-border-color-default: #fff;--xy-selection-background-color-default: rgba(0, 89, 220, .08);--xy-selection-border-default: 1px dotted rgba(0, 89, 220, .8);--xy-controls-button-background-color-default: #fefefe;--xy-controls-button-background-color-hover-default: #f4f4f4;--xy-controls-button-color-default: inherit;--xy-controls-button-color-hover-default: inherit;--xy-controls-button-border-color-default: #eee;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #ffffff;--xy-edge-label-color-default: inherit;--xy-resize-background-color-default: #3367d9}.react-flow.dark{--xy-edge-stroke-default: #3e3e3e;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #727272;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(150, 150, 150, .25);--xy-minimap-background-color-default: #141414;--xy-minimap-mask-background-color-default: rgba(60, 60, 60, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #2b2b2b;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: #141414;--xy-background-pattern-dots-color-default: #777;--xy-background-pattern-lines-color-default: #777;--xy-background-pattern-cross-color-default: #777;--xy-node-color-default: #f8f8f8;--xy-node-border-default: 1px solid #3c3c3c;--xy-node-background-color-default: #1e1e1e;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 
0 1px 4px 1px rgba(255, 255, 255, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #999;--xy-handle-background-color-default: #bebebe;--xy-handle-border-color-default: #1e1e1e;--xy-selection-background-color-default: rgba(200, 200, 220, .08);--xy-selection-border-default: 1px dotted rgba(200, 200, 220, .8);--xy-controls-button-background-color-default: #2b2b2b;--xy-controls-button-background-color-hover-default: #3e3e3e;--xy-controls-button-color-default: #f8f8f8;--xy-controls-button-color-hover-default: #fff;--xy-controls-button-border-color-default: #5b5b5b;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #141414;--xy-edge-label-color-default: #f8f8f8}.react-flow__background{background-color:var(--xy-background-color-props, var(--xy-background-color, var(--xy-background-color-default)));pointer-events:none;z-index:-1}.react-flow__container{position:absolute;width:100%;height:100%;top:0;left:0}.react-flow__pane{z-index:1}.react-flow__pane.draggable{cursor:grab}.react-flow__pane.dragging{cursor:grabbing}.react-flow__pane.selection{cursor:pointer}.react-flow__viewport{transform-origin:0 0;z-index:2;pointer-events:none}.react-flow__renderer{z-index:4}.react-flow__selection{z-index:6}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible{outline:none}.react-flow__edge-path{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default));stroke-width:var(--xy-edge-stroke-width, var(--xy-edge-stroke-width-default));fill:none}.react-flow__connection-path{stroke:var(--xy-connectionline-stroke, var(--xy-connectionline-stroke-default));stroke-width:var(--xy-connectionline-stroke-width, var(--xy-connectionline-stroke-width-default));fill:none}.react-flow .react-flow__edges{position:absolute}.react-flow .react-flow__edges 
svg{overflow:visible;position:absolute;pointer-events:none}.react-flow__edge{pointer-events:visibleStroke}.react-flow__edge.selectable{cursor:pointer}.react-flow__edge.animated path{stroke-dasharray:5;animation:dashdraw .5s linear infinite}.react-flow__edge.animated path.react-flow__edge-interaction{stroke-dasharray:none;animation:none}.react-flow__edge.inactive{pointer-events:none}.react-flow__edge.selected,.react-flow__edge:focus,.react-flow__edge:focus-visible{outline:none}.react-flow__edge.selected .react-flow__edge-path,.react-flow__edge.selectable:focus .react-flow__edge-path,.react-flow__edge.selectable:focus-visible .react-flow__edge-path{stroke:var(--xy-edge-stroke-selected, var(--xy-edge-stroke-selected-default))}.react-flow__edge-textwrapper{pointer-events:all}.react-flow__edge .react-flow__edge-text{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__arrowhead polyline{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__arrowhead polyline.arrowclosed{fill:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__connection{pointer-events:none}.react-flow__connection .animated{stroke-dasharray:5;animation:dashdraw .5s linear infinite}svg.react-flow__connectionline{z-index:1001;overflow:visible;position:absolute}.react-flow__nodes{pointer-events:none;transform-origin:0 0}.react-flow__node{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;pointer-events:all;transform-origin:0 0;box-sizing:border-box;cursor:default}.react-flow__node.selectable{cursor:pointer}.react-flow__node.draggable{cursor:grab;pointer-events:all}.react-flow__node.draggable.dragging{cursor:grabbing}.react-flow__nodesselection{z-index:3;transform-origin:left 
top;pointer-events:none}.react-flow__nodesselection-rect{position:absolute;pointer-events:all;cursor:grab}.react-flow__handle{position:absolute;pointer-events:none;min-width:5px;min-height:5px;width:6px;height:6px;background-color:var(--xy-handle-background-color, var(--xy-handle-background-color-default));border:1px solid var(--xy-handle-border-color, var(--xy-handle-border-color-default));border-radius:100%}.react-flow__handle.connectingfrom{pointer-events:all}.react-flow__handle.connectionindicator{pointer-events:all;cursor:crosshair}.react-flow__handle-bottom{top:auto;left:50%;bottom:0;transform:translate(-50%,50%)}.react-flow__handle-top{top:0;left:50%;transform:translate(-50%,-50%)}.react-flow__handle-left{top:50%;left:0;transform:translate(-50%,-50%)}.react-flow__handle-right{top:50%;right:0;transform:translate(50%,-50%)}.react-flow__edgeupdater{cursor:move;pointer-events:all}.react-flow__pane.selection .react-flow__panel{pointer-events:none}.react-flow__panel{position:absolute;z-index:5;margin:15px}.react-flow__panel.top{top:0}.react-flow__panel.bottom{bottom:0}.react-flow__panel.top.center,.react-flow__panel.bottom.center{left:50%;transform:translate(-15px) translate(-50%)}.react-flow__panel.left{left:0}.react-flow__panel.right{right:0}.react-flow__panel.left.center,.react-flow__panel.right.center{top:50%;transform:translateY(-15px) translateY(-50%)}.react-flow__attribution{font-size:10px;background:var(--xy-attribution-background-color, var(--xy-attribution-background-color-default));padding:2px 3px;margin:0}.react-flow__attribution a{text-decoration:none;color:#999}@keyframes 
dashdraw{0%{stroke-dashoffset:10}}.react-flow__edgelabel-renderer{position:absolute;width:100%;height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;left:0;top:0}.react-flow__viewport-portal{position:absolute;width:100%;height:100%;left:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__minimap{background:var( --xy-minimap-background-color-props, var(--xy-minimap-background-color, var(--xy-minimap-background-color-default)) )}.react-flow__minimap-svg{display:block}.react-flow__minimap-mask{fill:var( --xy-minimap-mask-background-color-props, var(--xy-minimap-mask-background-color, var(--xy-minimap-mask-background-color-default)) );stroke:var( --xy-minimap-mask-stroke-color-props, var(--xy-minimap-mask-stroke-color, var(--xy-minimap-mask-stroke-color-default)) );stroke-width:var( --xy-minimap-mask-stroke-width-props, var(--xy-minimap-mask-stroke-width, var(--xy-minimap-mask-stroke-width-default)) )}.react-flow__minimap-node{fill:var( --xy-minimap-node-background-color-props, var(--xy-minimap-node-background-color, var(--xy-minimap-node-background-color-default)) );stroke:var( --xy-minimap-node-stroke-color-props, var(--xy-minimap-node-stroke-color, var(--xy-minimap-node-stroke-color-default)) );stroke-width:var( --xy-minimap-node-stroke-width-props, var(--xy-minimap-node-stroke-width, var(--xy-minimap-node-stroke-width-default)) )}.react-flow__background-pattern.dots{fill:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-dots-color-default)) )}.react-flow__background-pattern.lines{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-lines-color-default)) )}.react-flow__background-pattern.cross{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-cross-color-default)) 
)}.react-flow__controls{display:flex;flex-direction:column;box-shadow:var(--xy-controls-box-shadow, var(--xy-controls-box-shadow-default))}.react-flow__controls.horizontal{flex-direction:row}.react-flow__controls-button{display:flex;justify-content:center;align-items:center;height:26px;width:26px;padding:4px;border:none;background:var(--xy-controls-button-background-color, var(--xy-controls-button-background-color-default));border-bottom:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) );color:var( --xy-controls-button-color-props, var(--xy-controls-button-color, var(--xy-controls-button-color-default)) );cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__controls-button svg{width:100%;max-width:12px;max-height:12px;fill:currentColor}.react-flow__edge.updating .react-flow__edge-path{stroke:#777}.react-flow__edge-text{font-size:10px}.react-flow__node.selectable:focus,.react-flow__node.selectable:focus-visible{outline:none}.react-flow__node-input,.react-flow__node-default,.react-flow__node-output,.react-flow__node-group{padding:10px;border-radius:var(--xy-node-border-radius, var(--xy-node-border-radius-default));width:150px;font-size:12px;color:var(--xy-node-color, var(--xy-node-color-default));text-align:center;border:var(--xy-node-border, var(--xy-node-border-default));background-color:var(--xy-node-background-color, var(--xy-node-background-color-default))}.react-flow__node-input.selectable:hover,.react-flow__node-default.selectable:hover,.react-flow__node-output.selectable:hover,.react-flow__node-group.selectable:hover{box-shadow:var(--xy-node-boxshadow-hover, 
var(--xy-node-boxshadow-hover-default))}.react-flow__node-input.selectable.selected,.react-flow__node-input.selectable:focus,.react-flow__node-input.selectable:focus-visible,.react-flow__node-default.selectable.selected,.react-flow__node-default.selectable:focus,.react-flow__node-default.selectable:focus-visible,.react-flow__node-output.selectable.selected,.react-flow__node-output.selectable:focus,.react-flow__node-output.selectable:focus-visible,.react-flow__node-group.selectable.selected,.react-flow__node-group.selectable:focus,.react-flow__node-group.selectable:focus-visible{box-shadow:var(--xy-node-boxshadow-selected, var(--xy-node-boxshadow-selected-default))}.react-flow__node-group{background-color:var(--xy-node-group-background-color, var(--xy-node-group-background-color-default))}.react-flow__nodesselection-rect,.react-flow__selection{background:var(--xy-selection-background-color, var(--xy-selection-background-color-default));border:var(--xy-selection-border, var(--xy-selection-border-default))}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible,.react-flow__selection:focus,.react-flow__selection:focus-visible{outline:none}.react-flow__controls-button:hover{background:var( --xy-controls-button-background-color-hover-props, var(--xy-controls-button-background-color-hover, var(--xy-controls-button-background-color-hover-default)) );color:var( --xy-controls-button-color-hover-props, var(--xy-controls-button-color-hover, var(--xy-controls-button-color-hover-default)) )}.react-flow__controls-button:disabled{pointer-events:none}.react-flow__controls-button:disabled svg{fill-opacity:.4}.react-flow__controls-button:last-child{border-bottom:none}.react-flow__controls.horizontal .react-flow__controls-button{border-bottom:none;border-right:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) )}.react-flow__controls.horizontal 
.react-flow__controls-button:last-child{border-right:none}.react-flow__resize-control{position:absolute}.react-flow__resize-control.left,.react-flow__resize-control.right{cursor:ew-resize}.react-flow__resize-control.top,.react-flow__resize-control.bottom{cursor:ns-resize}.react-flow__resize-control.top.left,.react-flow__resize-control.bottom.right{cursor:nwse-resize}.react-flow__resize-control.bottom.left,.react-flow__resize-control.top.right{cursor:nesw-resize}.react-flow__resize-control.handle{width:5px;height:5px;border:1px solid #fff;border-radius:1px;background-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));translate:-50% -50%}.react-flow__resize-control.handle.left{left:0;top:50%}.react-flow__resize-control.handle.right{left:100%;top:50%}.react-flow__resize-control.handle.top{left:50%;top:0}.react-flow__resize-control.handle.bottom{left:50%;top:100%}.react-flow__resize-control.handle.top.left,.react-flow__resize-control.handle.bottom.left{left:0}.react-flow__resize-control.handle.top.right,.react-flow__resize-control.handle.bottom.right{left:100%}.react-flow__resize-control.line{border-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));border-width:0;border-style:solid}.react-flow__resize-control.line.left,.react-flow__resize-control.line.right{width:1px;transform:translate(-50%);top:0;height:100%}.react-flow__resize-control.line.left{left:0;border-left-width:1px}.react-flow__resize-control.line.right{left:100%;border-right-width:1px}.react-flow__resize-control.line.top,.react-flow__resize-control.line.bottom{height:1px;transform:translateY(-50%);left:0;width:100%}.react-flow__resize-control.line.top{top:0;border-top-width:1px}.react-flow__resize-control.line.bottom{border-bottom-width:1px;top:100%}.react-flow__edge-textbg{fill:var(--xy-edge-label-background-color, var(--xy-edge-label-background-color-default))}.react-flow__edge-text{fill:var(--xy-edge-label-color, 
var(--xy-edge-label-color-default))} +/*! tailwindcss v4.1.12 | MIT License | https://tailwindcss.com */@layer properties{@supports (((-webkit-hyphens:none)) and (not (margin-trim:inline))) or ((-moz-orient:inline) and (not (color:rgb(from red r g b)))){*,:before,:after,::backdrop{--tw-translate-x:0;--tw-translate-y:0;--tw-translate-z:0;--tw-scale-x:1;--tw-scale-y:1;--tw-scale-z:1;--tw-rotate-x:initial;--tw-rotate-y:initial;--tw-rotate-z:initial;--tw-skew-x:initial;--tw-skew-y:initial;--tw-space-y-reverse:0;--tw-space-x-reverse:0;--tw-border-style:solid;--tw-leading:initial;--tw-font-weight:initial;--tw-tracking:initial;--tw-shadow:0 0 #0000;--tw-shadow-color:initial;--tw-shadow-alpha:100%;--tw-inset-shadow:0 0 #0000;--tw-inset-shadow-color:initial;--tw-inset-shadow-alpha:100%;--tw-ring-color:initial;--tw-ring-shadow:0 0 #0000;--tw-inset-ring-color:initial;--tw-inset-ring-shadow:0 0 #0000;--tw-ring-inset:initial;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-offset-shadow:0 0 
#0000;--tw-outline-style:solid;--tw-blur:initial;--tw-brightness:initial;--tw-contrast:initial;--tw-grayscale:initial;--tw-hue-rotate:initial;--tw-invert:initial;--tw-opacity:initial;--tw-saturate:initial;--tw-sepia:initial;--tw-drop-shadow:initial;--tw-drop-shadow-color:initial;--tw-drop-shadow-alpha:100%;--tw-drop-shadow-size:initial;--tw-backdrop-blur:initial;--tw-backdrop-brightness:initial;--tw-backdrop-contrast:initial;--tw-backdrop-grayscale:initial;--tw-backdrop-hue-rotate:initial;--tw-backdrop-invert:initial;--tw-backdrop-opacity:initial;--tw-backdrop-saturate:initial;--tw-backdrop-sepia:initial;--tw-duration:initial;--tw-ease:initial;--tw-animation-delay:0s;--tw-animation-direction:normal;--tw-animation-duration:initial;--tw-animation-fill-mode:none;--tw-animation-iteration-count:1;--tw-enter-blur:0;--tw-enter-opacity:1;--tw-enter-rotate:0;--tw-enter-scale:1;--tw-enter-translate-x:0;--tw-enter-translate-y:0;--tw-exit-blur:0;--tw-exit-opacity:1;--tw-exit-rotate:0;--tw-exit-scale:1;--tw-exit-translate-x:0;--tw-exit-translate-y:0}}}@layer theme{:root,:host{--font-sans:ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-mono:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;--color-red-50:oklch(97.1% .013 17.38);--color-red-100:oklch(93.6% .032 17.717);--color-red-200:oklch(88.5% .062 18.334);--color-red-300:oklch(80.8% .114 19.571);--color-red-400:oklch(70.4% .191 22.216);--color-red-500:oklch(63.7% .237 25.331);--color-red-600:oklch(57.7% .245 27.325);--color-red-700:oklch(50.5% .213 27.518);--color-red-800:oklch(44.4% .177 26.899);--color-red-900:oklch(39.6% .141 25.723);--color-red-950:oklch(25.8% .092 26.042);--color-orange-50:oklch(98% .016 73.684);--color-orange-100:oklch(95.4% .038 75.164);--color-orange-200:oklch(90.1% .076 70.697);--color-orange-400:oklch(75% .183 55.934);--color-orange-500:oklch(70.5% .213 47.604);--color-orange-600:oklch(64.6% 
.222 41.116);--color-orange-800:oklch(47% .157 37.304);--color-orange-900:oklch(40.8% .123 38.172);--color-orange-950:oklch(26.6% .079 36.259);--color-amber-50:oklch(98.7% .022 95.277);--color-amber-100:oklch(96.2% .059 95.617);--color-amber-200:oklch(92.4% .12 95.746);--color-amber-300:oklch(87.9% .169 91.605);--color-amber-400:oklch(82.8% .189 84.429);--color-amber-500:oklch(76.9% .188 70.08);--color-amber-600:oklch(66.6% .179 58.318);--color-amber-700:oklch(55.5% .163 48.998);--color-amber-800:oklch(47.3% .137 46.201);--color-amber-900:oklch(41.4% .112 45.904);--color-amber-950:oklch(27.9% .077 45.635);--color-yellow-100:oklch(97.3% .071 103.193);--color-yellow-200:oklch(94.5% .129 101.54);--color-yellow-400:oklch(85.2% .199 91.936);--color-yellow-600:oklch(68.1% .162 75.834);--color-yellow-700:oklch(55.4% .135 66.442);--color-green-50:oklch(98.2% .018 155.826);--color-green-100:oklch(96.2% .044 156.743);--color-green-200:oklch(92.5% .084 155.995);--color-green-300:oklch(87.1% .15 154.449);--color-green-400:oklch(79.2% .209 151.711);--color-green-500:oklch(72.3% .219 149.579);--color-green-600:oklch(62.7% .194 149.214);--color-green-700:oklch(52.7% .154 150.069);--color-green-800:oklch(44.8% .119 151.328);--color-green-900:oklch(39.3% .095 152.535);--color-green-950:oklch(26.6% .065 152.934);--color-emerald-50:oklch(97.9% .021 166.113);--color-emerald-100:oklch(95% .052 163.051);--color-emerald-200:oklch(90.5% .093 164.15);--color-emerald-600:oklch(59.6% .145 163.225);--color-emerald-700:oklch(50.8% .118 165.612);--color-emerald-800:oklch(43.2% .095 166.913);--color-blue-50:oklch(97% .014 254.604);--color-blue-100:oklch(93.2% .032 255.585);--color-blue-200:oklch(88.2% .059 254.128);--color-blue-300:oklch(80.9% .105 251.813);--color-blue-400:oklch(70.7% .165 254.624);--color-blue-500:oklch(62.3% .214 259.815);--color-blue-600:oklch(54.6% .245 262.881);--color-blue-700:oklch(48.8% .243 264.376);--color-blue-800:oklch(42.4% .199 
265.638);--color-blue-900:oklch(37.9% .146 265.522);--color-blue-950:oklch(28.2% .091 267.935);--color-purple-50:oklch(97.7% .014 308.299);--color-purple-100:oklch(94.6% .033 307.174);--color-purple-400:oklch(71.4% .203 305.504);--color-purple-500:oklch(62.7% .265 303.9);--color-purple-600:oklch(55.8% .288 302.321);--color-purple-900:oklch(38.1% .176 304.987);--color-gray-50:oklch(98.5% .002 247.839);--color-gray-100:oklch(96.7% .003 264.542);--color-gray-200:oklch(92.8% .006 264.531);--color-gray-300:oklch(87.2% .01 258.338);--color-gray-400:oklch(70.7% .022 261.325);--color-gray-500:oklch(55.1% .027 264.364);--color-gray-600:oklch(44.6% .03 256.802);--color-gray-700:oklch(37.3% .034 259.733);--color-gray-800:oklch(27.8% .033 256.848);--color-gray-900:oklch(21% .034 264.665);--color-black:#000;--color-white:#fff;--spacing:.25rem;--container-md:28rem;--container-lg:32rem;--container-2xl:42rem;--container-3xl:48rem;--container-4xl:56rem;--container-5xl:64rem;--container-7xl:80rem;--text-xs:.75rem;--text-xs--line-height:calc(1/.75);--text-sm:.875rem;--text-sm--line-height:calc(1.25/.875);--text-base:1rem;--text-base--line-height: 1.5 ;--text-lg:1.125rem;--text-lg--line-height:calc(1.75/1.125);--text-xl:1.25rem;--text-xl--line-height:calc(1.75/1.25);--text-2xl:1.5rem;--text-2xl--line-height:calc(2/1.5);--font-weight-medium:500;--font-weight-semibold:600;--font-weight-bold:700;--tracking-tight:-.025em;--tracking-widest:.1em;--leading-tight:1.25;--leading-relaxed:1.625;--drop-shadow-lg:0 4px 4px #00000026;--ease-out:cubic-bezier(0,0,.2,1);--ease-in-out:cubic-bezier(.4,0,.2,1);--animate-spin:spin 1s linear infinite;--animate-pulse:pulse 2s cubic-bezier(.4,0,.6,1)infinite;--animate-bounce:bounce 1s infinite;--blur-sm:8px;--default-transition-duration:.15s;--default-transition-timing-function:cubic-bezier(.4,0,.2,1);--default-font-family:var(--font-sans);--default-mono-font-family:var(--font-mono)}}@layer base{*,:after,:before,::backdrop{box-sizing:border-box;border:0 
solid;margin:0;padding:0}::file-selector-button{box-sizing:border-box;border:0 solid;margin:0;padding:0}html,:host{-webkit-text-size-adjust:100%;tab-size:4;line-height:1.5;font-family:var(--default-font-family,ui-sans-serif,system-ui,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji");font-feature-settings:var(--default-font-feature-settings,normal);font-variation-settings:var(--default-font-variation-settings,normal);-webkit-tap-highlight-color:transparent}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;-webkit-text-decoration:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:var(--default-mono-font-family,ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace);font-feature-settings:var(--default-mono-font-feature-settings,normal);font-variation-settings:var(--default-mono-font-variation-settings,normal);font-size:1em}small{font-size:80%}sub,sup{vertical-align:baseline;font-size:75%;line-height:0;position:relative}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}:-moz-focusring{outline:auto}progress{vertical-align:baseline}summary{display:list-item}ol,ul,menu{list-style:none}img,svg,video,canvas,audio,iframe,embed,object{vertical-align:middle;display:block}img,video{max-width:100%;height:auto}button,input,select,optgroup,textarea{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}::file-selector-button{font:inherit;font-feature-settings:inherit;font-variation-settings:inherit;letter-spacing:inherit;color:inherit;opacity:1;background-color:#0000;border-radius:0}:where(select:is([multiple],[size])) 
optgroup{font-weight:bolder}:where(select:is([multiple],[size])) optgroup option{padding-inline-start:20px}::file-selector-button{margin-inline-end:4px}::placeholder{opacity:1}@supports (not ((-webkit-appearance:-apple-pay-button))) or (contain-intrinsic-size:1px){::placeholder{color:currentColor}@supports (color:color-mix(in lab,red,red)){::placeholder{color:color-mix(in oklab,currentcolor 50%,transparent)}}}textarea{resize:vertical}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-date-and-time-value{min-height:1lh;text-align:inherit}::-webkit-datetime-edit{display:inline-flex}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-datetime-edit{padding-block:0}::-webkit-datetime-edit-year-field{padding-block:0}::-webkit-datetime-edit-month-field{padding-block:0}::-webkit-datetime-edit-day-field{padding-block:0}::-webkit-datetime-edit-hour-field{padding-block:0}::-webkit-datetime-edit-minute-field{padding-block:0}::-webkit-datetime-edit-second-field{padding-block:0}::-webkit-datetime-edit-millisecond-field{padding-block:0}::-webkit-datetime-edit-meridiem-field{padding-block:0}::-webkit-calendar-picker-indicator{line-height:1}:-moz-ui-invalid{box-shadow:none}button,input:where([type=button],[type=reset],[type=submit]){appearance:button}::file-selector-button{appearance:button}::-webkit-inner-spin-button{height:auto}::-webkit-outer-spin-button{height:auto}[hidden]:where(:not([hidden=until-found])){display:none!important}*{border-color:var(--border);outline-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){*{outline-color:color-mix(in oklab,var(--ring)50%,transparent)}}body{background-color:var(--background);color:var(--foreground)}}@layer components;@layer 
utilities{.\@container\/card-header{container:card-header/inline-size}.pointer-events-none{pointer-events:none}.collapse{visibility:collapse}.visible{visibility:visible}.sr-only{clip:rect(0,0,0,0);white-space:nowrap;border-width:0;width:1px;height:1px;margin:-1px;padding:0;position:absolute;overflow:hidden}.absolute{position:absolute}.fixed{position:fixed}.relative{position:relative}.static{position:static}.inset-0{inset:calc(var(--spacing)*0)}.inset-2{inset:calc(var(--spacing)*2)}.inset-y-0{inset-block:calc(var(--spacing)*0)}.top-1{top:calc(var(--spacing)*1)}.top-2{top:calc(var(--spacing)*2)}.top-4{top:calc(var(--spacing)*4)}.-right-2{right:calc(var(--spacing)*-2)}.right-0{right:calc(var(--spacing)*0)}.right-1{right:calc(var(--spacing)*1)}.right-2{right:calc(var(--spacing)*2)}.right-4{right:calc(var(--spacing)*4)}.bottom-0{bottom:calc(var(--spacing)*0)}.bottom-24{bottom:calc(var(--spacing)*24)}.-left-2{left:calc(var(--spacing)*-2)}.left-0{left:calc(var(--spacing)*0)}.left-1\/2{left:50%}.left-2{left:calc(var(--spacing)*2)}.z-10{z-index:10}.z-20{z-index:20}.z-50{z-index:50}.col-start-2{grid-column-start:2}.row-span-2{grid-row:span 2/span 2}.row-start-1{grid-row-start:1}.container{width:100%}@media (min-width:40rem){.container{max-width:40rem}}@media (min-width:48rem){.container{max-width:48rem}}@media (min-width:64rem){.container{max-width:64rem}}@media (min-width:80rem){.container{max-width:80rem}}@media (min-width:96rem){.container{max-width:96rem}}.container\!{width:100%!important}@media (min-width:40rem){.container\!{max-width:40rem!important}}@media (min-width:48rem){.container\!{max-width:48rem!important}}@media (min-width:64rem){.container\!{max-width:64rem!important}}@media (min-width:80rem){.container\!{max-width:80rem!important}}@media 
(min-width:96rem){.container\!{max-width:96rem!important}}.-mx-1{margin-inline:calc(var(--spacing)*-1)}.mx-4{margin-inline:calc(var(--spacing)*4)}.mx-auto{margin-inline:auto}.my-1{margin-block:calc(var(--spacing)*1)}.my-2{margin-block:calc(var(--spacing)*2)}.my-3{margin-block:calc(var(--spacing)*3)}.my-4{margin-block:calc(var(--spacing)*4)}.mt-0{margin-top:calc(var(--spacing)*0)}.mt-0\.5{margin-top:calc(var(--spacing)*.5)}.mt-1{margin-top:calc(var(--spacing)*1)}.mt-2{margin-top:calc(var(--spacing)*2)}.mt-3{margin-top:calc(var(--spacing)*3)}.mt-4{margin-top:calc(var(--spacing)*4)}.mt-12{margin-top:calc(var(--spacing)*12)}.mr-1{margin-right:calc(var(--spacing)*1)}.mr-2{margin-right:calc(var(--spacing)*2)}.mb-1{margin-bottom:calc(var(--spacing)*1)}.mb-2{margin-bottom:calc(var(--spacing)*2)}.mb-3{margin-bottom:calc(var(--spacing)*3)}.mb-4{margin-bottom:calc(var(--spacing)*4)}.mb-6{margin-bottom:calc(var(--spacing)*6)}.mb-8{margin-bottom:calc(var(--spacing)*8)}.ml-1{margin-left:calc(var(--spacing)*1)}.ml-1\.5{margin-left:calc(var(--spacing)*1.5)}.ml-2{margin-left:calc(var(--spacing)*2)}.ml-3{margin-left:calc(var(--spacing)*3)}.ml-4{margin-left:calc(var(--spacing)*4)}.ml-5{margin-left:calc(var(--spacing)*5)}.ml-auto{margin-left:auto}.line-clamp-2{-webkit-line-clamp:2;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.line-clamp-3{-webkit-line-clamp:3;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}.block{display:block}.contents{display:contents}.flex{display:flex}.grid{display:grid}.hidden{display:none}.inline{display:inline}.inline-block{display:inline-block}.inline-flex{display:inline-flex}.table{display:table}.field-sizing-content{field-sizing:content}.size-2{width:calc(var(--spacing)*2);height:calc(var(--spacing)*2)}.size-3\.5{width:calc(var(--spacing)*3.5);height:calc(var(--spacing)*3.5)}.size-4{width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.size-9{width:calc(var(--spacing)*9);height:calc(var(--spacing)*9)}.\!h-2{height:calc
(var(--spacing)*2)!important}.h-0{height:calc(var(--spacing)*0)}.h-0\.5{height:calc(var(--spacing)*.5)}.h-1{height:calc(var(--spacing)*1)}.h-2{height:calc(var(--spacing)*2)}.h-2\.5{height:calc(var(--spacing)*2.5)}.h-3{height:calc(var(--spacing)*3)}.h-3\.5{height:calc(var(--spacing)*3.5)}.h-4{height:calc(var(--spacing)*4)}.h-5{height:calc(var(--spacing)*5)}.h-6{height:calc(var(--spacing)*6)}.h-7{height:calc(var(--spacing)*7)}.h-8{height:calc(var(--spacing)*8)}.h-9{height:calc(var(--spacing)*9)}.h-10{height:calc(var(--spacing)*10)}.h-12{height:calc(var(--spacing)*12)}.h-14{height:calc(var(--spacing)*14)}.h-16{height:calc(var(--spacing)*16)}.h-32{height:calc(var(--spacing)*32)}.h-96{height:calc(var(--spacing)*96)}.h-\[1\.2rem\]{height:1.2rem}.h-\[1px\]{height:1px}.h-\[500px\]{height:500px}.h-\[calc\(100vh-3\.5rem\)\]{height:calc(100vh - 3.5rem)}.h-\[calc\(100vh-3\.7rem\)\]{height:calc(100vh - 3.7rem)}.h-\[var\(--radix-select-trigger-height\)\]{height:var(--radix-select-trigger-height)}.h-full{height:100%}.h-px{height:1px}.h-screen{height:100vh}.max-h-\(--radix-dropdown-menu-content-available-height\){max-height:var(--radix-dropdown-menu-content-available-height)}.max-h-\(--radix-select-content-available-height\){max-height:var(--radix-select-content-available-height)}.max-h-20{max-height:calc(var(--spacing)*20)}.max-h-32{max-height:calc(var(--spacing)*32)}.max-h-40{max-height:calc(var(--spacing)*40)}.max-h-48{max-height:calc(var(--spacing)*48)}.max-h-60{max-height:calc(var(--spacing)*60)}.max-h-64{max-height:calc(var(--spacing)*64)}.max-h-\[80vh\]{max-height:80vh}.max-h-\[85vh\]{max-height:85vh}.max-h-\[90vh\]{max-height:90vh}.max-h-\[200px\]{max-height:200px}.max-h-none{max-height:none}.max-h-screen{max-height:100vh}.\!min-h-0{min-height:calc(var(--spacing)*0)!important}.min-h-0{min-height:calc(var(--spacing)*0)}.min-h-16{min-height:calc(var(--spacing)*16)}.min-h-\[36px\]{min-height:36px}.min-h-\[40px\]{min-height:40px}.min-h-\[50vh\]{min-height:50vh}.min-h-\[400px\]{
min-height:400px}.min-h-screen{min-height:100vh}.\!w-2{width:calc(var(--spacing)*2)!important}.w-1{width:calc(var(--spacing)*1)}.w-2{width:calc(var(--spacing)*2)}.w-2\.5{width:calc(var(--spacing)*2.5)}.w-3{width:calc(var(--spacing)*3)}.w-3\.5{width:calc(var(--spacing)*3.5)}.w-4{width:calc(var(--spacing)*4)}.w-5{width:calc(var(--spacing)*5)}.w-6{width:calc(var(--spacing)*6)}.w-8{width:calc(var(--spacing)*8)}.w-9{width:calc(var(--spacing)*9)}.w-10{width:calc(var(--spacing)*10)}.w-12{width:calc(var(--spacing)*12)}.w-16{width:calc(var(--spacing)*16)}.w-56{width:calc(var(--spacing)*56)}.w-64{width:calc(var(--spacing)*64)}.w-80{width:calc(var(--spacing)*80)}.w-96{width:calc(var(--spacing)*96)}.w-\[1\.2rem\]{width:1.2rem}.w-\[1px\]{width:1px}.w-\[200px\]{width:200px}.w-\[600px\]{width:600px}.w-\[800px\]{width:800px}.w-fit{width:fit-content}.w-full{width:100%}.max-w-2xl{max-width:var(--container-2xl)}.max-w-3xl{max-width:var(--container-3xl)}.max-w-4xl{max-width:var(--container-4xl)}.max-w-7xl{max-width:var(--container-7xl)}.max-w-\[80\%\]{max-width:80%}.max-w-\[90vw\]{max-width:90vw}.max-w-full{max-width:100%}.max-w-lg{max-width:var(--container-lg)}.max-w-md{max-width:var(--container-md)}.max-w-none{max-width:none}.\!min-w-0{min-width:calc(var(--spacing)*0)!important}.min-w-0{min-width:calc(var(--spacing)*0)}.min-w-\[8rem\]{min-width:8rem}.min-w-\[300px\]{min-width:300px}.min-w-\[400px\]{min-width:400px}.min-w-\[var\(--radix-select-trigger-width\)\]{min-width:var(--radix-select-trigger-width)}.min-w-full{min-width:100%}.flex-1{flex:1}.flex-shrink-0,.shrink-0{flex-shrink:0}.origin-\(--radix-dropdown-menu-content-transform-origin\){transform-origin:var(--radix-dropdown-menu-content-transform-origin)}.origin-\(--radix-select-content-transform-origin\){transform-origin:var(--radix-select-content-transform-origin)}.-translate-x-1\/2{--tw-translate-x: -50% 
;translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-0{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.translate-x-4{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.scale-0{--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-75{--tw-scale-x:75%;--tw-scale-y:75%;--tw-scale-z:75%;scale:var(--tw-scale-x)var(--tw-scale-y)}.scale-100{--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.rotate-0{rotate:none}.rotate-90{rotate:90deg}.transform{transform:var(--tw-rotate-x,)var(--tw-rotate-y,)var(--tw-rotate-z,)var(--tw-skew-x,)var(--tw-skew-y,)}.animate-bounce{animation:var(--animate-bounce)}.animate-in{animation:enter var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.animate-pulse{animation:var(--animate-pulse)}.animate-spin{animation:var(--animate-spin)}.cursor-col-resize{cursor:col-resize}.cursor-default{cursor:default}.cursor-pointer{cursor:pointer}.touch-none{touch-action:none}.resize{resize:both}.resize-none{resize:none}.scroll-my-1{scroll-margin-block:calc(var(--spacing)*1)}.list-inside{list-style-position:inside}.list-decimal{list-style-type:decimal}.list-disc{list-style-type:disc}.list-none{list-style-type:none}.auto-rows-min{grid-auto-rows:min-content}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-\[auto_auto_1fr_auto\]{grid-template-columns:auto auto 1fr auto}.grid-rows-\[auto_auto\]{grid-template-rows:auto 
auto}.flex-col{flex-direction:column}.flex-row-reverse{flex-direction:row-reverse}.flex-wrap{flex-wrap:wrap}.items-center{align-items:center}.items-end{align-items:flex-end}.items-start{align-items:flex-start}.items-stretch{align-items:stretch}.justify-between{justify-content:space-between}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}.gap-0{gap:calc(var(--spacing)*0)}.gap-1{gap:calc(var(--spacing)*1)}.gap-1\.5{gap:calc(var(--spacing)*1.5)}.gap-2{gap:calc(var(--spacing)*2)}.gap-3{gap:calc(var(--spacing)*3)}.gap-4{gap:calc(var(--spacing)*4)}.gap-6{gap:calc(var(--spacing)*6)}:where(.space-y-0\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-1\.5>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*1.5)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*1.5)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-2>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*2)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-3>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*3)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*3)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-y-4>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*4)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*4)*calc(1 - 
var(--tw-space-y-reverse)))}:where(.space-y-6>:not(:last-child)){--tw-space-y-reverse:0;margin-block-start:calc(calc(var(--spacing)*6)*var(--tw-space-y-reverse));margin-block-end:calc(calc(var(--spacing)*6)*calc(1 - var(--tw-space-y-reverse)))}:where(.space-x-1>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*1)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*1)*calc(1 - var(--tw-space-x-reverse)))}:where(.space-x-2>:not(:last-child)){--tw-space-x-reverse:0;margin-inline-start:calc(calc(var(--spacing)*2)*var(--tw-space-x-reverse));margin-inline-end:calc(calc(var(--spacing)*2)*calc(1 - var(--tw-space-x-reverse)))}.self-start{align-self:flex-start}.justify-self-end{justify-self:flex-end}.truncate{text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-x-auto{overflow-x:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-auto{overflow-y:auto}.\!rounded-full{border-radius:3.40282e38px!important}.rounded{border-radius:.25rem}.rounded-\[4px\]{border-radius:4px}.rounded-\[inherit\]{border-radius:inherit}.rounded-full{border-radius:3.40282e38px}.rounded-lg{border-radius:var(--radius)}.rounded-md{border-radius:calc(var(--radius) - 2px)}.rounded-none{border-radius:0}.rounded-sm{border-radius:calc(var(--radius) - 
4px)}.rounded-l-none{border-top-left-radius:0;border-bottom-left-radius:0}.rounded-r-none{border-top-right-radius:0;border-bottom-right-radius:0}.\!border{border-style:var(--tw-border-style)!important;border-width:1px!important}.border{border-style:var(--tw-border-style);border-width:1px}.border-2{border-style:var(--tw-border-style);border-width:2px}.border-t{border-top-style:var(--tw-border-style);border-top-width:1px}.border-r{border-right-style:var(--tw-border-style);border-right-width:1px}.border-b{border-bottom-style:var(--tw-border-style);border-bottom-width:1px}.border-l{border-left-style:var(--tw-border-style);border-left-width:1px}.border-l-0{border-left-style:var(--tw-border-style);border-left-width:0}.border-l-2{border-left-style:var(--tw-border-style);border-left-width:2px}.border-l-4{border-left-style:var(--tw-border-style);border-left-width:4px}.border-dashed{--tw-border-style:dashed;border-style:dashed}.\!border-gray-600{border-color:var(--color-gray-600)!important}.border-\[\#643FB2\]{border-color:#643fb2}.border-\[\#643FB2\]\/20{border-color:#643fb233}.border-\[\#643FB2\]\/30{border-color:#643fb24d}.border-amber-200{border-color:var(--color-amber-200)}.border-blue-200{border-color:var(--color-blue-200)}.border-blue-300{border-color:var(--color-blue-300)}.border-blue-400{border-color:var(--color-blue-400)}.border-blue-500{border-color:var(--color-blue-500)}.border-border,.border-border\/50{border-color:var(--border)}@supports (color:color-mix(in lab,red,red)){.border-border\/50{border-color:color-mix(in oklab,var(--border)50%,transparent)}}.border-current\/30{border-color:currentColor}@supports (color:color-mix(in lab,red,red)){.border-current\/30{border-color:color-mix(in oklab,currentcolor 30%,transparent)}}.border-destructive\/30{border-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.border-destructive\/30{border-color:color-mix(in 
oklab,var(--destructive)30%,transparent)}}.border-foreground\/5{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/5{border-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.border-foreground\/10{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/10{border-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.border-foreground\/20{border-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.border-foreground\/20{border-color:color-mix(in oklab,var(--foreground)20%,transparent)}}.border-gray-200{border-color:var(--color-gray-200)}.border-gray-300{border-color:var(--color-gray-300)}.border-gray-400{border-color:var(--color-gray-400)}.border-gray-500\/20{border-color:#6a728233}@supports (color:color-mix(in lab,red,red)){.border-gray-500\/20{border-color:color-mix(in oklab,var(--color-gray-500)20%,transparent)}}.border-green-200{border-color:var(--color-green-200)}.border-green-500{border-color:var(--color-green-500)}.border-green-500\/20{border-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.border-green-500\/20{border-color:color-mix(in oklab,var(--color-green-500)20%,transparent)}}.border-green-500\/40{border-color:#00c75866}@supports (color:color-mix(in lab,red,red)){.border-green-500\/40{border-color:color-mix(in oklab,var(--color-green-500)40%,transparent)}}.border-input{border-color:var(--input)}.border-muted{border-color:var(--muted)}.border-orange-200{border-color:var(--color-orange-200)}.border-orange-500{border-color:var(--color-orange-500)}.border-orange-500\/20{border-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.border-orange-500\/20{border-color:color-mix(in oklab,var(--color-orange-500)20%,transparent)}}.border-primary,.border-primary\/20{border-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.border-primary\/20{border-color:color-mix(in 
oklab,var(--primary)20%,transparent)}}.border-red-200{border-color:var(--color-red-200)}.border-red-500{border-color:var(--color-red-500)}.border-red-500\/20{border-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.border-red-500\/20{border-color:color-mix(in oklab,var(--color-red-500)20%,transparent)}}.border-transparent{border-color:#0000}.border-yellow-200{border-color:var(--color-yellow-200)}.border-t-transparent{border-top-color:#0000}.border-l-transparent{border-left-color:#0000}.bg-\[\#643FB2\]{background-color:#643fb2}.bg-\[\#643FB2\]\/10{background-color:#643fb21a}.bg-accent\/10{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.bg-accent\/10{background-color:color-mix(in oklab,var(--accent)10%,transparent)}}.bg-amber-50{background-color:var(--color-amber-50)}.bg-background{background-color:var(--background)}.bg-black{background-color:var(--color-black)}.bg-black\/50{background-color:#00000080}@supports (color:color-mix(in lab,red,red)){.bg-black\/50{background-color:color-mix(in oklab,var(--color-black)50%,transparent)}}.bg-black\/60{background-color:#0009}@supports (color:color-mix(in lab,red,red)){.bg-black\/60{background-color:color-mix(in oklab,var(--color-black)60%,transparent)}}.bg-blue-50{background-color:var(--color-blue-50)}.bg-blue-50\/80{background-color:#eff6ffcc}@supports (color:color-mix(in lab,red,red)){.bg-blue-50\/80{background-color:color-mix(in oklab,var(--color-blue-50)80%,transparent)}}.bg-blue-100{background-color:var(--color-blue-100)}.bg-blue-500{background-color:var(--color-blue-500)}.bg-blue-500\/5{background-color:#3080ff0d}@supports (color:color-mix(in lab,red,red)){.bg-blue-500\/5{background-color:color-mix(in 
oklab,var(--color-blue-500)5%,transparent)}}.bg-blue-600{background-color:var(--color-blue-600)}.bg-border{background-color:var(--border)}.bg-card{background-color:var(--card)}.bg-current{background-color:currentColor}.bg-destructive,.bg-destructive\/10{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.bg-destructive\/10{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.bg-foreground\/5{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/5{background-color:color-mix(in oklab,var(--foreground)5%,transparent)}}.bg-foreground\/10{background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.bg-foreground\/10{background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.bg-gray-50{background-color:var(--color-gray-50)}.bg-gray-100{background-color:var(--color-gray-100)}.bg-gray-200{background-color:var(--color-gray-200)}.bg-gray-400{background-color:var(--color-gray-400)}.bg-gray-500\/10{background-color:#6a72821a}@supports (color:color-mix(in lab,red,red)){.bg-gray-500\/10{background-color:color-mix(in oklab,var(--color-gray-500)10%,transparent)}}.bg-gray-900\/90{background-color:#101828e6}@supports (color:color-mix(in lab,red,red)){.bg-gray-900\/90{background-color:color-mix(in oklab,var(--color-gray-900)90%,transparent)}}.bg-green-50{background-color:var(--color-green-50)}.bg-green-100{background-color:var(--color-green-100)}.bg-green-500{background-color:var(--color-green-500)}.bg-green-500\/5{background-color:#00c7580d}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/5{background-color:color-mix(in oklab,var(--color-green-500)5%,transparent)}}.bg-green-500\/10{background-color:#00c7581a}@supports (color:color-mix(in lab,red,red)){.bg-green-500\/10{background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.bg-muted,.bg-muted\/30{background-color:var(--muted)}@supports (color:color-mix(in 
lab,red,red)){.bg-muted\/30{background-color:color-mix(in oklab,var(--muted)30%,transparent)}}.bg-muted\/50{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.bg-muted\/50{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.bg-orange-50{background-color:var(--color-orange-50)}.bg-orange-100{background-color:var(--color-orange-100)}.bg-orange-500{background-color:var(--color-orange-500)}.bg-orange-500\/10{background-color:#fe6e001a}@supports (color:color-mix(in lab,red,red)){.bg-orange-500\/10{background-color:color-mix(in oklab,var(--color-orange-500)10%,transparent)}}.bg-popover{background-color:var(--popover)}.bg-primary,.bg-primary\/10{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/10{background-color:color-mix(in oklab,var(--primary)10%,transparent)}}.bg-primary\/30{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/30{background-color:color-mix(in oklab,var(--primary)30%,transparent)}}.bg-primary\/40{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.bg-primary\/40{background-color:color-mix(in oklab,var(--primary)40%,transparent)}}.bg-purple-50{background-color:var(--color-purple-50)}.bg-purple-100{background-color:var(--color-purple-100)}.bg-red-50{background-color:var(--color-red-50)}.bg-red-100{background-color:var(--color-red-100)}.bg-red-500{background-color:var(--color-red-500)}.bg-red-500\/10{background-color:#fb2c361a}@supports (color:color-mix(in lab,red,red)){.bg-red-500\/10{background-color:color-mix(in oklab,var(--color-red-500)10%,transparent)}}.bg-secondary{background-color:var(--secondary)}.bg-transparent{background-color:#0000}.bg-white{background-color:var(--color-white)}.bg-white\/90{background-color:#ffffffe6}@supports (color:color-mix(in lab,red,red)){.bg-white\/90{background-color:color-mix(in 
oklab,var(--color-white)90%,transparent)}}.bg-yellow-100{background-color:var(--color-yellow-100)}.fill-current{fill:currentColor}.object-cover{object-fit:cover}.p-0{padding:calc(var(--spacing)*0)}.p-1{padding:calc(var(--spacing)*1)}.p-1\.5{padding:calc(var(--spacing)*1.5)}.p-2{padding:calc(var(--spacing)*2)}.p-3{padding:calc(var(--spacing)*3)}.p-4{padding:calc(var(--spacing)*4)}.p-6{padding:calc(var(--spacing)*6)}.p-8{padding:calc(var(--spacing)*8)}.p-\[1px\]{padding:1px}.px-1{padding-inline:calc(var(--spacing)*1)}.px-1\.5{padding-inline:calc(var(--spacing)*1.5)}.px-2{padding-inline:calc(var(--spacing)*2)}.px-2\.5{padding-inline:calc(var(--spacing)*2.5)}.px-3{padding-inline:calc(var(--spacing)*3)}.px-4{padding-inline:calc(var(--spacing)*4)}.px-6{padding-inline:calc(var(--spacing)*6)}.px-8{padding-inline:calc(var(--spacing)*8)}.py-0{padding-block:calc(var(--spacing)*0)}.py-0\.5{padding-block:calc(var(--spacing)*.5)}.py-1{padding-block:calc(var(--spacing)*1)}.py-1\.5{padding-block:calc(var(--spacing)*1.5)}.py-2{padding-block:calc(var(--spacing)*2)}.py-2\.5{padding-block:calc(var(--spacing)*2.5)}.py-3{padding-block:calc(var(--spacing)*3)}.py-4{padding-block:calc(var(--spacing)*4)}.py-6{padding-block:calc(var(--spacing)*6)}.py-8{padding-block:calc(var(--spacing)*8)}.pt-0{padding-top:calc(var(--spacing)*0)}.pt-1{padding-top:calc(var(--spacing)*1)}.pt-2{padding-top:calc(var(--spacing)*2)}.pt-3{padding-top:calc(var(--spacing)*3)}.pt-4{padding-top:calc(var(--spacing)*4)}.pt-6{padding-top:calc(var(--spacing)*6)}.pt-8{padding-top:calc(var(--spacing)*8)}.pr-2{padding-right:calc(var(--spacing)*2)}.pr-4{padding-right:calc(var(--spacing)*4)}.pr-8{padding-right:calc(var(--spacing)*8)}.pb-2{padding-bottom:calc(var(--spacing)*2)}.pb-3{padding-bottom:calc(var(--spacing)*3)}.pb-4{padding-bottom:calc(var(--spacing)*4)}.pb-6{padding-bottom:calc(var(--spacing)*6)}.pl-2{padding-left:calc(var(--spacing)*2)}.pl-3{padding-left:calc(var(--spacing)*3)}.pl-4{padding-left:calc(var(--spacing)*4)
}.pl-8{padding-left:calc(var(--spacing)*8)}.text-center{text-align:center}.text-left{text-align:left}.font-mono{font-family:var(--font-mono)}.text-2xl{font-size:var(--text-2xl);line-height:var(--tw-leading,var(--text-2xl--line-height))}.text-base{font-size:var(--text-base);line-height:var(--tw-leading,var(--text-base--line-height))}.text-lg{font-size:var(--text-lg);line-height:var(--tw-leading,var(--text-lg--line-height))}.text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.text-xl{font-size:var(--text-xl);line-height:var(--tw-leading,var(--text-xl--line-height))}.text-xs{font-size:var(--text-xs);line-height:var(--tw-leading,var(--text-xs--line-height))}.text-\[10px\]{font-size:10px}.leading-none{--tw-leading:1;line-height:1}.leading-relaxed{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.leading-tight{--tw-leading:var(--leading-tight);line-height:var(--leading-tight)}.font-bold{--tw-font-weight:var(--font-weight-bold);font-weight:var(--font-weight-bold)}.font-medium{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.font-semibold{--tw-font-weight:var(--font-weight-semibold);font-weight:var(--font-weight-semibold)}.tracking-tight{--tw-tracking:var(--tracking-tight);letter-spacing:var(--tracking-tight)}.tracking-widest{--tw-tracking:var(--tracking-widest);letter-spacing:var(--tracking-widest)}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.whitespace-nowrap{white-space:nowrap}.whitespace-pre-wrap{white-space:pre-wrap}.text-\[\#643FB2\]{color:#643fb2}.text-amber-500{color:var(--color-amber-500)}.text-amber-600{color:var(--color-amber-600)}.text-amber-700{color:var(--color-amber-700)}.text-amber-800{color:var(--color-amber-800)}.text-amber-900{color:var(--color-amber-900)}.text-blue-500{color:var(--color-blue-500)}.text-blue-600{color:var(--color-blue-600)}.text-blue-700{color:var(--color-blue-700)}.text-blue-800{color:var(--color-blue-800)}.text-blue-9
00{color:var(--color-blue-900)}.text-card-foreground{color:var(--card-foreground)}.text-current{color:currentColor}.text-destructive,.text-destructive\/70{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/70{color:color-mix(in oklab,var(--destructive)70%,transparent)}}.text-destructive\/90{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.text-destructive\/90{color:color-mix(in oklab,var(--destructive)90%,transparent)}}.text-foreground{color:var(--foreground)}.text-gray-300{color:var(--color-gray-300)}.text-gray-400{color:var(--color-gray-400)}.text-gray-500{color:var(--color-gray-500)}.text-gray-600{color:var(--color-gray-600)}.text-gray-700{color:var(--color-gray-700)}.text-gray-900{color:var(--color-gray-900)}.text-green-500{color:var(--color-green-500)}.text-green-600{color:var(--color-green-600)}.text-green-700{color:var(--color-green-700)}.text-green-800{color:var(--color-green-800)}.text-green-900{color:var(--color-green-900)}.text-muted-foreground,.text-muted-foreground\/80{color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.text-muted-foreground\/80{color:color-mix(in 
oklab,var(--muted-foreground)80%,transparent)}}.text-orange-500{color:var(--color-orange-500)}.text-orange-600{color:var(--color-orange-600)}.text-orange-800{color:var(--color-orange-800)}.text-popover-foreground{color:var(--popover-foreground)}.text-primary{color:var(--primary)}.text-primary-foreground{color:var(--primary-foreground)}.text-purple-500{color:var(--color-purple-500)}.text-purple-600{color:var(--color-purple-600)}.text-red-500{color:var(--color-red-500)}.text-red-600{color:var(--color-red-600)}.text-red-700{color:var(--color-red-700)}.text-red-800{color:var(--color-red-800)}.text-secondary-foreground{color:var(--secondary-foreground)}.text-white{color:var(--color-white)}.text-yellow-600{color:var(--color-yellow-600)}.text-yellow-700{color:var(--color-yellow-700)}.lowercase{text-transform:lowercase}.uppercase{text-transform:uppercase}.italic{font-style:italic}.underline-offset-4{text-underline-offset:4px}.opacity-0{opacity:0}.opacity-50{opacity:.5}.opacity-60{opacity:.6}.opacity-70{opacity:.7}.opacity-80{opacity:.8}.opacity-100{opacity:1}.shadow{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px var(--tw-shadow-color,#0000001a),0 4px 6px -4px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-md{--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px 
var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-xs{--tw-shadow:0 1px 2px 0 var(--tw-shadow-color,#0000000d);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-0{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(0px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.ring-2{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.shadow-\[\#643FB2\]\/20{--tw-shadow-color:#643fb233}@supports (color:color-mix(in lab,red,red)){.shadow-\[\#643FB2\]\/20{--tw-shadow-color:color-mix(in oklab,oklab(47.4316% .069152 -.159147/.2) var(--tw-shadow-alpha),transparent)}}.shadow-green-500\/20{--tw-shadow-color:#00c75833}@supports (color:color-mix(in lab,red,red)){.shadow-green-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-green-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-orange-500\/20{--tw-shadow-color:#fe6e0033}@supports (color:color-mix(in lab,red,red)){.shadow-orange-500\/20{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--color-orange-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-primary\/25{--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.shadow-primary\/25{--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)25%,transparent)var(--tw-shadow-alpha),transparent)}}.shadow-red-500\/20{--tw-shadow-color:#fb2c3633}@supports (color:color-mix(in lab,red,red)){.shadow-red-500\/20{--tw-shadow-color:color-mix(in 
oklab,color-mix(in oklab,var(--color-red-500)20%,transparent)var(--tw-shadow-alpha),transparent)}}.ring-blue-500{--tw-ring-color:var(--color-blue-500)}.ring-offset-2{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.ring-offset-background{--tw-ring-offset-color:var(--background)}.outline-hidden{--tw-outline-style:none;outline-style:none}@media (forced-colors:active){.outline-hidden{outline-offset:2px;outline:2px solid #0000}}.outline{outline-style:var(--tw-outline-style);outline-width:1px}.drop-shadow-lg{--tw-drop-shadow-size:drop-shadow(0 4px 4px var(--tw-drop-shadow-color,#00000026));--tw-drop-shadow:drop-shadow(var(--drop-shadow-lg));filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.filter{filter:var(--tw-blur,)var(--tw-brightness,)var(--tw-contrast,)var(--tw-grayscale,)var(--tw-hue-rotate,)var(--tw-invert,)var(--tw-saturate,)var(--tw-sepia,)var(--tw-drop-shadow,)}.backdrop-blur-sm{--tw-backdrop-blur:blur(var(--blur-sm));-webkit-backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,);backdrop-filter:var(--tw-backdrop-blur,)var(--tw-backdrop-brightness,)var(--tw-backdrop-contrast,)var(--tw-backdrop-grayscale,)var(--tw-backdrop-hue-rotate,)var(--tw-backdrop-invert,)var(--tw-backdrop-opacity,)var(--tw-backdrop-saturate,)var(--tw-backdrop-sepia,)}.transition{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to,opacity,box-shadow,transform,translate,scale,rotate,filter,-webkit-backdrop-filter,backdrop-filter,display,visibility,content-visibility,overla
y,pointer-events;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-\[color\,box-shadow\]{transition-property:color,box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-all{transition-property:all;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-colors{transition-property:color,background-color,border-color,outline-color,text-decoration-color,fill,stroke,--tw-gradient-from,--tw-gradient-via,--tw-gradient-to;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-opacity{transition-property:opacity;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-shadow{transition-property:box-shadow;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-transform{transition-property:transform,translate,scale,rotate;transition-timing-function:var(--tw-ease,var(--default-transition-timing-function));transition-duration:var(--tw-duration,var(--default-transition-duration))}.transition-none{transition-property:none}.duration-200{--tw-duration:.2s;transition-duration:.2s}.duration-300{--tw-duration:.3s;transition-duration:.3s}.ease-in-out{--tw-ease:var(--ease-in-out);transition-timing-function:var(--ease-in-out)}.ease-out{--tw-ease:var(--ease-out);transition-timing-function:var(--ease-out)}.outline-none{--tw-outline-style:none;outline-style:none}.select-none{-webkit-user-select:none;
user-select:none}.\[animation-delay\:-0\.3s\]{animation-delay:-.3s}.\[animation-delay\:-0\.15s\]{animation-delay:-.15s}.fade-in{--tw-enter-opacity:0}.paused{animation-play-state:paused}.running{animation-play-state:running}.slide-in-from-bottom-2{--tw-enter-translate-y:calc(2*var(--spacing))}.group-open\:rotate-90:is(:where(.group):is([open],:popover-open,:open) *){rotate:90deg}.group-open\:rotate-180:is(:where(.group):is([open],:popover-open,:open) *){rotate:180deg}@media (hover:hover){.group-hover\:bg-primary:is(:where(.group):hover *){background-color:var(--primary)}.group-hover\:opacity-100:is(:where(.group):hover *){opacity:1}.group-hover\:shadow-md:is(:where(.group):hover *){--tw-shadow:0 4px 6px -1px var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.group-hover\:shadow-primary\/20:is(:where(.group):hover *){--tw-shadow-color:color-mix(in oklab,color-mix(in oklab,var(--primary)20%,transparent)var(--tw-shadow-alpha),transparent)}}}.group-data-\[disabled\=true\]\:pointer-events-none:is(:where(.group)[data-disabled=true] *){pointer-events:none}.group-data-\[disabled\=true\]\:opacity-50:is(:where(.group)[data-disabled=true] *){opacity:.5}.peer-disabled\:cursor-not-allowed:is(:where(.peer):disabled~*){cursor:not-allowed}.peer-disabled\:opacity-50:is(:where(.peer):disabled~*){opacity:.5}.selection\:bg-primary ::selection{background-color:var(--primary)}.selection\:bg-primary::selection{background-color:var(--primary)}.selection\:text-primary-foreground 
::selection{color:var(--primary-foreground)}.selection\:text-primary-foreground::selection{color:var(--primary-foreground)}.file\:inline-flex::file-selector-button{display:inline-flex}.file\:h-7::file-selector-button{height:calc(var(--spacing)*7)}.file\:border-0::file-selector-button{border-style:var(--tw-border-style);border-width:0}.file\:bg-transparent::file-selector-button{background-color:#0000}.file\:text-sm::file-selector-button{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}.file\:font-medium::file-selector-button{--tw-font-weight:var(--font-weight-medium);font-weight:var(--font-weight-medium)}.file\:text-foreground::file-selector-button{color:var(--foreground)}.placeholder\:text-muted-foreground::placeholder{color:var(--muted-foreground)}.first\:mt-0:first-child{margin-top:calc(var(--spacing)*0)}.last\:border-r-0:last-child{border-right-style:var(--tw-border-style);border-right-width:0}.last\:border-b-0:last-child{border-bottom-style:var(--tw-border-style);border-bottom-width:0}@media (hover:hover){.hover\:border-gray-300:hover{border-color:var(--color-gray-300)}.hover\:border-muted-foreground\/30:hover{border-color:var(--muted-foreground)}@supports (color:color-mix(in lab,red,red)){.hover\:border-muted-foreground\/30:hover{border-color:color-mix(in oklab,var(--muted-foreground)30%,transparent)}}.hover\:bg-accent:hover,.hover\:bg-accent\/50:hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-accent\/50:hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.hover\:bg-amber-100:hover{background-color:var(--color-amber-100)}.hover\:bg-blue-700:hover{background-color:var(--color-blue-700)}.hover\:bg-destructive\/80:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/80:hover{background-color:color-mix(in 
oklab,var(--destructive)80%,transparent)}}.hover\:bg-destructive\/90:hover{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-destructive\/90:hover{background-color:color-mix(in oklab,var(--destructive)90%,transparent)}}.hover\:bg-muted:hover,.hover\:bg-muted\/50:hover{background-color:var(--muted)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-muted\/50:hover{background-color:color-mix(in oklab,var(--muted)50%,transparent)}}.hover\:bg-primary\/20:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/20:hover{background-color:color-mix(in oklab,var(--primary)20%,transparent)}}.hover\:bg-primary\/80:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/80:hover{background-color:color-mix(in oklab,var(--primary)80%,transparent)}}.hover\:bg-primary\/90:hover{background-color:var(--primary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-primary\/90:hover{background-color:color-mix(in oklab,var(--primary)90%,transparent)}}.hover\:bg-red-50:hover{background-color:var(--color-red-50)}.hover\:bg-secondary\/80:hover{background-color:var(--secondary)}@supports (color:color-mix(in lab,red,red)){.hover\:bg-secondary\/80:hover{background-color:color-mix(in oklab,var(--secondary)80%,transparent)}}.hover\:bg-white:hover{background-color:var(--color-white)}.hover\:text-accent-foreground:hover{color:var(--accent-foreground)}.hover\:text-destructive\/80:hover{color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.hover\:text-destructive\/80:hover{color:color-mix(in oklab,var(--destructive)80%,transparent)}}.hover\:text-foreground:hover{color:var(--foreground)}.hover\:text-red-600:hover{color:var(--color-red-600)}.hover\:underline:hover{text-decoration-line:underline}.hover\:opacity-70:hover{opacity:.7}.hover\:opacity-100:hover{opacity:1}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px 
var(--tw-shadow-color,#0000001a),0 2px 4px -2px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}}.focus\:bg-accent:focus{background-color:var(--accent)}.focus\:text-accent-foreground:focus{color:var(--accent-foreground)}.focus\:ring-2:focus{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus\:ring-ring:focus{--tw-ring-color:var(--ring)}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus\:outline-none:focus{--tw-outline-style:none;outline-style:none}.focus-visible\:border-ring:focus-visible{border-color:var(--ring)}.focus-visible\:ring-1:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(1px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-2:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(2px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-\[3px\]:focus-visible{--tw-ring-shadow:var(--tw-ring-inset,)0 0 0 calc(3px + var(--tw-ring-offset-width))var(--tw-ring-color,currentcolor);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in 
lab,red,red)){.focus-visible\:ring-destructive\/20:focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.focus-visible\:ring-ring:focus-visible,.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:var(--ring)}@supports (color:color-mix(in lab,red,red)){.focus-visible\:ring-ring\/50:focus-visible{--tw-ring-color:color-mix(in oklab,var(--ring)50%,transparent)}}.focus-visible\:ring-offset-2:focus-visible{--tw-ring-offset-width:2px;--tw-ring-offset-shadow:var(--tw-ring-inset,)0 0 0 var(--tw-ring-offset-width)var(--tw-ring-offset-color)}.focus-visible\:ring-offset-background:focus-visible{--tw-ring-offset-color:var(--background)}.focus-visible\:outline-none:focus-visible{--tw-outline-style:none;outline-style:none}.disabled\:pointer-events-none:disabled{pointer-events:none}.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}.disabled\:opacity-50:disabled{opacity:.5}.has-data-\[slot\=card-action\]\:grid-cols-\[1fr_auto\]:has([data-slot=card-action]){grid-template-columns:1fr auto}.has-\[\>svg\]\:px-2\.5:has(>svg){padding-inline:calc(var(--spacing)*2.5)}.has-\[\>svg\]\:px-3:has(>svg){padding-inline:calc(var(--spacing)*3)}.has-\[\>svg\]\:px-4:has(>svg){padding-inline:calc(var(--spacing)*4)}.aria-invalid\:border-destructive[aria-invalid=true]{border-color:var(--destructive)}.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.aria-invalid\:ring-destructive\/20[aria-invalid=true]{--tw-ring-color:color-mix(in 
oklab,var(--destructive)20%,transparent)}}.data-\[disabled\]\:pointer-events-none[data-disabled]{pointer-events:none}.data-\[disabled\]\:opacity-50[data-disabled]{opacity:.5}.data-\[inset\]\:pl-8[data-inset]{padding-left:calc(var(--spacing)*8)}.data-\[placeholder\]\:text-muted-foreground[data-placeholder]{color:var(--muted-foreground)}.data-\[side\=bottom\]\:translate-y-1[data-side=bottom]{--tw-translate-y:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=bottom\]\:slide-in-from-top-2[data-side=bottom]{--tw-enter-translate-y:calc(2*var(--spacing)*-1)}.data-\[side\=left\]\:-translate-x-1[data-side=left]{--tw-translate-x:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=left\]\:slide-in-from-right-2[data-side=left]{--tw-enter-translate-x:calc(2*var(--spacing))}.data-\[side\=right\]\:translate-x-1[data-side=right]{--tw-translate-x:calc(var(--spacing)*1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=right\]\:slide-in-from-left-2[data-side=right]{--tw-enter-translate-x:calc(2*var(--spacing)*-1)}.data-\[side\=top\]\:-translate-y-1[data-side=top]{--tw-translate-y:calc(var(--spacing)*-1);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[side\=top\]\:slide-in-from-bottom-2[data-side=top]{--tw-enter-translate-y:calc(2*var(--spacing))}.data-\[size\=default\]\:h-9[data-size=default]{height:calc(var(--spacing)*9)}.data-\[size\=sm\]\:h-8[data-size=sm]{height:calc(var(--spacing)*8)}:is(.\*\:data-\[slot\=select-value\]\:line-clamp-1>*)[data-slot=select-value]{-webkit-line-clamp:1;-webkit-box-orient:vertical;display:-webkit-box;overflow:hidden}:is(.\*\:data-\[slot\=select-value\]\:flex>*)[data-slot=select-value]{display:flex}:is(.\*\:data-\[slot\=select-value\]\:items-center>*)[data-slot=select-value]{align-items:center}:is(.\*\:data-\[slot\=select-value\]\:gap-2>*)[data-slot=select-value]{gap:calc(var(--spacing)*2)}.data-\[state\=active\]\:bg-background[data-state=active
]{background-color:var(--background)}.data-\[state\=active\]\:text-foreground[data-state=active]{color:var(--foreground)}.data-\[state\=active\]\:shadow[data-state=active]{--tw-shadow:0 1px 3px 0 var(--tw-shadow-color,#0000001a),0 1px 2px -1px var(--tw-shadow-color,#0000001a);box-shadow:var(--tw-inset-shadow),var(--tw-inset-ring-shadow),var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.data-\[state\=checked\]\:translate-x-4[data-state=checked]{--tw-translate-x:calc(var(--spacing)*4);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=checked\]\:border-primary[data-state=checked]{border-color:var(--primary)}.data-\[state\=checked\]\:bg-primary[data-state=checked]{background-color:var(--primary)}.data-\[state\=checked\]\:text-primary-foreground[data-state=checked]{color:var(--primary-foreground)}.data-\[state\=closed\]\:animate-out[data-state=closed]{animation:exit var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=closed\]\:fade-out-0[data-state=closed]{--tw-exit-opacity:0}.data-\[state\=closed\]\:zoom-out-95[data-state=closed]{--tw-exit-scale:.95}.data-\[state\=open\]\:animate-in[data-state=open]{animation:enter 
var(--tw-animation-duration,var(--tw-duration,.15s))var(--tw-ease,ease)var(--tw-animation-delay,0s)var(--tw-animation-iteration-count,1)var(--tw-animation-direction,normal)var(--tw-animation-fill-mode,none)}.data-\[state\=open\]\:bg-accent[data-state=open]{background-color:var(--accent)}.data-\[state\=open\]\:text-accent-foreground[data-state=open]{color:var(--accent-foreground)}.data-\[state\=open\]\:fade-in-0[data-state=open]{--tw-enter-opacity:0}.data-\[state\=open\]\:zoom-in-95[data-state=open]{--tw-enter-scale:.95}.data-\[state\=unchecked\]\:translate-x-0[data-state=unchecked]{--tw-translate-x:calc(var(--spacing)*0);translate:var(--tw-translate-x)var(--tw-translate-y)}.data-\[state\=unchecked\]\:bg-input[data-state=unchecked]{background-color:var(--input)}.data-\[variant\=destructive\]\:text-destructive[data-variant=destructive]{color:var(--destructive)}.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.data-\[variant\=destructive\]\:focus\:bg-destructive\/10[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)10%,transparent)}}.data-\[variant\=destructive\]\:focus\:text-destructive[data-variant=destructive]:focus{color:var(--destructive)}@media (min-width:40rem){.sm\:col-span-2{grid-column:span 2/span 2}.sm\:w-64{width:calc(var(--spacing)*64)}.sm\:max-w-lg{max-width:var(--container-lg)}.sm\:flex-none{flex:none}.sm\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.sm\:flex-row{flex-direction:row}.sm\:items-center{align-items:center}}@media (min-width:48rem){.md\:col-span-2{grid-column:span 2/span 
2}.md\:col-start-2{grid-column-start:2}.md\:inline{display:inline}.md\:max-w-2xl{max-width:var(--container-2xl)}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:gap-6{gap:calc(var(--spacing)*6)}.md\:gap-8{gap:calc(var(--spacing)*8)}.md\:text-sm{font-size:var(--text-sm);line-height:var(--tw-leading,var(--text-sm--line-height))}}@media (min-width:64rem){.lg\:col-span-3{grid-column:span 3/span 3}.lg\:max-w-4xl{max-width:var(--container-4xl)}.lg\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.lg\:flex-row{flex-direction:row}.lg\:items-center{align-items:center}.lg\:justify-between{justify-content:space-between}}@media (min-width:80rem){.xl\:col-span-2{grid-column:span 2/span 2}.xl\:col-span-4{grid-column:span 4/span 4}.xl\:max-w-5xl{max-width:var(--container-5xl)}.xl\:grid-cols-4{grid-template-columns:repeat(4,minmax(0,1fr))}}.dark\:scale-0:is(.dark *){--tw-scale-x:0%;--tw-scale-y:0%;--tw-scale-z:0%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:scale-100:is(.dark *){--tw-scale-x:100%;--tw-scale-y:100%;--tw-scale-z:100%;scale:var(--tw-scale-x)var(--tw-scale-y)}.dark\:-rotate-90:is(.dark *){rotate:-90deg}.dark\:rotate-0:is(.dark *){rotate:none}.dark\:\!border-gray-500:is(.dark *){border-color:var(--color-gray-500)!important}.dark\:\!border-gray-600:is(.dark *){border-color:var(--color-gray-600)!important}.dark\:border-\[\#8B5CF6\]:is(.dark *){border-color:#8b5cf6}.dark\:border-\[\#8B5CF6\]\/20:is(.dark *){border-color:#8b5cf633}.dark\:border-\[\#8B5CF6\]\/30:is(.dark *){border-color:#8b5cf64d}.dark\:border-amber-800:is(.dark *){border-color:var(--color-amber-800)}.dark\:border-amber-900:is(.dark *){border-color:var(--color-amber-900)}.dark\:border-blue-400:is(.dark *){border-color:var(--color-blue-400)}.dark\:border-blue-500:is(.dark *){border-color:var(--color-blue-500)}.dark\:border-blue-700:is(.dark *){border-color:var(--color-blue-700)}.dark\:border-blue-800:is(.dark 
*){border-color:var(--color-blue-800)}.dark\:border-gray-500:is(.dark *){border-color:var(--color-gray-500)}.dark\:border-gray-600:is(.dark *){border-color:var(--color-gray-600)}.dark\:border-gray-700:is(.dark *){border-color:var(--color-gray-700)}.dark\:border-green-400:is(.dark *){border-color:var(--color-green-400)}.dark\:border-green-800:is(.dark *){border-color:var(--color-green-800)}.dark\:border-input:is(.dark *){border-color:var(--input)}.dark\:border-orange-400:is(.dark *){border-color:var(--color-orange-400)}.dark\:border-orange-800:is(.dark *){border-color:var(--color-orange-800)}.dark\:border-red-400:is(.dark *){border-color:var(--color-red-400)}.dark\:border-red-800:is(.dark *){border-color:var(--color-red-800)}.dark\:\!bg-gray-800\/90:is(.dark *){background-color:#1e2939e6!important}@supports (color:color-mix(in lab,red,red)){.dark\:\!bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)!important}}.dark\:bg-\[\#8B5CF6\]:is(.dark *){background-color:#8b5cf6}.dark\:bg-\[\#8B5CF6\]\/10:is(.dark *){background-color:#8b5cf61a}.dark\:bg-amber-950\/20:is(.dark *){background-color:#46190133}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)20%,transparent)}}.dark\:bg-amber-950\/50:is(.dark *){background-color:#46190180}@supports (color:color-mix(in lab,red,red)){.dark\:bg-amber-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-amber-950)50%,transparent)}}.dark\:bg-blue-500\/10:is(.dark *){background-color:#3080ff1a}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-500)10%,transparent)}}.dark\:bg-blue-900:is(.dark *){background-color:var(--color-blue-900)}.dark\:bg-blue-900\/20:is(.dark *){background-color:#1c398e33}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-900\/20:is(.dark *){background-color:color-mix(in 
oklab,var(--color-blue-900)20%,transparent)}}.dark\:bg-blue-950\/20:is(.dark *){background-color:#16245633}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)20%,transparent)}}.dark\:bg-blue-950\/30:is(.dark *){background-color:#1624564d}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/30:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)30%,transparent)}}.dark\:bg-blue-950\/40:is(.dark *){background-color:#16245666}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/40:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)40%,transparent)}}.dark\:bg-blue-950\/50:is(.dark *){background-color:#16245680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-blue-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-blue-950)50%,transparent)}}.dark\:bg-card:is(.dark *){background-color:var(--card)}.dark\:bg-destructive\/60:is(.dark *){background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-destructive\/60:is(.dark *){background-color:color-mix(in oklab,var(--destructive)60%,transparent)}}.dark\:bg-foreground\/10:is(.dark *){background-color:var(--foreground)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-foreground\/10:is(.dark *){background-color:color-mix(in oklab,var(--foreground)10%,transparent)}}.dark\:bg-gray-500:is(.dark *){background-color:var(--color-gray-500)}.dark\:bg-gray-800:is(.dark *){background-color:var(--color-gray-800)}.dark\:bg-gray-800\/90:is(.dark *){background-color:#1e2939e6}@supports (color:color-mix(in lab,red,red)){.dark\:bg-gray-800\/90:is(.dark *){background-color:color-mix(in oklab,var(--color-gray-800)90%,transparent)}}.dark\:bg-gray-900:is(.dark *){background-color:var(--color-gray-900)}.dark\:bg-green-400:is(.dark *){background-color:var(--color-green-400)}.dark\:bg-green-500\/10:is(.dark *){background-color:#00c7581a}@supports 
(color:color-mix(in lab,red,red)){.dark\:bg-green-500\/10:is(.dark *){background-color:color-mix(in oklab,var(--color-green-500)10%,transparent)}}.dark\:bg-green-900:is(.dark *){background-color:var(--color-green-900)}.dark\:bg-green-950:is(.dark *){background-color:var(--color-green-950)}.dark\:bg-green-950\/20:is(.dark *){background-color:#032e1533}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)20%,transparent)}}.dark\:bg-green-950\/50:is(.dark *){background-color:#032e1580}@supports (color:color-mix(in lab,red,red)){.dark\:bg-green-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-green-950)50%,transparent)}}.dark\:bg-input\/30:is(.dark *){background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:bg-input\/30:is(.dark *){background-color:color-mix(in oklab,var(--input)30%,transparent)}}.dark\:bg-orange-400:is(.dark *){background-color:var(--color-orange-400)}.dark\:bg-orange-900:is(.dark *){background-color:var(--color-orange-900)}.dark\:bg-orange-950:is(.dark *){background-color:var(--color-orange-950)}.dark\:bg-orange-950\/50:is(.dark *){background-color:#44130680}@supports (color:color-mix(in lab,red,red)){.dark\:bg-orange-950\/50:is(.dark *){background-color:color-mix(in oklab,var(--color-orange-950)50%,transparent)}}.dark\:bg-purple-900:is(.dark *){background-color:var(--color-purple-900)}.dark\:bg-red-400:is(.dark *){background-color:var(--color-red-400)}.dark\:bg-red-900:is(.dark *){background-color:var(--color-red-900)}.dark\:bg-red-950:is(.dark *){background-color:var(--color-red-950)}.dark\:bg-red-950\/20:is(.dark *){background-color:#46080933}@supports (color:color-mix(in lab,red,red)){.dark\:bg-red-950\/20:is(.dark *){background-color:color-mix(in oklab,var(--color-red-950)20%,transparent)}}.dark\:text-\[\#8B5CF6\]:is(.dark *){color:#8b5cf6}.dark\:text-amber-100:is(.dark 
*){color:var(--color-amber-100)}.dark\:text-amber-200:is(.dark *){color:var(--color-amber-200)}.dark\:text-amber-300:is(.dark *){color:var(--color-amber-300)}.dark\:text-amber-400:is(.dark *){color:var(--color-amber-400)}.dark\:text-amber-500:is(.dark *){color:var(--color-amber-500)}.dark\:text-blue-100:is(.dark *){color:var(--color-blue-100)}.dark\:text-blue-200:is(.dark *){color:var(--color-blue-200)}.dark\:text-blue-300:is(.dark *){color:var(--color-blue-300)}.dark\:text-blue-400:is(.dark *){color:var(--color-blue-400)}.dark\:text-blue-500:is(.dark *){color:var(--color-blue-500)}.dark\:text-gray-100:is(.dark *){color:var(--color-gray-100)}.dark\:text-gray-300:is(.dark *){color:var(--color-gray-300)}.dark\:text-gray-400:is(.dark *){color:var(--color-gray-400)}.dark\:text-green-100:is(.dark *){color:var(--color-green-100)}.dark\:text-green-200:is(.dark *){color:var(--color-green-200)}.dark\:text-green-300:is(.dark *){color:var(--color-green-300)}.dark\:text-green-400:is(.dark *){color:var(--color-green-400)}.dark\:text-orange-200:is(.dark *){color:var(--color-orange-200)}.dark\:text-orange-400:is(.dark *){color:var(--color-orange-400)}.dark\:text-purple-400:is(.dark *){color:var(--color-purple-400)}.dark\:text-red-200:is(.dark *){color:var(--color-red-200)}.dark\:text-red-300:is(.dark *){color:var(--color-red-300)}.dark\:text-red-400:is(.dark *){color:var(--color-red-400)}.dark\:text-yellow-400:is(.dark *){color:var(--color-yellow-400)}.dark\:opacity-30:is(.dark *){opacity:.3}@media (hover:hover){.dark\:hover\:border-gray-600:is(.dark *):hover{border-color:var(--color-gray-600)}.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:var(--accent)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-accent\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--accent)50%,transparent)}}.dark\:hover\:bg-amber-950\/30:is(.dark *):hover{background-color:#4619014d}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-amber-950\/30:is(.dark 
*):hover{background-color:color-mix(in oklab,var(--color-amber-950)30%,transparent)}}.dark\:hover\:bg-gray-800:is(.dark *):hover{background-color:var(--color-gray-800)}.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:var(--input)}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-input\/50:is(.dark *):hover{background-color:color-mix(in oklab,var(--input)50%,transparent)}}.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:#82181a33}@supports (color:color-mix(in lab,red,red)){.dark\:hover\:bg-red-900\/20:is(.dark *):hover{background-color:color-mix(in oklab,var(--color-red-900)20%,transparent)}}}.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:focus-visible\:ring-destructive\/40:is(.dark *):focus-visible{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:aria-invalid\:ring-destructive\/40:is(.dark *)[aria-invalid=true]{--tw-ring-color:color-mix(in oklab,var(--destructive)40%,transparent)}}.dark\:data-\[state\=checked\]\:bg-primary:is(.dark *)[data-state=checked]{background-color:var(--primary)}.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:var(--destructive)}@supports (color:color-mix(in lab,red,red)){.dark\:data-\[variant\=destructive\]\:focus\:bg-destructive\/20:is(.dark *)[data-variant=destructive]:focus{background-color:color-mix(in oklab,var(--destructive)20%,transparent)}}.\[\&_p\]\:leading-relaxed p{--tw-leading:var(--leading-relaxed);line-height:var(--leading-relaxed)}.\[\&_svg\]\:pointer-events-none svg{pointer-events:none}.\[\&_svg\]\:shrink-0 svg{flex-shrink:0}.\[\&_svg\:not\(\[class\*\=\'size-\'\]\)\]\:size-4 
svg:not([class*=size-]){width:calc(var(--spacing)*4);height:calc(var(--spacing)*4)}.\[\&_svg\:not\(\[class\*\=\'text-\'\]\)\]\:text-muted-foreground svg:not([class*=text-]){color:var(--muted-foreground)}.\[\.border-b\]\:pb-6.border-b{padding-bottom:calc(var(--spacing)*6)}.\[\.border-t\]\:pt-6.border-t{padding-top:calc(var(--spacing)*6)}:is(.\*\:\[span\]\:last\:flex>*):is(span):last-child{display:flex}:is(.\*\:\[span\]\:last\:items-center>*):is(span):last-child{align-items:center}:is(.\*\:\[span\]\:last\:gap-2>*):is(span):last-child{gap:calc(var(--spacing)*2)}:is(.data-\[variant\=destructive\]\:\*\:\[svg\]\:\!text-destructive[data-variant=destructive]>*):is(svg){color:var(--destructive)!important}.\[\&\>svg\]\:absolute>svg{position:absolute}.\[\&\>svg\]\:top-4>svg{top:calc(var(--spacing)*4)}.\[\&\>svg\]\:left-4>svg{left:calc(var(--spacing)*4)}.\[\&\>svg\]\:text-foreground>svg{color:var(--foreground)}.\[\&\>svg\+div\]\:translate-y-\[-3px\]>svg+div{--tw-translate-y:-3px;translate:var(--tw-translate-x)var(--tw-translate-y)}.\[\&\>svg\~\*\]\:pl-7>svg~*{padding-left:calc(var(--spacing)*7)}}@property --tw-animation-delay{syntax:"*";inherits:false;initial-value:0s}@property --tw-animation-direction{syntax:"*";inherits:false;initial-value:normal}@property --tw-animation-duration{syntax:"*";inherits:false}@property --tw-animation-fill-mode{syntax:"*";inherits:false;initial-value:none}@property --tw-animation-iteration-count{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-blur{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-enter-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-enter-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-blur{syntax:"*";inherits:false;initial-value:0}@property 
--tw-exit-opacity{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-rotate{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-scale{syntax:"*";inherits:false;initial-value:1}@property --tw-exit-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-exit-translate-y{syntax:"*";inherits:false;initial-value:0}:root{--radius:.625rem;--background:oklch(100% 0 0);--foreground:oklch(14.5% 0 0);--card:oklch(100% 0 0);--card-foreground:oklch(14.5% 0 0);--popover:oklch(100% 0 0);--popover-foreground:oklch(14.5% 0 0);--primary:oklch(48% .18 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(97% 0 0);--secondary-foreground:oklch(20.5% 0 0);--muted:oklch(97% 0 0);--muted-foreground:oklch(55.6% 0 0);--accent:oklch(97% 0 0);--accent-foreground:oklch(20.5% 0 0);--destructive:oklch(57.7% .245 27.325);--border:oklch(92.2% 0 0);--input:oklch(92.2% 0 0);--ring:oklch(70.8% 0 0);--chart-1:oklch(64.6% .222 41.116);--chart-2:oklch(60% .118 184.704);--chart-3:oklch(39.8% .07 227.392);--chart-4:oklch(82.8% .189 84.429);--chart-5:oklch(76.9% .188 70.08);--sidebar:oklch(98.5% 0 0);--sidebar-foreground:oklch(14.5% 0 0);--sidebar-primary:oklch(20.5% 0 0);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(97% 0 0);--sidebar-accent-foreground:oklch(20.5% 0 0);--sidebar-border:oklch(92.2% 0 0);--sidebar-ring:oklch(70.8% 0 0)}.dark{--background:oklch(14.5% 0 0);--foreground:oklch(98.5% 0 0);--card:oklch(20.5% 0 0);--card-foreground:oklch(98.5% 0 0);--popover:oklch(20.5% 0 0);--popover-foreground:oklch(98.5% 0 0);--primary:oklch(62% .2 290);--primary-foreground:oklch(98.5% 0 0);--secondary:oklch(26.9% 0 0);--secondary-foreground:oklch(98.5% 0 0);--muted:oklch(26.9% 0 0);--muted-foreground:oklch(70.8% 0 0);--accent:oklch(26.9% 0 0);--accent-foreground:oklch(98.5% 0 0);--destructive:oklch(70.4% .191 22.216);--border:oklch(100% 0 0/.1);--input:oklch(100% 0 0/.15);--ring:oklch(55.6% 0 0);--chart-1:oklch(48.8% .243 
264.376);--chart-2:oklch(69.6% .17 162.48);--chart-3:oklch(76.9% .188 70.08);--chart-4:oklch(62.7% .265 303.9);--chart-5:oklch(64.5% .246 16.439);--sidebar:oklch(20.5% 0 0);--sidebar-foreground:oklch(98.5% 0 0);--sidebar-primary:oklch(48.8% .243 264.376);--sidebar-primary-foreground:oklch(98.5% 0 0);--sidebar-accent:oklch(26.9% 0 0);--sidebar-accent-foreground:oklch(98.5% 0 0);--sidebar-border:oklch(100% 0 0/.1);--sidebar-ring:oklch(55.6% 0 0)}.workflow-chat-view .border-green-200{border-color:var(--color-emerald-200)}.workflow-chat-view .bg-green-50{background-color:var(--color-emerald-50)}.workflow-chat-view .bg-green-100{background-color:var(--color-emerald-100)}.workflow-chat-view .text-green-600{color:var(--color-emerald-600)}.workflow-chat-view .text-green-700{color:var(--color-emerald-700)}.workflow-chat-view .text-green-800{color:var(--color-emerald-800)}@property --tw-translate-x{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-y{syntax:"*";inherits:false;initial-value:0}@property --tw-translate-z{syntax:"*";inherits:false;initial-value:0}@property --tw-scale-x{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-y{syntax:"*";inherits:false;initial-value:1}@property --tw-scale-z{syntax:"*";inherits:false;initial-value:1}@property --tw-rotate-x{syntax:"*";inherits:false}@property --tw-rotate-y{syntax:"*";inherits:false}@property --tw-rotate-z{syntax:"*";inherits:false}@property --tw-skew-x{syntax:"*";inherits:false}@property --tw-skew-y{syntax:"*";inherits:false}@property --tw-space-y-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-space-x-reverse{syntax:"*";inherits:false;initial-value:0}@property --tw-border-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-leading{syntax:"*";inherits:false}@property --tw-font-weight{syntax:"*";inherits:false}@property --tw-tracking{syntax:"*";inherits:false}@property --tw-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property 
--tw-shadow-color{syntax:"*";inherits:false}@property --tw-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-inset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-shadow-color{syntax:"*";inherits:false}@property --tw-inset-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-ring-color{syntax:"*";inherits:false}@property --tw-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-inset-ring-color{syntax:"*";inherits:false}@property --tw-inset-ring-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-ring-inset{syntax:"*";inherits:false}@property --tw-ring-offset-width{syntax:"";inherits:false;initial-value:0}@property --tw-ring-offset-color{syntax:"*";inherits:false;initial-value:#fff}@property --tw-ring-offset-shadow{syntax:"*";inherits:false;initial-value:0 0 #0000}@property --tw-outline-style{syntax:"*";inherits:false;initial-value:solid}@property --tw-blur{syntax:"*";inherits:false}@property --tw-brightness{syntax:"*";inherits:false}@property --tw-contrast{syntax:"*";inherits:false}@property --tw-grayscale{syntax:"*";inherits:false}@property --tw-hue-rotate{syntax:"*";inherits:false}@property --tw-invert{syntax:"*";inherits:false}@property --tw-opacity{syntax:"*";inherits:false}@property --tw-saturate{syntax:"*";inherits:false}@property --tw-sepia{syntax:"*";inherits:false}@property --tw-drop-shadow{syntax:"*";inherits:false}@property --tw-drop-shadow-color{syntax:"*";inherits:false}@property --tw-drop-shadow-alpha{syntax:"";inherits:false;initial-value:100%}@property --tw-drop-shadow-size{syntax:"*";inherits:false}@property --tw-backdrop-blur{syntax:"*";inherits:false}@property --tw-backdrop-brightness{syntax:"*";inherits:false}@property --tw-backdrop-contrast{syntax:"*";inherits:false}@property --tw-backdrop-grayscale{syntax:"*";inherits:false}@property --tw-backdrop-hue-rotate{syntax:"*";inherits:false}@property 
--tw-backdrop-invert{syntax:"*";inherits:false}@property --tw-backdrop-opacity{syntax:"*";inherits:false}@property --tw-backdrop-saturate{syntax:"*";inherits:false}@property --tw-backdrop-sepia{syntax:"*";inherits:false}@property --tw-duration{syntax:"*";inherits:false}@property --tw-ease{syntax:"*";inherits:false}@keyframes spin{to{transform:rotate(360deg)}}@keyframes pulse{50%{opacity:.5}}@keyframes bounce{0%,to{animation-timing-function:cubic-bezier(.8,0,1,1);transform:translateY(-25%)}50%{animation-timing-function:cubic-bezier(0,0,.2,1);transform:none}}@keyframes enter{0%{opacity:var(--tw-enter-opacity,1);transform:translate3d(var(--tw-enter-translate-x,0),var(--tw-enter-translate-y,0),0)scale3d(var(--tw-enter-scale,1),var(--tw-enter-scale,1),var(--tw-enter-scale,1))rotate(var(--tw-enter-rotate,0));filter:blur(var(--tw-enter-blur,0))}}@keyframes exit{to{opacity:var(--tw-exit-opacity,1);transform:translate3d(var(--tw-exit-translate-x,0),var(--tw-exit-translate-y,0),0)scale3d(var(--tw-exit-scale,1),var(--tw-exit-scale,1),var(--tw-exit-scale,1))rotate(var(--tw-exit-rotate,0));filter:blur(var(--tw-exit-blur,0))}}.react-flow{direction:ltr;--xy-edge-stroke-default: #b1b1b7;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #555;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(255, 255, 255, .5);--xy-minimap-background-color-default: #fff;--xy-minimap-mask-background-color-default: rgba(240, 240, 240, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #e2e2e2;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: transparent;--xy-background-pattern-dots-color-default: #91919a;--xy-background-pattern-lines-color-default: #eee;--xy-background-pattern-cross-color-default: 
#e2e2e2;background-color:var(--xy-background-color, var(--xy-background-color-default));--xy-node-color-default: inherit;--xy-node-border-default: 1px solid #1a192b;--xy-node-background-color-default: #fff;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(0, 0, 0, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #1a192b;--xy-node-border-radius-default: 3px;--xy-handle-background-color-default: #1a192b;--xy-handle-border-color-default: #fff;--xy-selection-background-color-default: rgba(0, 89, 220, .08);--xy-selection-border-default: 1px dotted rgba(0, 89, 220, .8);--xy-controls-button-background-color-default: #fefefe;--xy-controls-button-background-color-hover-default: #f4f4f4;--xy-controls-button-color-default: inherit;--xy-controls-button-color-hover-default: inherit;--xy-controls-button-border-color-default: #eee;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #ffffff;--xy-edge-label-color-default: inherit;--xy-resize-background-color-default: #3367d9}.react-flow.dark{--xy-edge-stroke-default: #3e3e3e;--xy-edge-stroke-width-default: 1;--xy-edge-stroke-selected-default: #727272;--xy-connectionline-stroke-default: #b1b1b7;--xy-connectionline-stroke-width-default: 1;--xy-attribution-background-color-default: rgba(150, 150, 150, .25);--xy-minimap-background-color-default: #141414;--xy-minimap-mask-background-color-default: rgba(60, 60, 60, .6);--xy-minimap-mask-stroke-color-default: transparent;--xy-minimap-mask-stroke-width-default: 1;--xy-minimap-node-background-color-default: #2b2b2b;--xy-minimap-node-stroke-color-default: transparent;--xy-minimap-node-stroke-width-default: 2;--xy-background-color-default: #141414;--xy-background-pattern-dots-color-default: #777;--xy-background-pattern-lines-color-default: #777;--xy-background-pattern-cross-color-default: #777;--xy-node-color-default: #f8f8f8;--xy-node-border-default: 1px solid 
#3c3c3c;--xy-node-background-color-default: #1e1e1e;--xy-node-group-background-color-default: rgba(240, 240, 240, .25);--xy-node-boxshadow-hover-default: 0 1px 4px 1px rgba(255, 255, 255, .08);--xy-node-boxshadow-selected-default: 0 0 0 .5px #999;--xy-handle-background-color-default: #bebebe;--xy-handle-border-color-default: #1e1e1e;--xy-selection-background-color-default: rgba(200, 200, 220, .08);--xy-selection-border-default: 1px dotted rgba(200, 200, 220, .8);--xy-controls-button-background-color-default: #2b2b2b;--xy-controls-button-background-color-hover-default: #3e3e3e;--xy-controls-button-color-default: #f8f8f8;--xy-controls-button-color-hover-default: #fff;--xy-controls-button-border-color-default: #5b5b5b;--xy-controls-box-shadow-default: 0 0 2px 1px rgba(0, 0, 0, .08);--xy-edge-label-background-color-default: #141414;--xy-edge-label-color-default: #f8f8f8}.react-flow__background{background-color:var(--xy-background-color-props, var(--xy-background-color, var(--xy-background-color-default)));pointer-events:none;z-index:-1}.react-flow__container{position:absolute;width:100%;height:100%;top:0;left:0}.react-flow__pane{z-index:1}.react-flow__pane.draggable{cursor:grab}.react-flow__pane.dragging{cursor:grabbing}.react-flow__pane.selection{cursor:pointer}.react-flow__viewport{transform-origin:0 0;z-index:2;pointer-events:none}.react-flow__renderer{z-index:4}.react-flow__selection{z-index:6}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible{outline:none}.react-flow__edge-path{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default));stroke-width:var(--xy-edge-stroke-width, var(--xy-edge-stroke-width-default));fill:none}.react-flow__connection-path{stroke:var(--xy-connectionline-stroke, var(--xy-connectionline-stroke-default));stroke-width:var(--xy-connectionline-stroke-width, var(--xy-connectionline-stroke-width-default));fill:none}.react-flow .react-flow__edges{position:absolute}.react-flow .react-flow__edges 
svg{overflow:visible;position:absolute;pointer-events:none}.react-flow__edge{pointer-events:visibleStroke}.react-flow__edge.selectable{cursor:pointer}.react-flow__edge.animated path{stroke-dasharray:5;animation:dashdraw .5s linear infinite}.react-flow__edge.animated path.react-flow__edge-interaction{stroke-dasharray:none;animation:none}.react-flow__edge.inactive{pointer-events:none}.react-flow__edge.selected,.react-flow__edge:focus,.react-flow__edge:focus-visible{outline:none}.react-flow__edge.selected .react-flow__edge-path,.react-flow__edge.selectable:focus .react-flow__edge-path,.react-flow__edge.selectable:focus-visible .react-flow__edge-path{stroke:var(--xy-edge-stroke-selected, var(--xy-edge-stroke-selected-default))}.react-flow__edge-textwrapper{pointer-events:all}.react-flow__edge .react-flow__edge-text{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__arrowhead polyline{stroke:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__arrowhead polyline.arrowclosed{fill:var(--xy-edge-stroke, var(--xy-edge-stroke-default))}.react-flow__connection{pointer-events:none}.react-flow__connection .animated{stroke-dasharray:5;animation:dashdraw .5s linear infinite}svg.react-flow__connectionline{z-index:1001;overflow:visible;position:absolute}.react-flow__nodes{pointer-events:none;transform-origin:0 0}.react-flow__node{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;pointer-events:all;transform-origin:0 0;box-sizing:border-box;cursor:default}.react-flow__node.selectable{cursor:pointer}.react-flow__node.draggable{cursor:grab;pointer-events:all}.react-flow__node.draggable.dragging{cursor:grabbing}.react-flow__nodesselection{z-index:3;transform-origin:left 
top;pointer-events:none}.react-flow__nodesselection-rect{position:absolute;pointer-events:all;cursor:grab}.react-flow__handle{position:absolute;pointer-events:none;min-width:5px;min-height:5px;width:6px;height:6px;background-color:var(--xy-handle-background-color, var(--xy-handle-background-color-default));border:1px solid var(--xy-handle-border-color, var(--xy-handle-border-color-default));border-radius:100%}.react-flow__handle.connectingfrom{pointer-events:all}.react-flow__handle.connectionindicator{pointer-events:all;cursor:crosshair}.react-flow__handle-bottom{top:auto;left:50%;bottom:0;transform:translate(-50%,50%)}.react-flow__handle-top{top:0;left:50%;transform:translate(-50%,-50%)}.react-flow__handle-left{top:50%;left:0;transform:translate(-50%,-50%)}.react-flow__handle-right{top:50%;right:0;transform:translate(50%,-50%)}.react-flow__edgeupdater{cursor:move;pointer-events:all}.react-flow__pane.selection .react-flow__panel{pointer-events:none}.react-flow__panel{position:absolute;z-index:5;margin:15px}.react-flow__panel.top{top:0}.react-flow__panel.bottom{bottom:0}.react-flow__panel.top.center,.react-flow__panel.bottom.center{left:50%;transform:translate(-15px) translate(-50%)}.react-flow__panel.left{left:0}.react-flow__panel.right{right:0}.react-flow__panel.left.center,.react-flow__panel.right.center{top:50%;transform:translateY(-15px) translateY(-50%)}.react-flow__attribution{font-size:10px;background:var(--xy-attribution-background-color, var(--xy-attribution-background-color-default));padding:2px 3px;margin:0}.react-flow__attribution a{text-decoration:none;color:#999}@keyframes 
dashdraw{0%{stroke-dashoffset:10}}.react-flow__edgelabel-renderer{position:absolute;width:100%;height:100%;pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none;left:0;top:0}.react-flow__viewport-portal{position:absolute;width:100%;height:100%;left:0;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__minimap{background:var( --xy-minimap-background-color-props, var(--xy-minimap-background-color, var(--xy-minimap-background-color-default)) )}.react-flow__minimap-svg{display:block}.react-flow__minimap-mask{fill:var( --xy-minimap-mask-background-color-props, var(--xy-minimap-mask-background-color, var(--xy-minimap-mask-background-color-default)) );stroke:var( --xy-minimap-mask-stroke-color-props, var(--xy-minimap-mask-stroke-color, var(--xy-minimap-mask-stroke-color-default)) );stroke-width:var( --xy-minimap-mask-stroke-width-props, var(--xy-minimap-mask-stroke-width, var(--xy-minimap-mask-stroke-width-default)) )}.react-flow__minimap-node{fill:var( --xy-minimap-node-background-color-props, var(--xy-minimap-node-background-color, var(--xy-minimap-node-background-color-default)) );stroke:var( --xy-minimap-node-stroke-color-props, var(--xy-minimap-node-stroke-color, var(--xy-minimap-node-stroke-color-default)) );stroke-width:var( --xy-minimap-node-stroke-width-props, var(--xy-minimap-node-stroke-width, var(--xy-minimap-node-stroke-width-default)) )}.react-flow__background-pattern.dots{fill:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-dots-color-default)) )}.react-flow__background-pattern.lines{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-lines-color-default)) )}.react-flow__background-pattern.cross{stroke:var( --xy-background-pattern-color-props, var(--xy-background-pattern-color, var(--xy-background-pattern-cross-color-default)) 
)}.react-flow__controls{display:flex;flex-direction:column;box-shadow:var(--xy-controls-box-shadow, var(--xy-controls-box-shadow-default))}.react-flow__controls.horizontal{flex-direction:row}.react-flow__controls-button{display:flex;justify-content:center;align-items:center;height:26px;width:26px;padding:4px;border:none;background:var(--xy-controls-button-background-color, var(--xy-controls-button-background-color-default));border-bottom:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) );color:var( --xy-controls-button-color-props, var(--xy-controls-button-color, var(--xy-controls-button-color-default)) );cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none}.react-flow__controls-button svg{width:100%;max-width:12px;max-height:12px;fill:currentColor}.react-flow__edge.updating .react-flow__edge-path{stroke:#777}.react-flow__edge-text{font-size:10px}.react-flow__node.selectable:focus,.react-flow__node.selectable:focus-visible{outline:none}.react-flow__node-input,.react-flow__node-default,.react-flow__node-output,.react-flow__node-group{padding:10px;border-radius:var(--xy-node-border-radius, var(--xy-node-border-radius-default));width:150px;font-size:12px;color:var(--xy-node-color, var(--xy-node-color-default));text-align:center;border:var(--xy-node-border, var(--xy-node-border-default));background-color:var(--xy-node-background-color, var(--xy-node-background-color-default))}.react-flow__node-input.selectable:hover,.react-flow__node-default.selectable:hover,.react-flow__node-output.selectable:hover,.react-flow__node-group.selectable:hover{box-shadow:var(--xy-node-boxshadow-hover, 
var(--xy-node-boxshadow-hover-default))}.react-flow__node-input.selectable.selected,.react-flow__node-input.selectable:focus,.react-flow__node-input.selectable:focus-visible,.react-flow__node-default.selectable.selected,.react-flow__node-default.selectable:focus,.react-flow__node-default.selectable:focus-visible,.react-flow__node-output.selectable.selected,.react-flow__node-output.selectable:focus,.react-flow__node-output.selectable:focus-visible,.react-flow__node-group.selectable.selected,.react-flow__node-group.selectable:focus,.react-flow__node-group.selectable:focus-visible{box-shadow:var(--xy-node-boxshadow-selected, var(--xy-node-boxshadow-selected-default))}.react-flow__node-group{background-color:var(--xy-node-group-background-color, var(--xy-node-group-background-color-default))}.react-flow__nodesselection-rect,.react-flow__selection{background:var(--xy-selection-background-color, var(--xy-selection-background-color-default));border:var(--xy-selection-border, var(--xy-selection-border-default))}.react-flow__nodesselection-rect:focus,.react-flow__nodesselection-rect:focus-visible,.react-flow__selection:focus,.react-flow__selection:focus-visible{outline:none}.react-flow__controls-button:hover{background:var( --xy-controls-button-background-color-hover-props, var(--xy-controls-button-background-color-hover, var(--xy-controls-button-background-color-hover-default)) );color:var( --xy-controls-button-color-hover-props, var(--xy-controls-button-color-hover, var(--xy-controls-button-color-hover-default)) )}.react-flow__controls-button:disabled{pointer-events:none}.react-flow__controls-button:disabled svg{fill-opacity:.4}.react-flow__controls-button:last-child{border-bottom:none}.react-flow__controls.horizontal .react-flow__controls-button{border-bottom:none;border-right:1px solid var( --xy-controls-button-border-color-props, var(--xy-controls-button-border-color, var(--xy-controls-button-border-color-default)) )}.react-flow__controls.horizontal 
.react-flow__controls-button:last-child{border-right:none}.react-flow__resize-control{position:absolute}.react-flow__resize-control.left,.react-flow__resize-control.right{cursor:ew-resize}.react-flow__resize-control.top,.react-flow__resize-control.bottom{cursor:ns-resize}.react-flow__resize-control.top.left,.react-flow__resize-control.bottom.right{cursor:nwse-resize}.react-flow__resize-control.bottom.left,.react-flow__resize-control.top.right{cursor:nesw-resize}.react-flow__resize-control.handle{width:5px;height:5px;border:1px solid #fff;border-radius:1px;background-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));translate:-50% -50%}.react-flow__resize-control.handle.left{left:0;top:50%}.react-flow__resize-control.handle.right{left:100%;top:50%}.react-flow__resize-control.handle.top{left:50%;top:0}.react-flow__resize-control.handle.bottom{left:50%;top:100%}.react-flow__resize-control.handle.top.left,.react-flow__resize-control.handle.bottom.left{left:0}.react-flow__resize-control.handle.top.right,.react-flow__resize-control.handle.bottom.right{left:100%}.react-flow__resize-control.line{border-color:var(--xy-resize-background-color, var(--xy-resize-background-color-default));border-width:0;border-style:solid}.react-flow__resize-control.line.left,.react-flow__resize-control.line.right{width:1px;transform:translate(-50%);top:0;height:100%}.react-flow__resize-control.line.left{left:0;border-left-width:1px}.react-flow__resize-control.line.right{left:100%;border-right-width:1px}.react-flow__resize-control.line.top,.react-flow__resize-control.line.bottom{height:1px;transform:translateY(-50%);left:0;width:100%}.react-flow__resize-control.line.top{top:0;border-top-width:1px}.react-flow__resize-control.line.bottom{border-bottom-width:1px;top:100%}.react-flow__edge-textbg{fill:var(--xy-edge-label-background-color, var(--xy-edge-label-background-color-default))}.react-flow__edge-text{fill:var(--xy-edge-label-color, 
var(--xy-edge-label-color-default))} diff --git a/python/packages/devui/agent_framework_devui/ui/assets/index.js b/python/packages/devui/agent_framework_devui/ui/assets/index.js index 56570a4f3c..3744c1e10d 100644 --- a/python/packages/devui/agent_framework_devui/ui/assets/index.js +++ b/python/packages/devui/agent_framework_devui/ui/assets/index.js @@ -1,4 +1,4 @@ -function ZE(e,n){for(var o=0;os[l]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const n=document.createElement("link").relList;if(n&&n.supports&&n.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))s(l);new MutationObserver(l=>{for(const u of l)if(u.type==="childList")for(const d of u.addedNodes)d.tagName==="LINK"&&d.rel==="modulepreload"&&s(d)}).observe(document,{childList:!0,subtree:!0});function o(l){const u={};return l.integrity&&(u.integrity=l.integrity),l.referrerPolicy&&(u.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?u.credentials="include":l.crossOrigin==="anonymous"?u.credentials="omit":u.credentials="same-origin",u}function s(l){if(l.ep)return;l.ep=!0;const u=o(l);fetch(l.href,u)}})();function Zh(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var Cm={exports:{}},Ni={};/** +function gE(e,n){for(var s=0;so[l]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const n=document.createElement("link").relList;if(n&&n.supports&&n.supports("modulepreload"))return;for(const l of document.querySelectorAll('link[rel="modulepreload"]'))o(l);new MutationObserver(l=>{for(const c of l)if(c.type==="childList")for(const d of c.addedNodes)d.tagName==="LINK"&&d.rel==="modulepreload"&&o(d)}).observe(document,{childList:!0,subtree:!0});function s(l){const c={};return 
l.integrity&&(c.integrity=l.integrity),l.referrerPolicy&&(c.referrerPolicy=l.referrerPolicy),l.crossOrigin==="use-credentials"?c.credentials="include":l.crossOrigin==="anonymous"?c.credentials="omit":c.credentials="same-origin",c}function o(l){if(l.ep)return;l.ep=!0;const c=s(l);fetch(l.href,c)}})();function dp(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var $m={exports:{}},Oi={};/** * @license React * react-jsx-runtime.production.js * @@ -6,7 +6,7 @@ function ZE(e,n){for(var o=0;o>>1,T=_[B];if(0>>1;Bl(ee,z))sel(he,ee)?(_[B]=he,_[se]=z,B=se):(_[B]=ee,_[X]=z,B=X);else if(sel(he,z))_[B]=he,_[se]=z,B=se;else break e}}return O}function l(_,O){var z=_.sortIndex-O.sortIndex;return z!==0?z:_.id-O.id}if(e.unstable_now=void 0,typeof performance=="object"&&typeof performance.now=="function"){var u=performance;e.unstable_now=function(){return u.now()}}else{var d=Date,f=d.now();e.unstable_now=function(){return d.now()-f}}var h=[],p=[],g=1,y=null,v=3,b=!1,N=!1,w=!1,j=!1,k=typeof setTimeout=="function"?setTimeout:null,M=typeof clearTimeout=="function"?clearTimeout:null,E=typeof setImmediate<"u"?setImmediate:null;function A(_){for(var O=o(p);O!==null;){if(O.callback===null)s(p);else if(O.startTime<=_)s(p),O.sortIndex=O.expirationTime,n(h,O);else break;O=o(p)}}function D(_){if(w=!1,A(_),!N)if(o(h)!==null)N=!0,L||(L=!0,G());else{var O=o(p);O!==null&&V(D,O.startTime-_)}}var L=!1,H=-1,U=5,q=-1;function F(){return j?!0:!(e.unstable_now()-q_&&F());){var B=y.callback;if(typeof B=="function"){y.callback=null,v=y.priorityLevel;var T=B(y.expirationTime<=_);if(_=e.unstable_now(),typeof T=="function"){y.callback=T,A(_),O=!0;break t}y===o(h)&&s(h),A(_)}else s(h);y=o(h)}if(y!==null)O=!0;else{var P=o(p);P!==null&&V(D,P.startTime-_),O=!1}}break e}finally{y=null,v=z,b=!1}O=void 0}}finally{O?G():L=!1}}}var G;if(typeof E=="function")G=function(){E(K)};else if(typeof MessageChannel<"u"){var te=new 
MessageChannel,I=te.port2;te.port1.onmessage=K,G=function(){I.postMessage(null)}}else G=function(){k(K,0)};function V(_,O){H=k(function(){_(e.unstable_now())},O)}e.unstable_IdlePriority=5,e.unstable_ImmediatePriority=1,e.unstable_LowPriority=4,e.unstable_NormalPriority=3,e.unstable_Profiling=null,e.unstable_UserBlockingPriority=2,e.unstable_cancelCallback=function(_){_.callback=null},e.unstable_forceFrameRate=function(_){0>_||125<_?console.error("forceFrameRate takes a positive int between 0 and 125, forcing frame rates higher than 125 fps is not supported"):U=0<_?Math.floor(1e3/_):5},e.unstable_getCurrentPriorityLevel=function(){return v},e.unstable_next=function(_){switch(v){case 1:case 2:case 3:var O=3;break;default:O=v}var z=v;v=O;try{return _()}finally{v=z}},e.unstable_requestPaint=function(){j=!0},e.unstable_runWithPriority=function(_,O){switch(_){case 1:case 2:case 3:case 4:case 5:break;default:_=3}var z=v;v=_;try{return O()}finally{v=z}},e.unstable_scheduleCallback=function(_,O,z){var B=e.unstable_now();switch(typeof z=="object"&&z!==null?(z=z.delay,z=typeof z=="number"&&0B?(_.sortIndex=z,n(p,_),o(h)===null&&_===o(p)&&(w?(M(H),H=-1):w=!0,V(D,z-B))):(_.sortIndex=T,n(h,_),N||b||(N=!0,L||(L=!0,G()))),_},e.unstable_shouldYield=F,e.unstable_wrapCallback=function(_){var O=v;return function(){var z=v;v=O;try{return _.apply(this,arguments)}finally{v=z}}}})(Tm)),Tm}var Mv;function ej(){return Mv||(Mv=1,Mm.exports=JE()),Mm.exports}var Rm={exports:{}},Bt={};/** + */var Zy;function bE(){return Zy||(Zy=1,(function(e){function n(A,I){var $=A.length;A.push(I);e:for(;0<$;){var P=$-1>>>1,T=A[P];if(0>>1;Pl(Z,$))rel(de,Z)?(A[P]=de,A[re]=$,P=re):(A[P]=Z,A[W]=$,P=W);else if(rel(de,$))A[P]=de,A[re]=$,P=re;else break e}}return I}function l(A,I){var $=A.sortIndex-I.sortIndex;return $!==0?$:A.id-I.id}if(e.unstable_now=void 0,typeof performance=="object"&&typeof performance.now=="function"){var c=performance;e.unstable_now=function(){return c.now()}}else{var 
d=Date,f=d.now();e.unstable_now=function(){return d.now()-f}}var m=[],p=[],g=1,v=null,y=3,b=!1,S=!1,N=!1,_=!1,E=typeof setTimeout=="function"?setTimeout:null,M=typeof clearTimeout=="function"?clearTimeout:null,j=typeof setImmediate<"u"?setImmediate:null;function k(A){for(var I=s(p);I!==null;){if(I.callback===null)o(p);else if(I.startTime<=A)o(p),I.sortIndex=I.expirationTime,n(m,I);else break;I=s(p)}}function R(A){if(N=!1,k(A),!S)if(s(m)!==null)S=!0,D||(D=!0,G());else{var I=s(p);I!==null&&V(R,I.startTime-A)}}var D=!1,z=-1,H=5,U=-1;function F(){return _?!0:!(e.unstable_now()-UA&&F());){var P=v.callback;if(typeof P=="function"){v.callback=null,y=v.priorityLevel;var T=P(v.expirationTime<=A);if(A=e.unstable_now(),typeof T=="function"){v.callback=T,k(A),I=!0;break t}v===s(m)&&o(m),k(A)}else o(m);v=s(m)}if(v!==null)I=!0;else{var B=s(p);B!==null&&V(R,B.startTime-A),I=!1}}break e}finally{v=null,y=$,b=!1}I=void 0}}finally{I?G():D=!1}}}var G;if(typeof j=="function")G=function(){j(K)};else if(typeof MessageChannel<"u"){var ne=new MessageChannel,L=ne.port2;ne.port1.onmessage=K,G=function(){L.postMessage(null)}}else G=function(){E(K,0)};function V(A,I){z=E(function(){A(e.unstable_now())},I)}e.unstable_IdlePriority=5,e.unstable_ImmediatePriority=1,e.unstable_LowPriority=4,e.unstable_NormalPriority=3,e.unstable_Profiling=null,e.unstable_UserBlockingPriority=2,e.unstable_cancelCallback=function(A){A.callback=null},e.unstable_forceFrameRate=function(A){0>A||125P?(A.sortIndex=$,n(p,A),s(m)===null&&A===s(p)&&(N?(M(z),z=-1):N=!0,V(R,$-P))):(A.sortIndex=T,n(m,A),S||b||(S=!0,D||(D=!0,G()))),A},e.unstable_shouldYield=F,e.unstable_wrapCallback=function(A){var I=y;return function(){var $=y;y=I;try{return A.apply(this,arguments)}finally{y=$}}}})(Vm)),Vm}var Wy;function wE(){return Wy||(Wy=1,Um.exports=bE()),Um.exports}var qm={exports:{}},Yt={};/** * @license React * react-dom.production.js * @@ -30,7 +30,7 @@ function ZE(e,n){for(var o=0;o"u"||typeof 
__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(n){console.error(n)}}return e(),Rm.exports=tj(),Rm.exports}/** + */var Ky;function NE(){if(Ky)return Yt;Ky=1;var e=dl();function n(m){var p="https://react.dev/errors/"+m;if(1"u"||typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE!="function"))try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(n){console.error(n)}}return e(),qm.exports=NE(),qm.exports}/** * @license React * react-dom-client.production.js * @@ -38,392 +38,414 @@ function ZE(e,n){for(var o=0;oT||(t.current=B[T],B[T]=null,T--)}function ee(t,r){T++,B[T]=t.current,t.current=r}var se=P(null),he=P(null),fe=P(null),Q=P(null);function ae(t,r){switch(ee(fe,r),ee(he,t),ee(se,null),r.nodeType){case 9:case 11:t=(t=r.documentElement)&&(t=t.namespaceURI)?J0(t):0;break;default:if(t=r.tagName,r=r.namespaceURI)r=J0(r),t=ev(r,t);else switch(t){case"svg":t=1;break;case"math":t=2;break;default:t=0}}X(se),ee(se,t)}function xe(){X(se),X(he),X(fe)}function le(t){t.memoizedState!==null&&ee(Q,t);var r=se.current,a=ev(r,t.type);r!==a&&(ee(he,t),ee(se,a))}function ce(t){he.current===t&&(X(se),X(he)),Q.current===t&&(X(Q),vi._currentValue=z)}var ue=Object.prototype.hasOwnProperty,ge=e.unstable_scheduleCallback,pe=e.unstable_cancelCallback,Ye=e.unstable_shouldYield,it=e.unstable_requestPaint,re=e.unstable_now,Ce=e.unstable_getCurrentPriorityLevel,ke=e.unstable_ImmediatePriority,ze=e.unstable_UserBlockingPriority,Ne=e.unstable_NormalPriority,je=e.unstable_LowPriority,Le=e.unstable_IdlePriority,Ue=e.log,ct=e.unstable_setDisableYieldValue,$e=null,ye=null;function Te(t){if(typeof Ue=="function"&&ct(t),ye&&typeof ye.setStrictMode=="function")try{ye.setStrictMode($e,t)}catch{}}var Ie=Math.clz32?Math.clz32:Tr,Nt=Math.log,Tt=Math.LN2;function Tr(t){return t>>>=0,t===0?32:31-(Nt(t)/Tt|0)|0}var os=256,ss=4194304;function Qn(t){var r=t&42;if(r!==0)return r;switch(t&-t){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 
16:return 16;case 32:return 32;case 64:return 64;case 128:return 128;case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return t&4194048;case 4194304:case 8388608:case 16777216:case 33554432:return t&62914560;case 67108864:return 67108864;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 0;default:return t}}function as(t,r,a){var c=t.pendingLanes;if(c===0)return 0;var m=0,x=t.suspendedLanes,C=t.pingedLanes;t=t.warmLanes;var R=c&134217727;return R!==0?(c=R&~x,c!==0?m=Qn(c):(C&=R,C!==0?m=Qn(C):a||(a=R&~t,a!==0&&(m=Qn(a))))):(R=c&~x,R!==0?m=Qn(R):C!==0?m=Qn(C):a||(a=c&~t,a!==0&&(m=Qn(a)))),m===0?0:r!==0&&r!==m&&(r&x)===0&&(x=m&-m,a=r&-r,x>=a||x===32&&(a&4194048)!==0)?r:m}function yo(t,r){return(t.pendingLanes&~(t.suspendedLanes&~t.pingedLanes)&r)===0}function pd(t,r){switch(t){case 1:case 2:case 4:case 8:case 64:return r+250;case 16:case 32:case 128:case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return r+5e3;case 4194304:case 8388608:case 16777216:case 33554432:return-1;case 67108864:case 134217728:case 268435456:case 536870912:case 1073741824:return-1;default:return-1}}function ml(){var t=os;return os<<=1,(os&4194048)===0&&(os=256),t}function hl(){var t=ss;return ss<<=1,(ss&62914560)===0&&(ss=4194304),t}function ya(t){for(var r=[],a=0;31>a;a++)r.push(t);return r}function bo(t,r){t.pendingLanes|=r,r!==268435456&&(t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0)}function gd(t,r,a,c,m,x){var C=t.pendingLanes;t.pendingLanes=a,t.suspendedLanes=0,t.pingedLanes=0,t.warmLanes=0,t.expiredLanes&=a,t.entangledLanes&=a,t.errorRecoveryDisabledLanes&=a,t.shellSuspendCounter=0;var R=t.entanglements,$=t.expirationTimes,J=t.hiddenUpdates;for(a=C&~a;0)":-1m||$[c]!==J[m]){var ie=` -`+$[c].replace(" at new "," at 
");return t.displayName&&ie.includes("")&&(ie=ie.replace("",t.displayName)),ie}while(1<=c&&0<=m);break}}}finally{Ca=!1,Error.prepareStackTrace=a}return(a=t?t.displayName||t.name:"")?nr(a):""}function Sd(t){switch(t.tag){case 26:case 27:case 5:return nr(t.type);case 16:return nr("Lazy");case 13:return nr("Suspense");case 19:return nr("SuspenseList");case 0:case 15:return ka(t.type,!1);case 11:return ka(t.type.render,!1);case 1:return ka(t.type,!0);case 31:return nr("Activity");default:return""}}function Sl(t){try{var r="";do r+=Sd(t),t=t.return;while(t);return r}catch(a){return` -Error generating stack: `+a.message+` -`+a.stack}}function $t(t){switch(typeof t){case"bigint":case"boolean":case"number":case"string":case"undefined":return t;case"object":return t;default:return""}}function Nl(t){var r=t.type;return(t=t.nodeName)&&t.toLowerCase()==="input"&&(r==="checkbox"||r==="radio")}function Nd(t){var r=Nl(t)?"checked":"value",a=Object.getOwnPropertyDescriptor(t.constructor.prototype,r),c=""+t[r];if(!t.hasOwnProperty(r)&&typeof a<"u"&&typeof a.get=="function"&&typeof a.set=="function"){var m=a.get,x=a.set;return Object.defineProperty(t,r,{configurable:!0,get:function(){return m.call(this)},set:function(C){c=""+C,x.call(this,C)}}),Object.defineProperty(t,r,{enumerable:a.enumerable}),{getValue:function(){return c},setValue:function(C){c=""+C},stopTracking:function(){t._valueTracker=null,delete t[r]}}}}function cs(t){t._valueTracker||(t._valueTracker=Nd(t))}function Aa(t){if(!t)return!1;var r=t._valueTracker;if(!r)return!0;var a=r.getValue(),c="";return t&&(c=Nl(t)?t.checked?"true":"false":t.value),t=c,t!==a?(r.setValue(t),!0):!1}function us(t){if(t=t||(typeof document<"u"?document:void 0),typeof t>"u")return null;try{return t.activeElement||t.body}catch{return t.body}}var _d=/[\n"\\]/g;function Vt(t){return t.replace(_d,function(r){return"\\"+r.charCodeAt(0).toString(16)+" "})}function So(t,r,a,c,m,x,C,R){t.name="",C!=null&&typeof C!="function"&&typeof 
C!="symbol"&&typeof C!="boolean"?t.type=C:t.removeAttribute("type"),r!=null?C==="number"?(r===0&&t.value===""||t.value!=r)&&(t.value=""+$t(r)):t.value!==""+$t(r)&&(t.value=""+$t(r)):C!=="submit"&&C!=="reset"||t.removeAttribute("value"),r!=null?Ma(t,C,$t(r)):a!=null?Ma(t,C,$t(a)):c!=null&&t.removeAttribute("value"),m==null&&x!=null&&(t.defaultChecked=!!x),m!=null&&(t.checked=m&&typeof m!="function"&&typeof m!="symbol"),R!=null&&typeof R!="function"&&typeof R!="symbol"&&typeof R!="boolean"?t.name=""+$t(R):t.removeAttribute("name")}function _l(t,r,a,c,m,x,C,R){if(x!=null&&typeof x!="function"&&typeof x!="symbol"&&typeof x!="boolean"&&(t.type=x),r!=null||a!=null){if(!(x!=="submit"&&x!=="reset"||r!=null))return;a=a!=null?""+$t(a):"",r=r!=null?""+$t(r):a,R||r===t.value||(t.value=r),t.defaultValue=r}c=c??m,c=typeof c!="function"&&typeof c!="symbol"&&!!c,t.checked=R?t.checked:!!c,t.defaultChecked=!!c,C!=null&&typeof C!="function"&&typeof C!="symbol"&&typeof C!="boolean"&&(t.name=C)}function Ma(t,r,a){r==="number"&&us(t.ownerDocument)===t||t.defaultValue===""+a||(t.defaultValue=""+a)}function rr(t,r,a,c){if(t=t.options,r){r={};for(var m=0;m"u"||typeof window.document>"u"||typeof window.document.createElement>"u"),Ad=!1;if(or)try{var Ra={};Object.defineProperty(Ra,"passive",{get:function(){Ad=!0}}),window.addEventListener("test",Ra,Ra),window.removeEventListener("test",Ra,Ra)}catch{Ad=!1}var Lr=null,Md=null,jl=null;function rg(){if(jl)return jl;var t,r=Md,a=r.length,c,m="value"in Lr?Lr.value:Lr.textContent,x=m.length;for(t=0;t=za),cg=" ",ug=!1;function dg(t,r){switch(t){case"keyup":return y_.indexOf(r.keyCode)!==-1;case"keydown":return r.keyCode!==229;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function fg(t){return t=t.detail,typeof t=="object"&&"data"in t?t.data:null}var hs=!1;function w_(t,r){switch(t){case"compositionend":return fg(r);case"keypress":return r.which!==32?null:(ug=!0,cg);case"textInput":return 
t=r.data,t===cg&&ug?null:t;default:return null}}function S_(t,r){if(hs)return t==="compositionend"||!zd&&dg(t,r)?(t=rg(),jl=Md=Lr=null,hs=!1,t):null;switch(t){case"paste":return null;case"keypress":if(!(r.ctrlKey||r.altKey||r.metaKey)||r.ctrlKey&&r.altKey){if(r.char&&1=r)return{node:a,offset:r-t};t=c}e:{for(;a;){if(a.nextSibling){a=a.nextSibling;break e}a=a.parentNode}a=void 0}a=bg(a)}}function Sg(t,r){return t&&r?t===r?!0:t&&t.nodeType===3?!1:r&&r.nodeType===3?Sg(t,r.parentNode):"contains"in t?t.contains(r):t.compareDocumentPosition?!!(t.compareDocumentPosition(r)&16):!1:!1}function Ng(t){t=t!=null&&t.ownerDocument!=null&&t.ownerDocument.defaultView!=null?t.ownerDocument.defaultView:window;for(var r=us(t.document);r instanceof t.HTMLIFrameElement;){try{var a=typeof r.contentWindow.location.href=="string"}catch{a=!1}if(a)t=r.contentWindow;else break;r=us(t.document)}return r}function Hd(t){var r=t&&t.nodeName&&t.nodeName.toLowerCase();return r&&(r==="input"&&(t.type==="text"||t.type==="search"||t.type==="tel"||t.type==="url"||t.type==="password")||r==="textarea"||t.contentEditable==="true")}var M_=or&&"documentMode"in document&&11>=document.documentMode,ps=null,Bd=null,Ba=null,Ud=!1;function _g(t,r,a){var c=a.window===a?a.document:a.nodeType===9?a:a.ownerDocument;Ud||ps==null||ps!==us(c)||(c=ps,"selectionStart"in c&&Hd(c)?c={start:c.selectionStart,end:c.selectionEnd}:(c=(c.ownerDocument&&c.ownerDocument.defaultView||window).getSelection(),c={anchorNode:c.anchorNode,anchorOffset:c.anchorOffset,focusNode:c.focusNode,focusOffset:c.focusOffset}),Ba&&Ha(Ba,c)||(Ba=c,c=gc(Bd,"onSelect"),0>=C,m-=C,ar=1<<32-Ie(r)+m|a<x?x:8;var C=_.T,R={};_.T=R,jf(t,!1,r,a);try{var $=m(),J=_.S;if(J!==null&&J(R,$),$!==null&&typeof $=="object"&&typeof $.then=="function"){var ie=B_($,c);ei(t,r,ie,nn(t))}else ei(t,r,c,nn(t))}catch(me){ei(t,r,{then:function(){},status:"rejected",reason:me},nn())}finally{O.p=x,_.T=C}}function q_(){}function _f(t,r,a,c){if(t.tag!==5)throw Error(s(476));var 
m=Ex(t).queue;_x(t,m,r,z,a===null?q_:function(){return jx(t),a(c)})}function Ex(t){var r=t.memoizedState;if(r!==null)return r;r={memoizedState:z,baseState:z,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:ur,lastRenderedState:z},next:null};var a={};return r.next={memoizedState:a,baseState:a,baseQueue:null,queue:{pending:null,lanes:0,dispatch:null,lastRenderedReducer:ur,lastRenderedState:a},next:null},t.memoizedState=r,t=t.alternate,t!==null&&(t.memoizedState=r),r}function jx(t){var r=Ex(t).next.queue;ei(t,r,{},nn())}function Ef(){return Ht(vi)}function Cx(){return bt().memoizedState}function kx(){return bt().memoizedState}function Y_(t){for(var r=t.return;r!==null;){switch(r.tag){case 24:case 3:var a=nn();t=Br(a);var c=Ur(r,t,a);c!==null&&(rn(c,r,a),Fa(c,r,a)),r={cache:ef()},t.payload=r;return}r=r.return}}function G_(t,r,a){var c=nn();a={lane:c,revertLane:0,action:a,hasEagerState:!1,eagerState:null,next:null},Kl(t)?Mx(r,a):(a=qd(t,r,a,c),a!==null&&(rn(a,t,c),Tx(a,r,c)))}function Ax(t,r,a){var c=nn();ei(t,r,a,c)}function ei(t,r,a,c){var m={lane:c,revertLane:0,action:a,hasEagerState:!1,eagerState:null,next:null};if(Kl(t))Mx(r,m);else{var x=t.alternate;if(t.lanes===0&&(x===null||x.lanes===0)&&(x=r.lastRenderedReducer,x!==null))try{var C=r.lastRenderedState,R=x(C,a);if(m.hasEagerState=!0,m.eagerState=R,Wt(R,C))return Dl(t,r,m,0),lt===null&&Rl(),!1}catch{}finally{}if(a=qd(t,r,m,c),a!==null)return rn(a,t,c),Tx(a,r,c),!0}return!1}function jf(t,r,a,c){if(c={lane:2,revertLane:om(),action:c,hasEagerState:!1,eagerState:null,next:null},Kl(t)){if(r)throw Error(s(479))}else r=qd(t,a,c,2),r!==null&&rn(r,t,2)}function Kl(t){var r=t.alternate;return t===Ve||r!==null&&r===Ve}function Mx(t,r){Es=ql=!0;var a=t.pending;a===null?r.next=r:(r.next=a.next,a.next=r),t.pending=r}function Tx(t,r,a){if((a&4194048)!==0){var c=r.lanes;c&=t.pendingLanes,a|=c,r.lanes=a,ba(t,a)}}var 
Wl={readContext:Ht,use:Gl,useCallback:gt,useContext:gt,useEffect:gt,useImperativeHandle:gt,useLayoutEffect:gt,useInsertionEffect:gt,useMemo:gt,useReducer:gt,useRef:gt,useState:gt,useDebugValue:gt,useDeferredValue:gt,useTransition:gt,useSyncExternalStore:gt,useId:gt,useHostTransitionStatus:gt,useFormState:gt,useActionState:gt,useOptimistic:gt,useMemoCache:gt,useCacheRefresh:gt},Rx={readContext:Ht,use:Gl,useCallback:function(t,r){return Yt().memoizedState=[t,r===void 0?null:r],t},useContext:Ht,useEffect:px,useImperativeHandle:function(t,r,a){a=a!=null?a.concat([t]):null,Zl(4194308,4,yx.bind(null,r,t),a)},useLayoutEffect:function(t,r){return Zl(4194308,4,t,r)},useInsertionEffect:function(t,r){Zl(4,2,t,r)},useMemo:function(t,r){var a=Yt();r=r===void 0?null:r;var c=t();if(Oo){Te(!0);try{t()}finally{Te(!1)}}return a.memoizedState=[c,r],c},useReducer:function(t,r,a){var c=Yt();if(a!==void 0){var m=a(r);if(Oo){Te(!0);try{a(r)}finally{Te(!1)}}}else m=r;return c.memoizedState=c.baseState=m,t={pending:null,lanes:0,dispatch:null,lastRenderedReducer:t,lastRenderedState:m},c.queue=t,t=t.dispatch=G_.bind(null,Ve,t),[c.memoizedState,t]},useRef:function(t){var r=Yt();return t={current:t},r.memoizedState=t},useState:function(t){t=bf(t);var r=t.queue,a=Ax.bind(null,Ve,r);return r.dispatch=a,[t.memoizedState,a]},useDebugValue:Sf,useDeferredValue:function(t,r){var a=Yt();return Nf(a,t,r)},useTransition:function(){var t=bf(!1);return t=_x.bind(null,Ve,t.queue,!0,!1),Yt().memoizedState=t,[!1,t]},useSyncExternalStore:function(t,r,a){var c=Ve,m=Yt();if(et){if(a===void 0)throw Error(s(407));a=a()}else{if(a=r(),lt===null)throw Error(s(349));(Ke&124)!==0||Jg(c,r,a)}m.memoizedState=a;var x={value:a,getSnapshot:r};return m.queue=x,px(tx.bind(null,c,x,t),[t]),c.flags|=2048,Cs(9,Fl(),ex.bind(null,c,x,a,r),null),a},useId:function(){var t=Yt(),r=lt.identifierPrefix;if(et){var a=ir,c=ar;a=(c&~(1<<32-Ie(c)-1)).toString(32)+a,r="«"+r+"R"+a,a=Yl++,0De?(Mt=Ae,Ae=null):Mt=Ae.sibling;var 
Qe=ne(Z,Ae,W[De],de);if(Qe===null){Ae===null&&(Ae=Mt);break}t&&Ae&&Qe.alternate===null&&r(Z,Ae),Y=x(Qe,Y,De),Ge===null?be=Qe:Ge.sibling=Qe,Ge=Qe,Ae=Mt}if(De===W.length)return a(Z,Ae),et&&ko(Z,De),be;if(Ae===null){for(;DeDe?(Mt=Ae,Ae=null):Mt=Ae.sibling;var ro=ne(Z,Ae,Qe.value,de);if(ro===null){Ae===null&&(Ae=Mt);break}t&&Ae&&ro.alternate===null&&r(Z,Ae),Y=x(ro,Y,De),Ge===null?be=ro:Ge.sibling=ro,Ge=ro,Ae=Mt}if(Qe.done)return a(Z,Ae),et&&ko(Z,De),be;if(Ae===null){for(;!Qe.done;De++,Qe=W.next())Qe=me(Z,Qe.value,de),Qe!==null&&(Y=x(Qe,Y,De),Ge===null?be=Qe:Ge.sibling=Qe,Ge=Qe);return et&&ko(Z,De),be}for(Ae=c(Ae);!Qe.done;De++,Qe=W.next())Qe=oe(Ae,Z,De,Qe.value,de),Qe!==null&&(t&&Qe.alternate!==null&&Ae.delete(Qe.key===null?De:Qe.key),Y=x(Qe,Y,De),Ge===null?be=Qe:Ge.sibling=Qe,Ge=Qe);return t&&Ae.forEach(function(FE){return r(Z,FE)}),et&&ko(Z,De),be}function st(Z,Y,W,de){if(typeof W=="object"&&W!==null&&W.type===N&&W.key===null&&(W=W.props.children),typeof W=="object"&&W!==null){switch(W.$$typeof){case v:e:{for(var be=W.key;Y!==null;){if(Y.key===be){if(be=W.type,be===N){if(Y.tag===7){a(Z,Y.sibling),de=m(Y,W.props.children),de.return=Z,Z=de;break e}}else if(Y.elementType===be||typeof be=="object"&&be!==null&&be.$$typeof===U&&Ox(be)===Y.type){a(Z,Y.sibling),de=m(Y,W.props),ni(de,W),de.return=Z,Z=de;break e}a(Z,Y);break}else r(Z,Y);Y=Y.sibling}W.type===N?(de=jo(W.props.children,Z.mode,de,W.key),de.return=Z,Z=de):(de=zl(W.type,W.key,W.props,null,Z.mode,de),ni(de,W),de.return=Z,Z=de)}return C(Z);case b:e:{for(be=W.key;Y!==null;){if(Y.key===be)if(Y.tag===4&&Y.stateNode.containerInfo===W.containerInfo&&Y.stateNode.implementation===W.implementation){a(Z,Y.sibling),de=m(Y,W.children||[]),de.return=Z,Z=de;break e}else{a(Z,Y);break}else r(Z,Y);Y=Y.sibling}de=Xd(W,Z.mode,de),de.return=Z,Z=de}return C(Z);case U:return be=W._init,W=be(W._payload),st(Z,Y,W,de)}if(V(W))return Oe(Z,Y,W,de);if(G(W)){if(be=G(W),typeof be!="function")throw Error(s(150));return 
W=be.call(W),Re(Z,Y,W,de)}if(typeof W.then=="function")return st(Z,Y,Ql(W),de);if(W.$$typeof===E)return st(Z,Y,Bl(Z,W),de);Jl(Z,W)}return typeof W=="string"&&W!==""||typeof W=="number"||typeof W=="bigint"?(W=""+W,Y!==null&&Y.tag===6?(a(Z,Y.sibling),de=m(Y,W),de.return=Z,Z=de):(a(Z,Y),de=Gd(W,Z.mode,de),de.return=Z,Z=de),C(Z)):a(Z,Y)}return function(Z,Y,W,de){try{ti=0;var be=st(Z,Y,W,de);return ks=null,be}catch(Ae){if(Ae===Ga||Ae===Pl)throw Ae;var Ge=Qt(29,Ae,null,Z.mode);return Ge.lanes=de,Ge.return=Z,Ge}finally{}}}var As=zx(!0),Lx=zx(!1),pn=P(null),zn=null;function $r(t){var r=t.alternate;ee(Et,Et.current&1),ee(pn,t),zn===null&&(r===null||_s.current!==null||r.memoizedState!==null)&&(zn=t)}function Ix(t){if(t.tag===22){if(ee(Et,Et.current),ee(pn,t),zn===null){var r=t.alternate;r!==null&&r.memoizedState!==null&&(zn=t)}}else Vr()}function Vr(){ee(Et,Et.current),ee(pn,pn.current)}function dr(t){X(pn),zn===t&&(zn=null),X(Et)}var Et=P(0);function ec(t){for(var r=t;r!==null;){if(r.tag===13){var a=r.memoizedState;if(a!==null&&(a=a.dehydrated,a===null||a.data==="$?"||gm(a)))return r}else if(r.tag===19&&r.memoizedProps.revealOrder!==void 0){if((r.flags&128)!==0)return r}else if(r.child!==null){r.child.return=r,r=r.child;continue}if(r===t)break;for(;r.sibling===null;){if(r.return===null||r.return===t)return null;r=r.return}r.sibling.return=r.return,r=r.sibling}return null}function Cf(t,r,a,c){r=t.memoizedState,a=a(c,r),a=a==null?r:g({},r,a),t.memoizedState=a,t.lanes===0&&(t.updateQueue.baseState=a)}var kf={enqueueSetState:function(t,r,a){t=t._reactInternals;var c=nn(),m=Br(c);m.payload=r,a!=null&&(m.callback=a),r=Ur(t,m,c),r!==null&&(rn(r,t,c),Fa(r,t,c))},enqueueReplaceState:function(t,r,a){t=t._reactInternals;var c=nn(),m=Br(c);m.tag=1,m.payload=r,a!=null&&(m.callback=a),r=Ur(t,m,c),r!==null&&(rn(r,t,c),Fa(r,t,c))},enqueueForceUpdate:function(t,r){t=t._reactInternals;var a=nn(),c=Br(a);c.tag=2,r!=null&&(c.callback=r),r=Ur(t,c,a),r!==null&&(rn(r,t,a),Fa(r,t,a))}};function 
Hx(t,r,a,c,m,x,C){return t=t.stateNode,typeof t.shouldComponentUpdate=="function"?t.shouldComponentUpdate(c,x,C):r.prototype&&r.prototype.isPureReactComponent?!Ha(a,c)||!Ha(m,x):!0}function Bx(t,r,a,c){t=r.state,typeof r.componentWillReceiveProps=="function"&&r.componentWillReceiveProps(a,c),typeof r.UNSAFE_componentWillReceiveProps=="function"&&r.UNSAFE_componentWillReceiveProps(a,c),r.state!==t&&kf.enqueueReplaceState(r,r.state,null)}function zo(t,r){var a=r;if("ref"in r){a={};for(var c in r)c!=="ref"&&(a[c]=r[c])}if(t=t.defaultProps){a===r&&(a=g({},a));for(var m in t)a[m]===void 0&&(a[m]=t[m])}return a}var tc=typeof reportError=="function"?reportError:function(t){if(typeof window=="object"&&typeof window.ErrorEvent=="function"){var r=new window.ErrorEvent("error",{bubbles:!0,cancelable:!0,message:typeof t=="object"&&t!==null&&typeof t.message=="string"?String(t.message):String(t),error:t});if(!window.dispatchEvent(r))return}else if(typeof process=="object"&&typeof process.emit=="function"){process.emit("uncaughtException",t);return}console.error(t)};function Ux(t){tc(t)}function Px(t){console.error(t)}function $x(t){tc(t)}function nc(t,r){try{var a=t.onUncaughtError;a(r.value,{componentStack:r.stack})}catch(c){setTimeout(function(){throw c})}}function Vx(t,r,a){try{var c=t.onCaughtError;c(a.value,{componentStack:a.stack,errorBoundary:r.tag===1?r.stateNode:null})}catch(m){setTimeout(function(){throw m})}}function Af(t,r,a){return a=Br(a),a.tag=3,a.payload={element:null},a.callback=function(){nc(t,r)},a}function qx(t){return t=Br(t),t.tag=3,t}function Yx(t,r,a,c){var m=a.type.getDerivedStateFromError;if(typeof m=="function"){var x=c.value;t.payload=function(){return m(x)},t.callback=function(){Vx(r,a,c)}}var C=a.stateNode;C!==null&&typeof C.componentDidCatch=="function"&&(t.callback=function(){Vx(r,a,c),typeof m!="function"&&(Zr===null?Zr=new Set([this]):Zr.add(this));var R=c.stack;this.componentDidCatch(c.value,{componentStack:R!==null?R:""})})}function 
F_(t,r,a,c,m){if(a.flags|=32768,c!==null&&typeof c=="object"&&typeof c.then=="function"){if(r=a.alternate,r!==null&&Va(r,a,m,!0),a=pn.current,a!==null){switch(a.tag){case 13:return zn===null?Jf():a.alternate===null&&pt===0&&(pt=3),a.flags&=-257,a.flags|=65536,a.lanes=m,c===rf?a.flags|=16384:(r=a.updateQueue,r===null?a.updateQueue=new Set([c]):r.add(c),tm(t,c,m)),!1;case 22:return a.flags|=65536,c===rf?a.flags|=16384:(r=a.updateQueue,r===null?(r={transitions:null,markerInstances:null,retryQueue:new Set([c])},a.updateQueue=r):(a=r.retryQueue,a===null?r.retryQueue=new Set([c]):a.add(c)),tm(t,c,m)),!1}throw Error(s(435,a.tag))}return tm(t,c,m),Jf(),!1}if(et)return r=pn.current,r!==null?((r.flags&65536)===0&&(r.flags|=256),r.flags|=65536,r.lanes=m,c!==Kd&&(t=Error(s(422),{cause:c}),$a(dn(t,a)))):(c!==Kd&&(r=Error(s(423),{cause:c}),$a(dn(r,a))),t=t.current.alternate,t.flags|=65536,m&=-m,t.lanes|=m,c=dn(c,a),m=Af(t.stateNode,c,m),af(t,m),pt!==4&&(pt=2)),!1;var x=Error(s(520),{cause:c});if(x=dn(x,a),ci===null?ci=[x]:ci.push(x),pt!==4&&(pt=2),r===null)return!0;c=dn(c,a),a=r;do{switch(a.tag){case 3:return a.flags|=65536,t=m&-m,a.lanes|=t,t=Af(a.stateNode,c,t),af(a,t),!1;case 1:if(r=a.type,x=a.stateNode,(a.flags&128)===0&&(typeof r.getDerivedStateFromError=="function"||x!==null&&typeof x.componentDidCatch=="function"&&(Zr===null||!Zr.has(x))))return a.flags|=65536,m&=-m,a.lanes|=m,m=qx(m),Yx(m,t,a,c),af(a,m),!1}a=a.return}while(a!==null);return!1}var Gx=Error(s(461)),kt=!1;function Rt(t,r,a,c){r.child=t===null?Lx(r,null,a,c):As(r,t.child,a,c)}function Xx(t,r,a,c,m){a=a.render;var x=r.ref;if("ref"in c){var C={};for(var R in c)R!=="ref"&&(C[R]=c[R])}else C=c;return Ro(r),c=ff(t,r,a,C,x,m),R=mf(),t!==null&&!kt?(hf(t,r,m),fr(t,r,m)):(et&&R&&Fd(r),r.flags|=1,Rt(t,r,c,m),r.child)}function Fx(t,r,a,c,m){if(t===null){var x=a.type;return typeof x=="function"&&!Yd(x)&&x.defaultProps===void 
0&&a.compare===null?(r.tag=15,r.type=x,Zx(t,r,x,c,m)):(t=zl(a.type,null,c,r,r.mode,m),t.ref=r.ref,t.return=r,r.child=t)}if(x=t.child,!If(t,m)){var C=x.memoizedProps;if(a=a.compare,a=a!==null?a:Ha,a(C,c)&&t.ref===r.ref)return fr(t,r,m)}return r.flags|=1,t=sr(x,c),t.ref=r.ref,t.return=r,r.child=t}function Zx(t,r,a,c,m){if(t!==null){var x=t.memoizedProps;if(Ha(x,c)&&t.ref===r.ref)if(kt=!1,r.pendingProps=c=x,If(t,m))(t.flags&131072)!==0&&(kt=!0);else return r.lanes=t.lanes,fr(t,r,m)}return Mf(t,r,a,c,m)}function Kx(t,r,a){var c=r.pendingProps,m=c.children,x=t!==null?t.memoizedState:null;if(c.mode==="hidden"){if((r.flags&128)!==0){if(c=x!==null?x.baseLanes|a:a,t!==null){for(m=r.child=t.child,x=0;m!==null;)x=x|m.lanes|m.childLanes,m=m.sibling;r.childLanes=x&~c}else r.childLanes=0,r.child=null;return Wx(t,r,c,a)}if((a&536870912)!==0)r.memoizedState={baseLanes:0,cachePool:null},t!==null&&Ul(r,x!==null?x.cachePool:null),x!==null?Zg(r,x):cf(),Ix(r);else return r.lanes=r.childLanes=536870912,Wx(t,r,x!==null?x.baseLanes|a:a,a)}else x!==null?(Ul(r,x.cachePool),Zg(r,x),Vr(),r.memoizedState=null):(t!==null&&Ul(r,null),cf(),Vr());return Rt(t,r,m,a),r.child}function Wx(t,r,a,c){var m=nf();return m=m===null?null:{parent:_t._currentValue,pool:m},r.memoizedState={baseLanes:a,cachePool:m},t!==null&&Ul(r,null),cf(),Ix(r),t!==null&&Va(t,r,c,!0),null}function rc(t,r){var a=r.ref;if(a===null)t!==null&&t.ref!==null&&(r.flags|=4194816);else{if(typeof a!="function"&&typeof a!="object")throw Error(s(284));(t===null||t.ref!==a)&&(r.flags|=4194816)}}function Mf(t,r,a,c,m){return Ro(r),a=ff(t,r,a,c,void 0,m),c=mf(),t!==null&&!kt?(hf(t,r,m),fr(t,r,m)):(et&&c&&Fd(r),r.flags|=1,Rt(t,r,a,m),r.child)}function Qx(t,r,a,c,m,x){return Ro(r),r.updateQueue=null,a=Wg(r,c,a,m),Kg(t),c=mf(),t!==null&&!kt?(hf(t,r,x),fr(t,r,x)):(et&&c&&Fd(r),r.flags|=1,Rt(t,r,a,x),r.child)}function Jx(t,r,a,c,m){if(Ro(r),r.stateNode===null){var x=ys,C=a.contextType;typeof C=="object"&&C!==null&&(x=Ht(C)),x=new 
a(c,x),r.memoizedState=x.state!==null&&x.state!==void 0?x.state:null,x.updater=kf,r.stateNode=x,x._reactInternals=r,x=r.stateNode,x.props=c,x.state=r.memoizedState,x.refs={},of(r),C=a.contextType,x.context=typeof C=="object"&&C!==null?Ht(C):ys,x.state=r.memoizedState,C=a.getDerivedStateFromProps,typeof C=="function"&&(Cf(r,a,C,c),x.state=r.memoizedState),typeof a.getDerivedStateFromProps=="function"||typeof x.getSnapshotBeforeUpdate=="function"||typeof x.UNSAFE_componentWillMount!="function"&&typeof x.componentWillMount!="function"||(C=x.state,typeof x.componentWillMount=="function"&&x.componentWillMount(),typeof x.UNSAFE_componentWillMount=="function"&&x.UNSAFE_componentWillMount(),C!==x.state&&kf.enqueueReplaceState(x,x.state,null),Ka(r,c,x,m),Za(),x.state=r.memoizedState),typeof x.componentDidMount=="function"&&(r.flags|=4194308),c=!0}else if(t===null){x=r.stateNode;var R=r.memoizedProps,$=zo(a,R);x.props=$;var J=x.context,ie=a.contextType;C=ys,typeof ie=="object"&&ie!==null&&(C=Ht(ie));var me=a.getDerivedStateFromProps;ie=typeof me=="function"||typeof x.getSnapshotBeforeUpdate=="function",R=r.pendingProps!==R,ie||typeof x.UNSAFE_componentWillReceiveProps!="function"&&typeof x.componentWillReceiveProps!="function"||(R||J!==C)&&Bx(r,x,c,C),Hr=!1;var ne=r.memoizedState;x.state=ne,Ka(r,c,x,m),Za(),J=r.memoizedState,R||ne!==J||Hr?(typeof me=="function"&&(Cf(r,a,me,c),J=r.memoizedState),($=Hr||Hx(r,a,$,c,ne,J,C))?(ie||typeof x.UNSAFE_componentWillMount!="function"&&typeof x.componentWillMount!="function"||(typeof x.componentWillMount=="function"&&x.componentWillMount(),typeof x.UNSAFE_componentWillMount=="function"&&x.UNSAFE_componentWillMount()),typeof x.componentDidMount=="function"&&(r.flags|=4194308)):(typeof x.componentDidMount=="function"&&(r.flags|=4194308),r.memoizedProps=c,r.memoizedState=J),x.props=c,x.state=J,x.context=C,c=$):(typeof 
x.componentDidMount=="function"&&(r.flags|=4194308),c=!1)}else{x=r.stateNode,sf(t,r),C=r.memoizedProps,ie=zo(a,C),x.props=ie,me=r.pendingProps,ne=x.context,J=a.contextType,$=ys,typeof J=="object"&&J!==null&&($=Ht(J)),R=a.getDerivedStateFromProps,(J=typeof R=="function"||typeof x.getSnapshotBeforeUpdate=="function")||typeof x.UNSAFE_componentWillReceiveProps!="function"&&typeof x.componentWillReceiveProps!="function"||(C!==me||ne!==$)&&Bx(r,x,c,$),Hr=!1,ne=r.memoizedState,x.state=ne,Ka(r,c,x,m),Za();var oe=r.memoizedState;C!==me||ne!==oe||Hr||t!==null&&t.dependencies!==null&&Hl(t.dependencies)?(typeof R=="function"&&(Cf(r,a,R,c),oe=r.memoizedState),(ie=Hr||Hx(r,a,ie,c,ne,oe,$)||t!==null&&t.dependencies!==null&&Hl(t.dependencies))?(J||typeof x.UNSAFE_componentWillUpdate!="function"&&typeof x.componentWillUpdate!="function"||(typeof x.componentWillUpdate=="function"&&x.componentWillUpdate(c,oe,$),typeof x.UNSAFE_componentWillUpdate=="function"&&x.UNSAFE_componentWillUpdate(c,oe,$)),typeof x.componentDidUpdate=="function"&&(r.flags|=4),typeof x.getSnapshotBeforeUpdate=="function"&&(r.flags|=1024)):(typeof x.componentDidUpdate!="function"||C===t.memoizedProps&&ne===t.memoizedState||(r.flags|=4),typeof x.getSnapshotBeforeUpdate!="function"||C===t.memoizedProps&&ne===t.memoizedState||(r.flags|=1024),r.memoizedProps=c,r.memoizedState=oe),x.props=c,x.state=oe,x.context=$,c=ie):(typeof x.componentDidUpdate!="function"||C===t.memoizedProps&&ne===t.memoizedState||(r.flags|=4),typeof x.getSnapshotBeforeUpdate!="function"||C===t.memoizedProps&&ne===t.memoizedState||(r.flags|=1024),c=!1)}return x=c,rc(t,r),c=(r.flags&128)!==0,x||c?(x=r.stateNode,a=c&&typeof a.getDerivedStateFromError!="function"?null:x.render(),r.flags|=1,t!==null&&c?(r.child=As(r,t.child,null,m),r.child=As(r,null,a,m)):Rt(t,r,a,m),r.memoizedState=x.state,t=r.child):t=fr(t,r,m),t}function e0(t,r,a,c){return Pa(),r.flags|=256,Rt(t,r,a,c),r.child}var 
Tf={dehydrated:null,treeContext:null,retryLane:0,hydrationErrors:null};function Rf(t){return{baseLanes:t,cachePool:Pg()}}function Df(t,r,a){return t=t!==null?t.childLanes&~a:0,r&&(t|=gn),t}function t0(t,r,a){var c=r.pendingProps,m=!1,x=(r.flags&128)!==0,C;if((C=x)||(C=t!==null&&t.memoizedState===null?!1:(Et.current&2)!==0),C&&(m=!0,r.flags&=-129),C=(r.flags&32)!==0,r.flags&=-33,t===null){if(et){if(m?$r(r):Vr(),et){var R=ht,$;if($=R){e:{for($=R,R=On;$.nodeType!==8;){if(!R){R=null;break e}if($=Nn($.nextSibling),$===null){R=null;break e}}R=$}R!==null?(r.memoizedState={dehydrated:R,treeContext:Co!==null?{id:ar,overflow:ir}:null,retryLane:536870912,hydrationErrors:null},$=Qt(18,null,null,0),$.stateNode=R,$.return=r,r.child=$,Ut=r,ht=null,$=!0):$=!1}$||Mo(r)}if(R=r.memoizedState,R!==null&&(R=R.dehydrated,R!==null))return gm(R)?r.lanes=32:r.lanes=536870912,null;dr(r)}return R=c.children,c=c.fallback,m?(Vr(),m=r.mode,R=oc({mode:"hidden",children:R},m),c=jo(c,m,a,null),R.return=r,c.return=r,R.sibling=c,r.child=R,m=r.child,m.memoizedState=Rf(a),m.childLanes=Df(t,C,a),r.memoizedState=Tf,c):($r(r),Of(r,R))}if($=t.memoizedState,$!==null&&(R=$.dehydrated,R!==null)){if(x)r.flags&256?($r(r),r.flags&=-257,r=zf(t,r,a)):r.memoizedState!==null?(Vr(),r.child=t.child,r.flags|=128,r=null):(Vr(),m=c.fallback,R=r.mode,c=oc({mode:"visible",children:c.children},R),m=jo(m,R,a,null),m.flags|=2,c.return=r,m.return=r,c.sibling=m,r.child=c,As(r,t.child,null,a),c=r.child,c.memoizedState=Rf(a),c.childLanes=Df(t,C,a),r.memoizedState=Tf,r=m);else if($r(r),gm(R)){if(C=R.nextSibling&&R.nextSibling.dataset,C)var J=C.dgst;C=J,c=Error(s(419)),c.stack="",c.digest=C,$a({value:c,source:null,stack:null}),r=zf(t,r,a)}else if(kt||Va(t,r,a,!1),C=(a&t.childLanes)!==0,kt||C){if(C=lt,C!==null&&(c=a&-a,c=(c&42)!==0?1:wa(c),c=(c&(C.suspendedLanes|a))!==0?0:c,c!==0&&c!==$.retryLane))throw $.retryLane=c,vs(t,c),rn(C,t,c),Gx;R.data==="$?"||Jf(),r=zf(t,r,a)}else 
R.data==="$?"?(r.flags|=192,r.child=t.child,r=null):(t=$.treeContext,ht=Nn(R.nextSibling),Ut=r,et=!0,Ao=null,On=!1,t!==null&&(mn[hn++]=ar,mn[hn++]=ir,mn[hn++]=Co,ar=t.id,ir=t.overflow,Co=r),r=Of(r,c.children),r.flags|=4096);return r}return m?(Vr(),m=c.fallback,R=r.mode,$=t.child,J=$.sibling,c=sr($,{mode:"hidden",children:c.children}),c.subtreeFlags=$.subtreeFlags&65011712,J!==null?m=sr(J,m):(m=jo(m,R,a,null),m.flags|=2),m.return=r,c.return=r,c.sibling=m,r.child=c,c=m,m=r.child,R=t.child.memoizedState,R===null?R=Rf(a):($=R.cachePool,$!==null?(J=_t._currentValue,$=$.parent!==J?{parent:J,pool:J}:$):$=Pg(),R={baseLanes:R.baseLanes|a,cachePool:$}),m.memoizedState=R,m.childLanes=Df(t,C,a),r.memoizedState=Tf,c):($r(r),a=t.child,t=a.sibling,a=sr(a,{mode:"visible",children:c.children}),a.return=r,a.sibling=null,t!==null&&(C=r.deletions,C===null?(r.deletions=[t],r.flags|=16):C.push(t)),r.child=a,r.memoizedState=null,a)}function Of(t,r){return r=oc({mode:"visible",children:r},t.mode),r.return=t,t.child=r}function oc(t,r){return t=Qt(22,t,null,r),t.lanes=0,t.stateNode={_visibility:1,_pendingMarkers:null,_retryCache:null,_transitions:null},t}function zf(t,r,a){return As(r,t.child,null,a),t=Of(r,r.pendingProps.children),t.flags|=2,r.memoizedState=null,t}function n0(t,r,a){t.lanes|=r;var c=t.alternate;c!==null&&(c.lanes|=r),Qd(t.return,r,a)}function Lf(t,r,a,c,m){var x=t.memoizedState;x===null?t.memoizedState={isBackwards:r,rendering:null,renderingStartTime:0,last:c,tail:a,tailMode:m}:(x.isBackwards=r,x.rendering=null,x.renderingStartTime=0,x.last=c,x.tail=a,x.tailMode=m)}function r0(t,r,a){var c=r.pendingProps,m=c.revealOrder,x=c.tail;if(Rt(t,r,c.children,a),c=Et.current,(c&2)!==0)c=c&1|2,r.flags|=128;else{if(t!==null&&(t.flags&128)!==0)e:for(t=r.child;t!==null;){if(t.tag===13)t.memoizedState!==null&&n0(t,a,r);else if(t.tag===19)n0(t,a,r);else if(t.child!==null){t.child.return=t,t=t.child;continue}if(t===r)break e;for(;t.sibling===null;){if(t.return===null||t.return===r)break 
e;t=t.return}t.sibling.return=t.return,t=t.sibling}c&=1}switch(ee(Et,c),m){case"forwards":for(a=r.child,m=null;a!==null;)t=a.alternate,t!==null&&ec(t)===null&&(m=a),a=a.sibling;a=m,a===null?(m=r.child,r.child=null):(m=a.sibling,a.sibling=null),Lf(r,!1,m,a,x);break;case"backwards":for(a=null,m=r.child,r.child=null;m!==null;){if(t=m.alternate,t!==null&&ec(t)===null){r.child=m;break}t=m.sibling,m.sibling=a,a=m,m=t}Lf(r,!0,a,null,x);break;case"together":Lf(r,!1,null,null,void 0);break;default:r.memoizedState=null}return r.child}function fr(t,r,a){if(t!==null&&(r.dependencies=t.dependencies),Fr|=r.lanes,(a&r.childLanes)===0)if(t!==null){if(Va(t,r,a,!1),(a&r.childLanes)===0)return null}else return null;if(t!==null&&r.child!==t.child)throw Error(s(153));if(r.child!==null){for(t=r.child,a=sr(t,t.pendingProps),r.child=a,a.return=r;t.sibling!==null;)t=t.sibling,a=a.sibling=sr(t,t.pendingProps),a.return=r;a.sibling=null}return r.child}function If(t,r){return(t.lanes&r)!==0?!0:(t=t.dependencies,!!(t!==null&&Hl(t)))}function Z_(t,r,a){switch(r.tag){case 3:ae(r,r.stateNode.containerInfo),Ir(r,_t,t.memoizedState.cache),Pa();break;case 27:case 5:le(r);break;case 4:ae(r,r.stateNode.containerInfo);break;case 10:Ir(r,r.type,r.memoizedProps.value);break;case 13:var c=r.memoizedState;if(c!==null)return c.dehydrated!==null?($r(r),r.flags|=128,null):(a&r.child.childLanes)!==0?t0(t,r,a):($r(r),t=fr(t,r,a),t!==null?t.sibling:null);$r(r);break;case 19:var m=(t.flags&128)!==0;if(c=(a&r.childLanes)!==0,c||(Va(t,r,a,!1),c=(a&r.childLanes)!==0),m){if(c)return r0(t,r,a);r.flags|=128}if(m=r.memoizedState,m!==null&&(m.rendering=null,m.tail=null,m.lastEffect=null),ee(Et,Et.current),c)break;return null;case 22:case 23:return r.lanes=0,Kx(t,r,a);case 24:Ir(r,_t,t.memoizedState.cache)}return fr(t,r,a)}function o0(t,r,a){if(t!==null)if(t.memoizedProps!==r.pendingProps)kt=!0;else{if(!If(t,a)&&(r.flags&128)===0)return kt=!1,Z_(t,r,a);kt=(t.flags&131072)!==0}else 
kt=!1,et&&(r.flags&1048576)!==0&&Og(r,Il,r.index);switch(r.lanes=0,r.tag){case 16:e:{t=r.pendingProps;var c=r.elementType,m=c._init;if(c=m(c._payload),r.type=c,typeof c=="function")Yd(c)?(t=zo(c,t),r.tag=1,r=Jx(null,r,c,t,a)):(r.tag=0,r=Mf(null,r,c,t,a));else{if(c!=null){if(m=c.$$typeof,m===A){r.tag=11,r=Xx(null,r,c,t,a);break e}else if(m===H){r.tag=14,r=Fx(null,r,c,t,a);break e}}throw r=I(c)||c,Error(s(306,r,""))}}return r;case 0:return Mf(t,r,r.type,r.pendingProps,a);case 1:return c=r.type,m=zo(c,r.pendingProps),Jx(t,r,c,m,a);case 3:e:{if(ae(r,r.stateNode.containerInfo),t===null)throw Error(s(387));c=r.pendingProps;var x=r.memoizedState;m=x.element,sf(t,r),Ka(r,c,null,a);var C=r.memoizedState;if(c=C.cache,Ir(r,_t,c),c!==x.cache&&Jd(r,[_t],a,!0),Za(),c=C.element,x.isDehydrated)if(x={element:c,isDehydrated:!1,cache:C.cache},r.updateQueue.baseState=x,r.memoizedState=x,r.flags&256){r=e0(t,r,c,a);break e}else if(c!==m){m=dn(Error(s(424)),r),$a(m),r=e0(t,r,c,a);break e}else{switch(t=r.stateNode.containerInfo,t.nodeType){case 9:t=t.body;break;default:t=t.nodeName==="HTML"?t.ownerDocument.body:t}for(ht=Nn(t.firstChild),Ut=r,et=!0,Ao=null,On=!0,a=Lx(r,null,c,a),r.child=a;a;)a.flags=a.flags&-3|4096,a=a.sibling}else{if(Pa(),c===m){r=fr(t,r,a);break e}Rt(t,r,c,a)}r=r.child}return r;case 26:return rc(t,r),t===null?(a=lv(r.type,null,r.pendingProps,null))?r.memoizedState=a:et||(a=r.type,t=r.pendingProps,c=vc(fe.current).createElement(a),c[Ct]=r,c[It]=t,Ot(c,a,t),vt(c),r.stateNode=c):r.memoizedState=lv(r.type,t.memoizedProps,r.pendingProps,t.memoizedState),null;case 27:return le(r),t===null&&et&&(c=r.stateNode=sv(r.type,r.pendingProps,fe.current),Ut=r,On=!0,m=ht,Qr(r.type)?(xm=m,ht=Nn(c.firstChild)):ht=m),Rt(t,r,r.pendingProps.children,a),rc(t,r),t===null&&(r.flags|=4194304),r.child;case 5:return 
t===null&&et&&((m=c=ht)&&(c=NE(c,r.type,r.pendingProps,On),c!==null?(r.stateNode=c,Ut=r,ht=Nn(c.firstChild),On=!1,m=!0):m=!1),m||Mo(r)),le(r),m=r.type,x=r.pendingProps,C=t!==null?t.memoizedProps:null,c=x.children,mm(m,x)?c=null:C!==null&&mm(m,C)&&(r.flags|=32),r.memoizedState!==null&&(m=ff(t,r,P_,null,null,a),vi._currentValue=m),rc(t,r),Rt(t,r,c,a),r.child;case 6:return t===null&&et&&((t=a=ht)&&(a=_E(a,r.pendingProps,On),a!==null?(r.stateNode=a,Ut=r,ht=null,t=!0):t=!1),t||Mo(r)),null;case 13:return t0(t,r,a);case 4:return ae(r,r.stateNode.containerInfo),c=r.pendingProps,t===null?r.child=As(r,null,c,a):Rt(t,r,c,a),r.child;case 11:return Xx(t,r,r.type,r.pendingProps,a);case 7:return Rt(t,r,r.pendingProps,a),r.child;case 8:return Rt(t,r,r.pendingProps.children,a),r.child;case 12:return Rt(t,r,r.pendingProps.children,a),r.child;case 10:return c=r.pendingProps,Ir(r,r.type,c.value),Rt(t,r,c.children,a),r.child;case 9:return m=r.type._context,c=r.pendingProps.children,Ro(r),m=Ht(m),c=c(m),r.flags|=1,Rt(t,r,c,a),r.child;case 14:return Fx(t,r,r.type,r.pendingProps,a);case 15:return Zx(t,r,r.type,r.pendingProps,a);case 19:return r0(t,r,a);case 31:return c=r.pendingProps,a=r.mode,c={mode:c.mode,children:c.children},t===null?(a=oc(c,a),a.ref=r.ref,r.child=a,a.return=r,r=a):(a=sr(t.child,c),a.ref=r.ref,r.child=a,a.return=r,r=a),r;case 22:return Kx(t,r,a);case 24:return Ro(r),c=Ht(_t),t===null?(m=nf(),m===null&&(m=lt,x=ef(),m.pooledCache=x,x.refCount++,x!==null&&(m.pooledCacheLanes|=a),m=x),r.memoizedState={parent:c,cache:m},of(r),Ir(r,_t,m)):((t.lanes&a)!==0&&(sf(t,r),Ka(r,null,null,a),Za()),m=t.memoizedState,x=r.memoizedState,m.parent!==c?(m={parent:c,cache:c},r.memoizedState=m,r.lanes===0&&(r.memoizedState=r.updateQueue.baseState=m),Ir(r,_t,c)):(c=x.cache,Ir(r,_t,c),c!==m.cache&&Jd(r,[_t],a,!0))),Rt(t,r,r.pendingProps.children,a),r.child;case 29:throw r.pendingProps}throw Error(s(156,r.tag))}function mr(t){t.flags|=4}function 
s0(t,r){if(r.type!=="stylesheet"||(r.state.loading&4)!==0)t.flags&=-16777217;else if(t.flags|=16777216,!mv(r)){if(r=pn.current,r!==null&&((Ke&4194048)===Ke?zn!==null:(Ke&62914560)!==Ke&&(Ke&536870912)===0||r!==zn))throw Xa=rf,$g;t.flags|=8192}}function sc(t,r){r!==null&&(t.flags|=4),t.flags&16384&&(r=t.tag!==22?hl():536870912,t.lanes|=r,Ds|=r)}function ri(t,r){if(!et)switch(t.tailMode){case"hidden":r=t.tail;for(var a=null;r!==null;)r.alternate!==null&&(a=r),r=r.sibling;a===null?t.tail=null:a.sibling=null;break;case"collapsed":a=t.tail;for(var c=null;a!==null;)a.alternate!==null&&(c=a),a=a.sibling;c===null?r||t.tail===null?t.tail=null:t.tail.sibling=null:c.sibling=null}}function ft(t){var r=t.alternate!==null&&t.alternate.child===t.child,a=0,c=0;if(r)for(var m=t.child;m!==null;)a|=m.lanes|m.childLanes,c|=m.subtreeFlags&65011712,c|=m.flags&65011712,m.return=t,m=m.sibling;else for(m=t.child;m!==null;)a|=m.lanes|m.childLanes,c|=m.subtreeFlags,c|=m.flags,m.return=t,m=m.sibling;return t.subtreeFlags|=c,t.childLanes=a,r}function K_(t,r,a){var c=r.pendingProps;switch(Zd(r),r.tag){case 31:case 16:case 15:case 0:case 11:case 7:case 8:case 12:case 9:case 14:return ft(r),null;case 1:return ft(r),null;case 3:return a=r.stateNode,c=null,t!==null&&(c=t.memoizedState.cache),r.memoizedState.cache!==c&&(r.flags|=2048),cr(_t),xe(),a.pendingContext&&(a.context=a.pendingContext,a.pendingContext=null),(t===null||t.child===null)&&(Ua(r)?mr(r):t===null||t.memoizedState.isDehydrated&&(r.flags&256)===0||(r.flags|=1024,Ig())),ft(r),null;case 26:return a=r.memoizedState,t===null?(mr(r),a!==null?(ft(r),s0(r,a)):(ft(r),r.flags&=-16777217)):a?a!==t.memoizedState?(mr(r),ft(r),s0(r,a)):(ft(r),r.flags&=-16777217):(t.memoizedProps!==c&&mr(r),ft(r),r.flags&=-16777217),null;case 27:ce(r),a=fe.current;var m=r.type;if(t!==null&&r.stateNode!=null)t.memoizedProps!==c&&mr(r);else{if(!c){if(r.stateNode===null)throw Error(s(166));return 
ft(r),null}t=se.current,Ua(r)?zg(r):(t=sv(m,c,a),r.stateNode=t,mr(r))}return ft(r),null;case 5:if(ce(r),a=r.type,t!==null&&r.stateNode!=null)t.memoizedProps!==c&&mr(r);else{if(!c){if(r.stateNode===null)throw Error(s(166));return ft(r),null}if(t=se.current,Ua(r))zg(r);else{switch(m=vc(fe.current),t){case 1:t=m.createElementNS("http://www.w3.org/2000/svg",a);break;case 2:t=m.createElementNS("http://www.w3.org/1998/Math/MathML",a);break;default:switch(a){case"svg":t=m.createElementNS("http://www.w3.org/2000/svg",a);break;case"math":t=m.createElementNS("http://www.w3.org/1998/Math/MathML",a);break;case"script":t=m.createElement("div"),t.innerHTML="