From 3271dbff05b98e2a1b4b9742b317dfee1eab433a Mon Sep 17 00:00:00 2001
From: Shyam Namboodiripad
Date: Mon, 22 Sep 2025 14:14:41 -0700
Subject: [PATCH] Increase output token limit for EquivalenceEvaluator

EquivalenceEvaluator was specifying MaxOutputTokens = 1 because its prompt
instructs the LLM to produce a response (score) that is a single digit
(between 1 and 5). While a single output token is sufficient for most models
(including the OpenAI models that were used to test the prompt), some models
need more than one token to emit a single digit. For example, Claude appears
to require two tokens for this (see
https://github.com/dotnet/extensions/issues/6814).

This PR bumps MaxOutputTokens to 5 to address the above issue.

Fixes #6814
---
 .../EquivalenceEvaluator.cs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Libraries/Microsoft.Extensions.AI.Evaluation.Quality/EquivalenceEvaluator.cs b/src/Libraries/Microsoft.Extensions.AI.Evaluation.Quality/EquivalenceEvaluator.cs
index 9d820aeffc0..e166f573573 100644
--- a/src/Libraries/Microsoft.Extensions.AI.Evaluation.Quality/EquivalenceEvaluator.cs
+++ b/src/Libraries/Microsoft.Extensions.AI.Evaluation.Quality/EquivalenceEvaluator.cs
@@ -52,7 +52,7 @@ public sealed class EquivalenceEvaluator : IEvaluator
         new ChatOptions
         {
             Temperature = 0.0f,
-            MaxOutputTokens = 1,
+            MaxOutputTokens = 5, // See https://github.com/dotnet/extensions/issues/6814.
             TopP = 1.0f,
             PresencePenalty = 0.0f,
             FrequencyPenalty = 0.0f,
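
For context, here is a minimal sketch (not part of the patch) of the resulting
configuration as a standalone snippet. The ChatOptions property names are taken
directly from the diff above; the variable name `options` is illustrative:

    using Microsoft.Extensions.AI;

    // Deterministic options used when asking the LLM for an equivalence score.
    // MaxOutputTokens = 5 leaves headroom for models (e.g. Claude) whose
    // tokenizers emit a single-digit score as more than one token.
    var options = new ChatOptions
    {
        Temperature = 0.0f,
        MaxOutputTokens = 5, // See https://github.com/dotnet/extensions/issues/6814.
        TopP = 1.0f,
        PresencePenalty = 0.0f,
        FrequencyPenalty = 0.0f,
    };

A small buffer (5 rather than 2) is a reasonable choice here: it tolerates
tokenizer differences across models while still keeping the response tightly
bounded to the expected single-digit score.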