From cb55683677b14cc8e48301f4b576d5bf222d018a Mon Sep 17 00:00:00 2001 From: Martin Tomka Date: Mon, 16 Oct 2023 11:08:29 +0200 Subject: [PATCH 1/3] [Docs] Fallback after retries --- docs/migration-v8.md | 2 +- docs/strategies/circuit-breaker.md | 4 +- docs/strategies/fallback.md | 45 +++++++++++++++++- docs/strategies/retry.md | 74 +++++++++++++++--------------- src/Snippets/Docs/Fallback.cs | 36 +++++++++++++++ 5 files changed, 121 insertions(+), 40 deletions(-) diff --git a/docs/migration-v8.md b/docs/migration-v8.md index d8aa72f86e2..175d332de9d 100644 --- a/docs/migration-v8.md +++ b/docs/migration-v8.md @@ -186,7 +186,7 @@ ResiliencePipeline pipeline = new ResiliencePipelineBuilder() > [!IMPORTANT] -> In v7, the policy wrap ordering is different; the policy added first was executed last (FILO). In v8, the execution order matches the order in which they were added (FIFO). +> In v7, the policy wrap ordering is different; the policy added first was executed last (FILO). In v8, the execution order matches the order in which they were added (FIFO). See [fallback after retries](strategies/fallback.md#fallback-after-retries) for an example on how the strategies are executed. ## Migrating retry policies diff --git a/docs/strategies/circuit-breaker.md b/docs/strategies/circuit-breaker.md index 7e31bf926c5..cc01e099f00 100644 --- a/docs/strategies/circuit-breaker.md +++ b/docs/strategies/circuit-breaker.md @@ -97,7 +97,7 @@ await manualControl.CloseAsync(); - [Circuit Breaker Pattern by Microsoft](https://msdn.microsoft.com/en-us/library/dn589784.aspx) - [Original Circuit Breaking Article](https://web.archive.org/web/20160106203951/http://thatextramile.be/blog/2008/05/the-circuit-breaker) -## Patterns and anti-patterns +## Anti-patterns Over the years, many developers have used Polly in various ways. Some of these recurring patterns may not be ideal. This section highlights the recommended practices and those to avoid. 
@@ -263,7 +263,7 @@ circuitBreaker = new ResiliencePipelineBuilder() ✅ DO -The `CircuitBreakerStartegyOptions` currently do not support defining break durations dynamically. This may be re-evaluated in the future. For now, refer to the first example for a potential workaround. However, please use it with caution. +The `CircuitBreakerStrategyOptions` currently do not support defining break durations dynamically. This may be re-evaluated in the future. For now, refer to the first example for a potential workaround. However, please use it with caution. ### 3 - Wrapping each endpoint with a circuit breaker diff --git a/docs/strategies/fallback.md b/docs/strategies/fallback.md index f01f46f1d52..ee9896be425 100644 --- a/docs/strategies/fallback.md +++ b/docs/strategies/fallback.md @@ -65,7 +65,50 @@ new ResiliencePipelineBuilder() | `FallbackAction` | `Null`, **Required** | Fallback action to be executed. | | `OnFallback` | `null` | Event that is raised when fallback happens. | -## Patterns and anti-patterns +## Patterns + +### Fallback after retries + +When designing resilient systems, a common pattern is to use a fallback after multiple retry attempts. This approach is especially relevant when a fallback strategy can provide a sensible default value. 
+ + +```cs +var pipeline = new ResiliencePipelineBuilder() + .AddFallback(new() + { + ShouldHandle = new PredicateBuilder() + .Handle() + .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError), + FallbackAction = args => + { + // Resolve the fallback response + HttpResponseMessage fallbackResponse = ResolveFallbackResponse(args.Outcome); + + return Outcome.FromResultAsValueTask(fallbackResponse); + } + }) + .AddRetry(new() + { + ShouldHandle = new PredicateBuilder() + .Handle() + .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError), + MaxRetryAttempts = 3, + }) + .Build(); + +// Execution for demonstration purposes that always returns an invalid result +pipeline.Execute(() => new HttpResponseMessage(HttpStatusCode.InternalServerError)); +``` + + +Here's a breakdown of the behavior when the callback produces either an `HttpStatusCode.InternalServerError` or a `HttpRequestException`: + +- The fallback strategy initiates by executing the provided callback, then immediately passes the execution to the retry strategy. +- The retry strategy starts execution, makes 3 retry attempts and yields the outcome that represents an error. +- The fallback strategy resumes execution, assesses the outcome generated by the callback, and if necessary, supplies the fallback value. +- The fallback strategy completes its execution. + +## Anti-patterns Over the years, many developers have used Polly in various ways. Some of these recurring patterns may not be ideal. This section highlights the recommended practices and ones to avoid. diff --git a/docs/strategies/retry.md b/docs/strategies/retry.md index ddaa5fabafb..78993c5d83b 100644 --- a/docs/strategies/retry.md +++ b/docs/strategies/retry.md @@ -104,7 +104,44 @@ new ResiliencePipelineBuilder().AddRetry(new RetryStrategyOptions | `OnRetry` | `null` | Action executed when retry occurs. | | `MaxDelay` | `null` | Caps the calculated retry delay to a specified maximum duration. 
| -## Patterns and anti-patterns +## Patterns + +### Limiting the maximum delay + +In some cases, you might want to set a limit on the calculated delay. This is beneficial when multiple retries are anticipated, and you wish to prevent excessive wait times between these retries. + +Consider the following example of a long-running background job: + + +```cs +ResiliencePipeline pipeline = new ResiliencePipelineBuilder() + .AddRetry(new() + { + Delay = TimeSpan.FromSeconds(2), + MaxRetryAttempts = int.MaxValue, + + // Initially, we aim for an exponential backoff, but after a certain number of retries, we set a maximum delay of 15 minutes. + MaxDelay = TimeSpan.FromMinutes(15), + UseJitter = true + }) + .Build(); + +// Background processing +while (!cancellationToken.IsCancellationRequested) +{ + await pipeline.ExecuteAsync(async token => + { + // In the event of a prolonged service outage, we can afford to wait for a successful retry since this is a background task. + await SynchronizeDataAsync(token); + }, + cancellationToken); + + await Task.Delay(TimeSpan.FromMinutes(30)); // The sync runs every 30 minutes. +} +``` + + +## Anti-patterns Over the years, many developers have used Polly in various ways. Some of these recurring patterns may not be ideal. This section highlights the recommended practices and those to avoid. @@ -480,38 +517,3 @@ var retry = new ResiliencePipelineBuilder() **Reasoning**: As previously mentioned, always use the designated area to define retry conditions. Re-frame your original exit conditions to specify when a retry should be initiated. - -### Limiting the maximum delay - -In some cases, you might want to set a limit on the calculated delay. This is beneficial when multiple retries are anticipated, and you wish to prevent excessive wait times between these retries. 
- -Consider the following example of a long-running background job: - - -```cs -ResiliencePipeline pipeline = new ResiliencePipelineBuilder() - .AddRetry(new() - { - Delay = TimeSpan.FromSeconds(2), - MaxRetryAttempts = int.MaxValue, - - // Initially, we aim for an exponential backoff, but after a certain number of retries, we set a maximum delay of 15 minutes. - MaxDelay = TimeSpan.FromMinutes(15), - UseJitter = true - }) - .Build(); - -// Background processing -while (!cancellationToken.IsCancellationRequested) -{ - await pipeline.ExecuteAsync(async token => - { - // In the event of a prolonged service outage, we can afford to wait for a successful retry since this is a background task. - await SynchronizeDataAsync(token); - }, - cancellationToken); - - await Task.Delay(TimeSpan.FromMinutes(30)); // The sync runs every 30 minutes. -} -``` - diff --git a/src/Snippets/Docs/Fallback.cs b/src/Snippets/Docs/Fallback.cs index 254db8127d7..62b65bbbc1d 100644 --- a/src/Snippets/Docs/Fallback.cs +++ b/src/Snippets/Docs/Fallback.cs @@ -232,4 +232,40 @@ private static ValueTask ActionCore() return await pipeline.ExecuteAsync(CallExternalSystem, CancellationToken.None); #endregion } + + public static void FallbackAfterRetries() + { + #region fallback-after-retries + + var pipeline = new ResiliencePipelineBuilder() + .AddFallback(new() + { + ShouldHandle = new PredicateBuilder() + .Handle() + .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError), + FallbackAction = args => + { + // Try to resolve the fallback response + HttpResponseMessage fallbackResponse = ResolveFallbackResponse(args.Outcome); + + return Outcome.FromResultAsValueTask(fallbackResponse); + + } + }) + .AddRetry(new() + { + ShouldHandle = new PredicateBuilder() + .Handle() + .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError), + MaxRetryAttempts = 3, + }) + .Build(); + + // Demonstrative execution that always produces invalid result + pipeline.Execute(() => new 
HttpResponseMessage(HttpStatusCode.InternalServerError)); + + #endregion + } + + private static HttpResponseMessage ResolveFallbackResponse(Outcome outcome) => new(); } From 4919ed3fe7b8c3f351b40279034c2cb7cd751903 Mon Sep 17 00:00:00 2001 From: Martin Tomka Date: Mon, 16 Oct 2023 12:47:36 +0200 Subject: [PATCH 2/3] PR comments --- docs/strategies/fallback.md | 23 ++++++++++++++--------- docs/strategies/retry.md | 1 + src/Snippets/Docs/Fallback.cs | 13 +++++++------ src/Snippets/Docs/Retry.cs | 1 + 4 files changed, 23 insertions(+), 15 deletions(-) diff --git a/docs/strategies/fallback.md b/docs/strategies/fallback.md index ee9896be425..d8faba567e1 100644 --- a/docs/strategies/fallback.md +++ b/docs/strategies/fallback.md @@ -69,19 +69,23 @@ new ResiliencePipelineBuilder() ### Fallback after retries -When designing resilient systems, a common pattern is to use a fallback after multiple retry attempts. This approach is especially relevant when a fallback strategy can provide a sensible default value. +When designing resilient systems, a common pattern is to use a fallback after multiple failed retry attempts. This approach is especially relevant when a fallback strategy can provide a sensible default value. 
 ```cs
+// Define common predicates re-used by both fallback and retries
+var predicateBuilder = new PredicateBuilder<HttpResponseMessage>()
+    .Handle<HttpRequestException>()
+    .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError);
+
 var pipeline = new ResiliencePipelineBuilder<HttpResponseMessage>()
     .AddFallback(new()
     {
-        ShouldHandle = new PredicateBuilder<HttpResponseMessage>()
-            .Handle<HttpRequestException>()
+        ShouldHandle = predicateBuilder
             .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError),
         FallbackAction = args =>
         {
-            // Resolve the fallback response
+            // Try to resolve the fallback response
             HttpResponseMessage fallbackResponse = ResolveFallbackResponse(args.Outcome);
 
             return Outcome.FromResultAsValueTask(fallbackResponse);
@@ -89,25 +93,26 @@ var pipeline = new ResiliencePipelineBuilder<HttpResponseMessage>()
     })
     .AddRetry(new()
     {
-        ShouldHandle = new PredicateBuilder<HttpResponseMessage>()
-            .Handle<HttpRequestException>()
-            .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError),
+        ShouldHandle = predicateBuilder,
         MaxRetryAttempts = 3,
     })
     .Build();
 
-// Execution for demonstration purposes that always returns an invalid result
+// Demonstrative execution that always produces invalid result
 pipeline.Execute(() => new HttpResponseMessage(HttpStatusCode.InternalServerError));
 ```
 <!-- endSnippet -->
 
-Here's a breakdown of the behavior when the callback produces either an `HttpStatusCode.InternalServerError` or a `HttpRequestException`:
+Here's a breakdown of the behavior when the callback produces either an `HttpStatusCode.InternalServerError` or an `HttpRequestException`:
 
 - The fallback strategy initiates by executing the provided callback, then immediately passes the execution to the retry strategy.
 - The retry strategy starts execution, makes 3 retry attempts and yields the outcome that represents an error.
 - The fallback strategy resumes execution, assesses the outcome generated by the callback, and if necessary, supplies the fallback value.
 - The fallback strategy completes its execution.
 
+> [!NOTE]
+> The preceding example also demonstrates how to re-use `PredicateBuilder<HttpResponseMessage>` across multiple strategies.
+
 ## Anti-patterns
 
 Over the years, many developers have used Polly in various ways. Some of these recurring patterns may not be ideal. This section highlights the recommended practices and ones to avoid.
diff --git a/docs/strategies/retry.md b/docs/strategies/retry.md
index 78993c5d83b..330bf91d682 100644
--- a/docs/strategies/retry.md
+++ b/docs/strategies/retry.md
@@ -119,6 +119,7 @@ ResiliencePipeline pipeline = new ResiliencePipelineBuilder()
     {
         Delay = TimeSpan.FromSeconds(2),
         MaxRetryAttempts = int.MaxValue,
+        BackoffType = DelayBackoffType.Exponential,
 
         // Initially, we aim for an exponential backoff, but after a certain number of retries, we set a maximum delay of 15 minutes.
         MaxDelay = TimeSpan.FromMinutes(15),
diff --git a/src/Snippets/Docs/Fallback.cs b/src/Snippets/Docs/Fallback.cs
index 62b65bbbc1d..df909c4f642 100644
--- a/src/Snippets/Docs/Fallback.cs
+++ b/src/Snippets/Docs/Fallback.cs
@@ -237,11 +237,15 @@ public static void FallbackAfterRetries()
     {
         #region fallback-after-retries
 
+        // Define common predicates re-used by both fallback and retries
+        var predicateBuilder = new PredicateBuilder<HttpResponseMessage>()
+            .Handle<HttpRequestException>()
+            .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError);
+
         var pipeline = new ResiliencePipelineBuilder<HttpResponseMessage>()
             .AddFallback(new()
             {
-                ShouldHandle = new PredicateBuilder<HttpResponseMessage>()
-                    .Handle<HttpRequestException>()
+                ShouldHandle = predicateBuilder
                     .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError),
                 FallbackAction = args =>
                 {
@@ -249,14 +253,11 @@ public static void FallbackAfterRetries()
                    HttpResponseMessage fallbackResponse = ResolveFallbackResponse(args.Outcome);
 
                     return Outcome.FromResultAsValueTask(fallbackResponse);
-
                 }
             })
             .AddRetry(new()
             {
-                ShouldHandle = new PredicateBuilder<HttpResponseMessage>()
-                    .Handle<HttpRequestException>()
-                    .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError),
+                ShouldHandle = predicateBuilder,
                 MaxRetryAttempts = 3,
}) .Build(); diff --git a/src/Snippets/Docs/Retry.cs b/src/Snippets/Docs/Retry.cs index cd74bb5da7d..fc3d4cecb65 100644 --- a/src/Snippets/Docs/Retry.cs +++ b/src/Snippets/Docs/Retry.cs @@ -120,6 +120,7 @@ public static async Task MaxDelay() { Delay = TimeSpan.FromSeconds(2), MaxRetryAttempts = int.MaxValue, + BackoffType = DelayBackoffType.Exponential, // Initially, we aim for an exponential backoff, but after a certain number of retries, we set a maximum delay of 15 minutes. MaxDelay = TimeSpan.FromMinutes(15), From c5e51ce9cbe97078b893044a13ab085a1923c637 Mon Sep 17 00:00:00 2001 From: Martin Tomka Date: Mon, 16 Oct 2023 13:03:37 +0200 Subject: [PATCH 3/3] cleanup --- docs/strategies/fallback.md | 3 +-- src/Snippets/Docs/Fallback.cs | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/strategies/fallback.md b/docs/strategies/fallback.md index d8faba567e1..11cbae5e0c9 100644 --- a/docs/strategies/fallback.md +++ b/docs/strategies/fallback.md @@ -81,8 +81,7 @@ var predicateBuilder = new PredicateBuilder() var pipeline = new ResiliencePipelineBuilder() .AddFallback(new() { - ShouldHandle = predicateBuilder - .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError), + ShouldHandle = predicateBuilder, FallbackAction = args => { // Try to resolve the fallback response diff --git a/src/Snippets/Docs/Fallback.cs b/src/Snippets/Docs/Fallback.cs index df909c4f642..74ead75f911 100644 --- a/src/Snippets/Docs/Fallback.cs +++ b/src/Snippets/Docs/Fallback.cs @@ -245,8 +245,7 @@ public static void FallbackAfterRetries() var pipeline = new ResiliencePipelineBuilder() .AddFallback(new() { - ShouldHandle = predicateBuilder - .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError), + ShouldHandle = predicateBuilder, FallbackAction = args => { // Try to resolve the fallback response