diff --git a/Directory.Packages.props b/Directory.Packages.props
index 5b845fd..d488ddb 100644
--- a/Directory.Packages.props
+++ b/Directory.Packages.props
@@ -4,6 +4,7 @@
+
@@ -100,4 +101,4 @@
-
+
\ No newline at end of file
diff --git a/PiBox.Plugins/Jobs/Hangfire/README.md b/PiBox.Plugins/Jobs/Hangfire/README.md
index c49f74c..9db6b76 100644
--- a/PiBox.Plugins/Jobs/Hangfire/README.md
+++ b/PiBox.Plugins/Jobs/Hangfire/README.md
@@ -32,8 +32,11 @@ hangfire:
User: postgres
Password: postgres
InMemory: true
- DashboardUser: awesome-user #if you don't set this, you can't access the hangfire dashboard
- DashboardPassword: awesome-pw #if you don't set this, you can't access the hangfire dashboard
+ enableJobsByFeatureManagementConfig: false
+ allowedDashboardHost: localhost # you need to set this configuration to be able to access the dashboard from the specified host
+
+featureManagement: # we can conveniently use the Microsoft feature management system to enable jobs based on configuration
+  hangfireTestJob: true # if you have enabled 'enableJobsByFeatureManagementConfig: true', you can configure here whether your jobs should run on execution or not; useful for multiple environments etc.
```
HangfireConfiguration.cs
@@ -47,9 +50,9 @@ public class HangfireConfiguration
public string? Database { get; set; }
public string? User { get; set; }
public string? Password { get; set; }
- public string? DashboardUser { get; set; }
- public string? DashboardPassword { get; set; }
public bool InMemory { get; set; }
+ public string AllowedDashboardHost { get; set; }
+ public bool EnableJobsByFeatureManagementConfig { get; set; }
public int? PollingIntervalInMs { get; set; }
public int? WorkerCount { get; set; }
public string ConnectionString => $"Host={Host};Port={Port};Database={Database};Username={User};Password={Password};";
@@ -151,10 +154,69 @@ BackgroundJob.Enqueue(x => x.Send("hangfire@example.com"));
BackgroundJob.Enqueue(() => Console.WriteLine("Hello, world!"));
```
-### Execution modes
+### Attributes
+#### UniquePerQueueAttribute
If you want the job only to be executed as one instance at any given point in time use the
+```csharp
+[UniquePerQueueAttribute("high")]
+```
+
+This ensures that there is only one job of the same type/name
+and method parameters in processing or queued at any given point
+
+#### JobCleanupExpirationTimeAttribute
+
+With this you can specify how many days the results of a job
+should be kept until it gets deleted
+
+```csharp
+[JobCleanupExpirationTimeAttribute(14)]
+```
+
+### Filters
+
+#### EnabledByFeatureFilter
+
+This filter works in conjunction with the [microsoft feature management system](https://github.com/microsoft/FeatureManagement-Dotnet).
+If you would like to be able to enable or disable the execution of your
+jobs based on configuration, this is the right tool for it.
+
+**Default Feature management with file based configuration**
+```yaml
+hangfire:
+ enableJobsByFeatureManagementConfig: true
+
+featureManagement:
+ hangfireTestJob: true
+ neverRunThisJob: false
+```
+
+This allows you to enable jobs based on configuration files.
+If you have enabled the setting
+
+```yaml
+enableJobsByFeatureManagementConfig: true
+```
+then you can configure here whether your jobs should run
+on execution or not, useful for multiple environments etc.
+
+If your service supports hot reloading of configuration files,
+you can enable/disable jobs at run time.
+
+**Feature management with the [pibox unleash plugin](https://sia-digital.gitbook.io/pibox/plugins/management/unleash)**
+
+This works in conjunction with the plugin PiBox.Plugins.Management.Unleash.
+This replaces the ability to set the features via files.
+Instead one can use the unleash api/service
+and use feature flags for enabling the jobs.
+Just make sure that the name of the job matches the name of the
+feature flag you are creating in unleash.
-UniquePerQueueAttribute
+The pibox unleash plugin then should do the rest of the heavy lifting.
-this ensures that there is only one job of the same type/name in processing or queued at any given point!
+Since the attribute resolves the feature before executing the job,
+changes to the configuration can be done at runtime with a maximal delay
+based on how often the pibox unleash plugin refreshes its cache.
+You can find more information in the documentation of the
+[pibox unleash plugin](https://sia-digital.gitbook.io/pibox/plugins/management/unleash).
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/EnabledByFeatureFilter.cs b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/EnabledByFeatureFilter.cs
new file mode 100644
index 0000000..e0b513a
--- /dev/null
+++ b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/EnabledByFeatureFilter.cs
@@ -0,0 +1,37 @@
+using Hangfire.Common;
+using Hangfire.Server;
+using Microsoft.Extensions.Logging;
+using Microsoft.FeatureManagement;
+
+namespace PiBox.Plugins.Jobs.Hangfire.Attributes
+{
+    /// <summary>
+    /// Decides whether to execute a job based on enabled features.
+    /// </summary>
+ internal class EnabledByFeatureFilter : JobFilterAttribute, IServerFilter
+ {
+ private readonly IFeatureManager _featureManager;
+ private readonly ILogger _logger;
+
+ public EnabledByFeatureFilter(IFeatureManager featureManager, ILogger logger)
+ {
+ _featureManager = featureManager;
+ _logger = logger;
+ Order = 0;
+ }
+
+ public void OnPerforming(PerformingContext context)
+ {
+ var jobName = context.BackgroundJob.Job.Type.Name;
+ if (_featureManager.IsEnabledAsync(jobName).Result) return;
+ _logger.LogWarning("Execution of job {JobName} was cancelled due to not enabled feature {FeatureName}",
+ jobName, jobName);
+ context.Canceled = true;
+ }
+
+ public void OnPerformed(PerformedContext context)
+ {
+ // do nothing
+ }
+ }
+}
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/JobCleanupExpirationTimeAttribute.cs b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/JobCleanupExpirationTimeAttribute.cs
new file mode 100644
index 0000000..a432061
--- /dev/null
+++ b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/JobCleanupExpirationTimeAttribute.cs
@@ -0,0 +1,27 @@
+using Hangfire.Common;
+using Hangfire.States;
+using Hangfire.Storage;
+
+namespace PiBox.Plugins.Jobs.Hangfire.Attributes
+{
+ public class JobCleanupExpirationTimeAttribute : JobFilterAttribute, IApplyStateFilter
+ {
+ private readonly int _cleanUpAfterDays;
+
+ public JobCleanupExpirationTimeAttribute(int cleanUpAfterDays)
+ {
+ _cleanUpAfterDays = cleanUpAfterDays;
+ Order = 100;
+ }
+
+ public void OnStateApplied(ApplyStateContext context, IWriteOnlyTransaction transaction)
+ {
+ context.JobExpirationTimeout = TimeSpan.FromDays(_cleanUpAfterDays);
+ }
+
+ public void OnStateUnapplied(ApplyStateContext context, IWriteOnlyTransaction transaction)
+ {
+ // nothing to do here
+ }
+ }
+}
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/RecurringJobAttribute.cs b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/RecurringJobAttribute.cs
similarity index 84%
rename from PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/RecurringJobAttribute.cs
rename to PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/RecurringJobAttribute.cs
index 16b372d..76abbba 100644
--- a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/RecurringJobAttribute.cs
+++ b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/RecurringJobAttribute.cs
@@ -1,4 +1,4 @@
-namespace PiBox.Plugins.Jobs.Hangfire
+namespace PiBox.Plugins.Jobs.Hangfire.Attributes
{
[AttributeUsage(AttributeTargets.Class)]
public class RecurringJobAttribute : Attribute
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/UniquePerQueueAttribute.cs b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/UniquePerQueueAttribute.cs
new file mode 100644
index 0000000..28bdf36
--- /dev/null
+++ b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/Attributes/UniquePerQueueAttribute.cs
@@ -0,0 +1,95 @@
+using System.Text.Json;
+using Hangfire;
+using Hangfire.Common;
+using Hangfire.States;
+using Hangfire.Storage;
+using Hangfire.Storage.Monitoring;
+
+namespace PiBox.Plugins.Jobs.Hangfire.Attributes
+{
+ public class UniquePerQueueAttribute : JobFilterAttribute, IElectStateFilter
+ {
+ public string Queue { get; set; }
+
+ public bool CheckScheduledJobs { get; set; }
+
+ public bool CheckRunningJobs { get; set; }
+
+ public UniquePerQueueAttribute(string queue)
+ {
+ Queue = queue;
+ Order = 10;
+ }
+
+ private IEnumerable GetJobs(ElectStateContext context)
+ {
+ IMonitoringApi monitoringApi = context.Storage.GetMonitoringApi();
+ List jobs =
+ new List();
+ foreach ((string key, EnqueuedJobDto enqueuedJobDto1) in monitoringApi.EnqueuedJobs(Queue, 0, 500))
+ {
+ string id = key;
+ EnqueuedJobDto enqueuedJobDto2 = enqueuedJobDto1;
+ jobs.Add(JobEntity.Parse(id, enqueuedJobDto2.Job));
+ }
+
+ if (CheckScheduledJobs)
+ {
+ foreach (KeyValuePair pair in monitoringApi.ScheduledJobs(0, 500))
+ {
+ string id = pair.Key;
+ ScheduledJobDto scheduledJobDto3 = pair.Value;
+ jobs.Add(JobEntity.Parse(id, scheduledJobDto3.Job));
+ }
+ }
+
+ if (!CheckRunningJobs)
+ {
+ return jobs;
+ }
+
+ foreach (KeyValuePair pair in
+ monitoringApi.ProcessingJobs(0, 500))
+ {
+ string id = pair.Key;
+ ProcessingJobDto processingJobDto3 = pair.Value;
+ jobs.Add(JobEntity.Parse(id, processingJobDto3.Job));
+ }
+
+ return jobs;
+ }
+
+ public void OnStateElection(ElectStateContext context)
+ {
+ if (!(context.CandidateState is EnqueuedState candidateState))
+ {
+ return;
+ }
+
+ candidateState.Queue = Queue;
+ BackgroundJob job = context.BackgroundJob;
+ var filteredArguments = job.Job.Args.Where(x => x.GetType() != typeof(CancellationToken)).ToList();
+ var jobArgs = JsonSerializer.Serialize(filteredArguments,
+ new JsonSerializerOptions() { IncludeFields = false });
+ var jobs = GetJobs(context);
+ var jobsWithArgs = jobs
+ .Select(x => new { JobEntity = x, ArgAsString = jobArgs }).ToList();
+ var alreadyExists = jobsWithArgs.Exists(x =>
+ x.JobEntity.Value.Method == job.Job.Method && x.ArgAsString == jobArgs && x.JobEntity.Id != job.Id);
+ if (!alreadyExists)
+ {
+ return;
+ }
+
+ context.CandidateState =
+ new DeletedState() { Reason = "Instance of the same job is already queued." };
+ }
+
+ private sealed record JobEntity(string Id, global::Hangfire.Common.Job Value)
+ {
+ public static JobEntity
+ Parse(string id, global::Hangfire.Common.Job job) =>
+ new(id, job);
+ }
+ }
+}
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangFirePlugin.cs b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangFirePlugin.cs
index 35429a2..63e2d6e 100644
--- a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangFirePlugin.cs
+++ b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangFirePlugin.cs
@@ -4,11 +4,14 @@
using Hangfire.PostgreSql;
using Microsoft.AspNetCore.Builder;
using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+using Microsoft.FeatureManagement;
using Newtonsoft.Json;
using PiBox.Hosting.Abstractions;
using PiBox.Hosting.Abstractions.Extensions;
using PiBox.Hosting.Abstractions.Plugins;
using PiBox.Hosting.Abstractions.Services;
+using PiBox.Plugins.Jobs.Hangfire.Attributes;
using PiBox.Plugins.Jobs.Hangfire.Job;
namespace PiBox.Plugins.Jobs.Hangfire
@@ -26,6 +29,7 @@ public HangFirePlugin(HangfireConfiguration configuration, IImplementationResolv
public void ConfigureServices(IServiceCollection serviceCollection)
{
+ serviceCollection.AddFeatureManagement();
serviceCollection.AddSingleton();
serviceCollection.AddSingleton(sp => sp.GetRequiredService());
serviceCollection.AddHangfire(conf =>
@@ -56,6 +60,13 @@ public void ConfigureServices(IServiceCollection serviceCollection)
public void ConfigureApplication(IApplicationBuilder applicationBuilder)
{
+ if (_hangfireConfig.EnableJobsByFeatureManagementConfig)
+ {
+ GlobalJobFilters.Filters.Add(new EnabledByFeatureFilter(
+ applicationBuilder.ApplicationServices.GetRequiredService(),
+ applicationBuilder.ApplicationServices.GetService>()));
+ }
+
var urlAuthFilter = new HostAuthorizationFilter(_hangfireConfig.AllowedDashboardHost);
applicationBuilder.UseHangfireDashboard(options: new() { Authorization = new List { urlAuthFilter } });
var jobRegister = applicationBuilder.ApplicationServices.GetRequiredService();
@@ -78,7 +89,7 @@ public void ConfigureHealthChecks(IHealthChecksBuilder healthChecksBuilder)
tags: new[] { HealthCheckTag.Readiness.Value });
}
- private class HostAuthorizationFilter : IDashboardAuthorizationFilter
+ internal class HostAuthorizationFilter : IDashboardAuthorizationFilter
{
private readonly string _allowedHost;
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangfireConfiguration.cs b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangfireConfiguration.cs
index 48500db..84687d7 100644
--- a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangfireConfiguration.cs
+++ b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/HangfireConfiguration.cs
@@ -10,8 +10,9 @@ public class HangfireConfiguration
public string Database { get; set; }
public string User { get; set; }
public string Password { get; set; }
- public string AllowedDashboardHost { get; set; }
public bool InMemory { get; set; }
+ public string AllowedDashboardHost { get; set; }
+ public bool EnableJobsByFeatureManagementConfig { get; set; }
public int? PollingIntervalInMs { get; set; }
public int? WorkerCount { get; set; }
public string ConnectionString => $"Host={Host};Port={Port};Database={Database};Username={User};Password={Password};";
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/PiBox.Plugins.Jobs.Hangfire.csproj b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/PiBox.Plugins.Jobs.Hangfire.csproj
index bdcf939..6b2d533 100644
--- a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/PiBox.Plugins.Jobs.Hangfire.csproj
+++ b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/PiBox.Plugins.Jobs.Hangfire.csproj
@@ -14,6 +14,7 @@
+
diff --git a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/UniquePerQueueAttribute.cs b/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/UniquePerQueueAttribute.cs
deleted file mode 100644
index 8b7e219..0000000
--- a/PiBox.Plugins/Jobs/Hangfire/src/PiBox.Plugins.Jobs.Hangfire/UniquePerQueueAttribute.cs
+++ /dev/null
@@ -1,62 +0,0 @@
-using Hangfire.Common;
-using Hangfire.States;
-
-namespace PiBox.Plugins.Jobs.Hangfire
-{
- ///
- /// There can only be one job of the same type queued or in processing.
- ///
- public class UniquePerQueueAttribute : JobFilterAttribute, IElectStateFilter
- {
- public string Queue { get; set; }
- public bool CheckScheduledJobs { get; set; }
- public bool CheckRunningJobs { get; set; }
-
- public UniquePerQueueAttribute(string queue)
- {
- Queue = queue;
- }
-
- private IEnumerable GetJobs(ElectStateContext context)
- {
- var monitoringApi = context.Storage.GetMonitoringApi();
- var jobs = new List();
- foreach (var (key, value) in monitoringApi.EnqueuedJobs(Queue, 0, 500))
- jobs.Add(JobEntity.Parse(key, value.Job));
- if (CheckScheduledJobs)
- foreach (var (key, value) in monitoringApi.ScheduledJobs(0, 500))
- jobs.Add(JobEntity.Parse(key, value.Job));
-
- if (!CheckRunningJobs) return jobs;
-
- foreach (var (key, value) in monitoringApi.ProcessingJobs(0, 500))
- {
- jobs.Add(JobEntity.Parse(key, value.Job));
- }
-
- return jobs;
- }
-
- public void OnStateElection(ElectStateContext context)
- {
- if (context.CandidateState is not EnqueuedState enqueuedState)
- {
- return;
- }
-
- enqueuedState.Queue = Queue;
-
- var job = context.BackgroundJob;
- var jobs = GetJobs(context);
- if (jobs.Any(x => x.Value.Method == job.Job.Method && x.Value.Args.SequenceEqual(job.Job.Args) && x.Id != job.Id))
- {
- context.CandidateState = new DeletedState { Reason = "Instance of the same job is already queued." };
- }
- }
-
- private sealed record JobEntity(string Id, global::Hangfire.Common.Job Value)
- {
- public static JobEntity Parse(string id, global::Hangfire.Common.Job job) => new(id, job);
- }
- }
-}
diff --git a/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/EnabledByFeatureFilterTests.cs b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/EnabledByFeatureFilterTests.cs
new file mode 100644
index 0000000..7022ab4
--- /dev/null
+++ b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/EnabledByFeatureFilterTests.cs
@@ -0,0 +1,93 @@
+using FluentAssertions;
+using Hangfire;
+using Hangfire.MemoryStorage;
+using Hangfire.Server;
+using Hangfire.Storage;
+using Microsoft.FeatureManagement;
+using NSubstitute;
+using NUnit.Framework;
+using PiBox.Plugins.Jobs.Hangfire.Attributes;
+using PiBox.Testing.Assertions;
+
+namespace PiBox.Plugins.Jobs.Hangfire.Tests.Attributes
+{
+ public class EnabledByFeatureFilterTests
+ {
+ [Test]
+ public void JobIsNotCancelledWhenMatchingFeatureIsEnabled()
+ {
+ JobStorage.Current = new MemoryStorage();
+ var featureManager = Substitute.For();
+ featureManager.IsEnabledAsync(Arg.Is(x => x == nameof(TestJobAsync))).Returns(true);
+ var filter = new EnabledByFeatureFilter(featureManager,
+ new FakeLogger());
+
+ var job = new global::Hangfire.Common.Job(typeof(TestJobAsync),
+ typeof(TestJobAsync).GetMethod(nameof(TestJobAsync.ExecuteAsync)), CancellationToken.None);
+ var context = new PerformingContext(
+ new PerformContext(JobStorage.Current,
+ Substitute.For(),
+ new BackgroundJob("id1", job, DateTime.Now),
+ new JobCancellationToken(false)
+ )
+ );
+ filter.OnPerforming(
+ context
+ );
+
+ context.Canceled.Should().BeFalse();
+ }
+
+ [Test]
+ public void JobIsCancelledWhenMatchingFeatureIsDisabled()
+ {
+ JobStorage.Current = new MemoryStorage();
+ var featureManager = Substitute.For();
+ featureManager.IsEnabledAsync(Arg.Is(x => x == nameof(TestJobAsync))).Returns(false);
+ var filter = new EnabledByFeatureFilter(featureManager,
+ new FakeLogger());
+
+ var job = new global::Hangfire.Common.Job(typeof(TestJobAsync),
+ typeof(TestJobAsync).GetMethod(nameof(TestJobAsync.ExecuteAsync)), CancellationToken.None);
+ var context = new PerformingContext(
+ new PerformContext(JobStorage.Current,
+ Substitute.For(),
+ new BackgroundJob("id1", job, DateTime.Now),
+ new JobCancellationToken(false)
+ )
+ );
+ filter.OnPerforming(
+ context
+ );
+
+ context.Canceled.Should().BeTrue();
+ }
+
+ [Test]
+ public void JobIsCancelledWhenThereIsNoMatchingFeature()
+ {
+ JobStorage.Current = new MemoryStorage();
+ var featureManager = Substitute.For();
+ featureManager.IsEnabledAsync(Arg.Is(x => x == "asdf")).Returns(true);
+ var filter = new EnabledByFeatureFilter(featureManager,
+ new FakeLogger());
+
+ var job = new global::Hangfire.Common.Job(typeof(TestJobAsync),
+ typeof(TestJobAsync).GetMethod(nameof(TestJobAsync.ExecuteAsync)), CancellationToken.None);
+ var performContext = new PerformContext(JobStorage.Current,
+ Substitute.For(),
+ new BackgroundJob("id1", job, DateTime.Now),
+ new JobCancellationToken(false)
+ );
+ var context = new PerformingContext(
+ performContext
+ );
+ filter.OnPerforming(
+ context
+ );
+ filter.OnPerformed(new PerformedContext(performContext, null, false, null));
+
+ context.Canceled.Should().BeTrue();
+ }
+ }
+}
diff --git a/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/HostAuthorizationFilterTests.cs b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/HostAuthorizationFilterTests.cs
new file mode 100644
index 0000000..3ce7e08
--- /dev/null
+++ b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/HostAuthorizationFilterTests.cs
@@ -0,0 +1,28 @@
+using FluentAssertions;
+using Hangfire;
+using Hangfire.Dashboard;
+using Hangfire.MemoryStorage;
+using Microsoft.AspNetCore.Http;
+using NUnit.Framework;
+using PiBox.Testing;
+
+namespace PiBox.Plugins.Jobs.Hangfire.Tests.Attributes
+{
+ public class HostAuthorizationFilterTests
+ {
+ [Test]
+ [TestCase("localhost", "localhost", true)]
+ [TestCase("localhost1", "localhost", false)]
+ [TestCase("example.com", "localhost", false)]
+ [TestCase("example.com", "example.com", true)]
+ public void JobIsNotCancelledWhenMatchingFeatureIsEnabled(string actualHost, string allowedHost,
+ bool expectedResult)
+ {
+ var sc = TestingDefaults.ServiceProvider();
+ var filter = new HangFirePlugin.HostAuthorizationFilter(allowedHost);
+ var result = filter.Authorize(new AspNetCoreDashboardContext(new MemoryStorage(), new DashboardOptions(),
+ new DefaultHttpContext() { RequestServices = sc, Request = { Host = new HostString(actualHost) } }));
+ result.Should().Be(expectedResult);
+ }
+ }
+}
diff --git a/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/JobCleanupExpirationTimeAttributeTests.cs b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/JobCleanupExpirationTimeAttributeTests.cs
new file mode 100644
index 0000000..dbb3440
--- /dev/null
+++ b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/JobCleanupExpirationTimeAttributeTests.cs
@@ -0,0 +1,42 @@
+using FluentAssertions;
+using Hangfire;
+using Hangfire.MemoryStorage;
+using Hangfire.States;
+using Hangfire.Storage;
+using NSubstitute;
+using NUnit.Framework;
+using PiBox.Plugins.Jobs.Hangfire.Attributes;
+
+namespace PiBox.Plugins.Jobs.Hangfire.Tests.Attributes
+{
+ public class JobCleanupExpirationTimeAttributeTests
+ {
+ [Test]
+ public void JobExpirationTimeoutIsAppliedCorrectly()
+ {
+ JobStorage.Current = new MemoryStorage();
+
+ var filter = new JobCleanupExpirationTimeAttribute(9999);
+
+ var job = new global::Hangfire.Common.Job(typeof(TestJobAsync),
+ typeof(TestJobAsync).GetMethod(nameof(TestJobAsync.ExecuteAsync)), CancellationToken.None);
+
+ var writeOnlyTransaction = Substitute.For();
+ var context = new ApplyStateContext(
+ new MemoryStorage(),
+ Substitute.For(),
+ writeOnlyTransaction,
+ new BackgroundJob("id1", job, DateTime.Now),
+ new ScheduledState(DateTime.Now),
+ "oldState"
+ );
+ filter.OnStateApplied(
+ context,
+ writeOnlyTransaction
+ );
+ filter.OnStateUnapplied(context, writeOnlyTransaction);
+
+ context.JobExpirationTimeout.Should().Be(TimeSpan.FromDays(9999));
+ }
+ }
+}
diff --git a/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/UniquePerQueueAttributeTests.cs b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/UniquePerQueueAttributeTests.cs
similarity index 56%
rename from PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/UniquePerQueueAttributeTests.cs
rename to PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/UniquePerQueueAttributeTests.cs
index 3b8ccdb..bef276d 100644
--- a/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/UniquePerQueueAttributeTests.cs
+++ b/PiBox.Plugins/Jobs/Hangfire/test/PiBox.Plugins.Jobs.Hangfire.Tests/Attributes/UniquePerQueueAttributeTests.cs
@@ -6,9 +6,10 @@
using Hangfire.Storage.Monitoring;
using NSubstitute;
using NUnit.Framework;
+using PiBox.Plugins.Jobs.Hangfire.Attributes;
using PiBox.Plugins.Jobs.Hangfire.Job;
-namespace PiBox.Plugins.Jobs.Hangfire.Tests
+namespace PiBox.Plugins.Jobs.Hangfire.Tests.Attributes
{
public class UniquePerQueueAttributeTests
{
@@ -42,19 +43,6 @@ private void Setup(BackgroundJob backgroundJob, bool includeProcessingJobs = fal
};
}
- private static JobList GetJobList(params (string, T)[] jobs)
- {
- var entries = jobs.Select(x => new KeyValuePair(x.Item1, x.Item2));
- return new JobList(entries);
- }
-
- private static global::Hangfire.Common.Job CreateJob()
- {
- var job = new global::Hangfire.Common.Job(typeof(TestJob),
- typeof(TestJob).GetMethod(nameof(TestJob.ExecuteAsync)), CancellationToken.None);
- return job;
- }
-
[Test]
public void DoesNotRemoveItself()
{
@@ -132,6 +120,103 @@ public void DoesNothingOnWrongState()
_context.CandidateState.Should().Be(state);
}
+ [Test]
+ public void ParameterizedJobDoesNotRemoveItself()
+ {
+ var job = CreateParameterizedJob();
+ var backgroundJob = new BackgroundJob("1", job, DateTime.Now);
+ var enqueuedJob = new EnqueuedJobDto { Job = job };
+ Setup(backgroundJob);
+ _monitoringApi.EnqueuedJobs(Queue, Arg.Any(), Arg.Any())
+ .Returns(GetJobList(("1", enqueuedJob)));
+ _attribute.OnStateElection(_context);
+ _context.CandidateState.Should().BeOfType();
+ var state = _context.CandidateState as EnqueuedState;
+ state!.Queue.Should().Be(Queue);
+ }
+
+ [Test]
+ public void ParameterizedJobRemovesTheDuplicateFromEnqueuedOnes()
+ {
+ var job = CreateParameterizedJob();
+ var backgroundJob = new BackgroundJob("1", job, DateTime.Now);
+ var enqueuedJob = new EnqueuedJobDto { Job = job };
+ Setup(backgroundJob);
+ _monitoringApi.EnqueuedJobs(Queue, Arg.Any(), Arg.Any())
+ .Returns(GetJobList(("2", enqueuedJob)));
+ _attribute.OnStateElection(_context);
+ _context.CandidateState.Should().BeOfType();
+ var state = _context.CandidateState as DeletedState;
+ state!.Reason.Should().Be("Instance of the same job is already queued.");
+ }
+
+ [Test]
+ public void ParameterizedJobRemovesTheDuplicateFromProcessingOnes()
+ {
+ var job = CreateParameterizedJob();
+ var backgroundJob = new BackgroundJob("1", job, DateTime.Now);
+ var processingJob = new ProcessingJobDto { Job = job };
+ Setup(backgroundJob, true);
+ _monitoringApi.EnqueuedJobs(Queue, Arg.Any(), Arg.Any())
+ .Returns(GetJobList());
+ _monitoringApi.ProcessingJobs(Arg.Any(), Arg.Any())
+ .Returns(GetJobList(("2", processingJob)));
+ _attribute.OnStateElection(_context);
+ _context.CandidateState.Should().BeOfType();
+ var state = _context.CandidateState as DeletedState;
+ state!.Reason.Should().Be("Instance of the same job is already queued.");
+ }
+
+ [Test]
+ public void ParameterizedJobRemovesTheDuplicateFromScheduledOnes()
+ {
+ var job = CreateParameterizedJob();
+ var backgroundJob = new BackgroundJob("1", job, DateTime.Now);
+ var scheduledJob = new ScheduledJobDto { Job = job };
+ Setup(backgroundJob, false, true);
+ _monitoringApi.EnqueuedJobs(Queue, Arg.Any(), Arg.Any())
+ .Returns(GetJobList());
+ _monitoringApi.ScheduledJobs(Arg.Any(), Arg.Any())
+ .Returns(GetJobList(("2", scheduledJob)));
+ _attribute.OnStateElection(_context);
+ _context.CandidateState.Should().BeOfType();
+ var state = _context.CandidateState as DeletedState;
+ state!.Reason.Should().Be("Instance of the same job is already queued.");
+ }
+
+ [Test]
+ public void ParameterizedJobDoesNothingOnWrongState()
+ {
+ var job = CreateParameterizedJob();
+ var backgroundJob = new BackgroundJob("1", job, DateTime.Now);
+ Setup(backgroundJob, false, true);
+ var state = new ScheduledState(TimeSpan.FromMilliseconds(100));
+ _context.CandidateState = state;
+ _attribute.OnStateElection(_context);
+ _context.CandidateState.Should().BeOfType();
+ _context.CandidateState.Should().Be(state);
+ }
+
+ private static JobList GetJobList(params (string, T)[] jobs)
+ {
+ var entries = jobs.Select(x => new KeyValuePair(x.Item1, x.Item2));
+ return new JobList(entries);
+ }
+
+ private static global::Hangfire.Common.Job CreateJob()
+ {
+ var job = new global::Hangfire.Common.Job(typeof(TestJob),
+ typeof(TestJob).GetMethod(nameof(TestJob.ExecuteAsync)), CancellationToken.None);
+ return job;
+ }
+
+ private static global::Hangfire.Common.Job CreateParameterizedJob()
+ {
+ var job = new global::Hangfire.Common.Job(typeof(ParameterizedTestJob),
+ typeof(ParameterizedTestJob).GetMethod(nameof(ParameterizedTestJob.ExecuteAsync)), new TestJobPayload(), CancellationToken.None);
+ return job;
+ }
+
[ExcludeFromCodeCoverage]
private class TestJob : IAsyncJob
{
@@ -153,5 +238,32 @@ public Task