diff --git a/.gitignore b/.gitignore index fc577ea2229..cea1d12f163 100644 --- a/.gitignore +++ b/.gitignore @@ -206,3 +206,17 @@ build/ /src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.received.txt launchSettings.json .idea/ + +# GhostDoc is a C# XML comment helper +*.[Gg]host[Dd]oc.xml +*.[Gg]host[Dd]oc.user.dic + +# CodeRush +.cr/ + +# Visual Studio Code +.vscode/ + +# NDepend +*.ndproj +/[Nn][Dd]epend[Oo]ut diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 147890e1035..3e46819ce78 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -1,3 +1,87 @@ +#### 1.3.9 August 22 2018 #### +**Maintenance Release for Akka.NET 1.3** + +Akka.NET v1.3.9 features some major changes to Akka.Cluster.Sharding, additional Akka.Streams stages, and some general bug fixes across the board. + +**Akka.Cluster.Sharding Improvements** +The [Akka.Cluster.Sharding documentation](http://getakka.net/articles/clustering/cluster-sharding.html#quickstart) already describes some of the major changes in Akka.NET v1.3.9, but we figured it would be worth calling special attention to those changes here. + +**Props Factory for Entity Actors** + +> In some cases, the actor may need to know the `entityId` associated with it. This can be achieved using the `entityPropsFactory` parameter to `ClusterSharding.Start` or `ClusterSharding.StartAsync`. The entity ID will be passed to the factory as a parameter, which can then be used in the creation of the actor. 
+
+In addition to the existing APIs we've always had for defining sharded entities via `Props`, Akka.NET v1.3.9 introduces [a new method overload for `Start`](http://getakka.net/api/Akka.Cluster.Sharding.ClusterSharding.html#Akka_Cluster_Sharding_ClusterSharding_Start_System_String_System_Func_System_String_Akka_Actor_Props__Akka_Cluster_Sharding_ClusterShardingSettings_Akka_Cluster_Sharding_ExtractEntityId_Akka_Cluster_Sharding_ExtractShardId_) and [`StartAsync`](http://getakka.net/api/Akka.Cluster.Sharding.ClusterSharding.html#Akka_Cluster_Sharding_ClusterSharding_StartAsync_System_String_System_Func_System_String_Akka_Actor_Props__Akka_Cluster_Sharding_ClusterShardingSettings_Akka_Cluster_Sharding_ExtractEntityId_Akka_Cluster_Sharding_ExtractShardId_) which allows users to pass in the `entityId` of each entity actor as a constructor argument to those entities when they start.
+
+For example:
+
+```
+var anotherCounterShard = ClusterSharding.Get(Sys).Start(
+    // the entity type name registered with the cluster
+    typeName: AnotherCounter.ShardingTypeName,
+    // each entity receives its entityId as a constructor argument
+    entityPropsFactory: entityId => AnotherCounter.Props(entityId),
+    settings: ClusterShardingSettings.Create(Sys),
+    extractEntityId: Counter.ExtractEntityId,
+    extractShardId: Counter.ExtractShardId);
+```
+
+This will give you the opportunity to pass in the `entityId` for each actor as a constructor argument into the `Props` of your entity actor and possibly other use cases too.
+
+**Improvements to Starting and Querying Existing Shard Entity Types**
+Two additional major usability improvements to Cluster.Sharding come from some API additions and changes.
+
+The first is that it's now possible to look up all of the currently registered shard types via the [`ClusterSharding.ShardTypeNames` property](http://getakka.net/api/Akka.Cluster.Sharding.ClusterSharding.html#Akka_Cluster_Sharding_ClusterSharding_ShardTypeNames).
So long as a `ShardRegion` of that type has been started in the cluster, that entity type name will be added to the collection exposed by this property.
+
+The other major usability improvement is a change to the `ClusterSharding.Start` method itself. Historically, you used to have to know whether or not the node you wanted to use sharding on was going to be hosting shards (call `ClusterSharding.Start`) or simply communicating with shards hosted on a different cluster role type (call `ClusterSharding.StartProxy`). Going forward, it's safe to call `ClusterSharding.Start` on any node and you will either receive an `IActorRef` to an active `ShardRegion` or a `ShardRegion` running in "proxy only" mode; this is determined by looking at the `ClusterShardingSettings` and determining if the current node is in a role that is allowed to host shards of this type.
+
+* [Akka.Cluster.Sharding: Sharding API Updates](https://github.com/akkadotnet/akka.net/pull/3524)
+* [Akka.Cluster.Sharding: sharding rebalance fix](https://github.com/akkadotnet/akka.net/pull/3518)
+* [Akka.Cluster.Sharding: log formatting fix](https://github.com/akkadotnet/akka.net/pull/3554)
+* [Akka.Cluster.Sharding: `RestartShard` escapes into userspace](https://github.com/akkadotnet/akka.net/pull/3509)
+
+**Akka.Streams Additions and Changes**
+In Akka.NET v1.3.9 we've added some new built-in stream stages and API methods designed to help improve developer productivity and ease of use.
+
+* [Akka.Streams: add CombineMaterialized method to Source](https://github.com/akkadotnet/akka.net/pull/3489)
+* [Akka.Streams: KillSwitches: flow stage from CancellationToken](https://github.com/akkadotnet/akka.net/pull/3568)
+* [Akka.Streams: Port KeepAliveConcat and UnfoldFlow](https://github.com/akkadotnet/akka.net/pull/3560)
+* [Akka.Streams: Port PagedSource & IntervalBasedRateLimiter](https://github.com/akkadotnet/akka.net/pull/3570)
+
+**Other Updates, Additions, and Bugfixes**
+* [Akka.Cluster: cluster coordinated leave fix for empty cluster](https://github.com/akkadotnet/akka.net/pull/3516)
+* [Akka.Cluster.Tools: bumped ClusterClient message drop log messages from DEBUG to WARNING](https://github.com/akkadotnet/akka.net/pull/3513)
+* [Akka.Cluster.Tools: Singleton - confirm TakeOverFromMe when singleton already in oldest state](https://github.com/akkadotnet/akka.net/pull/3553)
+* [Akka.Remote: RemoteWatcher race-condition fix](https://github.com/akkadotnet/akka.net/pull/3519)
+* [Akka: fix concurrency bug in CircuitBreaker](https://github.com/akkadotnet/akka.net/pull/3505)
+* [Akka: Fixed ReceiveTimeout not triggered in some case when combined with NotInfluenceReceiveTimeout messages](https://github.com/akkadotnet/akka.net/pull/3555)
+* [Akka.Persistence: Optimized recovery](https://github.com/akkadotnet/akka.net/pull/3549)
+* [Akka.Persistence: Allow persisting events when recovery has completed](https://github.com/akkadotnet/akka.net/pull/3366)
+
+To [see the full set of changes for Akka.NET v1.3.9, click here](https://github.com/akkadotnet/akka.net/milestone/27).
+ +| COMMITS | LOC+ | LOC- | AUTHOR | +| --- | --- | --- | --- | +| 28 | 2448 | 5691 | Aaron Stannard | +| 11 | 1373 | 230 | zbynek001 | +| 8 | 4590 | 577 | Bartosz Sypytkowski | +| 4 | 438 | 99 | Ismael Hamed | +| 4 | 230 | 240 | Sean Gilliam | +| 2 | 1438 | 0 | Oleksandr Bogomaz | +| 1 | 86 | 79 | Nick Polideropoulos | +| 1 | 78 | 0 | v1rusw0rm | +| 1 | 4 | 4 | Joshua Garnett | +| 1 | 32 | 17 | Jarl Sveinung Flø Rasmussen | +| 1 | 27 | 1 | Sam13 | +| 1 | 250 | 220 | Maxim Cherednik | +| 1 | 184 | 124 | Josh Taylor | +| 1 | 14 | 0 | Peter Shrosbree | +| 1 | 1278 | 42 | Marc Piechura | +| 1 | 1 | 1 | Vasily Kirichenko | +| 1 | 1 | 1 | Samuel Kelemen | +| 1 | 1 | 1 | Nyola Mike | +| 1 | 1 | 1 | Fábio Beirão | + #### 1.3.8 June 04 2018 #### **Maintenance Release for Akka.NET 1.3** diff --git a/docs/articles/actors/dependency-injection.md b/docs/articles/actors/dependency-injection.md index 7d3f707855c..f38d8618a6a 100644 --- a/docs/articles/actors/dependency-injection.md +++ b/docs/articles/actors/dependency-injection.md @@ -45,7 +45,7 @@ protected override void PreStart() } ``` -> [!INFO] +> [!NOTE] > There is currently still an extension method available for the actor Context. `Context.DI().ActorOf<>`. However this has been officially **deprecated** and will be removed in future versions. ## Notes @@ -77,7 +77,7 @@ guideline. 
Currently the following Akka.NET Dependency Injection plugins are available: -## AutoFac +### AutoFac In order to use this plugin, install the Nuget package with `Install-Package Akka.DI.AutoFac`, then follow the instructions: @@ -94,7 +94,7 @@ var system = ActorSystem.Create("MySystem"); var propsResolver = new AutoFacDependencyResolver(container, system); ``` -## CastleWindsor +### CastleWindsor In order to use this plugin, install the Nuget package with `Install-Package Akka.DI.CastleWindsor`, then follow the instructions: @@ -110,7 +110,7 @@ var system = ActorSystem.Create("MySystem"); var propsResolver = new WindsorDependencyResolver(container, system); ``` -## Ninject +### Ninject In order to use this plugin, install the Nuget package with `Install-Package Akka.DI.Ninject`, then follow the instructions: @@ -126,14 +126,8 @@ var system = ActorSystem.Create("MySystem"); var propsResolver = new NinjectDependencyResolver(container,system); ``` -## Other frameworks +### Other frameworks Support for additional dependency injection frameworks may be added in the future, but you can easily implement your own by implementing an -[Actor Producer Extension](DI Core). - - - - - - +[Actor Producer Extension](xref:di-core). diff --git a/docs/articles/actors/dispatchers.md b/docs/articles/actors/dispatchers.md index ff80f4b6df1..4831c265d6a 100644 --- a/docs/articles/actors/dispatchers.md +++ b/docs/articles/actors/dispatchers.md @@ -69,100 +69,100 @@ system.ActorOf(Props.Create().WithDispatcher("my-dispatcher"), "my-acto Some dispatcher configurations are available out-of-the-box for convenience. You can use them during actor deployment, [as described above](#configuring-dispatchers). -* **default-dispatcher** - A configuration that uses the [ThreadPoolDispatcher](#ThreadPoolDispatcher). As the name says, this is the default dispatcher configuration used by the global dispatcher, and you don't need to define anything during deployment to use it. 
-* **task-dispatcher** - A configuration that uses the [TaskDispatcher](#TaskDispatcher). -* **default-fork-join-dispatcher** - A configuration that uses the [ForkJoinDispatcher]. -* **synchronized-dispatcher** - A configuration that uses the [SynchronizedDispatcher](#SynchronizedDispatcher). +* **default-dispatcher** - A configuration that uses the [ThreadPoolDispatcher](#threadpooldispatcher). As the name says, this is the default dispatcher configuration used by the global dispatcher, and you don't need to define anything during deployment to use it. +* **task-dispatcher** - A configuration that uses the [TaskDispatcher](#taskdispatcher). +* **default-fork-join-dispatcher** - A configuration that uses the [ForkJoinDispatcher](#forkjoindispatcher). +* **synchronized-dispatcher** - A configuration that uses the [SynchronizedDispatcher](#synchronizeddispatcher). ## Built-in Dispatchers These are the underlying dispatchers built-in to Akka.NET: -* ### ThreadPoolDispatcher +### ThreadPoolDispatcher - It schedules code to run in the [.NET Thread Pool](https://msdn.microsoft.com/en-us/library/System.Threading.ThreadPool.aspx), which is ***good enough* for most cases.** +It schedules code to run in the [.NET Thread Pool](https://msdn.microsoft.com/en-us/library/System.Threading.ThreadPool.aspx), which is ***good enough* for most cases.** - The `type` used in the HOCON configuration for this dispatcher is just `Dispatcher`. +The `type` used in the HOCON configuration for this dispatcher is just `Dispatcher`. - ```hocon - custom-dispatcher { - type = Dispatcher - throughput = 100 - } - ``` +```hocon +custom-dispatcher { +type = Dispatcher +throughput = 100 +} +``` > [!NOTE] > While each configuration can have it's own throughput settings, all dispatchers using this type will run in the same default .NET Thread Pool. 
-* ### TaskDispatcher
+### TaskDispatcher
 
- The TaskDispatcher uses the [TPL](https://msdn.microsoft.com/en-us/library/dd460717.aspx) infrastructure to schedule code execution. This dispatcher is very similar to the Thread PoolDispatcher, but may be used in some rare scenarios where the thread pool isn't available.
+The TaskDispatcher uses the [TPL](https://msdn.microsoft.com/en-us/library/dd460717.aspx) infrastructure to schedule code execution. This dispatcher is very similar to the Thread PoolDispatcher, but may be used in some rare scenarios where the thread pool isn't available.
 
- ```hocon
- custom-task-dispatcher {
-  type = TaskDispatcher
-  throughput = 100
- }
- ```
+```hocon
+custom-task-dispatcher {
+  type = TaskDispatcher
+  throughput = 100
+}
+```
 
-* ### PinnedDispatcher
+### PinnedDispatcher
 
- The `PinnedDispatcher` uses a single dedicated thread to schedule code executions. Ideally, this dispatcher should be using sparingly.
+The `PinnedDispatcher` uses a single dedicated thread to schedule code executions. Ideally, this dispatcher should be used sparingly.
 
- ```hocon
- custom-dedicated-dispatcher {
-  type = PinnedDispatcher
- }
- ```
+```hocon
+custom-dedicated-dispatcher {
+  type = PinnedDispatcher
+}
+```
 
-* ### ForkJoinDispatcher
+### ForkJoinDispatcher
 
- The ForkJoinDispatcher uses a dedicated threadpool to schedule code execution. You can use this scheduler isolate some actors from the rest of the system. Each dispatcher configuration will have it's own thread pool.
+The ForkJoinDispatcher uses a dedicated threadpool to schedule code execution. You can use this scheduler to isolate some actors from the rest of the system. Each dispatcher configuration will have its own thread pool.
+This is the configuration for the [*default-fork-join-dispatcher*](#built-in-dispatcher-configurations). You may use this as example for custom fork-join dispatchers. - ```hocon - default-fork-join-dispatcher { - type = ForkJoinDispatcher - throughput = 100 - dedicated-thread-pool { - thread-count = 3 - deadlock-timeout = 3s - threadtype = background - } +```hocon +default-fork-join-dispatcher { + type = ForkJoinDispatcher + throughput = 100 + dedicated-thread-pool { + thread-count = 3 + deadlock-timeout = 3s + threadtype = background } - ``` +} +``` - * `thread-count` - The number of threads dedicated to this dispatcher. - * `deadlock-timeout` - The amount of time to wait before considering the thread as deadlocked. By default no timeout is set, meaning code can run in the threads for as long as they need. If you set a value, once the timeout is reached the thread will be aborted and a new threads will take it's place. Set this value carefully, as very low values may cause loss of work. - * `threadtype` - Must be `background` or `foreground`. This setting helps define [how .NET handles](https://msdn.microsoft.com/en-us/library/system.threading.thread.isbackground.aspx) the thread. +* `thread-count` - The number of threads dedicated to this dispatcher. +* `deadlock-timeout` - The amount of time to wait before considering the thread as deadlocked. By default no timeout is set, meaning code can run in the threads for as long as they need. If you set a value, once the timeout is reached the thread will be aborted and a new threads will take it's place. Set this value carefully, as very low values may cause loss of work. +* `threadtype` - Must be `background` or `foreground`. This setting helps define [how .NET handles](https://msdn.microsoft.com/en-us/library/system.threading.thread.isbackground.aspx) the thread. 
-* ### SynchronizedDispatcher
+### SynchronizedDispatcher
 
- The `SynchronizedDispatcher` uses the *current* [SynchronizationContext](https://msdn.microsoft.com/en-us/magazine/gg598924.aspx) to schedule executions.
+The `SynchronizedDispatcher` uses the *current* [SynchronizationContext](https://msdn.microsoft.com/en-us/magazine/gg598924.aspx) to schedule executions.
 
- You may use this dispatcher to create actors that update UIs in a reactive manner. An application that displays real-time updates of stock prices may have a dedicated actor to update the UI controls directly for example.
+You may use this dispatcher to create actors that update UIs in a reactive manner. An application that displays real-time updates of stock prices may have a dedicated actor to update the UI controls directly for example.
 
 > [!NOTE]
 > As a general rule, actors running in this dispatcher shouldn't do much work. Avoid doing any extra work that may be done by actors running in other pools.
 
- This is the configuration for the [*synchronized-dispatcher*](#built-in-dispatcher-configurations). You may use this as example for custom fork-join dispatchers.
+This is the configuration for the [*synchronized-dispatcher*](#built-in-dispatcher-configurations). You may use this as an example for custom synchronized dispatchers.
 
- ```hocon
- synchronized-dispatcher {
-  type = "SynchronizedDispatcher"
-  throughput = 10
- }
- ```
+```hocon
+synchronized-dispatcher {
+  type = "SynchronizedDispatcher"
+  throughput = 10
+}
+```
 
- In order to use this dispatcher, you must create the actor from the syncrhonization context you want to run-it.
+In order to use this dispatcher, you must create the actor from the synchronization context you want it to run in.
For example: - ```cs - private void Form1_Load(object sender, System.EventArgs e) - { - system.ActorOf(Props.Create().WithDispatcher("synchronized-dispatcher"), "ui-worker"); - } - ``` +```csharp +private void Form1_Load(object sender, System.EventArgs e) +{ + system.ActorOf(Props.Create().WithDispatcher("synchronized-dispatcher"), "ui-worker"); +} +``` #### Common Dispatcher Configuration diff --git a/docs/articles/actors/mailboxes.md b/docs/articles/actors/mailboxes.md index 3e4d826e712..4ebb381dbd9 100644 --- a/docs/articles/actors/mailboxes.md +++ b/docs/articles/actors/mailboxes.md @@ -13,7 +13,7 @@ A mailbox can be described as a queue of messages. Messages are usually then del Normally every actor has its own mailbox, but this is not a requirement. There are implementations of [routers](xref:routers) where all routees share a single mailbox. -### Using a Mailbox +## Using a Mailbox To make an actor use a specific mailbox, you can set it up one of the following locations: @@ -35,7 +35,7 @@ To make an actor use a specific mailbox, you can set it up one of the following The `my-custom-mailbox` is a key that was setup using the [mailbox configuration](#configuring-custom-mailboxes). -### Configuring Custom Mailboxes +## Configuring Custom Mailboxes In order to use a custom mailbox, it must be first configured with a key that the actor system can lookup. You can do this using a custom HOCON setting. @@ -48,40 +48,41 @@ my-custom-mailbox { } ``` -### Built-in Mailboxes +## Built-in Mailboxes -* #### UnboundedMailbox +### UnboundedMailbox - **This is the default mailbox** used by Akka.NET. It's a non-blocking unbounded mailbox, and should be good enough for most cases. +**This is the default mailbox** used by Akka.NET. It's a non-blocking unbounded mailbox, and should be good enough for most cases. 
-* #### UnboundedPriorityMailbox +### UnboundedPriorityMailbox - The unbounded mailbox priority mailbox is blocking mailbox that allows message prioritization, so that you can choose the order the actor should process messages that are already in the mailbox. +The unbounded mailbox priority mailbox is blocking mailbox that allows message prioritization, so that you can choose the order the actor should process messages that are already in the mailbox. - In order to use this mailbox, you need to extend the `UnboundedPriorityMailbox` class and provide an ordering logic. The value returned by the `PriorityGenerator` method will be used to order the message in the mailbox. Lower values will be delivered first. Delivery order for messages of equal priority is undefined. +In order to use this mailbox, you need to extend the `UnboundedPriorityMailbox` class and provide an ordering logic. The value returned by the `PriorityGenerator` method will be used to order the message in the mailbox. Lower values will be delivered first. Delivery order for messages of equal priority is undefined. - ```cs - public class IssueTrackerMailbox : UnboundedPriorityMailbox +```cs +public class IssueTrackerMailbox : UnboundedPriorityMailbox +{ + protected override int PriorityGenerator(object message) { - protected override int PriorityGenerator(object message) - { - var issue = message as Issue; + var issue = message as Issue; - if (issue != null) - { - if (issue.IsSecurityFlaw) - return 0; + if (issue != null) + { + if (issue.IsSecurityFlaw) + return 0; - if (issue.IsBug) - return 1; - } + if (issue.IsBug) + return 1; + } - return 2; - } + return 2; } - ``` +} +``` + +Once the class is implemented, you should set it up using the [instructions above](#using-a-mailbox). - Once the class is implemented, you should set it up using the [instructions above](#using-a-mailbox). -##### Special note -While we have updated the UnboundedPriorityMailbox to support Stashing. We don't recommend using it. 
+> [!WARNING]
+> While we have updated the `UnboundedPriorityMailbox` to support stashing, we don't recommend using it. Once you stash messages, they are no longer part of the prioritization process that your PriorityMailbox uses. Once you unstash all messages, they are fed to the Actor, in the order of stashing.
diff --git a/docs/articles/actors/routers.md b/docs/articles/actors/routers.md
index ea87916dac3..0ae2fe33b05 100644
--- a/docs/articles/actors/routers.md
+++ b/docs/articles/actors/routers.md
@@ -576,9 +576,9 @@ When an actor received `PoisonPill` message, that actor will be stopped. (see [P
 For a router, which normally passes on messages to routees, the `PoisonPill` messages are processed __by the router only__. `PoisonPill` messages sent to a router will __not__ be sent on to its routees.
 
-However, a `PisonPill` message sent to a router may still affect its routees, as it will stop the router whhich in turns stop children the router has created. Each child will process its current message and then stop. This could lead to some messages being unprocessed.
+However, a `PoisonPill` message sent to a router may still affect its routees, as it will stop the router which in turn stops the children the router has created. Each child will process its current message and then stop. This could lead to some messages being unprocessed.
 
-If you wish to stop a router and its routees, but you would like the routees to first process all the messages in their mailbxes, then you should send a `PoisonPill` message wrapped inside a `Broadcast` message so that each routee will receive the `PoisonPill` message.
+If you wish to stop a router and its routees, but you would like the routees to first process all the messages in their mailboxes, then you should send a `PoisonPill` message wrapped inside a `Broadcast` message so that each routee will receive the `PoisonPill` message.
 
 > [!NOTE]
 > The above method will stop all routees, even if they are not created by the router. E.g.
routees programatically provided to the router. diff --git a/docs/articles/actors/testing-actor-systems.md b/docs/articles/actors/testing-actor-systems.md index 2bb05f27073..374227a035a 100644 --- a/docs/articles/actors/testing-actor-systems.md +++ b/docs/articles/actors/testing-actor-systems.md @@ -74,8 +74,8 @@ Since an integration test does not allow to the internal processing of the parti If a number of occurrences is specific --as demonstrated above-- then `intercept` will block until that number of matching messages have been received or the timeout configured in `akka.test.filter-leeway` is used up (time starts counting after the passed-in block of code returns). In case of a timeout the test fails. ->[NOTE] ->By default the TestKit already loads the TestEventListener as a logger. Be aware that if you want to specify your own config. Use the `DefaultConfig` property to apply overrides. +> [!NOTE] +> By default the TestKit already loads the TestEventListener as a logger. Be aware that if you want to specify your own config. Use the `DefaultConfig` property to apply overrides. ## Timing Assertions Another important part of functional testing concerns timing: certain events must not happen immediately (like a timer), others need to happen before a deadline. Therefore, all examination methods accept an upper time limit within the positive or negative result must be obtained. Lower time limits need to be checked external to the examination, which is facilitated by a new construct for managing time constraints: @@ -121,8 +121,8 @@ Probes may also be equipped with custom assertions to make your test code even m You have complete flexibility here in mixing and matching the `TestKit` facilities with your own checks and choosing an intuitive name for it. In real life your code will probably be a bit more complicated than the example given above; just use the power! 
->[WARNING] ->Any message send from a `TestProbe` to another actor which runs on the `CallingThreadDispatcher` runs the risk of dead-lock, if that other actor might also send to this probe. The implementation of `TestProbe.Watch` and `TestProbe.Unwatch` will also send a message to the watchee, which means that it is dangerous to try watching e.g. `TestActorRef` from a `TestProbe`. +> [!WARNING] +> Any message send from a `TestProbe` to another actor which runs on the `CallingThreadDispatcher` runs the risk of dead-lock, if that other actor might also send to this probe. The implementation of `TestProbe.Watch` and `TestProbe.Unwatch` will also send a message to the watchee, which means that it is dangerous to try watching e.g. `TestActorRef` from a `TestProbe`. ###Watching Other Actors from probes A `TestProbe` can register itself for DeathWatch of any other actor: @@ -263,7 +263,7 @@ Testing the business logic inside `Actor` classes can be divided into two parts: Normally, the `IActorRef` shields the underlying `Actor` instance from the outside, the only communications channel is the actor's mailbox. This restriction is an impediment to unit testing, which led to the inception of the `TestActorRef`. This special type of reference is designed specifically for test purposes and allows access to the actor in two ways: either by obtaining a reference to the underlying actor instance, or by invoking or querying the actor's behaviour (receive). Each one warrants its own section below. > [!NOTE] -> It is highly recommended to stick to traditional behavioural testing (using messaging to ask the `Actor` to reply with the state you want to run assertions against), instead of using `TestActorRef` whenever possible. +> It is highly recommended to stick to traditional behavioral testing (using messaging to ask the `Actor` to reply with the state you want to run assertions against), instead of using `TestActorRef` whenever possible. 
## Obtaining a Reference to an Actor Having access to the actual `Actor` object allows application of all traditional unit testing techniques on the contained methods. Obtaining a reference is done like this: diff --git a/docs/articles/clustering/cluster-configuration.md b/docs/articles/clustering/cluster-configuration.md index 5b6acdbb135..508cf59dd1c 100644 --- a/docs/articles/clustering/cluster-configuration.md +++ b/docs/articles/clustering/cluster-configuration.md @@ -82,4 +82,4 @@ akka { This tells the role leader for `crawlerV1` to not mark any of those nodes as up until at least three nodes with role `crawlerV1` have joined the cluster. ## Additional Resources -- [`Cluster.conf`](https://github.com/akkadotnet/akka.net/blob/dev/src/core/Akka.Cluster/Configuration/Cluster.conf): the full set of configuration options +- [Cluster.conf](../configuration/akka.cluster.md): the full set of configuration options diff --git a/docs/articles/clustering/cluster-extension.md b/docs/articles/clustering/cluster-extension.md index a611f5b9d0d..40f0d455820 100644 --- a/docs/articles/clustering/cluster-extension.md +++ b/docs/articles/clustering/cluster-extension.md @@ -4,7 +4,7 @@ title: Accessing the Cluster `ActorSystem` Extension --- # Using the Cluster `ActorSystem` Extension Object -`Akka.Cluster` is actually an "`ActorSystem` extension" that you can use to access membership information and [cluster gossip](cluster-overview.md#cluster-gossip) directly. +`Akka.Cluster` is actually an `ActorSystem` extension that you can use to access membership information and [cluster gossip](cluster-overview.md#cluster-gossip) directly. 
## Getting a Reference to the `Cluster` You can get a direct reference to the `Cluster` extension like so (drawn from the [`SimpleClusterListener` example in the Akka.NET project](https://github.com/akkadotnet/akka.net/blob/dev/src/examples/Cluster/Samples.Cluster.Simple/SimpleClusterListener.cs)): diff --git a/docs/articles/clustering/cluster-sharding.md b/docs/articles/clustering/cluster-sharding.md index 4217a54da34..c4eaf3336a9 100644 --- a/docs/articles/clustering/cluster-sharding.md +++ b/docs/articles/clustering/cluster-sharding.md @@ -9,7 +9,7 @@ Cluster sharding is useful in cases when you want to contact with cluster actors Cluster sharding can operate in 2 modes, configured via `akka.cluster.sharding.state-store-mode` HOCON configuration: 1. `persistence` (**default**) depends on Akka.Persistence module. In order to use it, you'll need to specify an event journal accessible by all of the participating nodes. An information about the particular shard placement is stored in a persistent cluster singleton actor known as *coordinator*. In order to guarantee consistent state between different incarnations, coordinator stores its own state using Akka.Persistence event journals. -2. `ddata` (**experimental**, available in versions above 1.3.2) depends on Akka.DistributedData module. It uses Conflict-free Replicated Data Types (CRDT) to ensure eventualy consistent shard placement and global availability via node-to-node replication and automatic conflict resolution. In this mode event journals don't have to be configured. At this moment this mode doesn't support `akka.cluster.sharding.remember-entities` option. +2. `ddata` (**experimental**, available in versions above 1.3.2) depends on Akka.DistributedData module. It uses Conflict-free Replicated Data Types (CRDT) to ensure eventually consistent shard placement and global availability via node-to-node replication and automatic conflict resolution. In this mode event journals don't have to be configured. 
At this moment this mode doesn't support `akka.cluster.sharding.remember-entities` option. Cluster sharding may be active only on nodes in `Up` status - so the ones fully recognized and acknowledged by every other node in a cluster. @@ -55,6 +55,8 @@ In this example, we first specify way to resolve our message recipients in conte Second part of an example is registering custom actor type as sharded entity using `ClusterSharding.Start` or `ClusterSharding.StartAsync` methods. Result is the `IActorRef` to shard region used to communicate between current actor system and target entities. Shard region must be specified once per each type on each node, that is expected to participate in sharding entities of that type. Keep in mind, that it's recommended to wait for the current node to first fully join the cluster before initializing a shard regions in order to avoid potential timeouts. +In some cases, the actor may need to know the `entityId` associated with it. This can be achieved using the `entityPropsFactory` parameter to `ClusterSharding.Start` or `ClusterSharding.StartAsync`. The entity ID will be passed to the factory as a parameter, which can then be used in the creation of the actor. + In case when you want to send message to entities from specific node, but you don't want that node to participate in sharding itself, you can use `ShardRegionProxy` for that. Example: @@ -80,7 +82,7 @@ To reduce memory consumption, you may decide to stop entities after some period ## Remembering entities -By default, when a shard is rebalanced to another node, the entities it stored before migration, are NOT started immediatelly after. Instead they are recreated ad-hoc, when new messages are incoming. This behavior can be modified by `akka.cluster.sharding.remember-entities = true` configuration. It will instruct shards to keep their state between rebalances - it also comes with extra cost due to necessity of persisting information about started/stopped entities. 
Additionally a message extractor logic must be aware of `ShardRegion.StartEntity` message: +By default, when a shard is rebalanced to another node, the entities it stored before migration, are NOT started immediately after. Instead they are recreated ad-hoc, when new messages are incoming. This behavior can be modified by `akka.cluster.sharding.remember-entities = true` configuration. It will instruct shards to keep their state between rebalances - it also comes with extra cost due to necessity of persisting information about started/stopped entities. Additionally a message extractor logic must be aware of `ShardRegion.StartEntity` message: ```csharp public sealed class ShardEnvelope diff --git a/docs/articles/clustering/distributed-data.md b/docs/articles/clustering/distributed-data.md index 7893b800538..1980d62d3ee 100644 --- a/docs/articles/clustering/distributed-data.md +++ b/docs/articles/clustering/distributed-data.md @@ -42,14 +42,14 @@ In response, you should receive `Replicator.IGetResponse` message. There are sev - `GetSuccess` when a value for provided key has been received successfully. To get the value, you need to call `response.Get(key)` with the key, you've sent with the request. - `NotFound` when no value was found under provided key. -- `GetFailure` when a replicator failed to retrieve value withing specified consistency and timeout constraints. +- `GetFailure` when a replicator failed to retrieve value within specified consistency and timeout constraints. - `DataDeleted` when a value for the provided key has been deleted. -All `Get` requests follows the read-your-own-write rule - if you updated the data, and want to read the state under the same key immediatelly after, you'll always retrieve modified value, even if the `IGetResponse` message will arrive before `IUpdateResponse`. 
+All `Get` requests follow the read-your-own-write rule - if you updated the data and want to read the state under the same key immediately after, you'll always retrieve the modified value, even if the `IGetResponse` message will arrive before `IUpdateResponse`.
Provided value will eventually propagate its replicas across nodes using gossip protocol, causing the altered state to eventually converge across all of them. - `DataDeleted` when a value under provided key has been deleted. You'll always see updates done on local node. When you perform two updates on the same key, second modify function will always see changes done by the first one. #### Write consistency -Just like in case of reads, write consistency allows us to specify level of certainity of our updates before proceeding: +Just like in case of reads, write consistency allows us to specify level of certainty of our updates before proceeding: -- `WriteLocal` - while value will be disseminated later using gossip, the response will return immediatelly after local replica update has been acknowledged. -- `WriteTo` - update will immediatelly be propagated to a given number of replicas, including local one. +- `WriteLocal` - while value will be disseminated later using gossip, the response will return immediately after local replica update has been acknowledged. +- `WriteTo` - update will immediately be propagated to a given number of replicas, including local one. - `WriteMajority` - update will propagate to more than a half nodes in a cluster (or nodes given a configured role) before response will be emitted. - `WriteAll` - update will propagate to all nodes in a cluster (or nodes given a configured role) before response will be emitted. @@ -112,7 +112,7 @@ Delete may return one of the 3 responses: - `DeleteSuccess` when key deletion succeeded within provided consistency constraints. - `DataDeleted` when data has been deleted already. Once deleted, key can no longer be reused and `DataDeleted` response will be send to all subsequent requests (either reads, updates or deletes). This message will also be used as notification for subscribers. -- `ReplicationDeleteFailure` when operation failed to satisfy specified consistency constraints. 
**Warning**: this doesn't mean, that delete has been rolled back! Provided operation will eventually propage its replicas across nodes using gossip protocol, causing the altered state to eventually converge across all of them. +- `ReplicationDeleteFailure` when operation failed to satisfy specified consistency constraints. **Warning**: this doesn't mean, that delete has been rolled back! Provided operation will eventually propagate its replicas across nodes using gossip protocol, causing the altered state to eventually converge across all of them. Deletes doesn't specify it's own consistency - it uses the same `IWriteConsistency` interface as updates. @@ -148,12 +148,12 @@ All subscribers are removed automatically when terminated. This can be also done ## Available replicated data types -Akka.DistributedData specifies several data types, sharing the same `IReplicatedData` interface. All of them share some common members, such as (default) empty value or `Merge` method used to merge two replicas of the same data with automatic conflic resolution. All of those values are also immutable - this means, that any operations, which are supposed to change their state, produce new instance in result: +Akka.DistributedData specifies several data types, sharing the same `IReplicatedData` interface. All of them share some common members, such as (default) empty value or `Merge` method used to merge two replicas of the same data with automatic conflict resolution. All of those values are also immutable - this means, that any operations, which are supposed to change their state, produce new instance in result: - `Flag` is a boolean CRDT flag, which default value is always `false`. When a merging replicas have conflicting state, `true` will always win over `false`. - `GCounter` (also known as growing-only counter) allows only for addition/increment of its state. Trying to add a negative value is forbidden here. 
Total value of the counter is a sum of values across all of the replicas. In case of conflicts during merge operation, a copy of replica with greater value will always win. -- `PNCounter` allows for both increments and decrements. A total value of the counter is a sum of increments across all replcias decreased by the sum of all decrements. -- `GSet` is an add-only set, which disallows to remove elements once added to it. Merges of GSets are simple unions of their elements. This data type doesn't produce any garbadge. +- `PNCounter` allows for both increments and decrements. A total value of the counter is a sum of increments across all replicas decreased by the sum of all decrements. +- `GSet` is an add-only set, which disallows to remove elements once added to it. Merges of GSets are simple unions of their elements. This data type doesn't produce any garbage. - `ORSet` is implementation of an observed remove add-wins set. It allows to both add and remove its elements any number of times. In case of conflicts when merging replicas, added elements always wins over removed ones. - `ORDictionary` (also knowns as OR-Map or Observed Remove Map) has similar semantics to OR-Set, however it allows to merge values (which must be CRDTs themselves) in case of concurrent updates. - `ORMultiDictionary` is a multi-map implementation based on `ORDictionary`, where values are represented as OR-Sets. Use `AddItem` or `RemoveItem` to add or remove elements to the bucket under specified keys. @@ -165,7 +165,7 @@ Keep in mind, that most of the replicated collections add/remove methods require ## Tombstones -One of the issue of CRDTs, is that they accumulate history of changes (including removed elements), producing a garbadge, that effectivelly pile up in memory. While this is still a problem, it can be limited by replicator, which is able to remove data associated with nodes, that no longer exist in the cluster. This process is known as a pruning. 
+One of the issues with CRDTs is that they accumulate a history of changes (including removed elements), producing garbage that effectively piles up in memory. While this is still a problem, it can be limited by the replicator, which is able to remove data associated with nodes that no longer exist in the cluster. This process is known as pruning.
@@ -21,7 +22,7 @@ To solve this kind of problems we need to determine a common strategy, in which Since Akka.NET cluster is working in peer-to-peer mode, it means that there is no single *global* entity which is able to arbitrary define one true state of the cluster. Instead each node has so called failure detector, which tracks the responsiveness and checks health of other connected nodes. This allows us to create a *local* node perspective on the overall cluster state. -In the past the only available opt-in stategy was an auto-down, in which each node was automatically downing others after reaching a certain period of unreachability. While this approach was enough to react on machine crashes, it was failing in face of network partitions: if cluster was split into two or more parts due to network connectivity issues, each one of them would simply consider others as down. This would lead to having several independent clusters not knowning about each other. It is especially disastrous in case of Cluster Singleton and Cluster Sharding features, both relying on having only one actor instance living in the cluster at the same time. +In the past the only available opt-in strategy was an auto-down, in which each node was automatically downing others after reaching a certain period of unreachability. While this approach was enough to react on machine crashes, it was failing in face of network partitions: if cluster was split into two or more parts due to network connectivity issues, each one of them would simply consider others as down. This would lead to having several independent clusters not knowning about each other. It is especially disastrous in case of Cluster Singleton and Cluster Sharding features, both relying on having only one actor instance living in the cluster at the same time. Split brain resolver feature brings ability to apply different strategies for managing node lifecycle in face of network issues and machine crashes. 
It works as a custom downing provider. Therefore in order to use it, **all of your Akka.NET cluster nodes must define it with the same configuration**. Here's how minimal configuration looks like: @@ -48,7 +49,7 @@ To decide which strategy to use, you can set `akka.cluster.split-brain-resolver. - `keep-oldest` - `keep-referee` -All strategies will be applied only after cluster state has reached stability for specified time treshold (no nodes transitioning between different states for some time), specified by `stable-after` setting. Nodes which are joining will not affect this treshold, as they won't be promoted to UP status in face unreachable nodes. For the same reason they won't be taken into account, when a strategy will be applied. +All strategies will be applied only after cluster state has reached stability for specified time threshold (no nodes transitioning between different states for some time), specified by `stable-after` setting. Nodes which are joining will not affect this treshold, as they won't be promoted to UP status in face unreachable nodes. For the same reason they won't be taken into account, when a strategy will be applied. ```hocon akka.cluster.split-brain-resolver { @@ -134,7 +135,7 @@ akka.cluster.split-brain-resolver { The `keep-oldest` strategy, when a network split has happened, will down a part of the cluster which doesn't contain the oldest node. -When to use it? This approach is particularly good in combination with Cluster Singleton, which usually is running on the oldest cluster member. It's also usefull, when you have a one starter node configured as `akka.cluster.seed-nodes` for others, which will still allow you to add and remove members using its address. +When to use it? This approach is particularly good in combination with Cluster Singleton, which usually is running on the oldest cluster member. 
It's also useful when you have one starter node configured as `akka.cluster.seed-nodes` for others, which will still allow you to add and remove members using its address.
Configuration: diff --git a/docs/articles/concepts/actor-systems.md b/docs/articles/concepts/actor-systems.md index d2872da243e..922422fd0eb 100644 --- a/docs/articles/concepts/actor-systems.md +++ b/docs/articles/concepts/actor-systems.md @@ -49,7 +49,7 @@ The non-exhaustive list of adequate solutions to the "blocking problem" includes The first possibility is especially well-suited for resources which are single-threaded in nature, like database handles which traditionally can only execute one outstanding query at a time and use internal synchronization to ensure this. A common pattern is to create a router for N actors, each of which wraps a single DB connection and handles queries as sent to the router. The number N must then be tuned for maximum throughput, which will vary depending on which DBMS is deployed on what hardware. > [!NOTE] ->Configuring thread pools is a task best delegated to Akka.NET, simply configure in the application.conf and instantiate through an ActorSystem. +> Configuring thread pools is a task best delegated to Akka.NET, simply configure in the application.conf and instantiate through an ActorSystem. ## What you should not concern yourself with An actor system manages the resources it is configured to use in order to run the actors which it contains. There may be millions of actors within one such system, after all the mantra is to view them as abundant and they weigh in at an overhead of only roughly 300 bytes per instance. Naturally, the exact order in which messages are processed in large systems is not controllable by the application author, but this is also not intended. Take a step back and relax while Akka.NET does the heavy lifting under the hood. diff --git a/docs/articles/intro/tutorial-1.md b/docs/articles/intro/tutorial-1.md index 5770a67f949..5a268e4f00a 100644 --- a/docs/articles/intro/tutorial-1.md +++ b/docs/articles/intro/tutorial-1.md @@ -5,32 +5,45 @@ title: Part 1. 
Top-level Architecture # Part 1: Top-level Architecture -In this and the following chapters, we will build a sample Akka.NET application to introduce you to the language of -actors and how solutions can be formulated with them. It is a common hurdle for beginners to translate their project -into actors even though they don't understand what they do on the high-level. We will build the core logic of a small -application and this will serve as a guide for common patterns that will help to kickstart Akka.NET projects. - -The application we aim to write will be a simplified IoT system where devices, installed at the home of users, can report temperature data from sensors. Users will be able to query the current state of these sensors. To keep -things simple, we will not actually expose the application via HTTP or any other external API, we will, instead, concentrate only on the -core logic. However, we will write tests for the pieces of the application to get comfortable and -proficient with testing actors early on. +In this and the following chapters, we will build a sample Akka.NET application +to introduce you to the language of actors and how solutions can be formulated +with them. It is a common hurdle for beginners to translate their project into +actors even though they don't understand what they do on the high-level. We will +build the core logic of a small application and this will serve as a guide for +common patterns that will help to kickstart Akka.NET projects. + +The application we aim to write will be a simplified IoT system where devices, +installed at the home of users, can report temperature data from sensors. Users +will be able to query the current state of these sensors. To keep things simple, +we will not actually expose the application via HTTP or any other external API, +we will, instead, concentrate only on the core logic. 
However, we will write +tests for the pieces of the application to get comfortable and proficient with +testing actors early on. ## Our Goals for the IoT System -We will build a simple IoT application with the bare essentials to demonstrate designing an Akka.NET-based system. The application will consist of two main components: - - * **Device data collection:** This component has the responsibility to maintain a local representation of the - otherwise remote devices. The devices will be organized into device groups, grouping together sensors belonging to a home. - * **User dashboards:** This component has the responsibility to periodically collect data from the devices for a - logged in user and present the results as a report. - -For simplicity, we will only collect temperature data for the devices, but in a real application our local representations -for a remote device, which we will model as an actor, would have many more responsibilities. Among others; reading the -configuration of the device, changing the configuration, checking if the devices are unresponsive, etc. We leave -these complexities for now as they can be easily added as an exercise. - -We will also not address the means by which the remote devices communicate with the local representations (actors). Instead, -we just build an actor based API that such a network protocol could use. We will use tests for our API everywhere though. +We will build a simple IoT application with the bare essentials to demonstrate +designing an Akka.NET-based system. The application will consist of two main +components: + + * **Device data collection:** This component has the responsibility to maintain + a local representation of the otherwise remote devices. The devices will be + organized into device groups, grouping together sensors belonging to a home. 
+ * **User dashboards:** This component has the responsibility to periodically + collect data from the devices for a logged in user and present the results as + a report. + +For simplicity, we will only collect temperature data for the devices, but in a +real application our local representations for a remote device, which we will +model as an actor, would have many more responsibilities. Among others; reading +the configuration of the device, changing the configuration, checking if the +devices are unresponsive, etc. We leave these complexities for now as they can +be easily added as an exercise. + +We will also not address the means by which the remote devices communicate with +the local representations (actors). Instead, we just build an actor based API +that such a network protocol could use. We will use tests for our API everywhere +though. The architecture of the application will look like this: @@ -38,50 +51,67 @@ The architecture of the application will look like this: ## Top Level Architecture -When writing prose, the hardest part is usually to write the first couple of sentences. There is a similar feeling -when trying to build an Akka.NET system: What should be the first actor? Where should it live? What should it do? -Fortunately, unlike with prose, there are established best practices that can guide us through these initial steps. - -When one creates an actor in Akka.NET it always belongs to a certain parent. This means that actors are always organized -into a tree. In general, creating an actor can only happen from inside another actor. This creator actor becomes the -_parent_ of the newly created _child_ actor. You might ask then, who is the parent of the _first_ actor you create? -As we have seen in the previous chapters, to create a top-level actor one must call `System.ActorOf()`. 
This does -not create a "freestanding" actor though, instead, it injects the corresponding actor as a child into an already -existing tree: +When writing prose, the hardest part is usually to write the first couple of +sentences. There is a similar feeling when trying to build an Akka.NET system: +What should be the first actor? Where should it live? What should it do? +Fortunately, unlike with prose, there are established best practices that can +guide us through these initial steps. + +When one creates an actor in Akka.NET it always belongs to a certain parent. +This means that actors are always organized into a tree. In general, creating an +actor can only happen from inside another actor. This 'creator' actor becomes +the _parent_ of the newly created _child_ actor. You might ask then, who is the +parent of the _first_ actor you create? To create a top-level actor one must +first initialise an _actor system_, let's refer to this as the object `System`. +This is followed by a call to `System.ActorOf()` which returns a reference to +the newly created actor. This does not create a "freestanding" actor though, +instead, it injects the corresponding actor as a child into an already existing +tree: ![box diagram of the architecture](/images/actor_top_tree.png) -As you see, creating actors from the "top" injects those actors under the path `/user/`, so for example creating -an actor named `myActor` will end up having the path `/user/myActor`. In fact, there are three already existing -actors in the system: - - - `/` the so-called _root guardian_. This is the parent of all actors in the system, and the last one to stop - when the system itself is terminated. - - `/user` the _guardian_. **This is the parent actor for all user created actors**. The name `user` should not confuse - you, it has nothing to do with the logged in user, nor user handling in general. 
This name really means _userspace_ - as this is the place where actors that do not access Akka.NET internals live, i.e. all the actors created by users of the Akka.NET library. Every actor you will create will have the constant path `/user/` prepended to it. +As you see, creating actors from the "top" injects those actors under the path +`/user/`, so for example creating an actor named `myActor` will end up having +the path `/user/myActor`. In fact, there are three already existing actors in +the system: + + - `/` the so-called _root guardian_. This is the parent of all actors in the + system, and the last one to stop when the system itself is terminated. + - `/user` the _guardian_. **This is the parent actor for all user created + actors**. The name `user` should not confuse you, it has nothing to do with + the logged in user, nor user handling in general. This name really means + _userspace_ as this is the place where actors that do not access Akka.NET + internals live, i.e. all the actors created by users of the Akka.NET library. + Every actor you will create will have the constant path `/user/` prepended to + it. - `/system` the _system guardian_. -The names of these built-in actors contain _guardian_ because these are _supervising_ every actor living as a child -of them, i.e. under their path. We will explain supervision in more detail, all you need to know now is that every -unhandled failure from actors bubbles up to their parent that, in turn, can decide how to handle this failure. These -predefined actors are guardians in the sense that they are the final lines of defense, where all unhandled failures +The names of these built-in actors contain _guardian_ because these are +_supervising_ every actor living as a child of them, i.e. under their path. We +will explain supervision in more detail, all you need to know now is that every +unhandled failure from actors bubbles up to their parent that, in turn, can +decide how to handle this failure. 
These predefined actors are guardians in the +sense that they are the final lines of defence, where all unhandled failures from user, or system, actors end up. -> Does the root guardian (the root path `/`) have a parent? As it turns out, it has. This special entity is called -> the "Bubble-Walker". This special entity is invisible for the user and only has uses internally. +> Does the root guardian (the root path `/`) have a parent? As it turns out, it +> has. This special entity is called the "Bubble-Walker". This special entity is +> invisible for the user and only has uses internally. ### Structure of an IActorRef and Paths of Actors -The easiest way to see this in action is to simply print `IActorRef` instances. In this small experiment, we print -the reference of the first actor we create and then we create a child of this actor, and print its reference. We have -already created actors with `System.ActorOf()`, which creates an actor under `/user` directly. We call this kind -of actors _top level_, even though in practice they are not on the top of the hierarchy, only on the top of the -_user defined_ hierarchy. Since in practice we usually concern ourselves about actors under `/user` this is still a -convenient terminology, and we will stick to it. +The easiest way to see this in action is to simply print `IActorRef` instances. +In this small experiment, we print the reference of the first actor we create +and then we create a child of this actor, and print its reference. We have +already created actors with `System.ActorOf()`, which creates an actor under +`/user` directly. We call this kind of actors _top level_, even though in +practice they are not on the top of the hierarchy, only on the top of the _user +defined_ hierarchy. Since in practice we usually concern ourselves about actors +under `/user` this is still a convenient terminology, and we will stick to it. 
-Creating a non-top-level actor is possible from any actor, by invoking `Context.ActorOf()` which has the exact same -signature as its top-level counterpart. This is how it looks like in practice: +Creating a non-top-level actor is possible from any actor, by invoking +`Context.ActorOf()` which has the exact same signature as its top-level +counterpart. This is how it looks like in practice: [!code-csharp[Main](../../examples/Tutorials/Tutorial1/ActorHierarchyExperiments.cs?name=print-refs)] [!code-csharp[Main](../../examples/Tutorials/Tutorial1/ActorHierarchyExperiments.cs?name=print-refs2)] @@ -93,42 +123,55 @@ First : Actor[akka://testSystem/user/first-actor#1053618476] Second: Actor[akka://testSystem/user/first-actor/second-actor#-1544706041] ``` -First, we notice that all of the paths start with `akka://testSystem/`. Since all actor references are valid URLs, there -is a protocol field needed, which is `akka://` in the case of actors. Then, just like on the World Wide Web, the system -is identified. In our case, this is `testSystem`, but could be any other name (if remote communication between multiple -systems is enabled this name is the hostname of the system so other systems can find it on the network). Our two actors, -as we have discussed before, live under user, and form a hierarchy: +First, we notice that all of the paths start with `akka://testSystem/`. Since +all actor references are valid URLs, there is a protocol field needed, which is +`akka://` in the case of actors. Then, just like on the World Wide Web, the +system is identified. In our case, this is `testSystem`, but could be any other +name (if remote communication between multiple systems is enabled this name is +the hostname of the system so other systems can find it on the network). 
Our two +actors, as we have discussed before, live under user, and form a hierarchy: - * `akka://testSystem/user/first-actor` is the first actor we created, which lives directly under the user guardian, - `/user` - * `akka://testSystem/user/first-actor/second-actor` is the second actor we created, using `Context.ActorOf`. As we - see it lives directly under the first actor. + * `akka://testSystem/user/first-actor` is the first actor we created, which + lives directly under the user guardian, `/user` + * `akka://testSystem/user/first-actor/second-actor` is the second actor we + created, using `Context.ActorOf`. As we see it lives directly under the first + actor. -The last part of the actor reference, like `#1053618476` is a unique identifier of the actor living under the path. -This is usually not something the user needs to be concerned with, and we leave the discussion of this field for later. +The last part of the actor reference, like `#1053618476` is a unique identifier +of the actor living under the path. This is usually not something the user needs +to be concerned with, and we leave the discussion of this field for later. ### Hierarchy and Lifecycle of Actors -We have so far seen that actors are organized into a **strict hierarchy**. This hierarchy consists of a predefined -upper layer of three actors (the root guardian, the user guardian, and the system guardian), thereafter the user created -top-level actors (those directly living under `/user`) and the children of those. We now understand what the hierarchy -looks like, but there are some nagging unanswered questions: _Why do we need this hierarchy? What is it used for?_ - -The first use of the hierarchy is to manage the lifecycle of actors. Actors pop into existence when created, then later, -at user requests, they are stopped. Whenever an actor is stopped, all of its children are _recursively stopped_ too. 
-This is a very useful property and greatly simplifies cleaning up resources and avoiding resource leaks (like open -sockets files, etc.). In fact, one of the overlooked difficulties when dealing with low-level multi-threaded code is -the lifecycle management of various concurrent resources. - -Stopping an actor can be done by calling `Context.Stop(actorRef)`. **It is considered a bad practice to stop arbitrary -actors this way**. The recommended pattern is to call `Context.Stop(self)` inside an actor to stop itself, usually as -a response to some user defined stop message or when the actor is done with its job. - -The actor API exposes many lifecycle hooks that the actor implementation can override. The most commonly used are -`PreStart()` and `PostStop()`. - - * `PreStart()` is invoked after the actor has started but before it processes its first message. - * `PostStop()` is invoked just before the actor stops. No messages are processed after this point. +We have so far seen that actors are organized into a **strict hierarchy**. This +hierarchy consists of a predefined upper layer of three actors (the root +guardian, the user guardian, and the system guardian), thereafter the user +created top-level actors (those directly living under `/user`) and the children +of those. We now understand what the hierarchy looks like, but there are some +nagging unanswered questions: _Why do we need this hierarchy? What is it used +for?_ + +The first use of the hierarchy is to manage the lifecycle of actors. Actors pop +into existence when created, then later, at user requests, they are stopped. +Whenever an actor is stopped, all of its children are _recursively stopped_ too. +This is a very useful property and greatly simplifies cleaning up resources and +avoiding resource leaks (like open sockets, files, etc.). In fact, one of the +overlooked difficulties when dealing with low-level multi-threaded code is the +lifecycle management of various concurrent resources. 
+ +Stopping an actor can be done by calling `Context.Stop(actorRef)`. **It is +considered a bad practice to stop arbitrary actors this way**. The recommended +pattern is to call `Context.Stop(self)` inside an actor to stop itself, usually +as a response to some user defined stop message or when the actor is done with +its job. + +The actor API exposes many lifecycle hooks that the actor implementation can +override. The most commonly used are `PreStart()` and `PostStop()`. + + * `PreStart()` is invoked after the actor has started but before it processes + its first message. + * `PostStop()` is invoked just before the actor stops. No messages are + processed after this point. Again, we can try out all this with a simple experiment: @@ -144,18 +187,23 @@ second stopped first stopped ``` -We see that when we stopped actor `first` it recursively stopped actor `second` and thereafter it stopped itself. -This ordering is strict, _all_ `PostStop()` hooks of the children are called before the `PostStop()` hook of the parent -is called. +We see that when we stopped actor `first` it recursively stopped actor `second` +and thereafter it stopped itself. This ordering is strict, _all_ `PostStop()` +hooks of the children are called before the `PostStop()` hook of the parent is +called. -The family of these lifecycle hooks is rich, and we recommend reading [the actor lifecycle](xref:untyped-actor-api#actor-lifecycle) section of the reference for all details. +The family of these lifecycle hooks is rich, and we recommend reading [the actor +lifecycle](xref:untyped-actor-api#actor-lifecycle) section of the reference for +all details. ### Hierarchy and Failure Handling (Supervision) -Parents and children are not only connected by their lifecycles. Whenever an actor fails (throws an exception or -an unhandled exception bubbles out from `receive`) it is temporarily suspended. 
The failure information is propagated -to the parent, which decides how to handle the exception caused by the child actor. The default _supervisor strategy_ is to -stop and restart the child. If you don't change the default strategy all failures result in a restart. We won't change +Parents and children are not only connected by their lifecycles. Whenever an +actor fails (throws an exception or an unhandled exception bubbles out from +`receive`) it is temporarily suspended. The failure information is propagated to +the parent, which decides how to handle the exception caused by the child actor. +The default _supervisor strategy_ is to stop and restart the child. If you don't +change the default strategy all failures result in a restart. We won't change the default strategy in this simple experiment: [!code-csharp[Main](../../examples/Tutorials/Tutorial1/ActorHierarchyExperiments.cs?name=supervise)] @@ -176,40 +224,51 @@ Cause: System.Exception: I failed! at Akka.Actor.ActorCell.Invoke(Envelope envelope) ``` -We see that after failure the actor is stopped and immediately started. We also see a log entry reporting the -exception that was handled, in this case, our test exception. In this example we use `PreStart()` and `PostStop()` hooks -which are the default to be called after and before restarts, so we cannot distinguish from inside the actor if it -was started for the first time or restarted. This is usually the right thing to do, the purpose of the restart is to -set the actor in a known-good state, which usually means a clean starting stage. **What actually happens though is -that the `PreRestart()` and `PostRestart()` methods are called which, if not overridden, by default delegate to -`PostStop()` and `PreStart()` respectively**. You can experiment with overriding these additional methods and see -how the output changes. - -For the impatient, we also recommend looking into the [supervision reference page](xref:supervision) for more in-depth -details. 
+We see that after failure the actor is stopped and immediately started. We also +see a log entry reporting the exception that was handled, in this case, our test +exception. In this example we use `PreStart()` and `PostStop()` hooks which are +the default to be called after and before restarts, so we cannot distinguish +from inside the actor if it was started for the first time or restarted. This is +usually the right thing to do, the purpose of the restart is to set the actor in +a known-good state, which usually means a clean starting stage. **What actually +happens though is that the `PreRestart()` and `PostRestart()` methods are called +which, if not overridden, by default delegate to `PostStop()` and `PreStart()` +respectively**. You can experiment with overriding these additional methods and +see how the output changes. + +For the impatient, we also recommend looking into the [supervision reference +page](xref:supervision) for more in-depth details. ### The First Actor -Actors are organized into a strict tree, where the lifecycle of every child is tied to the parent and where parents -are responsible for deciding the fate of failed children. At first, it might not be evident how to map our problem -to such a tree, but in practice, this is easier than it looks. All we need to do is to rewrite our architecture diagram -that contained nested boxes into a tree: +Actors are organized into a strict tree, where the lifecycle of every child is +tied to the parent and where parents are responsible for deciding the fate of +failed children. At first, it might not be evident how to map our problem to +such a tree, but in practice, this is easier than it looks. All we need to do is +to rewrite our architecture diagram that contained nested boxes into a tree: ![actor tree diagram of the architecture](/images/arch_tree_diagram.png) -In simple terms, every component manages the lifecycle of the subcomponents. No subcomponent can outlive the parent -component. 
This is exactly how the actor hierarchy works. Furthermore, it is desirable that a component handles the failure -of its subcomponents. Together, these two desirable properties lead to the conclusion that the "contained-in" relationship of components should be mapped to the -"children-of" relationship of actors. - -The remaining question is how to map the top-level components to actors. It might be tempting to create the actors -representing the main components as top-level actors. We instead, recommend creating an explicit component that -represents the whole application. In other words, we will have a single top-level actor in our actor system and have -the main components as children of this actor. - -The first actor happens to be rather simple now, as we have not implemented any of the components yet. What is new -is that we have dropped using `Console.WriteLine()` and instead use `ILoggingAdapter` which allows us to use the -logging facility built into Akka.NET directly. Furthermore, we are using a recommended creational pattern for actors; define a static `Props()` method in the the actor: +In simple terms, every component manages the lifecycle of the subcomponents. No +subcomponent can outlive the parent component. This is exactly how the actor +hierarchy works. Furthermore, it is desirable that a component handles the +failure of its subcomponents. Together, these two desirable properties lead to +the conclusion that the "contained-in" relationship of components should be +mapped to the "children-of" relationship of actors. + +The remaining question is how to map the top-level components to actors. It +might be tempting to create the actors representing the main components as +top-level actors. We instead, recommend creating an explicit component that +represents the whole application. In other words, we will have a single +top-level actor in our actor system and have the main components as children of +this actor. 
+ +The first actor happens to be rather simple now, as we have not implemented any +of the components yet. What is new is that we have dropped using +`Console.WriteLine()` and instead use `ILoggingAdapter` which allows us to use +the logging facility built into Akka.NET directly. Furthermore, we are using a +recommended creational pattern for actors; define a static `Props()` method in +the actor: [!code-csharp[Main](../../examples/Tutorials/Tutorial1/IotSupervisor.cs?name=iot-supervisor)] @@ -217,7 +276,8 @@ All we need now is to tie this up with a class with the `main` entry point: [!code-csharp[Main](../../examples/Tutorials/Tutorial1/IotApp.cs?name=iot-app)] -This application does very little for now, but we have the first actor in place and we are ready to extend it further. +This application does very little for now, but we have the first actor in place +and we are ready to extend it further. ## What is next? diff --git a/docs/articles/intro/tutorial-3.md b/docs/articles/intro/tutorial-3.md index ac2337751d7..5d0fbbc1033 100644 --- a/docs/articles/intro/tutorial-3.md +++ b/docs/articles/intro/tutorial-3.md @@ -84,7 +84,7 @@ is known up front: device groups and device actors are created on-demand. The st the acknowledgment, the receiver, i.e. the device, will be able to learn its `IActorRef` and send direct messages to its device actor in the future. 
Now that the steps are defined, we only need to define the messages that we will use to communicate requests and -their acknowledgement: +their acknowledgment: [!code-csharp[DeviceManager.scala](../../examples/Tutorials/Tutorial3/DeviceManager.cs?name=device-manager-msgs)] diff --git a/docs/articles/networking/io.md b/docs/articles/networking/io.md index 64ae6fcb68a..9ffa3aa7d04 100644 --- a/docs/articles/networking/io.md +++ b/docs/articles/networking/io.md @@ -20,9 +20,10 @@ var system = ActorSystem.Create("example"); var manager = system.Tcp(); ``` -### TCP Driver +## TCP Driver + +### Client Connection -#### Client Connection To create a connection an actor sends a `Tcp.Connect` message to the TCP Manager. Once the connection is established the connection actor sends a `Tcp.Connected` message to the `commander`, which registers the `connection handler` by replying with a `Tcp.Register` message. @@ -36,7 +37,8 @@ The following example shows a simple Telnet client. The client send lines entere [!code-csharp[Main](../../examples/DocsExamples/Networking/IO/TelnetClient.cs?range=10-63)] -#### Server Connection +### Server Connection + To accept connections, an actor sends an `Tcp.Bind` message to the TCP manager, passing the `bind handler` in the message. The `bind commander` will receive a `Tcp.Bound` message when the connection is listening. 
@@ -50,4 +52,4 @@ The following code example shows a simple server that echo's data received from [!code-csharp[Main](../../examples/DocsExamples/Networking/IO/EchoServer.cs?range=8-29)] -[!code-csharp[Main](../../examples/DocsExamples/Networking/IO/EchoConnection.cs?range=6-27)] +[!code-csharp[Main](../../examples/DocsExamples/Networking/IO/EchoConnection.cs?range=7-28)] diff --git a/docs/articles/networking/multi-node-test-kit.md b/docs/articles/networking/multi-node-test-kit.md index 200611b27a9..9c6b4278263 100644 --- a/docs/articles/networking/multi-node-test-kit.md +++ b/docs/articles/networking/multi-node-test-kit.md @@ -99,6 +99,7 @@ A multi-node spec gives us the ability to do the following: 4. Create barriers that are used to synchronize nodes at specific points within a test; and 5. Test assertions across one or more nodes. +> [!NOTE] > Everything that's available in the default `Akka.TestKit` is also available inside the `Akka.Remote.TestKit`, but it's worth bearing in mind that `Akka.Remote.TestKit` only works with the `Akka.MultiNodeTestRunner` and uses Xunit 2.0 internally. #### Step 1 - Subclass `MultiNodeConfig` diff --git a/docs/articles/persistence/event-sourcing.md b/docs/articles/persistence/event-sourcing.md index 4737234aa32..d06492fcd39 100644 --- a/docs/articles/persistence/event-sourcing.md +++ b/docs/articles/persistence/event-sourcing.md @@ -174,7 +174,7 @@ You can also call `DeferAsync` with `Persist`. It is possible to call `Persist` and `PersistAsync` inside their respective callback blocks and they will properly retain both the thread safety (including the right value of `Sender`) as well as stashing guarantees. -In general it is encouraged to create command handlers which do not need to resort to nested event persisting, however there are situations where it may be useful. 
It is important to understand the ordering of callback execution in those situations, as well as their implication on the stashing behaviour (that persist enforces). In the following example two persist calls are issued, and each of them issues another persist inside its callback: +In general it is encouraged to create command handlers which do not need to resort to nested event persisting, however there are situations where it may be useful. It is important to understand the ordering of callback execution in those situations, as well as their implication on the stashing behavior (that persist enforces). In the following example two persist calls are issued, and each of them issues another persist inside its callback: [!code-csharp[Main](../../examples/DocsExamples/Persistence/PersistentActor/NestedPersists.cs?range=8-36)] @@ -269,7 +269,7 @@ This can be dangerous when used with `UntypedPersistentActor` due to the fact th > [!WARNING] > Consider using explicit shut-down messages instead of `PoisonPill` when working with persistent actors. -The example below highlights how messages arrive in the Actor's mailbox and how they interact with its internal stashing mechanism when `Persist()` is used. Notice the early stop behaviour that occurs when `PoisonPill` is used: +The example below highlights how messages arrive in the Actor's mailbox and how they interact with its internal stashing mechanism when `Persist()` is used. 
Notice the early stop behavior that occurs when `PoisonPill` is used: [!code-csharp[Main](../../examples/DocsExamples/Persistence/PersistentActor/AvoidPoisonPill.cs?range=9-35)] diff --git a/docs/articles/persistence/persistence-query.md b/docs/articles/persistence/persistence-query.md index b59fb67c745..f181d7e6127 100644 --- a/docs/articles/persistence/persistence-query.md +++ b/docs/articles/persistence/persistence-query.md @@ -7,9 +7,6 @@ Akka persistence query complements Persistence by providing a universal asynchro The most typical use case of persistence query is implementing the so-called query side (also known as "read side") in the popular CQRS architecture pattern - in which the writing side of the application (e.g. implemented using akka persistence) is completely separated from the "query side". Akka Persistence Query itself is not directly the query side of an application, however it can help to migrate data from the write side to the query side database. In very simple scenarios Persistence Query may be powerful enough to fulfill the query needs of your app, however we highly recommend (in the spirit of CQRS) of splitting up the write/read sides into separate datastores as the need arises. -> [!WARNING] -> This module is marked as `experimental` as of its introduction in Akka.Net 1.1.0. We will continue to improve this API based on our users’ feedback, which implies that while we try to keep incompatible changes to a minimum the binary compatibility guarantee for maintenance releases does not apply to the contents of the Akka.Persistence.Query package. - ## Design overview Akka Persistence Query is purposely designed to be a very loosely specified API. This is in order to keep the provided APIs general enough for each journal implementation to be able to expose its best features, e.g. 
a SQL journal can use complex SQL queries or if a journal is able to subscribe to a live event stream this should also be possible to expose the same API - a typed stream of events. diff --git a/docs/articles/persistence/persistent-fsm.md b/docs/articles/persistence/persistent-fsm.md index 128255668e7..8d809bcad31 100644 --- a/docs/articles/persistence/persistent-fsm.md +++ b/docs/articles/persistence/persistent-fsm.md @@ -42,7 +42,7 @@ Here is how everything is wired together: [!code-csharp[WebStoreCustomerFSMActor.cs](../../examples/DocsExamples/Persistence/WebStoreCustomerFSMActor.cs?name=persistent-fsm-apply-event)] `AndThen` can be used to define actions which will be executed following event’s persistence - convenient for "side effects" like sending a message or logging. Notice that actions defined in andThen block are not executed on recovery: -```C# +```cs GoTo(Paid.Instance).Applying(OrderExecuted.Instance).AndThen(cart => { if (cart is NonEmptyShoppingCart nonShoppingCart) @@ -52,7 +52,7 @@ GoTo(Paid.Instance).Applying(OrderExecuted.Instance).AndThen(cart => }); ``` A snapshot of state data can be persisted by calling the `SaveStateSnapshot()` method: -```C# +```cs Stop().Applying(OrderDiscarded.Instance).AndThen(cart => { reportActor.Tell(ShoppingCardDiscarded.Instance); diff --git a/docs/articles/remoting/messaging.md b/docs/articles/remoting/messaging.md index 53ee37da865..c788f3d2675 100644 --- a/docs/articles/remoting/messaging.md +++ b/docs/articles/remoting/messaging.md @@ -67,7 +67,7 @@ What's really going on there? The `Sender`, an `IActorRef`, is actually an `Akka.Remote.RemoteActorRef`! But the fact that this actor reference resides elsewhere on the network is a detail that's transparent to the actor code you wrote! 
-In essence, minus the initial `ActorSelection` used to start remote communication between the two `ActorSystem`s, any actor in either `ActorSystem` could reply to eachother without knowing or caring that they exist elsewhere on the network. That's pretty cool! +In essence, minus the initial `ActorSelection` used to start remote communication between the two `ActorSystem`s, any actor in either `ActorSystem` could reply to each other without knowing or caring that they exist elsewhere on the network. That's pretty cool! ## `RemoteActorRef` and Location Transparency What `RemoteActorRef` gives us is a magical property called [Location Transparency](/concepts/location-transparency.md). @@ -78,7 +78,7 @@ It's the job of the `RemoteActorRef` to make a remote actor running on in a diff Regardless of where the actor actually resides, it doesn't affect your code one way or another. -**So this has one profound implicat on your Akka.NET applications - all of your actor code *is already able to run on the network by default*. ** +**So this has one profound implication on your Akka.NET applications - all of your actor code** ***is already able to run on the network by default.*** Therefore, many of the code samples in *Akka.NET Remoting* won't look very "networky." That's on purpose. That's Akka.NET taking care of the heavy lifting for us! diff --git a/docs/articles/remoting/transports.md b/docs/articles/remoting/transports.md index ddbeb32b5ae..ac82271f4ba 100644 --- a/docs/articles/remoting/transports.md +++ b/docs/articles/remoting/transports.md @@ -6,7 +6,7 @@ title: Transports # Akka.Remote Transports In the [Akka.Remote overview](index.md) we introduced the concept of "transports" for Akka.Remote. -> A"transport" refers to an actual network transport, such as TCP or UDP. By default Akka.Remote uses a [DotNetty](https://github.com/Azure/DotNetty) TCP transport, but you could write your own transport and use that instead of you wish. 
+A "transport" refers to an actual network transport, such as TCP or UDP. By default Akka.Remote uses a [DotNetty](https://github.com/Azure/DotNetty) TCP transport, but you could write your own transport and use that instead of you wish. In this section we'll expand a bit more on what transports are and how Akka.Remote can support multiple transports simultaneously. @@ -14,12 +14,12 @@ In this section we'll expand a bit more on what transports are and how Akka.Remo Transports in Akka.Remote are abstractions on top of actual network transports, such as TCP and UDP sockets, and in truth transports have pretty simple requirements. > [!NOTE] -> most of the information below are things you, as an Akka.NET user, do not need to care about 99% of the time. Feel free to skip to the "Akka.Remote's Built-in Transports" section. +> most of the information below are things you, as an Akka.NET user, do not need to care about 99% of the time. Feel free to skip to the [Akka.Remote's Built-in Transports](#akkaremotes-built-in-transports) section. 
Transports **do not need to care** about: * **Serialization** - that's handled by Akka.NET itself; -* **Connection-oriented behavior** - the assocation process inside Akka.Remote ensures this, even over connectionless transports like UDP; -* **Reliable delivery** - for system messages this is handled by Akka.Remote and for user-defined messages this is taken care of at the application level through something like the [`AtLeastOnceDeliveryActor` class](xref:at-least-once-delivery), part of Akka.Persistence; +* **Connection-oriented behavior** - the association process inside Akka.Remote ensures this, even over connectionless transports like UDP; +* **Reliable delivery** - for system messages this is handled by Akka.Remote and for user-defined messages this is taken care of at the application level through something like the [`AtLeastOnceDeliveryActor`](xref:at-least-once-delivery) class, part of Akka.Persistence; * **Handling network failures** - all a transport needs to do is forward that information back up to Akka.Remote. Transports **do need to care** about: @@ -79,7 +79,7 @@ You'd define a custom HOCON section (`akka.remote.google-quic`) and let Akka.Rem > [!NOTE] > To implement a custom transport yourself, you need to implement the [`Akka.Remote.Transport.Transport` abstract class](xref:Akka.Remote.Transport.Transport). -One important thing to note is the `akka.remote.google-quic.transport-protocol` setting - this specifices the address scheme you will use to address remote actors via the Quic protocol. +One important thing to note is the `akka.remote.google-quic.transport-protocol` setting - this specifies the address scheme you will use to address remote actors via the Quic protocol. 
A remote address for an actor on this transport will look like: diff --git a/docs/articles/streams/basics.md b/docs/articles/streams/basics.md index 84b396416f8..0b2372e7596 100644 --- a/docs/articles/streams/basics.md +++ b/docs/articles/streams/basics.md @@ -211,7 +211,7 @@ specification, which Akka is a founding member of. The user of the library does not have to write any explicit back-pressure handling code — it is built in and dealt with automatically by all of the provided Akka Streams processing stages. It is possible however to add -explicit buffer stages with overflow strategies that can influence the behaviour of the stream. This is especially important +explicit buffer stages with overflow strategies that can influence the behavior of the stream. This is especially important in complex processing graphs which may even contain loops (which *must* be treated with very special care, as explained in [Graph cycles, liveness and deadlocks](xref:streams-working-with-graphs#graph-cycles-liveness-and-deadlocks)). diff --git a/docs/articles/streams/builtinstages.md b/docs/articles/streams/builtinstages.md index 35289f2b736..58872fdc4f5 100644 --- a/docs/articles/streams/builtinstages.md +++ b/docs/articles/streams/builtinstages.md @@ -365,7 +365,7 @@ to provide back pressure onto the sink. **cancels** when the actor terminates -**backpressures** when the actor acknowledgement has not arrived. +**backpressures** when the actor acknowledgment has not arrived. 
#### PreMaterialize @@ -680,7 +680,7 @@ Throwing an exception inside `RecoverWith` _will_ be logged on ERROR level autom **emits** the element is available from the upstream or upstream is failed and pf returns alternative Source -**backpressures** downstream backpressures, after failure happened it backprssures to alternative Source +**backpressures** downstream backpressures, after failure happened it backpressures to alternative Source **completes** upstream completes or upstream failed with exception pf can handle @@ -709,7 +709,7 @@ would log the `e2` error. Since the underlying failure signal OnError arrives out-of-band, it might jump over existing elements. This stage can recover the failure signal, but not the skipped elements, which will be dropped. -Similarily to `Recover` throwing an exception inside `SelectError` _will_ be logged on ERROR level automatically. +Similarly to `Recover` throwing an exception inside `SelectError` _will_ be logged on ERROR level automatically. **emits** when element is available from the upstream or upstream is failed and function returns an element diff --git a/docs/articles/streams/cookbook.md b/docs/articles/streams/cookbook.md index 7970dfb0605..08740eb8cee 100644 --- a/docs/articles/streams/cookbook.md +++ b/docs/articles/streams/cookbook.md @@ -376,7 +376,7 @@ a special ``Sum`` operation that collapses multiple upstream elements into one a the speed of the upstream unaffected by the downstream. When the upstream is faster, the sum process of the ``Conflate`` starts. Our reducer function simply takes -the freshest element. This cin a simple dropping operation. +the freshest element. This is shown as a simple dropping operation. 
```csharp var droppyStream = Flow.Create().Conflate((lastMessage, newMessage) => newMessage); diff --git a/docs/articles/streams/custom-stream-processing.md b/docs/articles/streams/custom-stream-processing.md index 219aa952703..52c015a2d6d 100644 --- a/docs/articles/streams/custom-stream-processing.md +++ b/docs/articles/streams/custom-stream-processing.md @@ -4,7 +4,7 @@ title: Custom stream processing --- # Custom stream processing -While the processing vocabulary of Akka Streams is quite rich (see the [Streams Cookbook](xref:streams-cookbook) for examples) it is sometimes necessary to define new transformation stages either because some functionality is missing from the stock operations, or for preformance reasons. In this part we show how to build custom processing stages and graph junctions of various kinds. +While the processing vocabulary of Akka Streams is quite rich (see the [Streams Cookbook](xref:streams-cookbook) for examples) it is sometimes necessary to define new transformation stages either because some functionality is missing from the stock operations, or for performance reasons. In this part we show how to build custom processing stages and graph junctions of various kinds. 
> [!NOTE] > A custom graph stage should not be the first tool you reach for, defining graphs using flows and the graph DSL is in general easier and does to a larger extent protect you from mistakes that might be easy to make with a custom `GraphStage` @@ -69,7 +69,7 @@ class NumbersSource : GraphStage> // Define the (sole) output port of this stage public Outlet Out { get; } = new Outlet("NumbersSource"); - // Define the shape of this tage, which is SourceShape with the port we defined above + // Define the shape of this stage, which is SourceShape with the port we defined above public override SourceShape Shape => new SourceShape(Out); //this is where the actual logic will be created @@ -121,7 +121,7 @@ The following operations are available for *input* ports: * `Grab(in)` acquires the element that has been received during an `onPush` It cannot be called again until the port is pushed again by the upstream. * `Cancel(in)` closes the input port. -The events corresponding to an *input* port can be received in an `Action` registrered to the input port using `setHandler(in, action)`. This handler has three callbacks: +The events corresponding to an *input* port can be received in an `Action` registered to the input port using `setHandler(in, action)`. This handler has three callbacks: * `onPush` is called when the output port has now a new element. Now it is possible to acquire this element using `Grab(in)` and/or call `Pull(in)` on the port to request the next element. It is not mandatory to grab the element, but if it is pulled while the element has not been grabbed it will drop the buffered element. @@ -362,9 +362,9 @@ If we attempt to draw the sequence of events, it shows that there is one "event ### Completion -Completion handling usually (but not exclusively) comes into the picture when processing stages need to emit a few more elements after their upstream source has been completed. 
We have seen an example of this in our first `Duplicator` implementation where the last element needs to be doubled even after the upstream neighbour stage has been completed. This can be done by overriding the `onUpstreamFinish` callback in `SetHandler(in, action)`. +Completion handling usually (but not exclusively) comes into the picture when processing stages need to emit a few more elements after their upstream source has been completed. We have seen an example of this in our first `Duplicator` implementation where the last element needs to be doubled even after the upstream neighbor stage has been completed. This can be done by overriding the `onUpstreamFinish` callback in `SetHandler(in, action)`. -Stages by default automatically stop once all of their ports (input and output) have been closed externally or internally. It is possible to opt out from this behavior by invoking `SetKeepGoing(true)` (which is not supported from the stage’s constructor and usually done in `PreStart`). In this case the stage **must** be explicitly closed by calling `CompleteStage()` or `FailStage(exception)`. This feature carries the risk of leaking streams and actors, therefore it should be used with care. +Stages by default automatically stop once all of their ports (input and output) have been closed externally or internally. It is possible to opt out from this behavior by invoking `SetKeepGoing(true)` (which is not supported from the stage's constructor and usually done in `PreStart`). In this case the stage **must** be explicitly closed by calling `CompleteStage()` or `FailStage(exception)`. This feature carries the risk of leaking streams and actors, therefore it should be used with care. 
### Logging inside GraphStages diff --git a/docs/articles/streams/reactivetweets.md b/docs/articles/streams/reactivetweets.md index a323eb39db3..000330986fd 100644 --- a/docs/articles/streams/reactivetweets.md +++ b/docs/articles/streams/reactivetweets.md @@ -172,7 +172,7 @@ elements*" this can be expressed using the ``Buffer`` element: ```csharp tweetSource .Buffer(10, OverflowStrategy.DropHead) - .Selet(SlowComputation) + .Select(SlowComputation) .RunWith(Sink.Ignore(), mat); ``` diff --git a/docs/articles/streams/stream-dynamic.md b/docs/articles/streams/stream-dynamic.md index 47f92c33d2c..5b0afa6f08c 100644 --- a/docs/articles/streams/stream-dynamic.md +++ b/docs/articles/streams/stream-dynamic.md @@ -66,6 +66,12 @@ by the switch. Refer to the below for usage examples. > [!NOTE] > A `UniqueKillSwitch` is always a result of a materialization, whilst `SharedKillSwitch` needs to be constructed before any materialization takes place. +### Using `CancellationToken`s as kill switches + +Plain old .NET cancellation tokens can also be used as kill switch stages via extension method: `cancellationToken.AsFlow(cancelGracefully: true)`. Their behavior is very similar to what a `SharedKillSwitch` has to offer with one exception - while normal kill switch recognizes difference between closing a stream gracefully (via. `Shutdown()`) and abruptly (via. `Abort(exception)`), .NET cancellation tokens have no such distinction. + +Therefore you need to explicitly specify at the moment of defining a flow stage, if cancellation token call should cause stream to close with completion or failure, by using `cancelGracefully` parameter. If it's set to `false`, calling cancel on a token's source will cause stream to fail with an `OperationCanceledException`. + ## Dynamic fan-in and fan-out with MergeHub and BroadcastHub There are many cases when consumers or producers of a certain service (represented as a Sink, Source, or possibly Flow) are dynamic and not known in advance. 
The Graph DSL does not allow to represent this, all connections of the graph must be known in advance and must be connected upfront. To allow dynamic fan-in and fan-out streaming, the Hubs should be used. They provide means to construct Sink and Source pairs that are “attached” to each other, but one of them can be materialized multiple times to implement dynamic fan-in or fan-out. diff --git a/docs/articles/streams/workingwithgraphs.md b/docs/articles/streams/workingwithgraphs.md index c6a01cdb7de..5ac388ffb35 100644 --- a/docs/articles/streams/workingwithgraphs.md +++ b/docs/articles/streams/workingwithgraphs.md @@ -260,7 +260,7 @@ It is possible to build reusable, encapsulated components of arbitrary input and As an example, we will build a graph junction that represents a pool of workers, where a worker is expressed as a ``Flow``, i.e. a simple transformation of jobs of type ``I`` to results of type ``O`` (as you have seen already, this flow can actually contain a complex graph inside). Our reusable worker pool junction will -not preserve the order of the incoming jobs (they are assumed to have a proper ID field) and it will use a ``Balance`` junction to schedule jobs to available workers. On top of this, our junction will feature a "fastlane", a dedicated port where jobs of higher priority can be sent. +not preserve the order of the incoming jobs (they are assumed to have a proper ID field) and it will use a ``Balance`` junction to schedule jobs to available workers. On top of this, our junction will feature a "fast lane", a dedicated port where jobs of higher priority can be sent. Altogether, our junction will have two input ports of type ``I`` (for the normal and priority jobs) and an output port of type ``O``. To represent this interface, we need to define a custom `Shape`. The following lines show how to do that. 
diff --git a/docs/articles/utilities/scheduler.md b/docs/articles/utilities/scheduler.md index a36e44633a0..7a01ac854bb 100644 --- a/docs/articles/utilities/scheduler.md +++ b/docs/articles/utilities/scheduler.md @@ -14,7 +14,7 @@ You can schedule sending of messages to actors and execution of tasks (`Action` delegate). You will get a `Task` back. By providing a `CancellationTokenSource` you can cancel the scheduled task. -The scheduler in Akka is designed for high-throughput of thousands up to millions of triggers. THe prime use-case being triggering Actor receive timeouts, Future timeouts, circuit breakers and other time dependent events which happen all-the-time and in many instances at the same time. The implementation is based on a Hashed Wheel Timer, which is a known datastrucure and algorithm for handling such use cases, refer to the [Hashed and Hierarchical Timing Wheels](http://www.cs.columbia.edu/~nahum/w6998/papers/sosp87-timing-wheels.pdf) whitepaper by Varghese and Lauck if you'd like to understand its inner workings. +The scheduler in Akka is designed for high-throughput of thousands up to millions of triggers. The prime use-case being triggering Actor receive timeouts, Future timeouts, circuit breakers and other time dependent events which happen all-the-time and in many instances at the same time. The implementation is based on a Hashed Wheel Timer, which is a known data structure and algorithm for handling such use cases, refer to the [Hashed and Hierarchical Timing Wheels](http://www.cs.columbia.edu/~nahum/w6998/papers/sosp87-timing-wheels.pdf) whitepaper by Varghese and Lauck if you'd like to understand its inner workings. The Akka scheduler is **not** designed for long-term scheduling (see [Akka.Quartz.Actor](https://github.com/akkadotnet/Akka.Quartz.Actor) instead for this use-case) nor is it to be used for highly precise firing of the events. 
The maximum amount of time into the future you can schedule an event to trigger is around 8 months, which in practice is too much to be useful since this would assume the system never went down during that period. If you need long-term scheduling we highly recommend looking into alternative schedulers, as this is not the use-case the Akka scheduler is implemented for. @@ -56,7 +56,7 @@ Context.System.Scheduler.ScheduleTellRepeatedly(....); ## The scheduler interface The actual scheduler implementation is defined by config and loaded upon ActorSystem start-up, which means that it is possible to provide a different one using the `akka.scheduler.implementation` configuration property. The referenced class must implement the `Akka.Actor.IScheduler` and `Akka.Actor.IAdvancedScheduler` interfaces -##The cancellable interface +## The cancellable interface Scheduling a task will result in a `ICancellable` or (or throw an `Exception` if attempted after the scheduler's shutdown). This allows you to cancel something that has been scheduled for execution. diff --git a/docs/articles/utilities/serilog.md b/docs/articles/utilities/serilog.md index 61d86c2bc97..bdbeb520659 100644 --- a/docs/articles/utilities/serilog.md +++ b/docs/articles/utilities/serilog.md @@ -15,7 +15,7 @@ PM> Install-Package Akka.Logger.Serilog ## Example -The following example uses Serilogs __Colored Console__ sink available via nuget, there are wide range of other sinks available depending on your needs, for example a rolling log file sink. See serilogs documentation for details on these. +The following example uses Serilog's __Colored Console__ sink available via nuget, there are wide range of other sinks available depending on your needs, for example a rolling log file sink. See serilog's documentation for details on these. 
``` PM> Install-Package Serilog.Sinks.ColoredConsole @@ -101,7 +101,7 @@ In order to be able to change log level without the need to recompile, we need t ``` -The code can then be updated as follows removing the inline HOCON from the actor system creation code. Note in the following example, if a minimum level is not specfied, Information level events and higher will be processed. Please read the documentation for [Serilog](https://serilog.net/) configuration for more details on this. It is also possible to move serilog configuration to the application configuration, for example if using a rolling log file sink, again, browsing the serilog documentation is the best place for details on that feature. +The code can then be updated as follows removing the inline HOCON from the actor system creation code. Note in the following example, if a minimum level is not specified, Information level events and higher will be processed. Please read the documentation for [Serilog](https://serilog.net/) configuration for more details on this. It is also possible to move serilog configuration to the application configuration, for example if using a rolling log file sink, again, browsing the serilog documentation is the best place for details on that feature. 
```csharp var logger = new LoggerConfiguration() @@ -113,6 +113,3 @@ Serilog.Log.Logger = logger; var system = ActorSystem.Create("my-test-system"); ``` - - - diff --git a/docs/examples/DocsExamples/DocsExamples.csproj b/docs/examples/DocsExamples/DocsExamples.csproj index 02eff16bc06..49f4c2f886b 100644 --- a/docs/examples/DocsExamples/DocsExamples.csproj +++ b/docs/examples/DocsExamples/DocsExamples.csproj @@ -1,5 +1,5 @@  - + Exe net461 @@ -18,8 +18,8 @@ - - + + \ No newline at end of file diff --git a/docs/examples/Tutorials/Tutorial2/DeviceInProgress.cs b/docs/examples/Tutorials/Tutorial2/DeviceInProgress.cs index 4500f909724..c65b04edfd1 100644 --- a/docs/examples/Tutorials/Tutorial2/DeviceInProgress.cs +++ b/docs/examples/Tutorials/Tutorial2/DeviceInProgress.cs @@ -58,7 +58,7 @@ public RespondTemperature(long requestId, double? value) #region device-with-read public class Device : UntypedActor { - private readonly double? _lastTemperatureReading = null; + private double? _lastTemperatureReading = null; public Device(string groupId, string deviceId) { diff --git a/src/common.props b/src/common.props index dc755eb08b6..a8384606649 100644 --- a/src/common.props +++ b/src/common.props @@ -2,21 +2,85 @@ Copyright © 2013-2018 Akka.NET Team Akka.NET Team - 1.3.8 + 1.3.9 http://getakka.net/images/akkalogo.png https://github.com/akkadotnet/akka.net https://github.com/akkadotnet/akka.net/blob/master/LICENSE $(NoWarn);CS1591 - 2.3.0 - 15.3.0 + 2.3.1 + 15.7.2 akka;actors;actor model;Akka;concurrency true - Placeholder* + Maintenance Release for Akka.NET 1.3** +Akka.NET v1.3.9 features some major changes to Akka.Cluster.Sharding, additional Akka.Streams stages, and some general bug fixes across the board. 
+Akka.Cluster.Sharding Improvements** +The [Akka.Cluster.Sharding documentation](http://getakka.net/articles/clustering/cluster-sharding.html#quickstart) already describes some of the major changes in Akka.NET v1.3.9, but we figured it would be worth calling special attention to those changes here. +Props Factory for Entity Actors** +> In some cases, the actor may need to know the `entityId` associated with it. This can be achieved using the `entityPropsFactory` parameter to `ClusterSharding.Start` or `ClusterSharding.StartAsync`. The entity ID will be passed to the factory as a parameter, which can then be used in the creation of the actor. +In addition to the existing APIs we've always had for defining sharded entities via `Props`, Akka.NET v1.3.9 introduces [a new method overload for `Start`](http://getakka.net/api/Akka.Cluster.Sharding.ClusterSharding.html#Akka_Cluster_Sharding_ClusterSharding_Start_System_String_System_Func_System_String_Akka_Actor_Props__Akka_Cluster_Sharding_ClusterShardingSettings_Akka_Cluster_Sharding_ExtractEntityId_Akka_Cluster_Sharding_ExtractShardId_) and [`StartAsync`](http://getakka.net/api/Akka.Cluster.Sharding.ClusterSharding.html#Akka_Cluster_Sharding_ClusterSharding_StartAsync_System_String_System_Func_System_String_Akka_Actor_Props__Akka_Cluster_Sharding_ClusterShardingSettings_Akka_Cluster_Sharding_ExtractEntityId_Akka_Cluster_Sharding_ExtractShardId_) which allows users to pass in the `entityId` of each entity actor as a constructor argument to those entities when they start. 
+For example: +``` +// Use the entityPropsFactory overload of ClusterSharding.Start: +// AnotherCounter.Props accepts the entityId as a constructor argument. +var anotherCounterShard = ClusterSharding.Get(Sys).Start( + typeName: AnotherCounter.ShardingTypeName, + entityPropsFactory: entityId => AnotherCounter.Props(entityId), + settings: ClusterShardingSettings.Create(Sys), + extractEntityId: Counter.ExtractEntityId, + extractShardId: Counter.ExtractShardId); +``` +This will give you the opportunity to pass in the `entityId` for each actor as a constructor argument into the `Props` of your entity actor and possibly other use cases too. +Improvements to Starting and Querying Existing Shard Entity Types** +Two additional major usability improvements to Cluster.Sharding come from some API additions and changes. +The first is that it's now possible to look up all of the currently registered shard types via the [`ClusterSharding.ShardTypeNames` property](http://getakka.net/api/Akka.Cluster.Sharding.ClusterSharding.html#Akka_Cluster_Sharding_ClusterSharding_ShardTypeNames). So long as a `ShardRegion` of that type has been started in the cluster, that entity type name will be added to the collection exposed by this property. +The other major usability improvement is a change to the `ClusterSharding.Start` method itself. Historically, you used to have to know whether or not the node you wanted to use sharding on was going to be hosting shards (call `ClusterSharding.Start`) or simply communicated with shards hosted on a different cluster role type (call `ClusterSharding.StartProxy`). Going forward, it's safe to call `ClusterSharding.Start` on any node and you will either receive an `IActorRef` to an active `ShardRegion` or a `ShardRegion` running in "proxy only" mode; this is determined by looking at the `ClusterShardingSettings` and determining if the current node is in a role that is allowed to host shards of this type. 
+[Akka.Cluster.Sharding: Sharding API Updates](https://github.com/akkadotnet/akka.net/pull/3524) +[Akka.Cluster.Sharding: sharding rebalance fix](https://github.com/akkadotnet/akka.net/pull/3518) +[Akka.Cluster.Sharding: log formatting fix](https://github.com/akkadotnet/akka.net/pull/3554) +[Akka.Cluster.Sharding: `RestartShard` escapes into userspace](https://github.com/akkadotnet/akka.net/pull/3509) +Akka.Streams Additions and Changes** +In Akka.NET v1.3.9 we've added some new built-in stream stages and API methods designed to help improve developer productivity and ease of use. +[Akka.Streams: add CombineMaterialized method to Source](https://github.com/akkadotnet/akka.net/pull/3489) +[Akka.Streams: +KillSwitches: flow stage from CancellationToken](https://github.com/akkadotnet/akka.net/pull/3568) +[Akka.Streams: Port KeepAliveConcat and UnfoldFlow](https://github.com/akkadotnet/akka.net/pull/3560) +[Akka.Streams: Port PagedSource & IntervalBasedRateLimiter](https://github.com/akkadotnet/akka.net/pull/3570) +Other Updates, Additions, and Bugfixes** +[Akka.Cluster: cluster coordinated leave fix for empty cluster](https://github.com/akkadotnet/akka.net/pull/3516) +[Akka.Cluster.Tools: bumped ClusterClient message drop log messages from DEBUG to WARNING](https://github.com/akkadotnet/akka.net/pull/3513) +[Akka.Cluster.Tools: Singleton - confirm TakeOverFromMe when singleton already in oldest state](https://github.com/akkadotnet/akka.net/pull/3553) +[Akka.Remote: RemoteWatcher race-condition fix](https://github.com/akkadotnet/akka.net/pull/3519) +[Akka: fix concurrency bug in CircuitBreaker](https://github.com/akkadotnet/akka.net/pull/3505) +[Akka: Fixed ReceiveTimeout not triggered in some case when combined with NotInfluenceReceiveTimeout messages](https://github.com/akkadotnet/akka.net/pull/3555) +[Akka.Persistence: Optimized recovery](https://github.com/akkadotnet/akka.net/pull/3549) +[Akka.Persistence: Allow persisting events when recovery has 
completed](https://github.com/akkadotnet/akka.net/pull/3366) +To [see the full set of changes for Akka.NET v1.3.9, click here](https://github.com/akkadotnet/akka.net/milestone/27). +| COMMITS | LOC+ | LOC- | AUTHOR | +| --- | --- | --- | --- | +| 28 | 2448 | 5691 | Aaron Stannard | +| 11 | 1373 | 230 | zbynek001 | +| 8 | 4590 | 577 | Bartosz Sypytkowski | +| 4 | 438 | 99 | Ismael Hamed | +| 4 | 230 | 240 | Sean Gilliam | +| 2 | 1438 | 0 | Oleksandr Bogomaz | +| 1 | 86 | 79 | Nick Polideropoulos | +| 1 | 78 | 0 | v1rusw0rm | +| 1 | 4 | 4 | Joshua Garnett | +| 1 | 32 | 17 | Jarl Sveinung Flø Rasmussen | +| 1 | 27 | 1 | Sam13 | +| 1 | 250 | 220 | Maxim Cherednik | +| 1 | 184 | 124 | Josh Taylor | +| 1 | 14 | 0 | Peter Shrosbree | +| 1 | 1278 | 42 | Marc Piechura | +| 1 | 1 | 1 | Vasily Kirichenko | +| 1 | 1 | 1 | Samuel Kelemen | +| 1 | 1 | 1 | Nyola Mike | +| 1 | 1 | 1 | Fábio Beirão | \ No newline at end of file diff --git a/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingFailureSpec.cs b/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingFailureSpec.cs index c08c3bd3faa..9e85c5ef50f 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingFailureSpec.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingFailureSpec.cs @@ -41,7 +41,7 @@ protected ClusterShardingFailureSpecConfig(string mode) serialization-bindings {{ ""System.Object"" = hyperion }} - }} + }} akka.loglevel = INFO akka.actor.provider = cluster akka.remote.log-remote-lifecycle-events = off @@ -176,7 +176,7 @@ public Entity() private readonly ClusterShardingFailureSpecConfig _config; private readonly List _storageLocations; - + protected ClusterShardingFailureSpec(ClusterShardingFailureSpecConfig config, Type type) : base(config, type) { @@ -194,7 +194,7 @@ protected ClusterShardingFailureSpec(ClusterShardingFailureSpecConfig config, Ty } protected bool IsDDataMode { get; } - + protected override void 
AfterTermination() { base.AfterTermination(); @@ -364,6 +364,13 @@ public void ClusterSharding_with_flaky_journal_network_should_recover_after_jour //Test the Shard passivate works during a journal failure shard2.Tell(new Passivate(PoisonPill.Instance), entity21); + + AwaitAssert(() => + { + region.Tell(new Get("21")); + ExpectMsg(v => v.Id == "21" && v.N == 0, hint: "Passivating did not reset Value down to 0"); + }); + region.Tell(new Add("21", 1)); region.Tell(new Get("21")); diff --git a/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingSpec.cs b/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingSpec.cs index d822a77e65a..4003b139acf 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingSpec.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding.Tests.MultiNode/ClusterShardingSpec.cs @@ -220,9 +220,15 @@ private Stop() public const int NumberOfShards = 12; private int _count = 0; + private readonly string id; - public Counter() + public static Props Props(string id) => Actor.Props.Create(() => new Counter(id)); + + public static string ShardingTypeName => "Counter"; + + public Counter(string id) { + this.id = id; Context.SetReceiveTimeout(TimeSpan.FromMinutes(2)); } @@ -233,7 +239,7 @@ protected override void PostStop() Thread.Sleep(500); } - public override string PersistenceId { get { return "Counter-" + Self.Path.Name; } } + public override string PersistenceId { get { return $"Counter.{ShardingTypeName}-{id}"; } } protected override bool ReceiveRecover(object message) { @@ -277,16 +283,17 @@ private void UpdateState(CounterChanged e) internal class QualifiedCounter : Counter { - public static Props Props(string typeName) + public static Props Props(string typeName, string id) { - return Actor.Props.Create(() => new QualifiedCounter(typeName)); + return Actor.Props.Create(() => new QualifiedCounter(typeName, id)); } public readonly string TypeName; public override string PersistenceId { 
get { return TypeName + "-" + Self.Path.Name; } } - public QualifiedCounter(string typeName) + public QualifiedCounter(string typeName, string id) + : base(id) { TypeName = typeName; } @@ -294,19 +301,34 @@ public QualifiedCounter(string typeName) internal class AnotherCounter : QualifiedCounter { - public AnotherCounter() - : base("AnotherCounter") + public static new Props Props(string id) + { + return Actor.Props.Create(() => new AnotherCounter(id)); + } + public static new string ShardingTypeName => nameof(AnotherCounter); + + public AnotherCounter(string id) + : base(AnotherCounter.ShardingTypeName, id) { } } internal class CounterSupervisor : ActorBase { - public readonly IActorRef Counter; + public static string ShardingTypeName => nameof(CounterSupervisor); - public CounterSupervisor() + public static Props Props(string id) { - Counter = Context.ActorOf(Props.Create(), "theCounter"); + return Actor.Props.Create(() => new CounterSupervisor(id)); + } + + public readonly string entityId; + public readonly IActorRef counter; + + public CounterSupervisor(string entityId) + { + this.entityId = entityId; + counter = Context.ActorOf(Counter.Props(entityId), "theCounter"); } protected override SupervisorStrategy SupervisorStrategy() @@ -328,7 +350,7 @@ protected override SupervisorStrategy SupervisorStrategy() protected override bool Receive(object message) { - Counter.Forward(message); + counter.Forward(message); return true; } } @@ -355,6 +377,9 @@ protected DDataClusterShardingWithEntityRecoverySpec(DDataClusterShardingWithEnt } public abstract class ClusterShardingSpec : MultiNodeClusterSpec { + // must use different unique name for some tests than the one used in API tests + public static string TestCounterShardingTypeName => $"Test{Counter.ShardingTypeName}"; + #region Setup private readonly Lazy _region; @@ -373,7 +398,7 @@ protected ClusterShardingSpec(ClusterShardingSpecConfig config, Type type) { _config = config; - _region = new Lazy(() => 
CreateRegion("counter", false)); + _region = new Lazy(() => CreateRegion(TestCounterShardingTypeName, false)); _rebalancingRegion = new Lazy(() => CreateRegion("rebalancingCounter", false)); _persistentEntitiesRegion = new Lazy(() => CreateRegion("RememberCounterEntities", true)); @@ -397,7 +422,7 @@ protected ClusterShardingSpec(ClusterShardingSpecConfig config, Type type) EnterBarrier("startup"); } protected bool IsDDataMode { get; } - + protected override void AfterTermination() { base.AfterTermination(); @@ -430,7 +455,7 @@ private void CreateCoordinator() { var typeNames = new[] { - "counter", "rebalancingCounter", "RememberCounterEntities", "AnotherRememberCounter", + TestCounterShardingTypeName, "rebalancingCounter", "RememberCounterEntities", "AnotherRememberCounter", "RememberCounter", "RebalancingRememberCounter", "AutoMigrateRememberRegionTest" }; @@ -479,7 +504,7 @@ private IActorRef CreateRegion(string typeName, bool rememberEntities) return Sys.ActorOf(Props.Create(() => new ShardRegion( typeName, - QualifiedCounter.Props(typeName), + entityId => QualifiedCounter.Props(typeName, entityId), settings, "/user/" + typeName + "Coordinator/singleton/coordinator", Counter.ExtractEntityId, @@ -609,7 +634,7 @@ public void ClusterSharding_should_use_second_node() r.Tell(new Counter.EntityEnvelope(2, Counter.Increment.Instance)); r.Tell(new Counter.Get(2)); ExpectMsg(3); - LastSender.Path.Should().Be(Node(_config.Second) / "user" / "counterRegion" / "2" / "2"); + LastSender.Path.Should().Be(Node(_config.Second) / "user" / $"{TestCounterShardingTypeName}Region" / "2" / "2"); r.Tell(new Counter.Get(11)); ExpectMsg(1); @@ -669,9 +694,9 @@ public void ClusterSharding_should_support_proxy_only_mode() var settings = ClusterShardingSettings.Create(cfg, Sys.Settings.Config.GetConfig("akka.cluster.singleton")); var proxy = Sys.ActorOf(ShardRegion.ProxyProps( - typeName: "counter", + typeName: TestCounterShardingTypeName, settings: settings, - coordinatorPath: 
"/user/counterCoordinator/singleton/coordinator", + coordinatorPath: $"/user/{TestCounterShardingTypeName}Coordinator/singleton/coordinator", extractEntityId: Counter.ExtractEntityId, extractShardId: Counter.ExtractShardId, replicator: Sys.DeadLetters, @@ -770,12 +795,12 @@ public void ClusterSharding_should_use_third_and_fourth_node() r.Tell(new Counter.EntityEnvelope(3, Counter.Increment.Instance)); r.Tell(new Counter.Get(3)); ExpectMsg(11); - LastSender.Path.Should().Be(Node(_config.Third) / "user" / "counterRegion" / "3" / "3"); + LastSender.Path.Should().Be(Node(_config.Third) / "user" / $"{TestCounterShardingTypeName}Region" / "3" / "3"); r.Tell(new Counter.EntityEnvelope(4, Counter.Increment.Instance)); r.Tell(new Counter.Get(4)); ExpectMsg(21); - LastSender.Path.Should().Be(Node(_config.Fourth) / "user" / "counterRegion" / "4" / "4"); + LastSender.Path.Should().Be(Node(_config.Fourth) / "user" / $"{TestCounterShardingTypeName}Region" / "4" / "4"); }, _config.First); EnterBarrier("first-update"); @@ -818,7 +843,7 @@ public void ClusterSharding_should_recover_coordinator_state_after_coordinator_c { _region.Value.Tell(new Counter.Get(3), probe3.Ref); probe3.ExpectMsg(11); - probe3.LastSender.Path.Should().Be(Node(_config.Third) / "user" / "counterRegion" / "3" / "3"); + probe3.LastSender.Path.Should().Be(Node(_config.Third) / "user" / $"{TestCounterShardingTypeName}Region" / "3" / "3"); }); }); @@ -829,7 +854,7 @@ public void ClusterSharding_should_recover_coordinator_state_after_coordinator_c { _region.Value.Tell(new Counter.Get(4), probe4.Ref); probe4.ExpectMsg(21); - probe4.LastSender.Path.Should().Be(Node(_config.Fourth) / "user" / "counterRegion" / "4" / "4"); + probe4.LastSender.Path.Should().Be(Node(_config.Fourth) / "user" / $"{TestCounterShardingTypeName}Region" / "4" / "4"); }); }); }, _config.Fifth); @@ -888,24 +913,24 @@ public void ClusterSharding_should_be_easy_to_use_with_extensions() { //#counter-start ClusterSharding.Get(Sys).Start( - 
typeName: "Counter", - entityProps: Props.Create(), + typeName: Counter.ShardingTypeName, + entityPropsFactory: entityId => Counter.Props(entityId), settings: ClusterShardingSettings.Create(Sys), extractEntityId: Counter.ExtractEntityId, extractShardId: Counter.ExtractShardId); //#counter-start ClusterSharding.Get(Sys).Start( - typeName: "AnotherCounter", - entityProps: Props.Create(), + typeName: AnotherCounter.ShardingTypeName, + entityPropsFactory: entityId => AnotherCounter.Props(entityId), settings: ClusterShardingSettings.Create(Sys), extractEntityId: Counter.ExtractEntityId, extractShardId: Counter.ExtractShardId); //#counter-supervisor-start ClusterSharding.Get(Sys).Start( - typeName: "SupervisedCounter", - entityProps: Props.Create(), + typeName: CounterSupervisor.ShardingTypeName, + entityPropsFactory: entityId => CounterSupervisor.Props(entityId), settings: ClusterShardingSettings.Create(Sys), extractEntityId: Counter.ExtractEntityId, extractShardId: Counter.ExtractShardId); @@ -915,18 +940,19 @@ public void ClusterSharding_should_be_easy_to_use_with_extensions() RunOn(() => { //#counter-usage - var counterRegion = ClusterSharding.Get(Sys).ShardRegion("Counter"); - counterRegion.Tell(new Counter.Get(123)); + var counterRegion = ClusterSharding.Get(Sys).ShardRegion(Counter.ShardingTypeName); + var entityId = 999; + counterRegion.Tell(new Counter.Get(entityId)); ExpectMsg(0); - counterRegion.Tell(new Counter.EntityEnvelope(123, Counter.Increment.Instance)); - counterRegion.Tell(new Counter.Get(123)); + counterRegion.Tell(new Counter.EntityEnvelope(entityId, Counter.Increment.Instance)); + counterRegion.Tell(new Counter.Get(entityId)); ExpectMsg(1); //#counter-usage - var anotherCounterRegion = ClusterSharding.Get(Sys).ShardRegion("AnotherCounter"); - anotherCounterRegion.Tell(new Counter.EntityEnvelope(123, Counter.Decrement.Instance)); - anotherCounterRegion.Tell(new Counter.Get(123)); + var anotherCounterRegion = 
ClusterSharding.Get(Sys).ShardRegion(AnotherCounter.ShardingTypeName); + anotherCounterRegion.Tell(new Counter.EntityEnvelope(entityId, Counter.Decrement.Instance)); + anotherCounterRegion.Tell(new Counter.Get(entityId)); ExpectMsg(-1); }, _config.Fifth); EnterBarrier("extension-used"); @@ -936,8 +962,8 @@ public void ClusterSharding_should_be_easy_to_use_with_extensions() { for (int i = 1000; i <= 1010; i++) { - ClusterSharding.Get(Sys).ShardRegion("Counter").Tell(new Counter.EntityEnvelope(i, Counter.Increment.Instance)); - ClusterSharding.Get(Sys).ShardRegion("Counter").Tell(new Counter.Get(i)); + ClusterSharding.Get(Sys).ShardRegion(Counter.ShardingTypeName).Tell(new Counter.EntityEnvelope(i, Counter.Increment.Instance)); + ClusterSharding.Get(Sys).ShardRegion(Counter.ShardingTypeName).Tell(new Counter.Get(i)); ExpectMsg(1); LastSender.Path.Address.Should().NotBe(Cluster.SelfAddress); } @@ -954,7 +980,7 @@ public void ClusterSharding_should_be_easy_API_for_starting() { var counterRegionViaStart = ClusterSharding.Get(Sys).Start( typeName: "ApiTest", - entityProps: Props.Create(), + entityPropsFactory: Counter.Props, settings: ClusterShardingSettings.Create(Sys), extractEntityId: Counter.ExtractEntityId, extractShardId: Counter.ExtractShardId); diff --git a/src/contrib/cluster/Akka.Cluster.Sharding.Tests/ClusterShardingInternalsSpec.cs b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/ClusterShardingInternalsSpec.cs new file mode 100644 index 00000000000..bee6a932e0d --- /dev/null +++ b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/ClusterShardingInternalsSpec.cs @@ -0,0 +1,82 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. 
+// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using Akka.Actor; +using Akka.Cluster.Tools.Singleton; +using Akka.Configuration; +using Akka.TestKit.TestActors; +using FluentAssertions; +using Xunit; + +namespace Akka.Cluster.Sharding.Tests +{ + public class ClusterShardingInternalsSpec : Akka.TestKit.Xunit2.TestKit + { + ClusterSharding clusterSharding; + + public ClusterShardingInternalsSpec() : base(GetConfig()) + { + clusterSharding = ClusterSharding.Get(Sys); + } + + private Tuple ExtractEntityId(object message) + { + switch (message) + { + case int i: + return new Tuple(i.ToString(), message); + } + throw new NotSupportedException(); + } + + private string ExtractShardId(object message) + { + switch (message) + { + case int i: + return (i % 10).ToString(); + } + throw new NotSupportedException(); + } + + + public static Config GetConfig() + { + return ConfigurationFactory.ParseString("akka.actor.provider = cluster") + + .WithFallback(Sharding.ClusterSharding.DefaultConfig()) + .WithFallback(DistributedData.DistributedData.DefaultConfig()) + .WithFallback(ClusterSingletonManager.DefaultConfig()); + } + + [Fact] + public void ClusterSharding_must_start_a_region_in_proxy_mode_in_case_of_node_role_mismatch() + { + var settingsWithRole = ClusterShardingSettings.Create(Sys).WithRole("nonExistingRole"); + var typeName = "typeName"; + + var region = clusterSharding.Start( + typeName: typeName, + entityProps: Props.Empty, + settings: settingsWithRole, + extractEntityId: ExtractEntityId, + extractShardId: ExtractShardId, + allocationStrategy: new LeastShardAllocationStrategy(0, 0), + handOffStopMessage: PoisonPill.Instance); + + var proxy = clusterSharding.StartProxy( + typeName: typeName, + role: settingsWithRole.Role, + extractEntityId: ExtractEntityId, + extractShardId: ExtractShardId + ); + + region.Should().BeSameAs(proxy); + } + } +} diff --git 
a/src/contrib/cluster/Akka.Cluster.Sharding.Tests/CoordinatedShutdownShardingSpec.cs b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/CoordinatedShutdownShardingSpec.cs new file mode 100644 index 00000000000..38b2f5ae966 --- /dev/null +++ b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/CoordinatedShutdownShardingSpec.cs @@ -0,0 +1,210 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2016 Lightbend Inc. +// Copyright (C) 2013-2016 Akka.NET project +// +//----------------------------------------------------------------------- + +using System; +using System.Linq; +using System.Threading.Tasks; +using Akka.Actor; +using Akka.Cluster.Tools.Singleton; +using Akka.Configuration; +using Akka.TestKit; +using FluentAssertions; +using Xunit; +using Xunit.Abstractions; + +namespace Akka.Cluster.Sharding.Tests +{ + public class CoordinatedShutdownShardingSpec : AkkaSpec + { + private readonly ActorSystem _sys1; + private readonly ActorSystem _sys2; + private readonly ActorSystem _sys3; + + private readonly IActorRef _region1; + private readonly IActorRef _region2; + private readonly IActorRef _region3; + + private readonly TestProbe _probe1; + private readonly TestProbe _probe2; + private readonly TestProbe _probe3; + + private static readonly Config SpecConfig; + + private class EchoActor : ReceiveActor + { + public EchoActor() + { + ReceiveAny(_ => Sender.Tell(_)); + } + } + + private readonly ExtractEntityId _extractEntityId = message => Tuple.Create(message.ToString(), message); + + private readonly ExtractShardId _extractShard = message => (message.GetHashCode() % 10).ToString(); + + static CoordinatedShutdownShardingSpec() + { + SpecConfig = ConfigurationFactory.ParseString(@" + akka.loglevel = DEBUG + akka.actor.provider = cluster + akka.remote.dot-netty.tcp.port = 0") + .WithFallback(ClusterSingletonManager.DefaultConfig() + .WithFallback(ClusterSharding.DefaultConfig())); + } + + public 
CoordinatedShutdownShardingSpec(ITestOutputHelper helper) : base(SpecConfig, helper) + { + _sys1 = ActorSystem.Create(Sys.Name, Sys.Settings.Config); + _sys2 = ActorSystem.Create(Sys.Name, Sys.Settings.Config); + _sys3 = Sys; + + var props = Props.Create(() => new EchoActor()); + _region1 = ClusterSharding.Get(_sys1).Start("type1", props, ClusterShardingSettings.Create(_sys1), + _extractEntityId, _extractShard); + _region2 = ClusterSharding.Get(_sys2).Start("type1", props, ClusterShardingSettings.Create(_sys2), + _extractEntityId, _extractShard); + _region3 = ClusterSharding.Get(_sys3).Start("type1", props, ClusterShardingSettings.Create(_sys3), + _extractEntityId, _extractShard); + + + _probe1 = CreateTestProbe(_sys1); + _probe2 = CreateTestProbe(_sys2); + _probe3 = CreateTestProbe(_sys3); + + CoordinatedShutdown.Get(_sys1).AddTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "unbind", () => + { + _probe1.Ref.Tell("CS-unbind-1"); + return Task.FromResult(Done.Instance); + }); + + CoordinatedShutdown.Get(_sys2).AddTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "unbind", () => + { + _probe2.Ref.Tell("CS-unbind-2"); + return Task.FromResult(Done.Instance); + }); + + CoordinatedShutdown.Get(_sys3).AddTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "unbind", () => + { + _probe3.Ref.Tell("CS-unbind-3"); + return Task.FromResult(Done.Instance); + }); + } + + protected override void BeforeTermination() + { + Shutdown(_sys1); + Shutdown(_sys2); + } + + /// + /// Using region 2 as it is not shutdown in either test. 
+ /// + private void PingEntities() + { + _region2.Tell(1, _probe2.Ref); + _probe2.ExpectMsg(10.Seconds()).Should().Be(1); + _region2.Tell(2, _probe2.Ref); + _probe2.ExpectMsg(10.Seconds()).Should().Be(2); + _region2.Tell(3, _probe2.Ref); + _probe2.ExpectMsg(10.Seconds()).Should().Be(3); + } + + [Fact] + public void Sharding_and_CoordinatedShutdown_must_run_successfully() + { + InitCluster(); + RunCoordinatedShutdownWhenLeaving(); + RunCoordinatedShutdownWhenDowning(); + } + + private void InitCluster() + { + Cluster.Get(_sys1).Join(Cluster.Get(_sys1).SelfAddress); // coordinator will initially run on sys1 + AwaitAssert(() => Cluster.Get(_sys1).SelfMember.Status.Should().Be(MemberStatus.Up)); + + Cluster.Get(_sys2).Join(Cluster.Get(_sys1).SelfAddress); + Within(10.Seconds(), () => + { + AwaitAssert(() => + { + Cluster.Get(_sys1).State.Members.Count.Should().Be(2); + Cluster.Get(_sys1).State.Members.All(x => x.Status == MemberStatus.Up).Should().BeTrue(); + Cluster.Get(_sys2).State.Members.Count.Should().Be(2); + Cluster.Get(_sys2).State.Members.All(x => x.Status == MemberStatus.Up).Should().BeTrue(); + }); + }); + + Cluster.Get(_sys3).Join(Cluster.Get(_sys1).SelfAddress); + Within(10.Seconds(), () => + { + AwaitAssert(() => + { + Cluster.Get(_sys1).State.Members.Count.Should().Be(3); + Cluster.Get(_sys1).State.Members.All(x => x.Status == MemberStatus.Up).Should().BeTrue(); + Cluster.Get(_sys2).State.Members.Count.Should().Be(3); + Cluster.Get(_sys2).State.Members.All(x => x.Status == MemberStatus.Up).Should().BeTrue(); + Cluster.Get(_sys3).State.Members.Count.Should().Be(3); + Cluster.Get(_sys3).State.Members.All(x => x.Status == MemberStatus.Up).Should().BeTrue(); + }); + }); + + PingEntities(); + } + + private void RunCoordinatedShutdownWhenLeaving() + { + Cluster.Get(_sys3).Leave(Cluster.Get(_sys1).SelfAddress); + _probe1.ExpectMsg("CS-unbind-1"); + + Within(20.Seconds(), () => + { + AwaitAssert(() => + { + Cluster.Get(_sys2).State.Members.Count.Should().Be(2); 
+ Cluster.Get(_sys3).State.Members.Count.Should().Be(2); + }); + }); + + Within(10.Seconds(), () => + { + AwaitAssert(() => + { + Cluster.Get(_sys1).IsTerminated.Should().BeTrue(); + _sys1.WhenTerminated.IsCompleted.Should().BeTrue(); + }); + }); + + PingEntities(); + } + + private void RunCoordinatedShutdownWhenDowning() + { + // coordinator is on Sys2 + Cluster.Get(_sys2).Down(Cluster.Get(_sys3).SelfAddress); + _probe3.ExpectMsg("CS-unbind-3"); + + Within(20.Seconds(), () => + { + AwaitAssert(() => + { + Cluster.Get(_sys2).State.Members.Count.Should().Be(1); + }); + }); + + Within(10.Seconds(), () => + { + AwaitAssert(() => + { + Cluster.Get(_sys3).IsTerminated.Should().BeTrue(); + _sys3.WhenTerminated.IsCompleted.Should().BeTrue(); + }); + }); + + PingEntities(); + } + } +} diff --git a/src/contrib/cluster/Akka.Cluster.Sharding.Tests/DDataClusterShardingConfigSpec.cs b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/DDataClusterShardingConfigSpec.cs new file mode 100644 index 00000000000..1be6e6defa7 --- /dev/null +++ b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/DDataClusterShardingConfigSpec.cs @@ -0,0 +1,46 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. 
+// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using Akka.Configuration; +using Akka.DistributedData; +using Akka.DistributedData.Internal; +using Akka.DistributedData.Serialization; +using FluentAssertions; +using Xunit; +using Xunit.Abstractions; + +namespace Akka.Cluster.Sharding.Tests +{ + /// + /// Used to validate that https://github.com/akkadotnet/akka.net/issues/3529 works as expected + /// + public class DDataClusterShardingConfigSpec : TestKit.Xunit2.TestKit + { + public DDataClusterShardingConfigSpec(ITestOutputHelper helper) : base(GetConfig(), output:helper) + { + } + + public static Config GetConfig() + { + return ConfigurationFactory.ParseString(@"akka.actor.provider = ""Akka.Cluster.ClusterActorRefProvider, Akka.Cluster"" + akka.cluster.sharding.state-store-mode = ddata + "); + } + + [Fact] + public void Should_load_DData_serializers_when_enabled() + { + ClusterSharding.Get(Sys); + + var rmSerializer = Sys.Serialization.FindSerializerFor(WriteAck.Instance); + rmSerializer.Should().BeOfType(); + + var rDSerializer = Sys.Serialization.FindSerializerFor(ORDictionary>.Empty); + rDSerializer.Should().BeOfType(); + } + } +} \ No newline at end of file diff --git a/src/contrib/cluster/Akka.Cluster.Sharding.Tests/GetShardTypeNamesSpec.cs b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/GetShardTypeNamesSpec.cs new file mode 100644 index 00000000000..d86e7f5eb1f --- /dev/null +++ b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/GetShardTypeNamesSpec.cs @@ -0,0 +1,69 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. 
+// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using Akka.Cluster.Tools.Singleton; +using Akka.Configuration; +using Akka.TestKit.TestActors; +using FluentAssertions; +using Xunit; + +namespace Akka.Cluster.Sharding.Tests +{ + public class GetShardTypeNamesSpec : Akka.TestKit.Xunit2.TestKit + { + public GetShardTypeNamesSpec() : base(GetConfig()) + { + } + + public static Config GetConfig() + { + return ConfigurationFactory.ParseString("akka.actor.provider = cluster") + + .WithFallback(Sharding.ClusterSharding.DefaultConfig()) + .WithFallback(DistributedData.DistributedData.DefaultConfig()) + .WithFallback(ClusterSingletonManager.DefaultConfig()); + } + + [Fact] + public void GetShardTypeNames_must_contain_empty_when_join_cluster_without_shards() + { + ClusterSharding.Get(Sys).ShardTypeNames.Should().BeEmpty(); + } + + [Fact] + public void GetShardTypeNames_must_contain_started_shards_when_started_2_shards() + { + Cluster.Get(Sys).Join(Cluster.Get(Sys).SelfAddress); + var settings = ClusterShardingSettings.Create(Sys); + ClusterSharding.Get(Sys).Start("type1", EchoActor.Props(this), settings, ExtractEntityId, ExtractShardId); + ClusterSharding.Get(Sys).Start("type2", EchoActor.Props(this), settings, ExtractEntityId, ExtractShardId); + + ClusterSharding.Get(Sys).ShardTypeNames.ShouldBeEquivalentTo(new string[] { "type1", "type2" }); + } + + private Tuple ExtractEntityId(object message) + { + switch (message) + { + case int i: + return new Tuple(i.ToString(), message); + } + throw new NotSupportedException(); + } + + private string ExtractShardId(object message) + { + switch (message) + { + case int i: + return (i % 10).ToString(); + } + throw new NotSupportedException(); + } + } +} diff --git a/src/contrib/cluster/Akka.Cluster.Sharding.Tests/ProxyShardingSpec.cs b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/ProxyShardingSpec.cs new file mode 100644 index 
00000000000..c4c7c971458 --- /dev/null +++ b/src/contrib/cluster/Akka.Cluster.Sharding.Tests/ProxyShardingSpec.cs @@ -0,0 +1,105 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. +// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using Akka.Actor; +using Akka.Cluster.Tools.Singleton; +using Akka.Configuration; +using Akka.TestKit.TestActors; +using FluentAssertions; +using Xunit; + +namespace Akka.Cluster.Sharding.Tests +{ + public class ProxyShardingSpec : Akka.TestKit.Xunit2.TestKit + { + ClusterSharding clusterSharding; + ClusterShardingSettings shardingSettings; + private MessageExtractor messageExtractor = new MessageExtractor(10); + + public ProxyShardingSpec() : base(GetConfig()) + { + var role = "Shard"; + clusterSharding = ClusterSharding.Get(Sys); + shardingSettings = ClusterShardingSettings.Create(Sys); + clusterSharding.StartProxy("myType", role, IdExtractor, ShardResolver); + } + + private class MessageExtractor : HashCodeMessageExtractor + { + public MessageExtractor(int maxNumberOfShards) : base(maxNumberOfShards) + { + } + + public override string EntityId(object message) + { + return "dummyId"; + } + } + + private Tuple IdExtractor(object message) + { + switch (message) + { + case int i: + return new Tuple(i.ToString(), message); + } + throw new NotSupportedException(); + } + + private string ShardResolver(object message) + { + switch (message) + { + case int i: + return (i % 10).ToString(); + } + throw new NotSupportedException(); + } + + + public static Config GetConfig() + { + return ConfigurationFactory.ParseString("akka.actor.provider = cluster") + + .WithFallback(Sharding.ClusterSharding.DefaultConfig()) + .WithFallback(DistributedData.DistributedData.DefaultConfig()) + .WithFallback(ClusterSingletonManager.DefaultConfig()); + } + + [Fact] + public void 
ProxyShardingSpec_Proxy_should_be_found() + { + IActorRef proxyActor = Sys.ActorSelection("akka://test/system/sharding/myTypeProxy") + .ResolveOne(TimeSpan.FromSeconds(5)).Result; + + proxyActor.Path.Should().NotBeNull(); + proxyActor.Path.ToString().Should().EndWith("Proxy"); + } + + [Fact] + public void ProxyShardingSpec_Shard_region_should_be_found() + { + var shardRegion = clusterSharding.Start("myType", EchoActor.Props(this), shardingSettings, messageExtractor); + + shardRegion.Path.Should().NotBeNull(); + shardRegion.Path.ToString().Should().EndWith("myType"); + } + + [Fact] + public void ProxyShardingSpec_Shard_coordinator_should_be_found() + { + var shardRegion = clusterSharding.Start("myType", EchoActor.Props(this), shardingSettings, messageExtractor); + + IActorRef shardCoordinator = Sys.ActorSelection("akka://test/system/sharding/myTypeCoordinator") + .ResolveOne(TimeSpan.FromSeconds(5)).Result; + + shardCoordinator.Path.Should().NotBeNull(); + shardCoordinator.Path.ToString().Should().EndWith("Coordinator"); + } + } +} diff --git a/src/contrib/cluster/Akka.Cluster.Sharding/ClusterSharding.cs b/src/contrib/cluster/Akka.Cluster.Sharding/ClusterSharding.cs index 935dc8e814e..c9cda07a2c5 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/ClusterSharding.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding/ClusterSharding.cs @@ -8,6 +8,7 @@ using System; using System.Collections.Concurrent; using System.Collections.Generic; +using System.Collections.Immutable; using System.Runtime.ExceptionServices; using System.Threading.Tasks; using Akka.Actor; @@ -228,6 +229,7 @@ public class ClusterSharding : IExtension { private readonly Lazy _guardian; private readonly ConcurrentDictionary _regions = new ConcurrentDictionary(); + private readonly ConcurrentDictionary _proxies = new ConcurrentDictionary(); private readonly ExtendedActorSystem _system; private readonly Cluster _cluster; @@ -281,6 +283,10 @@ public static Config DefaultConfig() /// Register a named 
entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// /// /// The name of the entity type /// @@ -313,25 +319,31 @@ public IActorRef Start( IShardAllocationStrategy allocationStrategy, object handOffStopMessage) { - RequireClusterRole(settings.Role); - - var timeout = _system.Settings.CreationTimeout; - var startMsg = new ClusterShardingGuardian.Start(typeName, entityProps, settings, extractEntityId, extractShardId, allocationStrategy, handOffStopMessage); - - var reply = _guardian.Value.Ask(startMsg, timeout).Result; - switch (reply) + if (settings.ShouldHostShard(_cluster)) { - case ClusterShardingGuardian.Started started: - var shardRegion = started.ShardRegion; - _regions.TryAdd(typeName, shardRegion); - return shardRegion; - - case Status.Failure failure: - ExceptionDispatchInfo.Capture(failure.Cause).Throw(); - return ActorRefs.Nobody; - - default: - throw new ActorInitializationException($"Unsupported guardian response: {reply}"); + var timeout = _system.Settings.CreationTimeout; + var startMsg = new ClusterShardingGuardian.Start(typeName, _ => entityProps, settings, extractEntityId, extractShardId, allocationStrategy, handOffStopMessage); + + var reply = _guardian.Value.Ask(startMsg, timeout).Result; + switch (reply) + { + case ClusterShardingGuardian.Started started: + var shardRegion = started.ShardRegion; + _regions.TryAdd(typeName, shardRegion); + return shardRegion; + + case Status.Failure failure: + ExceptionDispatchInfo.Capture(failure.Cause).Throw(); + return ActorRefs.Nobody; + + default: + throw new ActorInitializationException($"Unsupported guardian response: {reply}"); + } + } + else + { + _cluster.System.Log.Debug("Starting Shard 
Region Proxy [{0}] (no actors will be hosted on this node)...", typeName); + return StartProxy(typeName, settings.Role, extractEntityId, extractShardId); } } @@ -339,6 +351,10 @@ public IActorRef Start( /// Register a named entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// /// /// The name of the entity type /// @@ -371,25 +387,31 @@ public async Task StartAsync( IShardAllocationStrategy allocationStrategy, object handOffStopMessage) { - RequireClusterRole(settings.Role); - - var timeout = _system.Settings.CreationTimeout; - var startMsg = new ClusterShardingGuardian.Start(typeName, entityProps, settings, extractEntityId, extractShardId, allocationStrategy, handOffStopMessage); - - var reply = await _guardian.Value.Ask(startMsg, timeout); - switch (reply) + if (settings.ShouldHostShard(_cluster)) { - case ClusterShardingGuardian.Started started: - var shardRegion = started.ShardRegion; - _regions.TryAdd(typeName, shardRegion); - return shardRegion; - - case Status.Failure failure: - ExceptionDispatchInfo.Capture(failure.Cause).Throw(); - return ActorRefs.Nobody; - - default: - throw new ActorInitializationException($"Unsupported guardian response: {reply}"); + var timeout = _system.Settings.CreationTimeout; + var startMsg = new ClusterShardingGuardian.Start(typeName, _ => entityProps, settings, extractEntityId, extractShardId, allocationStrategy, handOffStopMessage); + + var reply = await _guardian.Value.Ask(startMsg, timeout); + switch (reply) + { + case ClusterShardingGuardian.Started started: + var shardRegion = started.ShardRegion; + _regions.TryAdd(typeName, shardRegion); + return shardRegion; + + case Status.Failure failure: + 
ExceptionDispatchInfo.Capture(failure.Cause).Throw(); + return ActorRefs.Nobody; + + default: + throw new ActorInitializationException($"Unsupported guardian response: {reply}"); + } + } + else + { + _cluster.System.Log.Debug("Starting Shard Region Proxy [{0}] (no actors will be hosted on this node)...", typeName); + return await StartProxyAsync(typeName, settings.Role, extractEntityId, extractShardId); } } @@ -397,6 +419,10 @@ public async Task StartAsync( /// Register a named entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// /// /// The name of the entity type /// @@ -419,9 +445,7 @@ public IActorRef Start( ExtractEntityId extractEntityId, ExtractShardId extractShardId) { - var allocationStrategy = new LeastShardAllocationStrategy( - Settings.TunningParameters.LeastShardAllocationRebalanceThreshold, - Settings.TunningParameters.LeastShardAllocationMaxSimultaneousRebalance); + var allocationStrategy = DefaultShardAllocationStrategy(settings); return Start(typeName, entityProps, settings, extractEntityId, extractShardId, allocationStrategy, PoisonPill.Instance); } @@ -429,6 +453,10 @@ public IActorRef Start( /// Register a named entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. 
+ /// /// /// The name of the entity type /// @@ -451,9 +479,7 @@ public Task StartAsync( ExtractEntityId extractEntityId, ExtractShardId extractShardId) { - var allocationStrategy = new LeastShardAllocationStrategy( - Settings.TunningParameters.LeastShardAllocationRebalanceThreshold, - Settings.TunningParameters.LeastShardAllocationMaxSimultaneousRebalance); + var allocationStrategy = DefaultShardAllocationStrategy(settings); return StartAsync(typeName, entityProps, settings, extractEntityId, extractShardId, allocationStrategy, PoisonPill.Instance); } @@ -461,6 +487,10 @@ public Task StartAsync( /// Register a named entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// /// /// The name of the entity type /// @@ -489,6 +519,10 @@ public IActorRef Start(string typeName, Props entityProps, ClusterShardingSettin /// Register a named entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// /// /// The name of the entity type /// @@ -517,6 +551,10 @@ public Task StartAsync(string typeName, Props entityProps, ClusterSha /// Register a named entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. 
+ /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// /// /// The name of the entity type /// @@ -534,9 +572,7 @@ public IActorRef Start(string typeName, Props entityProps, ClusterShardingSettin entityProps, settings, messageExtractor, - new LeastShardAllocationStrategy( - Settings.TunningParameters.LeastShardAllocationRebalanceThreshold, - Settings.TunningParameters.LeastShardAllocationMaxSimultaneousRebalance), + DefaultShardAllocationStrategy(settings), PoisonPill.Instance); } @@ -544,6 +580,10 @@ public IActorRef Start(string typeName, Props entityProps, ClusterShardingSettin /// Register a named entity type by defining the of the entity actor and /// functions to extract entity and shard identifier from messages. The /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// /// /// The name of the entity type /// @@ -561,9 +601,334 @@ public Task StartAsync(string typeName, Props entityProps, ClusterSha entityProps, settings, messageExtractor, - new LeastShardAllocationStrategy( - Settings.TunningParameters.LeastShardAllocationRebalanceThreshold, - Settings.TunningParameters.LeastShardAllocationMaxSimultaneousRebalance), + DefaultShardAllocationStrategy(settings), + PoisonPill.Instance); + } + + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. 
+ /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Partial function to extract the entity id and the message to send to the entity from the incoming message, + /// if the partial function does not match the message will be `unhandled`, + /// i.e.posted as `Unhandled` messages on the event stream + /// + /// + /// Function to determine the shard id for an incoming message, only messages that passed the `extractEntityId` will be used + /// + /// Possibility to use a custom shard allocation and rebalancing logic + /// + /// The message that will be sent to entities when they are to be stopped for a rebalance or + /// graceful shutdown of a , e.g. . + /// + /// + /// This exception is thrown when the cluster member doesn't have the role specified in . + /// + /// The actor ref of the that is to be responsible for the shard. + public IActorRef Start( + string typeName, + Func entityPropsFactory, + ClusterShardingSettings settings, + ExtractEntityId extractEntityId, + ExtractShardId extractShardId, + IShardAllocationStrategy allocationStrategy, + object handOffStopMessage) + { + if (settings.ShouldHostShard(_cluster)) + { + var timeout = _system.Settings.CreationTimeout; + var startMsg = new ClusterShardingGuardian.Start(typeName, entityPropsFactory, settings, extractEntityId, extractShardId, allocationStrategy, handOffStopMessage); + + var reply = _guardian.Value.Ask(startMsg, timeout).Result; + switch (reply) + { + case ClusterShardingGuardian.Started started: + var shardRegion = started.ShardRegion; + _regions.TryAdd(typeName, shardRegion); + return shardRegion; + + case Status.Failure failure: + ExceptionDispatchInfo.Capture(failure.Cause).Throw(); + return ActorRefs.Nobody; + + default: + throw new ActorInitializationException($"Unsupported guardian response: {reply}"); + } + } + else + { + 
_cluster.System.Log.Debug("Starting Shard Region Proxy [{0}] (no actors will be hosted on this node)...", typeName); + return StartProxy(typeName, settings.Role, extractEntityId, extractShardId); + } + } + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Partial function to extract the entity id and the message to send to the entity from the incoming message, + /// if the partial function does not match the message will be `unhandled`, + /// i.e.posted as `Unhandled` messages on the event stream + /// + /// + /// Function to determine the shard id for an incoming message, only messages that passed the `extractEntityId` will be used + /// + /// Possibility to use a custom shard allocation and rebalancing logic + /// + /// The message that will be sent to entities when they are to be stopped for a rebalance or + /// graceful shutdown of a , e.g. . + /// + /// + /// This exception is thrown when the cluster member doesn't have the role specified in . + /// + /// The actor ref of the that is to be responsible for the shard. 
+ public async Task StartAsync( + string typeName, + Func entityPropsFactory, + ClusterShardingSettings settings, + ExtractEntityId extractEntityId, + ExtractShardId extractShardId, + IShardAllocationStrategy allocationStrategy, + object handOffStopMessage) + { + if (settings.ShouldHostShard(_cluster)) + { + var timeout = _system.Settings.CreationTimeout; + var startMsg = new ClusterShardingGuardian.Start(typeName, entityPropsFactory, settings, extractEntityId, extractShardId, allocationStrategy, handOffStopMessage); + + var reply = await _guardian.Value.Ask(startMsg, timeout); + switch (reply) + { + case ClusterShardingGuardian.Started started: + var shardRegion = started.ShardRegion; + _regions.TryAdd(typeName, shardRegion); + return shardRegion; + + case Status.Failure failure: + ExceptionDispatchInfo.Capture(failure.Cause).Throw(); + return ActorRefs.Nobody; + + default: + throw new ActorInitializationException($"Unsupported guardian response: {reply}"); + } + } + else + { + _cluster.System.Log.Debug("Starting Shard Region Proxy [{0}] (no actors will be hosted on this node)...", typeName); + return StartProxy(typeName, settings.Role, extractEntityId, extractShardId); + } + } + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. 
+ /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Partial function to extract the entity id and the message to send to the entity from the incoming message, + /// if the partial function does not match the message will be `unhandled`, + /// i.e.posted as `Unhandled` messages on the event stream + /// + /// + /// Function to determine the shard id for an incoming message, only messages that passed the `extractEntityId` will be used + /// + /// The actor ref of the that is to be responsible for the shard. + public IActorRef Start( + string typeName, + Func entityPropsFactory, + ClusterShardingSettings settings, + ExtractEntityId extractEntityId, + ExtractShardId extractShardId) + { + var allocationStrategy = DefaultShardAllocationStrategy(settings); + return Start(typeName, entityPropsFactory, settings, extractEntityId, extractShardId, allocationStrategy, PoisonPill.Instance); + } + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. 
+ /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Partial function to extract the entity id and the message to send to the entity from the incoming message, + /// if the partial function does not match the message will be `unhandled`, + /// i.e.posted as `Unhandled` messages on the event stream + /// + /// + /// Function to determine the shard id for an incoming message, only messages that passed the `extractEntityId` will be used + /// + /// The actor ref of the that is to be responsible for the shard. + public Task StartAsync( + string typeName, + Func entityPropsFactory, + ClusterShardingSettings settings, + ExtractEntityId extractEntityId, + ExtractShardId extractShardId) + { + var allocationStrategy = DefaultShardAllocationStrategy(settings); + return StartAsync(typeName, entityPropsFactory, settings, extractEntityId, extractShardId, allocationStrategy, PoisonPill.Instance); + } + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Functions to extract the entity id, shard id, and the message to send to the entity from the incoming message. + /// + /// Possibility to use a custom shard allocation and rebalancing logic + /// + /// The message that will be sent to entities when they are to be stopped for a rebalance or + /// graceful shutdown of a , e.g. . 
+ /// + /// The actor ref of the that is to be responsible for the shard. + public IActorRef Start(string typeName, Func entityPropsFactory, ClusterShardingSettings settings, + IMessageExtractor messageExtractor, IShardAllocationStrategy allocationStrategy, object handOffMessage) + { + ExtractEntityId extractEntityId = messageExtractor.ToExtractEntityId(); + ExtractShardId extractShardId = messageExtractor.ShardId; + + return Start(typeName, entityPropsFactory, settings, extractEntityId, extractShardId, allocationStrategy, handOffMessage); + } + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Functions to extract the entity id, shard id, and the message to send to the entity from the incoming message. + /// + /// Possibility to use a custom shard allocation and rebalancing logic + /// + /// The message that will be sent to entities when they are to be stopped for a rebalance or + /// graceful shutdown of a , e.g. . + /// + /// The actor ref of the that is to be responsible for the shard. 
+ public Task StartAsync(string typeName, Func entityPropsFactory, ClusterShardingSettings settings, + IMessageExtractor messageExtractor, IShardAllocationStrategy allocationStrategy, object handOffMessage) + { + ExtractEntityId extractEntityId = messageExtractor.ToExtractEntityId(); + ExtractShardId extractShardId = messageExtractor.ShardId; + + return StartAsync(typeName, entityPropsFactory, settings, extractEntityId, extractShardId, allocationStrategy, handOffMessage); + } + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. + /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Functions to extract the entity id, shard id, and the message to send to the entity from the incoming message. + /// + /// The actor ref of the that is to be responsible for the shard. + public IActorRef Start(string typeName, Func entityPropsFactory, ClusterShardingSettings settings, + IMessageExtractor messageExtractor) + { + return Start(typeName, + entityPropsFactory, + settings, + messageExtractor, + DefaultShardAllocationStrategy(settings), + PoisonPill.Instance); + } + + /// + /// Register a named entity type by defining the of the entity actor and + /// functions to extract entity and shard identifier from messages. The + /// actor for this type can later be retrieved with the method. + /// + /// This method will start a in proxy mode in case if there is no match between the roles of + /// the current cluster node and the role specified in passed to this method. 
+ /// + /// + /// The name of the entity type + /// + /// Function that, given an entity id, returns the of the entity actors that will be created by the + /// + /// Configuration settings, see + /// + /// Functions to extract the entity id, shard id, and the message to send to the entity from the incoming message. + /// + /// The actor ref of the that is to be responsible for the shard. + public Task StartAsync(string typeName, Func entityPropsFactory, ClusterShardingSettings settings, + IMessageExtractor messageExtractor) + { + return StartAsync(typeName, + entityPropsFactory, + settings, + messageExtractor, + DefaultShardAllocationStrategy(settings), PoisonPill.Instance); } @@ -596,7 +961,7 @@ public IActorRef StartProxy(string typeName, string role, ExtractEntityId extrac switch (reply) { case ClusterShardingGuardian.Started started: - _regions.TryAdd(typeName, started.ShardRegion); + _proxies.TryAdd(typeName, started.ShardRegion); return started.ShardRegion; case Status.Failure failure: @@ -637,7 +1002,7 @@ public async Task StartProxyAsync(string typeName, string role, Extra switch (reply) { case ClusterShardingGuardian.Started started: - _regions.TryAdd(typeName, started.ShardRegion); + _proxies.TryAdd(typeName, started.ShardRegion); return started.ShardRegion; case Status.Failure failure: @@ -703,6 +1068,11 @@ Tuple extractEntityId(Msg msg) return StartProxyAsync(typeName, role, extractEntityId, messageExtractor.ShardId); } + /// + /// get all currently defined sharding type names. + /// + public ImmutableHashSet ShardTypeNames => _regions.Keys.ToImmutableHashSet(); + /// /// Retrieve the actor reference of the actor responsible for the named entity type. /// The entity type must be registered with the method before it can be used here. 
@@ -717,18 +1087,35 @@ public IActorRef ShardRegion(string typeName) { if (_regions.TryGetValue(typeName, out var region)) return region; + if (_proxies.TryGetValue(typeName, out region)) + return region; throw new ArgumentException($"Shard type [{typeName}] must be started first"); } - private void RequireClusterRole(string role) + /// + /// Retrieve the actor reference of the actor that will act as a proxy to the + /// named entity type running in another data center. A proxy within the same data center can be accessed + /// with instead of this method. The entity type must be registered with the + /// method before it can be used here. Messages to the entity is always sent + /// via the . + /// + /// + /// + public IActorRef ShardRegionProxy(string typeName) { - if (!(string.IsNullOrEmpty(role) || _cluster.SelfRoles.Contains(role))) - { - throw new IllegalStateException( - $"This cluster member [{_cluster.SelfAddress}] doesn't have the role [{role}]"); - } + if (_proxies.TryGetValue(typeName, out var proxy)) + return proxy; + throw new ArgumentException($"Shard type [{typeName}] must be started first"); + } + + private IShardAllocationStrategy DefaultShardAllocationStrategy(ClusterShardingSettings settings) + { + return new LeastShardAllocationStrategy( + Settings.TunningParameters.LeastShardAllocationRebalanceThreshold, + Settings.TunningParameters.LeastShardAllocationMaxSimultaneousRebalance); } + } /// @@ -777,7 +1164,7 @@ public interface IMessageExtractor object EntityMessage(object message); /// - /// Extract the entity id from an incoming . Only messages that + /// Extract the shard id from an incoming . Only messages that /// passed the method will be used as input to this method. 
/// /// TBD diff --git a/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingGuardian.cs b/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingGuardian.cs index ba2140ed1d7..b775f66d607 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingGuardian.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingGuardian.cs @@ -55,7 +55,7 @@ public sealed class Start : INoSerializationVerificationNeeded /// /// TBD /// - public readonly Props EntityProps; + public readonly Func EntityProps; /// /// TBD /// @@ -90,7 +90,7 @@ public sealed class Start : INoSerializationVerificationNeeded /// /// This exception is thrown when the specified or is undefined. /// - public Start(string typeName, Props entityProps, ClusterShardingSettings settings, + public Start(string typeName, Func entityProps, ClusterShardingSettings settings, ExtractEntityId extractEntityId, ExtractShardId extractShardId, IShardAllocationStrategy allocationStrategy, object handOffStopMessage) { if (string.IsNullOrEmpty(typeName)) throw new ArgumentNullException(nameof(typeName), "ClusterSharding start requires type name to be provided"); @@ -218,9 +218,8 @@ public ClusterShardingGuardian() try { var settings = startProxy.Settings; - var encName = Uri.EscapeDataString(startProxy.TypeName); - var coordinatorSingletonManagerName = CoordinatorSingletonManagerName(encName); - var coordinatorPath = CoordinatorPath(encName); + var encName = Uri.EscapeDataString(startProxy.TypeName + "Proxy"); + var coordinatorPath = CoordinatorPath(Uri.EscapeDataString(startProxy.TypeName)); var shardRegion = Context.Child(encName); if (Equals(shardRegion, ActorRefs.Nobody)) diff --git a/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingSettings.cs b/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingSettings.cs index e346460b7f3..d6606483b55 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingSettings.cs +++ 
b/src/contrib/cluster/Akka.Cluster.Sharding/ClusterShardingSettings.cs @@ -94,6 +94,8 @@ public class TunningParameters /// Keep this number of old persistent batches /// TBD /// TBD + /// TBD + /// TBD /// TBD /// TBD /// TBD @@ -276,6 +278,16 @@ public ClusterShardingSettings( CoordinatorSingletonSettings = coordinatorSingletonSettings; } + /// + /// If true, this node should run the shard region, otherwise just a shard proxy should started on this node. + /// + /// + /// + internal bool ShouldHostShard(Cluster cluster) + { + return string.IsNullOrEmpty(Role) || cluster.SelfRoles.Contains(Role); + } + /// /// TBD /// diff --git a/src/contrib/cluster/Akka.Cluster.Sharding/DDataShard.cs b/src/contrib/cluster/Akka.Cluster.Sharding/DDataShard.cs index e8d9738f39f..46659feccd7 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/DDataShard.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding/DDataShard.cs @@ -35,7 +35,7 @@ internal sealed class DDataShard : ActorBase, IShard, IWithUnboundedStash public string TypeName { get; } public string ShardId { get; } - public Props EntityProps { get; } + public Func EntityProps { get; } public ClusterShardingSettings Settings { get; } public ExtractEntityId ExtractEntityId { get; } public ExtractShardId ExtractShardId { get; } @@ -72,7 +72,7 @@ internal sealed class DDataShard : ActorBase, IShard, IWithUnboundedStash public DDataShard( string typeName, ShardId shardId, - Props entityProps, + Func entityProps, ClusterShardingSettings settings, ExtractEntityId extractEntityId, ExtractShardId extractShardId, @@ -100,7 +100,7 @@ public DDataShard( _readConsistency = new ReadMajority(settings.TunningParameters.WaitingForStateTimeout, majorityCap); _writeConsistency = new WriteMajority(settings.TunningParameters.UpdatingStateTimeout, majorityCap); _stateKeys = Enumerable.Range(0, NrOfKeys).Select(i => new ORSetKey($"shard-{typeName}-{shardId}-{i}")).ToImmutableArray(); - + GetState(); } diff --git 
a/src/contrib/cluster/Akka.Cluster.Sharding/PersistentShard.cs b/src/contrib/cluster/Akka.Cluster.Sharding/PersistentShard.cs index 626187c7cdd..0b5388c21a4 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/PersistentShard.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding/PersistentShard.cs @@ -33,7 +33,7 @@ internal sealed class PersistentShard : PersistentActor, IShard public string TypeName { get; } public string ShardId { get; } - public Props EntityProps { get; } + public Func EntityProps { get; } public ClusterShardingSettings Settings { get; } public ExtractEntityId ExtractEntityId { get; } public ExtractShardId ExtractShardId { get; } @@ -45,12 +45,12 @@ internal sealed class PersistentShard : PersistentActor, IShard public ImmutableHashSet Passivating { get; set; } = ImmutableHashSet.Empty; public ImmutableDictionary>> MessageBuffers { get; set; } = ImmutableDictionary>>.Empty; - private EntityRecoveryStrategy RememberedEntitiesRecoveryStrategy { get; } + private EntityRecoveryStrategy RememberedEntitiesRecoveryStrategy { get; } public PersistentShard( string typeName, string shardId, - Props entityProps, + Func entityProps, ClusterShardingSettings settings, ExtractEntityId extractEntityId, ExtractShardId extractShardId, @@ -198,9 +198,20 @@ public void DeliverTo(string id, object message, object payload, IActorRef sende var child = Context.Child(name); if (Equals(child, ActorRefs.Nobody)) { - // Note; we only do this if remembering, otherwise the buffer is an overhead - MessageBuffers = MessageBuffers.SetItem(id, ImmutableList>.Empty.Add(Tuple.Create(message, sender))); - ProcessChange(new Shard.EntityStarted(id), this.SendMessageBuffer); + if (State.Entries.Contains(id)) + { + if (MessageBuffers.ContainsKey(id)) + { + throw new InvalidOperationException($"Message buffers contains id [{id}]."); + } + this.GetEntity(id).Tell(payload, sender); + } + else + { + // Note; we only do this if remembering, otherwise the buffer is an overhead + 
MessageBuffers = MessageBuffers.SetItem(id, ImmutableList>.Empty.Add(Tuple.Create(message, sender))); + ProcessChange(new Shard.EntityStarted(id), this.SendMessageBuffer); + } } else child.Tell(payload, sender); diff --git a/src/contrib/cluster/Akka.Cluster.Sharding/Shard.cs b/src/contrib/cluster/Akka.Cluster.Sharding/Shard.cs index 6ce0bc9eb20..194b28aa84c 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/Shard.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding/Shard.cs @@ -18,7 +18,7 @@ namespace Akka.Cluster.Sharding using ShardId = String; using EntityId = String; using Msg = Object; - + internal interface IShard { IActorContext Context { get; } @@ -26,7 +26,7 @@ internal interface IShard IActorRef Sender { get; } string TypeName { get; } ShardId ShardId { get; } - Props EntityProps { get; } + Func EntityProps { get; } ClusterShardingSettings Settings { get; } ExtractEntityId ExtractEntityId { get; } ExtractShardId ExtractShardId { get; } @@ -360,7 +360,7 @@ public override int GetHashCode() public ILoggingAdapter Log { get; } = Context.GetLogger(); public string TypeName { get; } public string ShardId { get; } - public Props EntityProps { get; } + public Func EntityProps { get; } public ClusterShardingSettings Settings { get; } public ExtractEntityId ExtractEntityId { get; } public ExtractShardId ExtractShardId { get; } @@ -377,7 +377,7 @@ public override int GetHashCode() public Shard( string typeName, string shardId, - Props entityProps, + Func entityProps, ClusterShardingSettings settings, ExtractEntityId extractEntityId, ExtractShardId extractShardId, @@ -417,14 +417,14 @@ public static void Initialized(this TShard shard) where TShard : IShard { shard.Context.Parent.Tell(new ShardInitialized(shard.ShardId)); } - + public static void BaseProcessChange(this TShard shard, T evt, Action handler) where TShard : IShard where T : Shard.StateChange { handler(evt); } - + public static bool HandleCommand(this TShard shard, object message) where TShard : IShard 
{ switch (message) @@ -450,6 +450,8 @@ public static bool HandleCommand(this TShard shard, object message) wher case Shard.IShardQuery sq: shard.HandleShardRegionQuery(sq); return true; + case ShardRegion.RestartShard _: + return true; case var _ when shard.ExtractEntityId(message) != null: shard.DeliverMessage(message, shard.Context.Sender); return true; @@ -469,7 +471,7 @@ private static void HandleShardRegionQuery(this TShard shard, Shard.ISha break; } } - + public static void BaseEntityTerminated(this TShard shard, IActorRef tref) where TShard : IShard { if (shard.IdByRef.TryGetValue(tref, out var id)) @@ -614,14 +616,14 @@ private static void Passivate(this TShard shard, IActorRef entity, objec shard.Log.Debug("Unknown entity {0}. Not sending stopMessage back to entity.", entity); } } - + public static void PassivateCompleted(this TShard shard, Shard.EntityStopped evt) where TShard: IShard { shard.Log.Debug("Entity stopped after passivation [{0}]", evt.EntityId); shard.State = new Shard.ShardState(shard.State.Entries.Remove(evt.EntityId)); shard.MessageBuffers = shard.MessageBuffers.Remove(evt.EntityId); } - + public static void SendMessageBuffer(this TShard shard, Shard.EntityStarted message) where TShard: IShard { var id = message.EntityId; @@ -675,7 +677,7 @@ private static void DeliverMessage(this TShard shard, object message, IA shard.DeliverTo(id, message, payload, sender); } } - + internal static void BaseDeliverTo(this TShard shard, string id, object message, object payload, IActorRef sender) where TShard : IShard { var name = Uri.EscapeDataString(id); @@ -686,7 +688,7 @@ internal static void BaseDeliverTo(this TShard shard, string id, object else child.Tell(payload, sender); } - + internal static IActorRef GetEntity(this TShard shard, string id) where TShard: IShard { var name = Uri.EscapeDataString(id); @@ -695,7 +697,7 @@ internal static IActorRef GetEntity(this TShard shard, string id) where { shard.Log.Debug("Starting entity [{0}] in shard [{1}]", 
id, shard.ShardId); - child = shard.Context.Watch(shard.Context.ActorOf(shard.EntityProps, name)); + child = shard.Context.Watch(shard.Context.ActorOf(shard.EntityProps(id), name)); shard.IdByRef = shard.IdByRef.SetItem(child, id); shard.RefById = shard.RefById.SetItem(id, child); shard.State = new Shard.ShardState(shard.State.Entries.Add(id)); @@ -704,12 +706,12 @@ internal static IActorRef GetEntity(this TShard shard, string id) where return child; } - internal static int TotalBufferSize(this TShard shard) where TShard : IShard => + internal static int TotalBufferSize(this TShard shard) where TShard : IShard => shard.MessageBuffers.Aggregate(0, (sum, entity) => sum + entity.Value.Count); #endregion - public static Props Props(string typeName, ShardId shardId, Props entityProps, ClusterShardingSettings settings, ExtractEntityId extractEntityId, ExtractShardId extractShardId, object handOffStopMessage, IActorRef replicator, int majorityMinCap) + public static Props Props(string typeName, ShardId shardId, Func entityProps, ClusterShardingSettings settings, ExtractEntityId extractEntityId, ExtractShardId extractShardId, object handOffStopMessage, IActorRef replicator, int majorityMinCap) { switch (settings.StateStoreMode) { @@ -722,7 +724,7 @@ public static Props Props(string typeName, ShardId shardId, Props entityProps, C } } } - + class RememberEntityStarter : ActorBase { private class Tick : INoSerializationVerificationNeeded diff --git a/src/contrib/cluster/Akka.Cluster.Sharding/ShardCoordinator.cs b/src/contrib/cluster/Akka.Cluster.Sharding/ShardCoordinator.cs index aa83b8c74b5..8e5deb38d6c 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/ShardCoordinator.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding/ShardCoordinator.cs @@ -223,7 +223,6 @@ private static void HandleGracefulShutdownRequest(this TCoordinato private static void HandleRebalanceDone(this TCoordinator coordinator, string shard, bool ok) where TCoordinator : IShardCoordinator { - 
coordinator.RebalanceInProgress = coordinator.RebalanceInProgress.Remove(shard); coordinator.Log.Debug("Rebalance shard [{0}] done [{1}]", shard, ok); // The shard could have been removed by ShardRegionTerminated @@ -264,7 +263,7 @@ private static void ClearRebalanceInProgress(this TCoordinator coo private static void DeferGetShardHomeRequest(this TCoordinator coordinator, string shard, IActorRef from) where TCoordinator : IShardCoordinator { - coordinator.Log.Debug("GetShardHome [{1}] request from [{2}] deferred, because rebalance is in progress for this shard. It will be handled when rebalance is done.", shard, from); + coordinator.Log.Debug("GetShardHome [{0}] request from [{1}] deferred, because rebalance is in progress for this shard. It will be handled when rebalance is done.", shard, from); var pending = coordinator.RebalanceInProgress.TryGetValue(shard, out var prev) ? prev : ImmutableHashSet.Empty; diff --git a/src/contrib/cluster/Akka.Cluster.Sharding/ShardRegion.cs b/src/contrib/cluster/Akka.Cluster.Sharding/ShardRegion.cs index 5ed456eb95a..3dcdc1fb750 100644 --- a/src/contrib/cluster/Akka.Cluster.Sharding/ShardRegion.cs +++ b/src/contrib/cluster/Akka.Cluster.Sharding/ShardRegion.cs @@ -129,7 +129,7 @@ public sealed class StartEntityAck : IClusterShardingSerializable public readonly ShardId ShardId; /// - /// Creates a new instance of a class, used to confirm that + /// Creates a new instance of a class, used to confirm that /// request has succeed. /// /// An identifier of a newly started entity. 
@@ -243,7 +243,7 @@ public int Compare(Member x, Member y) /// /// /// TBD - internal static Props Props(string typeName, Props entityProps, ClusterShardingSettings settings, string coordinatorPath, ExtractEntityId extractEntityId, ExtractShardId extractShardId, object handOffStopMessage, IActorRef replicator, int majorityMinCap) + internal static Props Props(string typeName, Func entityProps, ClusterShardingSettings settings, string coordinatorPath, ExtractEntityId extractEntityId, ExtractShardId extractShardId, object handOffStopMessage, IActorRef replicator, int majorityMinCap) { return Actor.Props.Create(() => new ShardRegion(typeName, entityProps, settings, coordinatorPath, extractEntityId, extractShardId, handOffStopMessage, replicator, majorityMinCap)).WithDeploy(Deploy.Local); } @@ -271,7 +271,7 @@ internal static Props ProxyProps(string typeName, ClusterShardingSettings settin /// /// TBD /// - public readonly Props EntityProps; + public readonly Func EntityProps; /// /// TBD /// @@ -358,7 +358,7 @@ internal static Props ProxyProps(string typeName, ClusterShardingSettings settin /// TBD /// /// - public ShardRegion(string typeName, Props entityProps, ClusterShardingSettings settings, string coordinatorPath, ExtractEntityId extractEntityId, ExtractShardId extractShardId, object handOffStopMessage, IActorRef replicator, int majorityMinCap) + public ShardRegion(string typeName, Func entityProps, ClusterShardingSettings settings, string coordinatorPath, ExtractEntityId extractEntityId, ExtractShardId extractShardId, object handOffStopMessage, IActorRef replicator, int majorityMinCap) { TypeName = typeName; EntityProps = entityProps; @@ -379,8 +379,15 @@ private void SetupCoordinatedShutdown() var self = Self; _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterShardingShutdownRegion, "region-shutdown", () => { - self.Tell(GracefulShutdown.Instance); - return _gracefulShutdownProgress.Task; + if (Cluster.IsTerminated || Cluster.SelfMember.Status == 
MemberStatus.Down) + { + return Task.FromResult(Done.Instance); + } + else + { + self.Tell(GracefulShutdown.Instance); + return _gracefulShutdownProgress.Task; + } }); } @@ -417,7 +424,7 @@ protected object RegistrationMessage { get { - if (EntityProps != null && !EntityProps.Equals(Actor.Props.None)) + if (EntityProps != null) return new PersistentShardCoordinator.Register(Self); return new PersistentShardCoordinator.RegisterProxy(Self); } @@ -518,10 +525,21 @@ private void Register() { var coordinator = CoordinatorSelection; coordinator?.Tell(RegistrationMessage); - if (ShardBuffers.Count != 0 && _retryCount >= RetryCountThreshold) - Log.Warning("Trying to register to coordinator at [{0}], but no acknowledgement. Total [{1}] buffered messages.", - coordinator != null ? coordinator.PathString : string.Empty, TotalBufferSize); + { + if (coordinator != null) + { + var coordinatorMessage = Cluster.State.Unreachable.Contains(MembersByAge.First()) ? $"Coordinator [{MembersByAge.First()}] is unreachable." : $"Coordinator [{MembersByAge.First()}] is reachable."; + + Log.Warning("Trying to register to coordinator at [{0}], but no acknowledgement. Total [{1}] buffered messages. [{2}]", + coordinator != null ? coordinator.PathString : string.Empty, TotalBufferSize, coordinatorMessage); + } + else + { + Log.Warning("No coordinator found to register. Probably, no seed-nodes configured and manual cluster join not performed? Total [{0}] buffered messages.", + TotalBufferSize); + } + } } private void DeliverStartEntity(object message, IActorRef sender) @@ -902,7 +920,7 @@ private IActorRef GetShard(ShardId id) //TODO: change on ConcurrentDictionary.GetOrAdd? 
if (!Shards.TryGetValue(id, out var region)) { - if (EntityProps == null || EntityProps.Equals(Actor.Props.Empty)) + if (EntityProps == null) throw new IllegalStateException("Shard must not be allocated to a proxy only ShardRegion"); if (ShardsByRef.Values.All(shardId => shardId != id)) diff --git a/src/contrib/cluster/Akka.Cluster.Tools.Tests/ClusterClient/ClusterClientConfigSpec.cs b/src/contrib/cluster/Akka.Cluster.Tools.Tests/ClusterClient/ClusterClientConfigSpec.cs index 42a8e8b4259..92da28ebefb 100644 --- a/src/contrib/cluster/Akka.Cluster.Tools.Tests/ClusterClient/ClusterClientConfigSpec.cs +++ b/src/contrib/cluster/Akka.Cluster.Tools.Tests/ClusterClient/ClusterClientConfigSpec.cs @@ -60,6 +60,17 @@ public void ClusterClientSettings_must_throw_exception_on_empty_initial_contacts exception.Message.Should().Be("InitialContacts must be defined"); } + /// + /// Addresses the bug discussed here: https://github.com/akkadotnet/akka.net/issues/3417#issuecomment-397443227 + /// + [Fact] + public void ClusterClientSettings_must_copy_initial_contacts_via_fluent_interface() + { + var initialContacts = ImmutableHashSet.Empty.Add(new RootActorPath(Address.AllSystems) / "user" / "foo"); + var clusterClientSettings = ClusterClientSettings.Create(Sys).WithInitialContacts(initialContacts).WithBufferSize(2000); + clusterClientSettings.InitialContacts.Should().BeEquivalentTo(initialContacts); + } + [Fact] public void ClusterReceptionistSettings_must_have_default_config() { diff --git a/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClient.cs b/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClient.cs index 6a4a0c90b8e..1200688147e 100644 --- a/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClient.cs +++ b/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClient.cs @@ -468,17 +468,18 @@ private void Buffer(object message) { if (_settings.BufferSize == 0) { - _log.Debug("Receptionist not available and buffering is disabled, dropping message [{0}]", 
message.GetType().Name); + _log.Warning("Receptionist not available and buffering is disabled, dropping message [{0}]", message.GetType().Name); } else if (_buffer.Count == _settings.BufferSize) { var m = _buffer.Dequeue(); - _log.Debug("Receptionist not available, buffer is full, dropping first message [{0}]", m.Item1.GetType().Name); + _log.Warning("Receptionist not available, buffer is full, dropping first message [{0}]", m.Item1.GetType().Name); _buffer.Enqueue(Tuple.Create(message, Sender)); } else { - _log.Debug("Receptionist not available, buffering message type [{0}]", message.GetType().Name); + if(_log.IsDebugEnabled) // don't invoke reflection call on message type if we don't have to + _log.Debug("Receptionist not available, buffering message type [{0}]", message.GetType().Name); _buffer.Enqueue(Tuple.Create(message, Sender)); } } diff --git a/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClientSettings.cs b/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClientSettings.cs index 0795ae78b20..8bdf8dcfad9 100644 --- a/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClientSettings.cs +++ b/src/contrib/cluster/Akka.Cluster.Tools/Client/ClusterClientSettings.cs @@ -211,7 +211,7 @@ private ClusterClientSettings Copy( TimeSpan? reconnectTimeout = null) { return new ClusterClientSettings( - initialContacts, + initialContacts ?? InitialContacts, establishingGetContactsInterval ?? EstablishingGetContactsInterval, refreshContactsInterval ?? RefreshContactsInterval, heartbeatInterval ?? 
HeartbeatInterval, diff --git a/src/contrib/cluster/Akka.Cluster.Tools/Singleton/ClusterSingletonManager.cs b/src/contrib/cluster/Akka.Cluster.Tools/Singleton/ClusterSingletonManager.cs index 2cf0723b29c..e6fc0ea1daf 100644 --- a/src/contrib/cluster/Akka.Cluster.Tools/Singleton/ClusterSingletonManager.cs +++ b/src/contrib/cluster/Akka.Cluster.Tools/Singleton/ClusterSingletonManager.cs @@ -463,7 +463,7 @@ public ClusterSingletonManagerIsStuckException(SerializationInfo info, Streaming /// broadcast its existence when it is started. /// /// - /// Use factory method to create the for the actor. + /// Use one of the factory methods ClusterSingletonManager.Props to create the for the actor. /// /// public sealed class ClusterSingletonManager : FSM @@ -571,11 +571,24 @@ public ClusterSingletonManager(Props singletonProps, object terminationMessage, private void SetupCoordinatedShutdown() { var self = Self; - _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterExiting, "wait-singleton-exiting", () => _memberExitingProgress.Task); + _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterExiting, "wait-singleton-exiting", () => + { + if (_cluster.IsTerminated || _cluster.SelfMember.Status == MemberStatus.Down) + return Task.FromResult(Done.Instance); + else + return _memberExitingProgress.Task; + }); _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterExiting, "singleton-exiting-2", () => { - var timeout = _coordShutdown.Timeout(CoordinatedShutdown.PhaseClusterExiting); - return self.Ask(SelfExiting.Instance, timeout).ContinueWith(tr => Done.Instance); + if (_cluster.IsTerminated || _cluster.SelfMember.Status == MemberStatus.Down) + { + return Task.FromResult(Done.Instance); + } + else + { + var timeout = _coordShutdown.Timeout(CoordinatedShutdown.PhaseClusterExiting); + return self.Ask(SelfExiting.Instance, timeout).ContinueWith(tr => Done.Instance); + } }); } @@ -751,7 +764,8 @@ private void InitializeFSM() // transition when OldestChanged return Stay().Using(new 
YoungerData(null)); } - else if (e.FsmEvent is HandOverToMe) { + else if (e.FsmEvent is HandOverToMe) + { // this node was probably quickly restarted with same hostname:port, // confirm that the old singleton instance has been stopped Sender.Tell(HandOverDone.Instance); @@ -903,6 +917,12 @@ private void InitializeFSM() { return GoToHandingOver(oldest.Singleton, oldest.SingletonTerminated, Sender); } + else if (e.FsmEvent is TakeOverFromMe) + { + // already oldest, so confirm and continue like that + Sender.Tell(HandOverToMe.Instance); + return Stay(); + } else if (e.FsmEvent is Terminated terminated && e.StateData is OldestData o && terminated.ActorRef.Equals(o.Singleton)) { return Stay().Using(new OldestData(o.Singleton, true)); diff --git a/src/contrib/cluster/Akka.Cluster.Tools/Singleton/OldestChangedBuffer.cs b/src/contrib/cluster/Akka.Cluster.Tools/Singleton/OldestChangedBuffer.cs index 2e1f83e217b..bf7ff57398a 100644 --- a/src/contrib/cluster/Akka.Cluster.Tools/Singleton/OldestChangedBuffer.cs +++ b/src/contrib/cluster/Akka.Cluster.Tools/Singleton/OldestChangedBuffer.cs @@ -8,6 +8,7 @@ using System; using System.Collections.Immutable; using System.Linq; +using System.Threading.Tasks; using Akka.Actor; using Akka.Util.Internal; @@ -115,8 +116,15 @@ private void SetupCoordinatedShutdown() var self = Self; _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterExiting, "singleton-exiting-1", () => { - var timeout = _coordShutdown.Timeout(CoordinatedShutdown.PhaseClusterExiting); - return self.Ask(SelfExiting.Instance, timeout).ContinueWith(tr => Done.Instance); + if (_cluster.IsTerminated || _cluster.SelfMember.Status == MemberStatus.Down) + { + return Task.FromResult(Done.Instance); + } + else + { + var timeout = _coordShutdown.Timeout(CoordinatedShutdown.PhaseClusterExiting); + return self.Ask(SelfExiting.Instance, timeout).ContinueWith(tr => Done.Instance); + } }); } @@ -172,9 +180,13 @@ private void Remove(Member member) private void SendFirstChange() { - 
object change; - _changes = _changes.Dequeue(out change); - Context.Parent.Tell(change); + // don't send cluster change events if this node is shutting its self down, just wait for SelfExiting + if (!_cluster.IsTerminated) + { + object change; + _changes = _changes.Dequeue(out change); + Context.Parent.Tell(change); + } } /// @@ -231,7 +243,7 @@ private void OnDeliverNext(object message) } else if (message is ClusterEvent.MemberRemoved) { - var removed = (ClusterEvent.MemberRemoved) message; + var removed = (ClusterEvent.MemberRemoved)message; Remove(removed.Member); DeliverChanges(); } diff --git a/src/contrib/cluster/Akka.DistributedData/Dsl.cs b/src/contrib/cluster/Akka.DistributedData/Dsl.cs index 053573db179..b10ec1e346e 100644 --- a/src/contrib/cluster/Akka.DistributedData/Dsl.cs +++ b/src/contrib/cluster/Akka.DistributedData/Dsl.cs @@ -25,7 +25,7 @@ public static class Dsl /// /// Gets a setup, which will acknowledge success of an - /// or operation immediately as soon, as + /// Update or operation immediately as soon, as /// result will be confirmed by the local replica only. /// public static WriteLocal WriteLocal => Akka.DistributedData.WriteLocal.Instance; diff --git a/src/contrib/cluster/Akka.DistributedData/Internal/Internal.cs b/src/contrib/cluster/Akka.DistributedData/Internal/Internal.cs index 57caa174f27..9419d3bf13a 100644 --- a/src/contrib/cluster/Akka.DistributedData/Internal/Internal.cs +++ b/src/contrib/cluster/Akka.DistributedData/Internal/Internal.cs @@ -830,7 +830,7 @@ private NoDelta() { } /// and thereby violating . /// /// This is used as a placeholder for such `null` delta. It's filtered out - /// in , i.e. never sent to the other replicas. + /// in , i.e. never sent to the other replicas. 
/// public static readonly IReplicatedDelta NoDeltaPlaceholder = NoDelta.Instance; diff --git a/src/contrib/cluster/Akka.DistributedData/ORDictionary.cs b/src/contrib/cluster/Akka.DistributedData/ORDictionary.cs index 5f5e5834b50..c3579415a5c 100644 --- a/src/contrib/cluster/Akka.DistributedData/ORDictionary.cs +++ b/src/contrib/cluster/Akka.DistributedData/ORDictionary.cs @@ -123,12 +123,12 @@ internal ORDictionary(ORSet keySet, IImmutableDictionary val /// on other nodes and the outcome depends on what /// type that is used. /// - /// Consider using instead of if you want modify + /// Consider using AddOrUpdate instead of if you want modify /// existing entry. /// /// is thrown if you try to replace an existing /// value, because important history can be lost when replacing the `ORSet` and - /// undesired effects of merging will occur. Use or instead. + /// undesired effects of merging will occur. Use or AddOrUpdate instead. /// public ORDictionary SetItem(Cluster.Cluster node, TKey key, TValue value) => SetItem(node.SelfUniqueAddress, key, value); @@ -139,12 +139,12 @@ public ORDictionary SetItem(Cluster.Cluster node, TKey key, TValue /// on other nodes and the outcome depends on what /// type that is used. /// - /// Consider using instead of if you want modify + /// Consider using AddOrUpdate instead of if you want modify /// existing entry. /// /// is thrown if you try to replace an existing /// value, because important history can be lost when replacing the `ORSet` and - /// undesired effects of merging will occur. Use or instead. + /// undesired effects of merging will occur. Use or AddOrUpdate instead. 
/// public ORDictionary SetItem(UniqueAddress node, TKey key, TValue value) { diff --git a/src/contrib/cluster/Akka.DistributedData/Properties/AssemblyInfo.cs b/src/contrib/cluster/Akka.DistributedData/Properties/AssemblyInfo.cs index 7cabb752a5f..628cdb7f4e9 100644 --- a/src/contrib/cluster/Akka.DistributedData/Properties/AssemblyInfo.cs +++ b/src/contrib/cluster/Akka.DistributedData/Properties/AssemblyInfo.cs @@ -24,3 +24,4 @@ [assembly: InternalsVisibleTo("Akka.DistributedData.Tests.MultiNode")] [assembly: InternalsVisibleTo("Akka.Cluster.Sharding")] [assembly: InternalsVisibleTo("Akka.Cluster.Sharding.Tests.MultiNode")] +[assembly: InternalsVisibleTo("Akka.Cluster.Sharding.Tests")] diff --git a/src/contrib/cluster/Akka.DistributedData/Replicator.Messages.cs b/src/contrib/cluster/Akka.DistributedData/Replicator.Messages.cs index 75b8c3be1ec..2e09338a7c2 100644 --- a/src/contrib/cluster/Akka.DistributedData/Replicator.Messages.cs +++ b/src/contrib/cluster/Akka.DistributedData/Replicator.Messages.cs @@ -317,9 +317,9 @@ public T Get(IKey key) where T : IReplicatedData } /// - /// Register a subscriber that will be notified with a message + /// Register a subscriber that will be notified with a message /// when the value of the given is changed. Current value is also - /// sent as a message to a new subscriber. + /// sent as a message to a new subscriber. /// /// Subscribers will be notified periodically with the configured `notify-subscribers-interval`, /// and it is also possible to send an explicit `FlushChanges` message to @@ -327,7 +327,7 @@ public T Get(IKey key) where T : IReplicatedData /// /// The subscriber will automatically be unregistered if it is terminated. /// - /// If the key is deleted the subscriber is notified with a message. + /// If the key is deleted the subscriber is notified with a message. 
/// [Serializable] public sealed class Subscribe : IReplicatorMessage, IEquatable @@ -370,7 +370,7 @@ public override int GetHashCode() /// /// Unregister a subscriber. /// - /// + /// [Serializable] public sealed class Unsubscribe : IEquatable, IReplicatorMessage { @@ -466,7 +466,7 @@ public override int GetHashCode() /// /// Send this message to the local to update a data value for the /// given . The will reply with one of the - /// messages. + /// messages. /// /// The current data value for the is passed as parameter to the function. /// It is if there is no value for the , and otherwise . The function @@ -753,7 +753,7 @@ public override int GetHashCode() /// /// Send this message to the local to delete a data value for the - /// given . The will reply with one of the messages. + /// given . The will reply with one of the messages. /// [Serializable] public sealed class Delete : ICommand, INoSerializationVerificationNeeded, IEquatable diff --git a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs index 8a0e8edfa26..defc29c92cc 100644 --- a/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs +++ b/src/contrib/persistence/Akka.Persistence.Sql.Common/Journal/SqlJournal.cs @@ -168,7 +168,8 @@ protected override async Task> WriteMessagesAsync(IEnu } var batch = new WriteJournalBatch(eventToTags); - await QueryExecutor.InsertBatchAsync(connection, _pendingRequestsCancellation.Token, batch); + using(var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) + await QueryExecutor.InsertBatchAsync(connection, cancellationToken.Token, batch); } }).ToArray(); @@ -206,13 +207,16 @@ protected virtual async Task ReplayTaggedMessagesAsync(ReplayTaggedMessage using (var connection = CreateDbConnection()) { await connection.OpenAsync(); - return await QueryExecutor - .SelectByTagAsync(connection, 
_pendingRequestsCancellation.Token, replay.Tag, replay.FromOffset, replay.ToOffset, replay.Max, replayedTagged => { - foreach(var adapted in AdaptFromJournal(replayedTagged.Persistent)) - { - replay.ReplyTo.Tell(new ReplayedTaggedMessage(adapted, replayedTagged.Tag, replayedTagged.Offset), ActorRefs.NoSender); - } - }); + using(var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) + { + return await QueryExecutor + .SelectByTagAsync(connection, cancellationToken.Token, replay.Tag, replay.FromOffset, replay.ToOffset, replay.Max, replayedTagged => { + foreach(var adapted in AdaptFromJournal(replayedTagged.Persistent)) + { + replay.ReplyTo.Tell(new ReplayedTaggedMessage(adapted, replayedTagged.Tag, replayedTagged.Offset), ActorRefs.NoSender); + } + }); + } } } @@ -233,7 +237,10 @@ public override async Task ReplayMessagesAsync(IActorContext context, string per using (var connection = CreateDbConnection()) { await connection.OpenAsync(); - await QueryExecutor.SelectByPersistenceIdAsync(connection, _pendingRequestsCancellation.Token, persistenceId, fromSequenceNr, toSequenceNr, max, recoveryCallback); + using (var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) + { + await QueryExecutor.SelectByPersistenceIdAsync(connection, cancellationToken.Token, persistenceId, fromSequenceNr, toSequenceNr, max, recoveryCallback); + } } } @@ -286,14 +293,16 @@ private async Task Initialize() using (var connection = CreateDbConnection()) { await connection.OpenAsync(); - - if (_settings.AutoInitialize) + using (var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) { - await QueryExecutor.CreateTablesAsync(connection, _pendingRequestsCancellation.Token); - } + if (_settings.AutoInitialize) + { + await QueryExecutor.CreateTablesAsync(connection, cancellationToken.Token); + } - var ids = await 
QueryExecutor.SelectAllPersistenceIdsAsync(connection, _pendingRequestsCancellation.Token); - return new AllPersistenceIds(ids); + var ids = await QueryExecutor.SelectAllPersistenceIdsAsync(connection, cancellationToken.Token); + return new AllPersistenceIds(ids); + } } } catch (Exception e) @@ -459,7 +468,10 @@ protected override async Task DeleteMessagesToAsync(string persistenceId, long t using (var connection = CreateDbConnection()) { await connection.OpenAsync(); - await QueryExecutor.DeleteBatchAsync(connection, _pendingRequestsCancellation.Token, persistenceId, toSequenceNr); + using (var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) + { + await QueryExecutor.DeleteBatchAsync(connection, cancellationToken.Token, persistenceId, toSequenceNr); + } } } @@ -475,7 +487,10 @@ public override async Task ReadHighestSequenceNrAsync(string persistenceId using (var connection = CreateDbConnection()) { await connection.OpenAsync(); - return await QueryExecutor.SelectHighestSequenceNrAsync(connection, _pendingRequestsCancellation.Token, persistenceId); + using (var cancellationToken = CancellationTokenSource.CreateLinkedTokenSource(_pendingRequestsCancellation.Token)) + { + return await QueryExecutor.SelectHighestSequenceNrAsync(connection, cancellationToken.Token, persistenceId); + } } } diff --git a/src/contrib/persistence/Akka.Persistence.Sql.Common/Snapshot/QueryExecutor.cs b/src/contrib/persistence/Akka.Persistence.Sql.Common/Snapshot/QueryExecutor.cs index d64ec8c1f9f..d94ae81a224 100644 --- a/src/contrib/persistence/Akka.Persistence.Sql.Common/Snapshot/QueryExecutor.cs +++ b/src/contrib/persistence/Akka.Persistence.Sql.Common/Snapshot/QueryExecutor.cs @@ -345,7 +345,7 @@ protected virtual void SetPayloadParameter(object snapshot, DbCommand command) /// /// TBD /// - /// TBD + /// TBD /// TBD protected virtual void SetManifestParameters(object snapshot, DbCommand command) { diff --git 
a/src/core/Akka.API.Tests/Akka.API.Tests.csproj b/src/core/Akka.API.Tests/Akka.API.Tests.csproj index 6bb230ee621..cf2293f76f0 100644 --- a/src/core/Akka.API.Tests/Akka.API.Tests.csproj +++ b/src/core/Akka.API.Tests/Akka.API.Tests.csproj @@ -6,6 +6,40 @@ net452 + + + + + + + + + + + + + Always + + + Always + + + Always + + + Always + + + Always + + + Always + + + Always + + + diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApproveCluster.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApproveCluster.approved.txt index 65be772a7d0..96ac4d2ebe0 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApproveCluster.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApproveCluster.approved.txt @@ -25,6 +25,7 @@ namespace Akka.Cluster public Akka.Remote.DefaultFailureDetectorRegistry FailureDetector { get; } public bool IsTerminated { get; } public Akka.Actor.Address SelfAddress { get; } + public Akka.Cluster.Member SelfMember { get; } public System.Collections.Immutable.ImmutableHashSet SelfRoles { get; } public Akka.Cluster.UniqueAddress SelfUniqueAddress { get; } public Akka.Cluster.ClusterSettings Settings { get; } diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt index b2a78a7fe9c..db2f658843e 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApproveCore.approved.txt @@ -503,12 +503,35 @@ namespace Akka.Actor public const string PhaseServiceRequestsDone = "service-requests-done"; public const string PhaseServiceStop = "service-stop"; public const string PhaseServiceUnbind = "service-unbind"; + public Akka.Actor.CoordinatedShutdown.Reason ShutdownReason { get; } public Akka.Actor.ExtendedActorSystem System { get; } public System.TimeSpan TotalTimeout { get; } public void AddTask(string phase, string taskName, System.Func> task) { } public static Akka.Actor.CoordinatedShutdown Get(Akka.Actor.ActorSystem sys) { } + 
[System.ObsoleteAttribute("Use the method with \'reason\' parameter instead")] public System.Threading.Tasks.Task Run(string fromPhase = null) { } + public System.Threading.Tasks.Task Run(Akka.Actor.CoordinatedShutdown.Reason reason, string fromPhase = null) { } public System.TimeSpan Timeout(string phase) { } + public class ClrExitReason : Akka.Actor.CoordinatedShutdown.Reason + { + public static Akka.Actor.CoordinatedShutdown.Reason Instance; + } + public class ClusterDowningReason : Akka.Actor.CoordinatedShutdown.Reason + { + public static Akka.Actor.CoordinatedShutdown.Reason Instance; + } + public class ClusterLeavingReason : Akka.Actor.CoordinatedShutdown.Reason + { + public static Akka.Actor.CoordinatedShutdown.Reason Instance; + } + public class Reason + { + protected Reason() { } + } + public class UnknownReason : Akka.Actor.CoordinatedShutdown.Reason + { + public static Akka.Actor.CoordinatedShutdown.Reason Instance; + } } public sealed class CoordinatedShutdownExtension : Akka.Actor.ExtensionIdProvider { @@ -2951,20 +2974,20 @@ namespace Akka.Event public abstract bool IsErrorEnabled { get; } public abstract bool IsInfoEnabled { get; } public abstract bool IsWarningEnabled { get; } - public void Debug(string format, params object[] args) { } - public void Error(System.Exception cause, string format, params object[] args) { } - public void Error(string format, params object[] args) { } - public void Info(string format, params object[] args) { } + public virtual void Debug(string format, params object[] args) { } + public virtual void Error(System.Exception cause, string format, params object[] args) { } + public virtual void Error(string format, params object[] args) { } + public virtual void Info(string format, params object[] args) { } public bool IsEnabled(Akka.Event.LogLevel logLevel) { } - public void Log(Akka.Event.LogLevel logLevel, string format, params object[] args) { } + public virtual void Log(Akka.Event.LogLevel logLevel, string format, 
params object[] args) { } protected abstract void NotifyDebug(object message); protected abstract void NotifyError(object message); protected abstract void NotifyError(System.Exception cause, object message); protected abstract void NotifyInfo(object message); protected void NotifyLog(Akka.Event.LogLevel logLevel, object message) { } protected abstract void NotifyWarning(object message); - public void Warn(string format, params object[] args) { } - public void Warning(string format, params object[] args) { } + public virtual void Warn(string format, params object[] args) { } + public virtual void Warning(string format, params object[] args) { } } public class LoggingBus : Akka.Event.ActorEventBus { diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistence.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistence.approved.txt index 9d6c8be8c32..8c59631138f 100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistence.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApprovePersistence.approved.txt @@ -1137,7 +1137,7 @@ namespace Akka.Persistence.Snapshot protected override System.Threading.Tasks.Task DeleteAsync(Akka.Persistence.SnapshotMetadata metadata) { } protected override System.Threading.Tasks.Task DeleteAsync(string persistenceId, Akka.Persistence.SnapshotSelectionCriteria criteria) { } protected override System.Threading.Tasks.Task LoadAsync(string persistenceId, Akka.Persistence.SnapshotSelectionCriteria criteria) { } - protected override async System.Threading.Tasks.Task SaveAsync(Akka.Persistence.SnapshotMetadata metadata, object snapshot) { } + protected override System.Threading.Tasks.Task SaveAsync(Akka.Persistence.SnapshotMetadata metadata, object snapshot) { } } public sealed class NoSnapshotStore : Akka.Persistence.Snapshot.SnapshotStore { diff --git a/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt b/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt index 595d16b2a59..36c9e3c4436 
100644 --- a/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt +++ b/src/core/Akka.API.Tests/CoreAPISpec.ApproveStreams.approved.txt @@ -642,6 +642,7 @@ namespace Akka.Streams } public class static KillSwitches { + public static Akka.Streams.IGraph, Akka.NotUsed> AsFlow(this System.Threading.CancellationToken cancellationToken, bool cancelGracefully = False) { } public static Akka.Streams.SharedKillSwitch Shared(string name) { } public static Akka.Streams.IGraph, Akka.Streams.UniqueKillSwitch> Single() { } public static Akka.Streams.IGraph, Akka.Streams.UniqueKillSwitch> SingleBidi() { } @@ -1328,6 +1329,11 @@ namespace Akka.Streams.Dsl public Akka.Streams.Inlet In(int id) { } public override string ToString() { } } + public class static IntervalBasedRateLimiter + { + [Akka.Annotations.ApiMayChangeAttribute()] + public static Akka.Streams.IGraph>, Akka.NotUsed> Create(System.TimeSpan minInterval, int maxBatchSize) { } + } public interface IRunnableGraph : Akka.Streams.IGraph, Akka.Streams.IGraph { Akka.Streams.Dsl.IRunnableGraph AddAttributes(Akka.Streams.Attributes attributes); @@ -1358,6 +1364,14 @@ namespace Akka.Streams.Dsl public static Akka.NotUsed None(TLeft left, TRight right) { } public static TRight Right(TLeft left, TRight right) { } } + public class KeepAliveConcat : Akka.Streams.Stage.GraphStage> + { + public KeepAliveConcat(int keepAliveFailoverSize, System.TimeSpan interval, System.Func> extrapolate) { } + public Akka.Streams.Inlet In { get; } + public Akka.Streams.Outlet Out { get; } + public override Akka.Streams.FlowShape Shape { get; } + protected override Akka.Streams.Stage.GraphStageLogic CreateLogic(Akka.Streams.Attributes inheritedAttributes) { } + } public class LastElement : Akka.Streams.Stage.GraphStageWithMaterializedValue, System.Threading.Tasks.Task>> { public LastElement() { } @@ -1462,6 +1476,17 @@ namespace Akka.Streams.Dsl { public OutputTruncationException() { } } + public class static PagedSource + { + 
[Akka.Annotations.ApiMayChangeAttribute()] + public static Akka.Streams.Dsl.Source Create(TKey firstKey, System.Func>> pageFactory) { } + public class Page + { + public Page(System.Collections.Generic.IEnumerable items, Akka.Streams.Util.Option nextKey) { } + public System.Collections.Generic.IEnumerable Items { get; } + public Akka.Streams.Util.Option NextKey { get; } + } + } public sealed class Partition : Akka.Streams.Stage.GraphStage> { public readonly Akka.Streams.Inlet In; @@ -1620,6 +1645,7 @@ namespace Akka.Streams.Dsl public static Akka.Streams.Dsl.Source ActorRef(int bufferSize, Akka.Streams.OverflowStrategy overflowStrategy) { } public static Akka.Streams.Dsl.Source> AsSubscriber() { } public static Akka.Streams.Dsl.Source Combine(Akka.Streams.Dsl.Source first, Akka.Streams.Dsl.Source second, System.Func, Akka.NotUsed>> strategy, params Akka.Streams.Dsl.Source<, >[] rest) { } + public static Akka.Streams.Dsl.Source CombineMaterialized(Akka.Streams.Dsl.Source first, Akka.Streams.Dsl.Source second, System.Func, Akka.NotUsed>> strategy, System.Func combineMaterializers) { } public static Akka.Streams.Dsl.Source Cycle(System.Func> enumeratorFactory) { } public static Akka.Streams.Dsl.Source Empty() { } public static Akka.Streams.Dsl.Source Failed(System.Exception cause) { } @@ -1671,6 +1697,13 @@ namespace Akka.Streams.Dsl public Akka.Streams.Dsl.Source, Akka.NotUsed> ZipN(System.Collections.Generic.IEnumerable> sources) { } public Akka.Streams.Dsl.Source ZipWithN(System.Func, TOut2> zipper, System.Collections.Generic.IEnumerable> sources) { } } + public class static SourceGen + { + [Akka.Annotations.ApiMayChangeAttribute()] + public static Akka.Streams.Dsl.Source UnfoldFlow(TState seed, Akka.Streams.IGraph>, TMat> flow, System.TimeSpan timeout) { } + [Akka.Annotations.ApiMayChangeAttribute()] + public static Akka.Streams.Dsl.Source UnfoldFlowWith(TState seed, Akka.Streams.IGraph, TMat> flow, System.Func>> unfoldWith, System.TimeSpan timeout) { } + } public 
class static SourceOperations { public static Akka.Streams.Dsl.Source Aggregate(this Akka.Streams.Dsl.Source flow, TOut2 zero, System.Func fold) { } @@ -1682,6 +1715,7 @@ namespace Akka.Streams.Dsl public static Akka.Streams.Dsl.Source BatchWeighted(this Akka.Streams.Dsl.Source flow, long max, System.Func costFunction, System.Func seed, System.Func aggregate) { } public static Akka.Streams.Dsl.Source Buffer(this Akka.Streams.Dsl.Source flow, int size, Akka.Streams.OverflowStrategy strategy) { } public static Akka.Streams.Dsl.Source Collect(this Akka.Streams.Dsl.Source flow, System.Func collector) { } + public static Akka.Streams.Dsl.Source CombineMaterialized(this Akka.Streams.Dsl.Source flow, Akka.Streams.Dsl.Source other, System.Func, Akka.NotUsed>> strategy, System.Func combineMaterializers) { } public static Akka.Streams.Dsl.Source CompletionTimeout(this Akka.Streams.Dsl.Source flow, System.TimeSpan timeout) { } public static Akka.Streams.Dsl.Source Concat(this Akka.Streams.Dsl.Source flow, Akka.Streams.IGraph, TMat> other) { } public static Akka.Streams.Dsl.Source ConcatMany(this Akka.Streams.Dsl.Source flow, System.Func, TMat>> flatten) { } diff --git a/src/core/Akka.Cluster.TestKit/MultiNodeClusterSpec.cs b/src/core/Akka.Cluster.TestKit/MultiNodeClusterSpec.cs index 22a005ad559..62dd976c799 100644 --- a/src/core/Akka.Cluster.TestKit/MultiNodeClusterSpec.cs +++ b/src/core/Akka.Cluster.TestKit/MultiNodeClusterSpec.cs @@ -246,7 +246,7 @@ public void StartClusterNode() if (ClusterView.Members.IsEmpty) { Cluster.Join(GetAddress(Myself)); - AwaitAssert(() => Assert.True(ClusterView.Members.Select(m => m.Address).Contains(GetAddress(Myself)))); + AwaitAssert(() => Assert.Contains(GetAddress(Myself), ClusterView.Members.Select(m => m.Address))); } } diff --git a/src/core/Akka.Cluster.Tests.MultiNode/ClusterDeathWatchSpec.cs b/src/core/Akka.Cluster.Tests.MultiNode/ClusterDeathWatchSpec.cs index e280ff7717b..9e34c183ff5 100644 --- 
a/src/core/Akka.Cluster.Tests.MultiNode/ClusterDeathWatchSpec.cs +++ b/src/core/Akka.Cluster.Tests.MultiNode/ClusterDeathWatchSpec.cs @@ -120,11 +120,11 @@ public void An_actor_watching_a_remote_actor_in_the_cluster_must_receive_termina ExpectNoMsg(TimeSpan.FromSeconds(2)); EnterBarrier("second-terminated"); MarkNodeAsUnavailable(GetAddress(_config.Third)); - AwaitAssert(() => Assert.True(ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.Third)))); + AwaitAssert(() => Assert.Contains(GetAddress(_config.Third), ClusterView.UnreachableMembers.Select(x => x.Address))); Cluster.Down(GetAddress(_config.Third)); //removed - AwaitAssert(() => Assert.False(ClusterView.Members.Select(x => x.Address).Contains(GetAddress(_config.Third)))); - AwaitAssert(() => Assert.False(ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.Third)))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Third), ClusterView.Members.Select(x => x.Address))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Third), ClusterView.UnreachableMembers.Select(x => x.Address))); ExpectMsg(path3); EnterBarrier("third-terminated"); }, _config.First); @@ -137,11 +137,11 @@ public void An_actor_watching_a_remote_actor_in_the_cluster_must_receive_termina RunOn(() => { MarkNodeAsUnavailable(GetAddress(_config.Second)); - AwaitAssert(() => Assert.True(ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.Second)))); + AwaitAssert(() => Assert.Contains(GetAddress(_config.Second), ClusterView.UnreachableMembers.Select(x => x.Address))); Cluster.Down(GetAddress(_config.Second)); //removed - AwaitAssert(() => Assert.False(ClusterView.Members.Select(x => x.Address).Contains(GetAddress(_config.Second)))); - AwaitAssert(() => Assert.False(ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.Second)))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Second), 
ClusterView.Members.Select(x => x.Address))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Second), ClusterView.UnreachableMembers.Select(x => x.Address))); }, _config.Third); EnterBarrier("second-terminated"); EnterBarrier("third-terminated"); @@ -227,8 +227,8 @@ public void An_actor_watching_a_remote_actor_in_the_cluster_must_be_able_to_watc AwaitAssert(() => ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.Fifth)).ShouldBeTrue()); Cluster.Down(GetAddress(_config.Fifth)); // removed - AwaitAssert(() => Assert.False(ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.Fifth)))); - AwaitAssert(() => Assert.False(ClusterView.Members.Select(x => x.Address).Contains(GetAddress(_config.Fifth)))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Fifth), ClusterView.UnreachableMembers.Select(x => x.Address))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Fifth), ClusterView.Members.Select(x => x.Address))); }, _config.Fourth); EnterBarrier("fifth-terminated"); @@ -266,8 +266,8 @@ public void An_actor_watching_a_remote_actor_in_the_cluster_must_be_able_to_shut AwaitAssert(() => ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.First)).ShouldBeTrue()); Cluster.Down(GetAddress(_config.First)); // removed - AwaitAssert(() => Assert.False(ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.First)))); - AwaitAssert(() => Assert.False(ClusterView.Members.Select(x => x.Address).Contains(GetAddress(_config.First)))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.First), ClusterView.UnreachableMembers.Select(x => x.Address))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.First), ClusterView.Members.Select(x => x.Address))); ExpectTerminated(hello); EnterBarrier("first-unavailable"); diff --git a/src/core/Akka.Cluster.Tests.MultiNode/InitialHeartbeatSpec.cs 
b/src/core/Akka.Cluster.Tests.MultiNode/InitialHeartbeatSpec.cs index b2118f0ad90..f1af896d13e 100644 --- a/src/core/Akka.Cluster.Tests.MultiNode/InitialHeartbeatSpec.cs +++ b/src/core/Akka.Cluster.Tests.MultiNode/InitialHeartbeatSpec.cs @@ -84,10 +84,7 @@ public void A_member_must_detect_failure_even_though_no_heartbeats_have_been_rec AwaitAssert(() => { Cluster.SendCurrentClusterState(TestActor); - Assert.True( - ExpectMsg() - .Members.Select(m => m.Address) - .Contains(secondAddress)); + Assert.Contains(secondAddress, ExpectMsg().Members.Select(m => m.Address)); }, TimeSpan.FromSeconds(20), TimeSpan.FromMilliseconds(50)) , _config.First); @@ -97,10 +94,7 @@ public void A_member_must_detect_failure_even_though_no_heartbeats_have_been_rec AwaitAssert(() => { Cluster.SendCurrentClusterState(TestActor); - Assert.True( - ExpectMsg() - .Members.Select(m => m.Address) - .Contains(firstAddress)); + Assert.Contains(firstAddress, ExpectMsg().Members.Select(m => m.Address)); }, TimeSpan.FromSeconds(20), TimeSpan.FromMilliseconds(50)); }, _config.Second); diff --git a/src/core/Akka.Cluster.Tests.MultiNode/RestartNodeSpec.cs b/src/core/Akka.Cluster.Tests.MultiNode/RestartNodeSpec.cs index b608a4d6fab..92cd4c8272e 100644 --- a/src/core/Akka.Cluster.Tests.MultiNode/RestartNodeSpec.cs +++ b/src/core/Akka.Cluster.Tests.MultiNode/RestartNodeSpec.cs @@ -171,9 +171,9 @@ public void ClusterNodesMustBeAbleToRestartAndJoinAgain() AwaitAssert(() => { Assert.Equal(3, Cluster.Get(Sys).ReadView.Members.Count); - Assert.True( - Cluster.Get(Sys) - .ReadView.Members.Any(m => m.Address.Equals(SecondUniqueAddress.Address) && m.UniqueAddress.Uid != SecondUniqueAddress.Uid)); + Assert.Contains( + Cluster.Get(Sys).ReadView.Members, + m => m.Address.Equals(SecondUniqueAddress.Address) && m.UniqueAddress.Uid != SecondUniqueAddress.Uid); }); }, _config.First, _config.Third); diff --git a/src/core/Akka.Cluster.Tests.MultiNode/Routing/ClusterConsistentHashingRouterSpec.cs 
b/src/core/Akka.Cluster.Tests.MultiNode/Routing/ClusterConsistentHashingRouterSpec.cs index 935ffee169f..8e4000ef37e 100644 --- a/src/core/Akka.Cluster.Tests.MultiNode/Routing/ClusterConsistentHashingRouterSpec.cs +++ b/src/core/Akka.Cluster.Tests.MultiNode/Routing/ClusterConsistentHashingRouterSpec.cs @@ -263,8 +263,8 @@ protected void A_cluster_router_with_consistent_hashing_pool_must_remove_routees { Cluster.Down(GetAddress(_config.Third)); //removed - AwaitAssert(() => Assert.False(ClusterView.UnreachableMembers.Select(x => x.Address).Contains(GetAddress(_config.Third)))); - AwaitAssert(() => Assert.False(ClusterView.Members.Select(x => x.Address).Contains(GetAddress(_config.Third)))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Third), ClusterView.UnreachableMembers.Select(x => x.Address))); + AwaitAssert(() => Assert.DoesNotContain(GetAddress(_config.Third), ClusterView.Members.Select(x => x.Address))); // it may take some time until router receives cluster member events AwaitAssert(() => diff --git a/src/core/Akka.Cluster.Tests.MultiNode/UnreachableNodeJoinsAgainSpec.cs b/src/core/Akka.Cluster.Tests.MultiNode/UnreachableNodeJoinsAgainSpec.cs index caa989c628e..b61da48f2a2 100644 --- a/src/core/Akka.Cluster.Tests.MultiNode/UnreachableNodeJoinsAgainSpec.cs +++ b/src/core/Akka.Cluster.Tests.MultiNode/UnreachableNodeJoinsAgainSpec.cs @@ -138,12 +138,12 @@ public void MarkNodeAsUNREACHABLEWhenWePullTheNetwork() AwaitAssert(() => { var members = ClusterView.Members; // to snapshot the object - Assert.Equal(1, ClusterView.UnreachableMembers.Count); + Assert.Single(ClusterView.UnreachableMembers); }); AwaitSeenSameState(allButVictim.Select(GetAddress).ToArray()); // still once unreachable - Assert.Equal(1, ClusterView.UnreachableMembers.Count); + Assert.Single(ClusterView.UnreachableMembers); Assert.Equal(Node(_victim.Value).Address, ClusterView.UnreachableMembers.First().Address); Assert.Equal(MemberStatus.Up, 
ClusterView.UnreachableMembers.First().Status); }); @@ -219,7 +219,7 @@ public void AllowFreshNodeWithSameHostAndPortToJoinAgainWhenTheNetworkIsPluggedB Cluster.Get(freshSystem).Join(masterAddress); Within(TimeSpan.FromSeconds(15), () => { - AwaitAssert(() => Assert.True(Cluster.Get(freshSystem).ReadView.Members.Select(x => x.Address).Contains(victimAddress))); + AwaitAssert(() => Assert.Contains(victimAddress, Cluster.Get(freshSystem).ReadView.Members.Select(x => x.Address))); AwaitAssert(() => Assert.Equal(expectedNumberOfMembers,Cluster.Get(freshSystem).ReadView.Members.Count)); AwaitAssert(() => Assert.True(Cluster.Get(freshSystem).ReadView.Members.All(y => y.Status == MemberStatus.Up))); }); diff --git a/src/core/Akka.Cluster.Tests/ClusterSpec.cs b/src/core/Akka.Cluster.Tests/ClusterSpec.cs index b7387095561..86a976eaff6 100644 --- a/src/core/Akka.Cluster.Tests/ClusterSpec.cs +++ b/src/core/Akka.Cluster.Tests/ClusterSpec.cs @@ -189,8 +189,9 @@ public void A_cluster_must_complete_LeaveAsync_task_upon_being_removed() leaveTask.IsCompleted.Should().BeFalse(); probe.ExpectMsg(); - probe.ExpectMsg(); - probe.ExpectMsg(); + // MemberExited might not be published before MemberRemoved + var removed = (ClusterEvent.MemberRemoved)probe.FishForMessage(m => m is ClusterEvent.MemberRemoved); + removed.PreviousStatus.ShouldBeEquivalentTo(MemberStatus.Exiting); AwaitCondition(() => leaveTask.IsCompleted); @@ -492,11 +493,45 @@ public void A_cluster_must_leave_via_CoordinatedShutdownRun() Cluster.Get(sys2).Join(Cluster.Get(sys2).SelfAddress); probe.ExpectMsg(); - CoordinatedShutdown.Get(sys2).Run(); + CoordinatedShutdown.Get(sys2).Run(CoordinatedShutdown.UnknownReason.Instance); probe.ExpectMsg(); - probe.ExpectMsg(); - probe.ExpectMsg(); + // MemberExited might not be published before MemberRemoved + var removed = (ClusterEvent.MemberRemoved)probe.FishForMessage(m => m is ClusterEvent.MemberRemoved); + removed.PreviousStatus.ShouldBeEquivalentTo(MemberStatus.Exiting); + } + 
finally + { + Shutdown(sys2); + } + } + + [Fact] + public void A_cluster_must_leave_via_CoordinatedShutdownRun_when_member_status_is_Joining() + { + var sys2 = ActorSystem.Create("ClusterSpec2", ConfigurationFactory.ParseString(@" + akka.actor.provider = ""cluster"" + akka.remote.dot-netty.tcp.port = 0 + akka.coordinated-shutdown.run-by-clr-shutdown-hook = off + akka.coordinated-shutdown.terminate-actor-system = off + akka.cluster.run-coordinated-shutdown-when-down = off + akka.cluster.min-nr-of-members = 2 + ").WithFallback(Akka.TestKit.Configs.TestConfigs.DefaultConfig)); + + try + { + var probe = CreateTestProbe(sys2); + Cluster.Get(sys2).Subscribe(probe.Ref, typeof(ClusterEvent.IMemberEvent)); + probe.ExpectMsg(); + Cluster.Get(sys2).Join(Cluster.Get(sys2).SelfAddress); + probe.ExpectMsg(); + + CoordinatedShutdown.Get(sys2).Run(CoordinatedShutdown.UnknownReason.Instance); + + probe.ExpectMsg(); + // MemberExited might not be published before MemberRemoved + var removed = (ClusterEvent.MemberRemoved)probe.FishForMessage(m => m is ClusterEvent.MemberRemoved); + removed.PreviousStatus.ShouldBeEquivalentTo(MemberStatus.Exiting); } finally { @@ -524,10 +559,12 @@ public void A_cluster_must_terminate_ActorSystem_via_leave_CoordinatedShutdown() Cluster.Get(sys2).Leave(Cluster.Get(sys2).SelfAddress); probe.ExpectMsg(); - probe.ExpectMsg(); - probe.ExpectMsg(); + // MemberExited might not be published before MemberRemoved + var removed = (ClusterEvent.MemberRemoved)probe.FishForMessage(m => m is ClusterEvent.MemberRemoved); + removed.PreviousStatus.ShouldBeEquivalentTo(MemberStatus.Exiting); AwaitCondition(() => sys2.WhenTerminated.IsCompleted, TimeSpan.FromSeconds(10)); Cluster.Get(sys2).IsTerminated.Should().BeTrue(); + CoordinatedShutdown.Get(sys2).ShutdownReason.Should().BeOfType(); } finally { @@ -559,6 +596,7 @@ public void A_cluster_must_terminate_ActorSystem_via_Down_CoordinatedShutdown() probe.ExpectMsg(); AwaitCondition(() => sys3.WhenTerminated.IsCompleted, 
TimeSpan.FromSeconds(10)); Cluster.Get(sys3).IsTerminated.Should().BeTrue(); + CoordinatedShutdown.Get(sys3).ShutdownReason.Should().BeOfType(); } finally { diff --git a/src/core/Akka.Cluster/Cluster.cs b/src/core/Akka.Cluster/Cluster.cs index b367a47d601..ff967a9fc19 100644 --- a/src/core/Akka.Cluster/Cluster.cs +++ b/src/core/Akka.Cluster/Cluster.cs @@ -296,6 +296,7 @@ public void JoinSeedNodes(IEnumerable
seedNodes) /// actor system is manually restarted. /// /// TBD + /// TBD public Task JoinSeedNodesAsync(IEnumerable
seedNodes, CancellationToken token = default(CancellationToken)) { var completion = new TaskCompletionSource(); @@ -424,7 +425,7 @@ public void RegisterOnMemberRemoved(Action callback) /// ActorRef with the cluster's , unless address' host is already defined /// /// An belonging to the current node. - /// The absolute remote of . + /// The absolute remote of . public ActorPath RemotePathOf(IActorRef actorRef) { var path = actorRef.Path; @@ -463,6 +464,11 @@ public ImmutableHashSet SelfRoles /// public ClusterEvent.CurrentClusterState State { get { return _readView._state; } } + /// + /// Access to the current member info for this node. + /// + public Member SelfMember => _readView.Self; + private readonly AtomicBoolean _isTerminated = new AtomicBoolean(false); /// diff --git a/src/core/Akka.Cluster/ClusterDaemon.cs b/src/core/Akka.Cluster/ClusterDaemon.cs index 0fc632b5138..3a791d866c7 100644 --- a/src/core/Akka.Cluster/ClusterDaemon.cs +++ b/src/core/Akka.Cluster/ClusterDaemon.cs @@ -398,7 +398,7 @@ public override bool Equals(object obj) { if (ReferenceEquals(null, obj)) return false; if (ReferenceEquals(this, obj)) return true; - return obj is ExitingConfirmed && Equals((ExitingConfirmed) obj); + return obj is ExitingConfirmed && Equals((ExitingConfirmed)obj); } /// @@ -872,7 +872,7 @@ private void AddCoordinatedLeave() var self = Self; _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterLeave, "leave", () => { - if (Cluster.Get(sys).IsTerminated) + if (Cluster.Get(sys).IsTerminated || Cluster.Get(sys).SelfMember.Status == MemberStatus.Down) { return Task.FromResult(Done.Instance); } @@ -898,8 +898,8 @@ protected override void PostStop() _clusterPromise.TrySetResult(Done.Instance); if (_settings.RunCoordinatedShutdownWhenDown) { - // run the last phases if the node was downed (not leaving) - _coordShutdown.Run(CoordinatedShutdown.PhaseClusterShutdown); + // if it was stopped due to leaving CoordinatedShutdown was started earlier + 
_coordShutdown.Run(CoordinatedShutdown.ClusterDowningReason.Instance); } } } @@ -941,7 +941,7 @@ protected override SupervisorStrategy SupervisorStrategy() { return new OneForOneStrategy(e => { - //TODO: JVM version matches NonFatal. Can / should we do something similar? + //TODO: JVM version matches NonFatal. Can / should we do something similar? _log.Error(e, "Cluster node [{0}] crashed, [{1}] - shutting down...", Cluster.Get(Context.System).SelfAddress, e); Self.Tell(PoisonPill.Instance); @@ -968,7 +968,7 @@ private void CreateChildren() /// /// INTERNAL API - /// + /// /// Actor used to power the guts of the Akka.Cluster membership and gossip protocols. /// internal class ClusterCoreDaemon : UntypedActor, IRequiresMessageQueue @@ -1075,10 +1075,16 @@ private void AddCoordinatedLeave() { var sys = Context.System; var self = Self; - _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterExiting, "wait-exiting", () => _selfExiting.Task); + _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterExiting, "wait-exiting", () => + { + if (_latestGossip.Members.IsEmpty) + return Task.FromResult(Done.Instance); // not joined yet + else + return _selfExiting.Task; + }); _coordShutdown.AddTask(CoordinatedShutdown.PhaseClusterExitingDone, "exiting-completed", () => { - if (Cluster.Get(sys).IsTerminated) + if (Cluster.Get(sys).IsTerminated || Cluster.Get(sys).SelfMember.Status == MemberStatus.Down) return TaskEx.Completed; else { @@ -1295,7 +1301,7 @@ private void BecomeUninitialized() private void BecomeInitialized() { // start heartbeatSender here, and not in constructor to make sure that - // heartbeating doesn't start before Welcome is received + // heartbeating doesn't start before Welcome is received Context.ActorOf(Props.Create().WithDispatcher(_cluster.Settings.UseDispatcher), "heartbeatSender"); // make sure that join process is stopped @@ -1314,7 +1320,7 @@ private void Initialized(object message) { var ge = message as GossipEnvelope; var receivedType = 
ReceiveGossip(ge); - if(_cluster.Settings.VerboseGossipReceivedLogging) + if (_cluster.Settings.VerboseGossipReceivedLogging) _log.Debug("Cluster Node [{0}] - Received gossip from [{1}] which was {2}.", _cluster.SelfAddress, ge.From, receivedType); } else if (message is GossipStatus) @@ -1388,7 +1394,7 @@ private void Initialized(object message) } else if (message is InternalClusterAction.ExitingConfirmed) { - var c = (InternalClusterAction.ExitingConfirmed) message; + var c = (InternalClusterAction.ExitingConfirmed)message; ReceiveExitingConfirmed(c.Address); } else if (ReceiveExitingCompleted(message)) { } @@ -1662,7 +1668,7 @@ public void Welcome(Address joinWith, UniqueAddress from, Gossip gossip) public void Leaving(Address address) { // only try to update if the node is available (in the member ring) - if (_latestGossip.Members.Any(m => m.Address.Equals(address) && m.Status == MemberStatus.Up)) + if (_latestGossip.Members.Any(m => m.Address.Equals(address) && (m.Status == MemberStatus.Joining || m.Status == MemberStatus.WeaklyUp || m.Status == MemberStatus.Up))) { // mark node as LEAVING var newMembers = _latestGossip.Members.Select(m => @@ -1968,7 +1974,7 @@ public ReceiveGossipType ReceiveGossip(GossipEnvelope envelope) _exitingTasksInProgress = true; _log.Info("Exiting, starting coordinated shutdown."); _selfExiting.TrySetResult(Done.Instance); - _coordShutdown.Run(); + _coordShutdown.Run(CoordinatedShutdown.ClusterLeavingReason.Instance); } if (talkback) @@ -2094,7 +2100,7 @@ public double AdjustedGossipDifferentViewProbability // linear reduction of the probability with increasing number of nodes // from ReduceGossipDifferentViewProbability at ReduceGossipDifferentViewProbability nodes // to ReduceGossipDifferentViewProbability / 10 at ReduceGossipDifferentViewProbability * 3 nodes - // i.e. default from 0.8 at 400 nodes, to 0.08 at 1600 nodes + // i.e. 
default from 0.8 at 400 nodes, to 0.08 at 1600 nodes var k = (minP - _cluster.Settings.GossipDifferentViewProbability) / (high - low); return _cluster.Settings.GossipDifferentViewProbability + (size - low) * k; } @@ -2202,7 +2208,7 @@ private void ShutdownSelfWhenDown() /// this function will check to see if that threshold is met. /// /// - /// true if the setting isn't enabled or is satisfied. + /// true if the setting isn't enabled or is satisfied. /// false is the setting is enabled and unsatisfied. /// public bool IsMinNrOfMembersFulfilled() @@ -2314,7 +2320,7 @@ public void LeaderActionsOnConvergence() _exitingTasksInProgress = true; _log.Info("Exiting (leader), starting coordinated shutdown."); _selfExiting.TrySetResult(Done.Instance); - _coordShutdown.Run(); + _coordShutdown.Run(CoordinatedShutdown.ClusterLeavingReason.Instance); } UpdateLatestGossip(newGossip); @@ -2553,21 +2559,21 @@ public void PublishInternalStats() /// /// INTERNAL API - /// + /// /// Sends to all seed nodes (except itself) and expect /// reply back. The seed node that replied first /// will be used and joined to. replies received after /// the first one are ignored. - /// - /// Retries if no replies are received within the + /// + /// Retries if no replies are received within the /// . When at least one reply has been received it stops itself after /// an idle . - /// + /// /// The seed nodes can be started in any order, but they will not be "active" until they have been /// able to join another seed node (seed1.) - /// + /// /// They will retry the join procedure. - /// + /// /// Possible scenarios: /// 1. seed2 started, but doesn't get any ack from seed1 or seed3 /// 2. seed3 started, doesn't get any ack from seed1 or seed3 (seed2 doesn't reply) @@ -2589,7 +2595,7 @@ internal sealed class JoinSeedNodeProcess : UntypedActor /// TBD /// /// This exception is thrown when either the list of specified is empty - /// or the first listed seed is a reference to the 's address. 
+ /// or the first listed seed is a reference to the IUntypedActorContext.System's address. /// public JoinSeedNodeProcess(ImmutableList
seeds) { @@ -2659,12 +2665,12 @@ private void Done(object message) /// /// INTERNAL API - /// + /// /// Used only for the first seed node. /// Sends to all seed nodes except itself. - /// If other seed nodes are not part of the cluster yet they will reply with + /// If other seed nodes are not part of the cluster yet they will reply with /// or not respond at all and then the - /// first seed node will join itself to initialize the new cluster. When the first seed + /// first seed node will join itself to initialize the new cluster. When the first seed /// node is restarted, and some other seed node is part of the cluster it will reply with /// and then the first seed node will /// join that other seed node to join the existing cluster. @@ -2685,7 +2691,7 @@ internal sealed class FirstSeedNodeProcess : UntypedActor /// TBD /// /// This exception is thrown when either the number of specified is less than or equal to 1 - /// or the first listed seed is a reference to the 's address. + /// or the first listed seed is a reference to the IUntypedActorContext.System's address. /// public FirstSeedNodeProcess(ImmutableList
seeds) { @@ -2896,7 +2902,7 @@ public GossipStats Copy(long? receivedGossipCount = null, /// /// INTERNAL API - /// + /// /// The supplied callback will be run once when the current cluster member has the same status. /// internal class OnMemberStatusChangedListener : ReceiveActor diff --git a/src/core/Akka.Cluster/CoordinatedShutdownLeave.cs b/src/core/Akka.Cluster/CoordinatedShutdownLeave.cs index 8a852581384..6fb3534237b 100644 --- a/src/core/Akka.Cluster/CoordinatedShutdownLeave.cs +++ b/src/core/Akka.Cluster/CoordinatedShutdownLeave.cs @@ -13,7 +13,7 @@ namespace Akka.Cluster { /// /// INTERNAL API - /// + /// /// Used for executing phases for graceful /// behaviors. /// @@ -55,7 +55,13 @@ private void WaitingLeaveCompleted(IActorRef replyTo) { Receive(s => { - if (s.Members.Any(m => m.UniqueAddress.Equals(_cluster.SelfUniqueAddress) + if (s.Members.IsEmpty) + { + // not joined yet + replyTo.Tell(Done.Instance); + Context.Stop(Self); + } + else if (s.Members.Any(m => m.UniqueAddress.Equals(_cluster.SelfUniqueAddress) && (m.Status == MemberStatus.Leaving || m.Status == MemberStatus.Exiting || m.Status == MemberStatus.Down))) diff --git a/src/core/Akka.Cluster/Member.cs b/src/core/Akka.Cluster/Member.cs index 8699e115c8b..423531b8bbe 100644 --- a/src/core/Akka.Cluster/Member.cs +++ b/src/core/Akka.Cluster/Member.cs @@ -18,7 +18,7 @@ namespace Akka.Cluster /// Represents the address, current status, and roles of a cluster member node. ///
/// - /// NOTE: and are solely based on the underlying , + /// NOTE: and are solely based on the underlying , /// not its and roles. /// public class Member : IComparable, IComparable @@ -169,7 +169,7 @@ public Member Copy(MemberStatus status) //TODO: Akka exception? if (!AllowedTransitions[oldStatus].Contains(status)) throw new InvalidOperationException($"Invalid member status transition {Status} -> {status}"); - + return new Member(UniqueAddress, UpNumber, status, Roles); } @@ -305,7 +305,7 @@ public static ImmutableSortedSet PickNextTransition(IEnumerable ///
/// First member instance. /// Second member instance. - /// If a and b are different members, this method will return null. + /// If a and b are different members, this method will return null. /// Otherwise, will return a or b depending on which one is a valid transition of the other. /// If neither are a valid transition, we return null public static Member PickNextTransition(Member a, Member b) @@ -386,8 +386,8 @@ public static Member HighestPriorityOf(Member m1, Member m2) internal static readonly ImmutableDictionary> AllowedTransitions = new Dictionary> { - {MemberStatus.Joining, ImmutableHashSet.Create(MemberStatus.WeaklyUp, MemberStatus.Up, MemberStatus.Down, MemberStatus.Removed)}, - {MemberStatus.WeaklyUp, ImmutableHashSet.Create(MemberStatus.Up, MemberStatus.Down, MemberStatus.Removed) }, + {MemberStatus.Joining, ImmutableHashSet.Create(MemberStatus.WeaklyUp, MemberStatus.Up,MemberStatus.Leaving, MemberStatus.Down, MemberStatus.Removed)}, + {MemberStatus.WeaklyUp, ImmutableHashSet.Create(MemberStatus.Up, MemberStatus.Leaving, MemberStatus.Down, MemberStatus.Removed) }, {MemberStatus.Up, ImmutableHashSet.Create(MemberStatus.Leaving, MemberStatus.Down, MemberStatus.Removed)}, {MemberStatus.Leaving, ImmutableHashSet.Create(MemberStatus.Exiting, MemberStatus.Down, MemberStatus.Removed)}, {MemberStatus.Down, ImmutableHashSet.Create(MemberStatus.Removed)}, @@ -399,7 +399,7 @@ public static Member HighestPriorityOf(Member m1, Member m2) /// /// Defines the current status of a cluster member node - /// + /// /// Can be one of: Joining, Up, WeaklyUp, Leaving, Exiting and Down. 
/// public enum MemberStatus diff --git a/src/core/Akka.MultiNodeTestRunner.Shared.Tests/TestRunCoordinatorSpec.cs b/src/core/Akka.MultiNodeTestRunner.Shared.Tests/TestRunCoordinatorSpec.cs index 23a85311c89..240814dd67d 100644 --- a/src/core/Akka.MultiNodeTestRunner.Shared.Tests/TestRunCoordinatorSpec.cs +++ b/src/core/Akka.MultiNodeTestRunner.Shared.Tests/TestRunCoordinatorSpec.cs @@ -43,7 +43,7 @@ public void TestRunCoordinator_should_start_and_route_messages_to_SpecRunCoordin testRunCoordinator.Tell(new EndTestRun(), TestActor); var testRunData = ExpectMsg(); - Assert.Equal(1, testRunData.Specs.Count()); + Assert.Single(testRunData.Specs); var specMessages = new SortedSet(); foreach (var spec in testRunData.Specs) diff --git a/src/core/Akka.Persistence.TCK.Tests/MemorySnapshotStoreSpec.cs b/src/core/Akka.Persistence.TCK.Tests/MemorySnapshotStoreSpec.cs index fcab9ae6698..efcb78c77f4 100644 --- a/src/core/Akka.Persistence.TCK.Tests/MemorySnapshotStoreSpec.cs +++ b/src/core/Akka.Persistence.TCK.Tests/MemorySnapshotStoreSpec.cs @@ -5,8 +5,17 @@ // //----------------------------------------------------------------------- +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Akka.Actor; using Akka.Configuration; +using Akka.Persistence.Snapshot; using Akka.Persistence.TCK.Snapshot; +using Akka.Util; +using FluentAssertions; +using Xunit; using Xunit.Abstractions; namespace Akka.Persistence.TCK.Tests @@ -23,5 +32,129 @@ public MemorySnapshotStoreSpec(ITestOutputHelper output) Initialize(); } + [Fact] + public void MemorySnapshotStore_is_threadsafe() + { + EventFilter.Error().Expect(0, () => + { + // get a few persistent actors going in parallel + var sa1 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa1", TestActor))); + var sa2 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa2", TestActor))); + var sa3 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa3", TestActor))); + + Watch(sa1); + 
Watch(sa2); + Watch(sa3); + + var writeCount = 3000; + + var sas = new List + { + sa1, + sa2, + sa3 + }; + + // hammer with write requests + Parallel.ForEach(Enumerable.Range(0, writeCount), i => + { + sas[ThreadLocalRandom.Current.Next(0, 3)].Tell(i); + }); + + // spawn more persistence actors while writes are still going(?) + var sa4 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa4", TestActor))); + var sa5 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa5", TestActor))); + var sa6 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa6", TestActor))); + + ReceiveN(writeCount).All(x => x is SaveSnapshotSuccess).Should().BeTrue("Expected all snapshot store saves to be successful, but some were not"); + + // kill the existing snapshot stores, then re-create them to force recovery while the new snapshot actors + // are still being written to. + + sa1.Tell(PoisonPill.Instance); + ExpectTerminated(sa1); + + sa2.Tell(PoisonPill.Instance); + ExpectTerminated(sa2); + + sa3.Tell(PoisonPill.Instance); + ExpectTerminated(sa3); + + var sas2 = new List + { + sa4, + sa5, + sa6 + }; + + // hammer with write requests + Parallel.ForEach(Enumerable.Range(0, writeCount), i => + { + sas2[ThreadLocalRandom.Current.Next(0, 3)].Tell(i); + }); + + // recreate the previous entities + var sa12 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa1", TestActor))); + var sa22 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa2", TestActor))); + var sa32 = Sys.ActorOf(Props.Create(() => new SnapshotActor("sa3", TestActor))); + + var sas12 = new List + { + sa12, + sa22, + sa32 + }; + + // hammer other entities + Parallel.ForEach(Enumerable.Range(0, writeCount), i => + { + sas12[ThreadLocalRandom.Current.Next(0, 3)].Tell(i); + }); + + ReceiveN(writeCount*2).All(x => x is SaveSnapshotSuccess).Should().BeTrue("Expected all snapshot store saves to be successful, but some were not"); + }); + + } + + public class SnapshotActor : ReceivePersistentActor + { + private int _count = 0; 
+ private readonly IActorRef _reporter; + + public SnapshotActor(string persistenceId, IActorRef reporter) + { + PersistenceId = persistenceId; + _reporter = reporter; + + Recover(offer => + { + if (offer.Snapshot is int i) + { + _count = i; + } + }); + + Command(i => + { + _count += i; + + Persist(i, i1 => + { + SaveSnapshot(i); + }); + }); + + Command(success => reporter.Tell(success)); + + Command(failure => reporter.Tell(failure)); + + Command(str => str.Equals("get"), s => + { + Sender.Tell(_count); + }); + } + + public override string PersistenceId { get; } + } } } diff --git a/src/core/Akka.Persistence.Tests/OptimizedRecoverySpec.cs b/src/core/Akka.Persistence.Tests/OptimizedRecoverySpec.cs new file mode 100644 index 00000000000..09e78f3e443 --- /dev/null +++ b/src/core/Akka.Persistence.Tests/OptimizedRecoverySpec.cs @@ -0,0 +1,145 @@ +using System; +using Akka.Actor; +using Xunit; + +namespace Akka.Persistence.Tests +{ + public class OptimizedRecoverySpec : PersistenceSpec + { + + #region Internal test classes + + internal class TakeSnapshot + { } + + internal class Save + { + public string Data { get; } + + public Save(string data) + { + Data = data; + } + } + + internal class Saved : IEquatable + { + public string Data { get; } + public long SeqNr { get; } + + public Saved(string data, long seqNr) + { + Data = data; + SeqNr = seqNr; + } + + public bool Equals(Saved other) + { + return other != null && Data.Equals(other.Data) && SeqNr.Equals(other.SeqNr); + } + } + + internal sealed class PersistFromRecoveryCompleted + { + public static PersistFromRecoveryCompleted Instance { get; } = new PersistFromRecoveryCompleted(); + private PersistFromRecoveryCompleted() { } + } + + internal class TestPersistentActor : NamedPersistentActor + { + private readonly Recovery _recovery; + private readonly IActorRef _probe; + private string state = string.Empty; + + public override Recovery Recovery => _recovery; + + public TestPersistentActor(string name, Recovery 
recovery, IActorRef probe) + : base(name) + { + _recovery = recovery; + _probe = probe; + } + + protected override bool ReceiveCommand(object message) + { + switch (message) + { + case TakeSnapshot _: + SaveSnapshot(state); + return true; + case SaveSnapshotSuccess s: + _probe.Tell(s); + return true; + case GetState _: + _probe.Tell(state); + return true; + case Save s: + Persist(new Saved(s.Data, LastSequenceNr + 1), evt => + { + state = state + evt.Data; + _probe.Tell(evt); + }); + return true; + } + return false; + } + + protected override bool ReceiveRecover(object message) + { + switch (message) + { + case SnapshotOffer s: + _probe.Tell(s); + state = s.Snapshot.ToString(); + return true; + case Saved evt: + state = state + evt.Data; + _probe.Tell(evt); + return true; + case RecoveryCompleted _: + if (IsRecovering) throw new InvalidOperationException($"Expected !IsRecovering in RecoveryCompleted"); + _probe.Tell(RecoveryCompleted.Instance); + // Verify that persist can be used here + Persist(PersistFromRecoveryCompleted.Instance, _ => _probe.Tell(PersistFromRecoveryCompleted.Instance)); + return true; + } + return false; + } + } + + #endregion + + private string persistenceId = "p1"; + + public OptimizedRecoverySpec() : base(Configuration("OptimizedRecoverySpec")) + { + var pref = ActorOf(Props.Create(() => new TestPersistentActor(persistenceId, Recovery.Default, TestActor))); + ExpectMsg(); + ExpectMsg(); + pref.Tell(new Save("a")); + pref.Tell(new Save("b")); + ExpectMsg(new Saved("a", 2)); + ExpectMsg(new Saved("b", 3)); + pref.Tell(new TakeSnapshot()); + ExpectMsg(); + pref.Tell(new Save("c")); + ExpectMsg(new Saved("c", 4)); + pref.Tell(GetState.Instance); + ExpectMsg("abc"); + } + + [Fact] + public void Persistence_must_get_RecoveryCompleted_but_no_SnapshotOffer_and_events_when_Recovery_none() + { + var pref = ActorOf(Props.Create(() => new TestPersistentActor(persistenceId, Recovery.None, TestActor))); + ExpectMsg(); + ExpectMsg(); + + // and highest 
sequence number should be used, PersistFromRecoveryCompleted is 5 + pref.Tell(new Save("d")); + ExpectMsg(new Saved("d", 6)); + pref.Tell(GetState.Instance); + ExpectMsg("d"); + } + } +} diff --git a/src/core/Akka.Persistence.Tests/PersistentActorSpec.Actors.cs b/src/core/Akka.Persistence.Tests/PersistentActorSpec.Actors.cs index 784c4b8a4aa..48f7c553696 100644 --- a/src/core/Akka.Persistence.Tests/PersistentActorSpec.Actors.cs +++ b/src/core/Akka.Persistence.Tests/PersistentActorSpec.Actors.cs @@ -1121,6 +1121,45 @@ protected override bool ReceiveCommand(object message) return false; } } + + internal class PersistInRecovery : ExamplePersistentActor + { + public PersistInRecovery(string name) + : base(name) + { } + + protected override bool ReceiveRecover(object message) + { + switch (message) + { + case Evt evt when evt.Data?.ToString() == "invalid": + Persist(new Evt("invalid-recovery"), UpdateStateHandler); + return true; + case Evt evt: + return UpdateState(evt); + case RecoveryCompleted _: + PersistAsync(new Evt("rc-1"), UpdateStateHandler); + Persist(new Evt("rc-2"), UpdateStateHandler); + PersistAsync(new Evt("rc-3"), UpdateStateHandler); + return true; + } + + return false; + } + + protected override bool ReceiveCommand(object message) + { + if (CommonBehavior(message)) return true; + + if (message is Cmd cmd) + { + Persist(new Evt(cmd.Data), UpdateStateHandler); + return true; + } + + return false; + } + } } } diff --git a/src/core/Akka.Persistence.Tests/PersistentActorSpec.cs b/src/core/Akka.Persistence.Tests/PersistentActorSpec.cs index 8bc7ea38bc4..d05abc5ae15 100644 --- a/src/core/Akka.Persistence.Tests/PersistentActorSpec.cs +++ b/src/core/Akka.Persistence.Tests/PersistentActorSpec.cs @@ -611,5 +611,21 @@ public void PersistentActor_should_brecover_the_message_which_caused_the_restart persistentActor.Tell("boom"); ExpectMsg("failed with TestException while processing boom"); } + + [Fact] + public void 
PersistentActor_should_be_able_to_persist_events_that_happen_during_recovery() + { + var persistentActor = ActorOf(Props.Create(() => new PersistInRecovery(Name))); + persistentActor.Tell(GetState.Instance); + ExpectMsgInOrder("a-1", "a-2", "rc-1", "rc-2"); + persistentActor.Tell(GetState.Instance); + ExpectMsgInOrder("a-1", "a-2", "rc-1", "rc-2", "rc-3"); + persistentActor.Tell(new Cmd("invalid")); + persistentActor.Tell(GetState.Instance); + ExpectMsgInOrder("a-1", "a-2", "rc-1", "rc-2", "rc-3", "invalid"); + Watch(persistentActor); + persistentActor.Tell("boom"); + ExpectTerminated(persistentActor); + } } } diff --git a/src/core/Akka.Persistence.Tests/SnapshotSpec.cs b/src/core/Akka.Persistence.Tests/SnapshotSpec.cs index 6f8bcf67472..9ce043fd0d6 100644 --- a/src/core/Akka.Persistence.Tests/SnapshotSpec.cs +++ b/src/core/Akka.Persistence.Tests/SnapshotSpec.cs @@ -111,6 +111,51 @@ public override Recovery Recovery } } + internal class IgnoringSnapshotTestPersistentActor : NamedPersistentActor + { + private readonly Recovery _recovery; + private readonly IActorRef _probe; + + public IgnoringSnapshotTestPersistentActor(string name, Recovery recovery, IActorRef probe) + : base(name) + { + _probe = probe; + _recovery = recovery; + } + + protected override bool ReceiveRecover(object message) + { + switch(message) + { + case string payload: + _probe.Tell($"{payload}-{LastSequenceNr}"); + return true; + case object other when !(other is SnapshotOffer): + _probe.Tell(other); + return true; + } + return false; + } + + protected override bool ReceiveCommand(object message) + { + switch(message) + { + case string payload when payload == "done": + _probe.Tell("done"); + return true; + case string payload: + Persist(payload, _ => _probe.Tell($"{payload}-{LastSequenceNr}")); + return true; + default: + _probe.Tell(message); + return true; + } + } + + public override Recovery Recovery => _recovery; + } + public sealed class DeleteOne { public DeleteOne(SnapshotMetadata 
metadata) @@ -185,6 +230,21 @@ public void PersistentActor_should_recover_state_starting_from_the_most_recent_s ExpectMsg(); } + [Fact] + public void PersistentActor_should_recover_completely_if_snapshot_is_not_handled() + { + var pref = ActorOf(() => new IgnoringSnapshotTestPersistentActor(Name, new Recovery(), TestActor)); + var persistenceId = Name; + + ExpectMsg("a-1"); + ExpectMsg("b-2"); + ExpectMsg("c-3"); + ExpectMsg("d-4"); + ExpectMsg("e-5"); + ExpectMsg("f-6"); + ExpectMsg(); + } + [Fact] public void PersistentActor_should_recover_state_starting_from_the_most_recent_snapshot_matching_an_upper_sequence_number_bound() { diff --git a/src/core/Akka.Persistence/Eventsourced.Recovery.cs b/src/core/Akka.Persistence/Eventsourced.Recovery.cs index e2252eeac47..cd0e94f6e08 100644 --- a/src/core/Akka.Persistence/Eventsourced.Recovery.cs +++ b/src/core/Akka.Persistence/Eventsourced.Recovery.cs @@ -17,7 +17,7 @@ namespace Akka.Persistence internal class EventsourcedState { - public EventsourcedState(string name, bool isRecoveryRunning, StateReceive stateReceive) + public EventsourcedState(string name, Func isRecoveryRunning, StateReceive stateReceive) { Name = name; IsRecoveryRunning = isRecoveryRunning; @@ -25,9 +25,7 @@ public EventsourcedState(string name, bool isRecoveryRunning, StateReceive state } public string Name { get; } - - public bool IsRecoveryRunning { get; } - + public Func IsRecoveryRunning { get; } public StateReceive StateReceive { get; } public override string ToString() => Name; @@ -47,7 +45,7 @@ public abstract partial class Eventsourced /// private EventsourcedState WaitingRecoveryPermit(Recovery recovery) { - return new EventsourcedState("waiting for recovery permit", true, (receive, message) => + return new EventsourcedState("waiting for recovery permit", () => true, (receive, message) => { if (message is RecoveryPermitGranted) StartRecovery(recovery); @@ -59,7 +57,7 @@ private EventsourcedState WaitingRecoveryPermit(Recovery recovery) /// /// 
Processes a loaded snapshot, if any. A loaded snapshot is offered with a /// message to the actor's . Then initiates a message replay, either starting - /// from the loaded snapshot or from scratch, and switches to state. + /// from the loaded snapshot or from scratch, and switches to state. /// All incoming messages are stashed. /// /// Maximum number of messages to replay @@ -81,7 +79,7 @@ private EventsourcedState RecoveryStarted(long maxReplays) else return false; }; - return new EventsourcedState("recovery started - replay max: " + maxReplays, true, (receive, message) => + return new EventsourcedState("recovery started - replay max: " + maxReplays, () => true, (receive, message) => { try { @@ -90,10 +88,14 @@ private EventsourcedState RecoveryStarted(long maxReplays) timeoutCancelable.Cancel(); if (res.Snapshot != null) { - var snapshot = res.Snapshot; - LastSequenceNr = snapshot.Metadata.SequenceNr; - // Since we are recovering we can ignore the receive behavior from the stack - base.AroundReceive(recoveryBehavior, new SnapshotOffer(snapshot.Metadata, snapshot.Snapshot)); + var offer = new SnapshotOffer(res.Snapshot.Metadata, res.Snapshot.Snapshot); + var seqNr = LastSequenceNr; + LastSequenceNr = res.Snapshot.Metadata.SequenceNr; + if (!base.AroundReceive(recoveryBehavior, offer)) + { + LastSequenceNr = seqNr; + Unhandled(offer); + } } ChangeState(Recovering(recoveryBehavior, timeout)); @@ -139,11 +141,6 @@ private EventsourcedState RecoveryStarted(long maxReplays) }); } - private void ReturnRecoveryPermit() - { - Extension.RecoveryPermitter().Tell(Akka.Persistence.ReturnRecoveryPermit.Instance, Self); - } - /// /// Processes replayed messages, if any. The actor's is invoked with the replayed events. 
/// @@ -159,85 +156,89 @@ private EventsourcedState Recovering(Receive recoveryBehavior, TimeSpan timeout) // protect against event replay stalling forever because of journal overloaded and such var timeoutCancelable = Context.System.Scheduler.ScheduleTellRepeatedlyCancelable(timeout, timeout, Self, new RecoveryTick(false), Self); var eventSeenInInterval = false; + var recoveryRunning = true; - return new EventsourcedState("replay started", true, (receive, message) => + return new EventsourcedState("replay started", () => recoveryRunning, (receive, message) => { try { - if (message is ReplayedMessage) + switch (message) { - var m = (ReplayedMessage)message; - try - { - eventSeenInInterval = true; - UpdateLastSequenceNr(m.Persistent); - base.AroundReceive(recoveryBehavior, m.Persistent); - } - catch (Exception cause) - { + case ReplayedMessage replayed: + try + { + eventSeenInInterval = true; + UpdateLastSequenceNr(replayed.Persistent); + base.AroundReceive(recoveryBehavior, replayed.Persistent); + } + catch (Exception cause) + { + timeoutCancelable.Cancel(); + try + { + OnRecoveryFailure(cause, replayed.Persistent.Payload); + } + finally + { + Context.Stop(Self); + } + ReturnRecoveryPermit(); + } + break; + case RecoverySuccess success: timeoutCancelable.Cancel(); + OnReplaySuccess(); + _sequenceNr = success.HighestSequenceNr; + LastSequenceNr = success.HighestSequenceNr; + recoveryRunning = false; try { - OnRecoveryFailure(cause, m.Persistent.Payload); + base.AroundReceive(recoveryBehavior, RecoveryCompleted.Instance); } finally { - Context.Stop(Self); + // in finally in case exception and resume strategy + TransitToProcessingState(); } ReturnRecoveryPermit(); - } - } - else if (message is RecoverySuccess) - { - var m = (RecoverySuccess)message; - timeoutCancelable.Cancel(); - OnReplaySuccess(); - ChangeState(ProcessingCommands()); - _sequenceNr = m.HighestSequenceNr; - LastSequenceNr = m.HighestSequenceNr; - _internalStash.UnstashAll(); - - 
base.AroundReceive(recoveryBehavior, RecoveryCompleted.Instance); - ReturnRecoveryPermit(); - } - else if (message is ReplayMessagesFailure) - { - var failure = (ReplayMessagesFailure)message; - timeoutCancelable.Cancel(); - try - { - OnRecoveryFailure(failure.Cause, message: null); - } - finally - { - Context.Stop(Self); - } - ReturnRecoveryPermit(); - } - else if (message is RecoveryTick tick && !tick.Snapshot) - { - if (!eventSeenInInterval) - { + break; + case ReplayMessagesFailure failure: timeoutCancelable.Cancel(); try { - OnRecoveryFailure( - new RecoveryTimedOutException( - $"Recovery timed out, didn't get event within {timeout.TotalSeconds}s, highest sequence number seen {_sequenceNr}.")); + OnRecoveryFailure(failure.Cause); } finally { Context.Stop(Self); } ReturnRecoveryPermit(); - } - else - { - eventSeenInInterval = false; - } + break; + case RecoveryTick tick when !tick.Snapshot: + if (!eventSeenInInterval) + { + timeoutCancelable.Cancel(); + try + { + OnRecoveryFailure( + new RecoveryTimedOutException( + $"Recovery timed out, didn't get event within {timeout.TotalSeconds}s, highest sequence number seen {LastSequenceNr}.")); + } + finally + { + Context.Stop(Self); + } + ReturnRecoveryPermit(); + } + else + { + eventSeenInInterval = false; + } + break; + default: + StashInternally(message); + break; } - else - StashInternally(message); } catch (Exception) { @@ -247,13 +248,31 @@ private EventsourcedState Recovering(Receive recoveryBehavior, TimeSpan timeout) }); } + private void ReturnRecoveryPermit() => + Extension.RecoveryPermitter().Tell(Akka.Persistence.ReturnRecoveryPermit.Instance, Self); + + private void TransitToProcessingState() + { + if (_eventBatch.Count > 0) FlushBatch(); + + if (_pendingStashingPersistInvocations > 0) + { + ChangeState(PersistingEvents()); + } + else + { + ChangeState(ProcessingCommands()); + _internalStash.UnstashAll(); + } + } + /// - /// If event persistence is pending after processing a command, event persistence + 
/// Command processing state. If event persistence is pending after processing a command, event persistence /// is triggered and the state changes to . /// private EventsourcedState ProcessingCommands() { - return new EventsourcedState("processing commands", false, (receive, message) => + return new EventsourcedState("processing commands", () => false, (receive, message) => { var handled = CommonProcessingStateBehavior(message, err => { @@ -305,12 +324,12 @@ private void FlushBatch() } /// - /// Remains until pending events are persisted and then changes state to . + /// Event persisting state. Remains until pending events are persisted and then changes state to . /// Only events to be persisted are processed. All other messages are stashed internally. /// private EventsourcedState PersistingEvents() { - return new EventsourcedState("persisting events", false, (receive, message) => + return new EventsourcedState("persisting events", () => false, (receive, message) => { var handled = CommonProcessingStateBehavior(message, err => { diff --git a/src/core/Akka.Persistence/Eventsourced.cs b/src/core/Akka.Persistence/Eventsourced.cs index 72d2767f2a2..8033f49de11 100644 --- a/src/core/Akka.Persistence/Eventsourced.cs +++ b/src/core/Akka.Persistence/Eventsourced.cs @@ -19,7 +19,6 @@ namespace Akka.Persistence public interface IPendingHandlerInvocation { object Event { get; } - Action Handler { get; } } @@ -180,7 +179,7 @@ public IStash Stash /// /// Returns true if this persistent entity is currently recovering. /// - public bool IsRecovering => _currentState?.IsRecoveryRunning ?? true; + public bool IsRecovering => _currentState?.IsRecoveryRunning() ?? true; /// /// Returns true if this persistent entity has successfully finished recovery. 
@@ -297,6 +296,11 @@ public void DeleteSnapshots(SnapshotSelectionCriteria criteria) /// TBD public void Persist(TEvent @event, Action handler) { + if (IsRecovering) + { + throw new InvalidOperationException("Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later."); + } + _pendingStashingPersistInvocations++; _pendingInvocations.AddLast(new StashingHandlerInvocation(@event, o => handler((TEvent)o))); _eventBatch.AddFirst(new AtomicWrite(new Persistent(@event, persistenceId: PersistenceId, @@ -313,17 +317,23 @@ public void Persist(TEvent @event, Action handler) /// TBD public void PersistAll(IEnumerable events, Action handler) { + if (IsRecovering) + { + throw new InvalidOperationException("Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later."); + } + if (events == null) return; - Action inv = o => handler((TEvent)o); + void Inv(object o) => handler((TEvent)o); var persistents = ImmutableList.Empty.ToBuilder(); foreach (var @event in events) { _pendingStashingPersistInvocations++; - _pendingInvocations.AddLast(new StashingHandlerInvocation(@event, inv)); + _pendingInvocations.AddLast(new StashingHandlerInvocation(@event, Inv)); persistents.Add(new Persistent(@event, persistenceId: PersistenceId, sequenceNr: NextSequenceNr(), writerGuid: _writerGuid, sender: Sender)); } + if (persistents.Count > 0) _eventBatch.AddFirst(new AtomicWrite(persistents.ToImmutable())); } @@ -358,6 +368,11 @@ public void PersistAll(IEnumerable events, Action handle /// TBD public void PersistAsync(TEvent @event, Action handler) { + if (IsRecovering) + { + throw new InvalidOperationException("Cannot persist during replay. 
Events can be persisted when receiving RecoveryCompleted or later."); + } + _pendingInvocations.AddLast(new AsyncHandlerInvocation(@event, o => handler((TEvent)o))); _eventBatch.AddFirst(new AtomicWrite(new Persistent(@event, persistenceId: PersistenceId, sequenceNr: NextSequenceNr(), writerGuid: _writerGuid, sender: Sender))); @@ -373,13 +388,20 @@ public void PersistAsync(TEvent @event, Action handler) /// TBD public void PersistAllAsync(IEnumerable events, Action handler) { - Action inv = o => handler((TEvent)o); - foreach (var @event in events) + if (IsRecovering) + { + throw new InvalidOperationException("Cannot persist during replay. Events can be persisted when receiving RecoveryCompleted or later."); + } + + void Inv(object o) => handler((TEvent)o); + var enumerable = events as TEvent[] ?? events.ToArray(); + foreach (var @event in enumerable) { - _pendingInvocations.AddLast(new AsyncHandlerInvocation(@event, inv)); + _pendingInvocations.AddLast(new AsyncHandlerInvocation(@event, Inv)); } - _eventBatch.AddFirst(new AtomicWrite(events.Select(e => new Persistent(e, persistenceId: PersistenceId, - sequenceNr: NextSequenceNr(), writerGuid: _writerGuid, sender: Sender)) + + _eventBatch.AddFirst(new AtomicWrite(enumerable.Select(e => new Persistent(e, persistenceId: PersistenceId, + sequenceNr: NextSequenceNr(), writerGuid: _writerGuid, sender: Sender)) .ToImmutableList())); } @@ -406,6 +428,11 @@ public void PersistAllAsync(IEnumerable events, Action h /// TBD public void DeferAsync(TEvent evt, Action handler) { + if (IsRecovering) + { + throw new InvalidOperationException("Cannot persist during replay. 
Events can be persisted when receiving RecoveryCompleted or later."); + } + if (_pendingInvocations.Count == 0) { handler(evt); @@ -443,7 +470,7 @@ protected virtual void OnRecoveryFailure(Exception reason, object message = null if (message != null) { Log.Error(reason, "Exception in ReceiveRecover when replaying event type [{0}] with sequence number [{1}] for persistenceId [{2}]", - message.GetType(), LastSequenceNr, PersistenceId); + message.GetType(), LastSequenceNr, PersistenceId); } else { @@ -562,7 +589,7 @@ private void StashInternally(object currentMessage) { _internalStash.Stash(); } - catch(StashOverflowException e) + catch (StashOverflowException e) { var strategy = InternalStashOverflowStrategy; if (strategy is DiscardToDeadLetterStrategy) diff --git a/src/core/Akka.Persistence/Journal/AsyncWriteJournal.cs b/src/core/Akka.Persistence/Journal/AsyncWriteJournal.cs index 2409abba872..a8fca9fd23b 100644 --- a/src/core/Akka.Persistence/Journal/AsyncWriteJournal.cs +++ b/src/core/Akka.Persistence/Journal/AsyncWriteJournal.cs @@ -252,7 +252,7 @@ private void HandleReplayMessages(ReplayMessages message) { var highSequenceNr = t.Result; var toSequenceNr = Math.Min(message.ToSequenceNr, highSequenceNr); - if (highSequenceNr == 0L || message.FromSequenceNr > toSequenceNr) + if (toSequenceNr <= 0L || message.FromSequenceNr > toSequenceNr) { promise.SetResult(highSequenceNr); } diff --git a/src/core/Akka.Persistence/PersistentActor.cs b/src/core/Akka.Persistence/PersistentActor.cs index 6aa1421e10c..084f00aca8a 100644 --- a/src/core/Akka.Persistence/PersistentActor.cs +++ b/src/core/Akka.Persistence/PersistentActor.cs @@ -55,6 +55,10 @@ public sealed class Recovery /// /// Convenience method for skipping recovery in . + /// + /// It will still retrieve previously highest sequence number so that new events are persisted with + /// higher sequence numbers rather than starting from 1 and assuming that there are no + /// previous event with that sequence number. 
/// public static Recovery None { get; } = new Recovery(SnapshotSelectionCriteria.None, 0); diff --git a/src/core/Akka.Persistence/Snapshot/MemorySnapshotStore.cs b/src/core/Akka.Persistence/Snapshot/MemorySnapshotStore.cs index bb3a14af26c..2ea22db2176 100644 --- a/src/core/Akka.Persistence/Snapshot/MemorySnapshotStore.cs +++ b/src/core/Akka.Persistence/Snapshot/MemorySnapshotStore.cs @@ -9,69 +9,71 @@ using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; +using Akka.Util.Internal; namespace Akka.Persistence.Snapshot { + /// + /// INTERNAL API. + /// + /// In-memory SnapshotStore implementation. + /// public class MemorySnapshotStore : SnapshotStore { - private readonly List _snapshotCollection = new List(); - /// /// This is available to expose/override the snapshots in derived snapshot stores /// - protected virtual List Snapshots { get { return _snapshotCollection; } } + protected virtual List Snapshots { get; } = new List(); protected override Task DeleteAsync(SnapshotMetadata metadata) { - Func pred = x => x.PersistenceId == metadata.PersistenceId && - (metadata.SequenceNr <= 0 || metadata.SequenceNr == long.MaxValue || x.SequenceNr == metadata.SequenceNr) && - (metadata.Timestamp == DateTime.MinValue || metadata.Timestamp == DateTime.MaxValue || x.Timestamp == metadata.Timestamp.Ticks); + bool Pred(SnapshotEntry x) => x.PersistenceId == metadata.PersistenceId && (metadata.SequenceNr <= 0 || metadata.SequenceNr == long.MaxValue || x.SequenceNr == metadata.SequenceNr) + && (metadata.Timestamp == DateTime.MinValue || metadata.Timestamp == DateTime.MaxValue || x.Timestamp == metadata.Timestamp.Ticks); + - return Task.Run(() => - { - var snapshot = Snapshots.FirstOrDefault(pred); - Snapshots.Remove(snapshot); - }); + var snapshot = Snapshots.FirstOrDefault(Pred); + Snapshots.Remove(snapshot); + + return TaskEx.Completed; } protected override Task DeleteAsync(string persistenceId, SnapshotSelectionCriteria criteria) { var filter = 
CreateRangeFilter(persistenceId, criteria); - return Task.Run(() => { Snapshots.RemoveAll(x => filter(x)); }); + Snapshots.RemoveAll(x => filter(x)); + return TaskEx.Completed; } protected override Task LoadAsync(string persistenceId, SnapshotSelectionCriteria criteria) { var filter = CreateRangeFilter(persistenceId, criteria); - return Task.Run(() => - { - var snapshot = Snapshots.Where(filter).OrderByDescending(x => x.SequenceNr).Take(1).Select(x => ToSelectedSnapshot(x)).FirstOrDefault(); - return snapshot; - }); + + var snapshot = Snapshots.Where(filter).OrderByDescending(x => x.SequenceNr).Take(1).Select(x => ToSelectedSnapshot(x)).FirstOrDefault(); + return Task.FromResult(snapshot); } - protected override async Task SaveAsync(SnapshotMetadata metadata, object snapshot) + protected override Task SaveAsync(SnapshotMetadata metadata, object snapshot) { - await Task.Run(() => + + var snapshotEntry = ToSnapshotEntry(metadata, snapshot); + var existingSnapshot = Snapshots.FirstOrDefault(CreateSnapshotIdFilter(snapshotEntry.Id)); + + if (existingSnapshot != null) { - var snapshotEntry = ToSnapshotEntry(metadata, snapshot); - var existingSnapshot = Snapshots.FirstOrDefault(CreateSnapshotIdFilter(snapshotEntry.Id)); - - if (existingSnapshot != null) - { - existingSnapshot.Snapshot = snapshotEntry.Snapshot; - existingSnapshot.Timestamp = snapshotEntry.Timestamp; - } - else - { - Snapshots.Add(snapshotEntry); - } - }); + existingSnapshot.Snapshot = snapshotEntry.Snapshot; + existingSnapshot.Timestamp = snapshotEntry.Timestamp; + } + else + { + Snapshots.Add(snapshotEntry); + } + + return TaskEx.Completed; } - private Func CreateSnapshotIdFilter(string snapshotId) + private static Func CreateSnapshotIdFilter(string snapshotId) { return x => x.Id == snapshotId; } @@ -101,6 +103,11 @@ private static SelectedSnapshot ToSelectedSnapshot(SnapshotEntry entry) } } + /// + /// INTERNAL API. 
+ /// + /// Represents a snapshot stored inside the in-memory + /// public class SnapshotEntry { public string Id { get; set; } diff --git a/src/core/Akka.Persistence/Snapshot/SnapshotStore.cs b/src/core/Akka.Persistence/Snapshot/SnapshotStore.cs index acb094c8cad..d477bd69743 100644 --- a/src/core/Akka.Persistence/Snapshot/SnapshotStore.cs +++ b/src/core/Akka.Persistence/Snapshot/SnapshotStore.cs @@ -56,14 +56,21 @@ private bool ReceiveSnapshotStore(object message) if (message is LoadSnapshot loadSnapshot) { - _breaker.WithCircuitBreaker(() => LoadAsync(loadSnapshot.PersistenceId, loadSnapshot.Criteria.Limit(loadSnapshot.ToSequenceNr))) - .ContinueWith(t => (!t.IsFaulted && !t.IsCanceled) - ? new LoadSnapshotResult(t.Result, loadSnapshot.ToSequenceNr) as ISnapshotResponse - : new LoadSnapshotFailed(t.IsFaulted - ? TryUnwrapException(t.Exception) - : new OperationCanceledException("LoadAsync canceled, possibly due to timing out.")), - _continuationOptions) - .PipeTo(senderPersistentActor); + if (loadSnapshot.Criteria == SnapshotSelectionCriteria.None) + { + senderPersistentActor.Tell(new LoadSnapshotResult(null, loadSnapshot.ToSequenceNr)); + } + else + { + _breaker.WithCircuitBreaker(() => LoadAsync(loadSnapshot.PersistenceId, loadSnapshot.Criteria.Limit(loadSnapshot.ToSequenceNr))) + .ContinueWith(t => (!t.IsFaulted && !t.IsCanceled) + ? new LoadSnapshotResult(t.Result, loadSnapshot.ToSequenceNr) as ISnapshotResponse + : new LoadSnapshotFailed(t.IsFaulted + ? 
TryUnwrapException(t.Exception) + : new OperationCanceledException("LoadAsync canceled, possibly due to timing out.")), + _continuationOptions) + .PipeTo(senderPersistentActor); + } } else if (message is SaveSnapshot saveSnapshot) { diff --git a/src/core/Akka.Remote.Tests/RemoteConfigSpec.cs b/src/core/Akka.Remote.Tests/RemoteConfigSpec.cs index 8f91ea11e0a..436c7b6b397 100644 --- a/src/core/Akka.Remote.Tests/RemoteConfigSpec.cs +++ b/src/core/Akka.Remote.Tests/RemoteConfigSpec.cs @@ -37,7 +37,7 @@ public void Remoting_should_contain_correct_configuration_values_in_ReferenceCon Assert.False(remoteSettings.LogReceive); Assert.False(remoteSettings.LogSend); Assert.False(remoteSettings.UntrustedMode); - Assert.Equal(0, remoteSettings.TrustedSelectionPaths.Count); + Assert.Empty(remoteSettings.TrustedSelectionPaths); Assert.Equal(TimeSpan.FromSeconds(10), remoteSettings.ShutdownTimeout); Assert.Equal(TimeSpan.FromSeconds(2), remoteSettings.FlushWait); Assert.Equal(TimeSpan.FromSeconds(10), remoteSettings.StartupTimeout); @@ -52,7 +52,7 @@ public void Remoting_should_contain_correct_configuration_values_in_ReferenceCon Assert.Equal(TimeSpan.FromDays(5), remoteSettings.QuarantineDuration); Assert.Equal(TimeSpan.FromDays(5), remoteSettings.QuarantineSilentSystemTimeout); Assert.Equal(TimeSpan.FromSeconds(30), remoteSettings.CommandAckTimeout); - Assert.Equal(1, remoteSettings.Transports.Length); + Assert.Single(remoteSettings.Transports); Assert.Equal(typeof(TcpTransport), Type.GetType(remoteSettings.Transports.Head().TransportClass)); Assert.Equal(typeof(PhiAccrualFailureDetector), Type.GetType(remoteSettings.WatchFailureDetectorImplementationClass)); Assert.Equal(TimeSpan.FromSeconds(1), remoteSettings.WatchHeartBeatInterval); @@ -72,7 +72,7 @@ public void Remoting_should_contain_correct_configuration_values_in_ReferenceCon var remoteSettingsAdapters = remoteSettings.Adapters.Select(kv => new KeyValuePair(kv.Key, Type.GetType(kv.Value))); - Assert.Equal(0, 
remoteSettingsAdapters.Except(remoteSettingsAdaptersStandart).Count()); + Assert.Empty(remoteSettingsAdapters.Except(remoteSettingsAdaptersStandart)); remoteSettings.Config.GetString("akka.remote.log-frame-size-exceeding").ShouldBe("off"); } diff --git a/src/core/Akka.Remote.Tests/Transport/GenericTransportSpec.cs b/src/core/Akka.Remote.Tests/Transport/GenericTransportSpec.cs index aff5c8922f7..dc42fb2cff7 100644 --- a/src/core/Akka.Remote.Tests/Transport/GenericTransportSpec.cs +++ b/src/core/Akka.Remote.Tests/Transport/GenericTransportSpec.cs @@ -75,9 +75,7 @@ public void Transport_must_return_an_Address_and_promise_when_listen_is_called() Assert.Equal(addressA, result.Item1); Assert.NotNull(result.Item2); - Assert.True( - registry.LogSnapshot().OfType().Any(x => x.BoundAddress == addressATest) - ); + Assert.Contains(registry.LogSnapshot().OfType(), x => x.BoundAddress == addressATest); } [Fact] @@ -104,9 +102,7 @@ public void Transport_must_associate_successfully_with_another_transport_of_its_ return null; }); - Assert.True( - registry.LogSnapshot().OfType().Any(x => x.LocalAddress == addressATest && x.RemoteAddress == addressBTest) - ); + Assert.Contains(registry.LogSnapshot().OfType(), x => x.LocalAddress == addressATest && x.RemoteAddress == addressBTest); AwaitCondition(() => registry.ExistsAssociation(addressATest, addressBTest)); } @@ -169,9 +165,7 @@ public void Transport_must_successfully_send_PDUs() return null; }); - Assert.True( - registry.LogSnapshot().OfType().Any(x => x.Sender == addressATest && x.Recipient == addressBTest && x.Payload.Equals(pdu)) - ); + Assert.Contains(registry.LogSnapshot().OfType(), x => x.Sender == addressATest && x.Recipient == addressBTest && x.Payload.Equals(pdu)); } [Fact] diff --git a/src/core/Akka.Remote/RemoteWatcher.cs b/src/core/Akka.Remote/RemoteWatcher.cs index 1a5801e290d..8006f5a3f37 100644 --- a/src/core/Akka.Remote/RemoteWatcher.cs +++ b/src/core/Akka.Remote/RemoteWatcher.cs @@ -642,10 +642,13 @@ private void 
ProcessTerminated(IInternalActorRef watchee, bool existenceConfirme if (!addressTerminated) { - foreach (var watcher in Watching[watchee]) + if (Watching.TryGetValue(watchee, out var watchers)) { - // ReSharper disable once ConditionIsAlwaysTrueOrFalse - watcher.SendSystemMessage(new DeathWatchNotification(watchee, existenceConfirmed, addressTerminated)); + foreach (var watcher in watchers) + { + // ReSharper disable once ConditionIsAlwaysTrueOrFalse + watcher.SendSystemMessage(new DeathWatchNotification(watchee, existenceConfirmed, addressTerminated)); + } } } diff --git a/src/core/Akka.Streams.Tests/Dsl/FlowKillSwitchSpec.cs b/src/core/Akka.Streams.Tests/Dsl/FlowKillSwitchSpec.cs index f3c8d145502..aea802db223 100644 --- a/src/core/Akka.Streams.Tests/Dsl/FlowKillSwitchSpec.cs +++ b/src/core/Akka.Streams.Tests/Dsl/FlowKillSwitchSpec.cs @@ -8,6 +8,7 @@ using System; using System.Linq; +using System.Threading; using Akka.Streams.Dsl; using Akka.Streams.TestKit; using Akka.Streams.TestKit.Tests; @@ -28,6 +29,8 @@ public FlowKillSwitchSpec(ITestOutputHelper helper) : base(helper) Materializer = ActorMaterializer.Create(Sys); } + #region unique kill switch + [Fact] public void A_UniqueKillSwitch_must_stop_a_stream_if_requested() { @@ -123,6 +126,9 @@ public void A_UniqueKillSwitch_must_ignore_completion_after_already_completed() downstream.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); } + #endregion + + #region shared kill switch [Fact] public void A_SharedKillSwitch_must_stop_a_stream_if_requested() @@ -491,5 +497,236 @@ public void A_SharedKillSwitch_must_use_its_name_on_the_flows_it_hands_out() killSwitch.Flow().ToString().Should().Be("Flow(KillSwitch(MySwitchName))"); }, Materializer); } + + #endregion + + #region cancellable kill switch + + [Fact] + public void A_CancellationToken_flow_must_stop_a_stream_if_requested() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + + var t = this.SourceProbe() + 
.Via(cancel.Token.AsFlow(cancelGracefully: true)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + var upstream = t.Item1; + var downstream = t.Item2; + + downstream.Request(1); + upstream.SendNext(1); + downstream.ExpectNext(1); + + cancel.Cancel(); + upstream.ExpectCancellation(); + downstream.ExpectComplete(); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_must_fail_a_stream_if_requested() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + + var t = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: false)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + var upstream = t.Item1; + var downstream = t.Item2; + + downstream.Request(1); + upstream.SendNext(1); + downstream.ExpectNext(1); + + cancel.Cancel(); + upstream.ExpectCancellation(); + downstream.ExpectError().Should().BeOfType(); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_must_pass_through_all_elements_unmodified() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + var task = Source.From(Enumerable.Range(1, 100)) + .Via(cancel.Token.AsFlow()) + .RunWith(Sink.Seq(), Materializer); + task.Wait(TimeSpan.FromSeconds(3)).Should().BeTrue(); + task.Result.ShouldAllBeEquivalentTo(Enumerable.Range(1, 100)); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_must_provide_a_flow_that_if_materialized_multiple_times_with_multiple_types_stops_all_streams_if_requested() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + + var t1 = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: true)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + var t2 = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: true)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + + var upstream1 = t1.Item1; + var downstream1 = 
t1.Item2; + var upstream2 = t2.Item1; + var downstream2 = t2.Item2; + + downstream1.Request(1); + upstream1.SendNext(1); + downstream1.ExpectNext(1); + + downstream2.Request(2); + upstream2.SendNext("A").SendNext("B"); + downstream2.ExpectNext("A", "B"); + + cancel.Cancel(); + + upstream1.ExpectCancellation(); + upstream2.ExpectCancellation(); + downstream1.ExpectComplete(); + downstream2.ExpectComplete(); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_must_provide_a_flow_that_if_materialized_multiple_times_with_multiple_types_fails_all_streams_if_requested() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + + var t1 = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: false)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + var t2 = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: false)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + + var upstream1 = t1.Item1; + var downstream1 = t1.Item2; + var upstream2 = t2.Item1; + var downstream2 = t2.Item2; + + downstream1.Request(1); + upstream1.SendNext(1); + downstream1.ExpectNext(1); + + downstream2.Request(2); + upstream2.SendNext("A").SendNext("B"); + downstream2.ExpectNext("A", "B"); + + cancel.Cancel(); + upstream1.ExpectCancellation(); + upstream2.ExpectCancellation(); + + downstream1.ExpectError().Should().BeOfType(); + downstream2.ExpectError().Should().BeOfType(); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_must_ignore_subsequent_aborts_and_shutdowns_after_shutdown() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + + var t = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: true)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + var upstream = t.Item1; + var downstream = t.Item2; + + downstream.Request(1); + upstream.SendNext(1); + downstream.ExpectNext(1); + + 
cancel.Cancel(); + upstream.ExpectCancellation(); + downstream.ExpectComplete(); + + cancel.Cancel(); + upstream.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); + downstream.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); + + cancel.Cancel(); + upstream.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); + downstream.ExpectNoMsg(TimeSpan.FromMilliseconds(100)); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_must_complete_immediately_flows_materialized_after_switch_shutdown() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + cancel.Cancel(); + + var t = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: true)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + var upstream = t.Item1; + var downstream = t.Item2; + + upstream.ExpectCancellation(); + downstream.ExpectSubscriptionAndComplete(); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_must_fail_immediately_flows_materialized_after_switch_failure() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + cancel.Cancel(); + + var t = this.SourceProbe() + .Via(cancel.Token.AsFlow(cancelGracefully: false)) + .ToMaterialized(this.SinkProbe(), Keep.Both) + .Run(Materializer); + var upstream = t.Item1; + var downstream = t.Item2; + + upstream.ExpectCancellation(); + downstream.ExpectSubscriptionAndError().Should().BeOfType(); + }, Materializer); + } + + [Fact] + public void A_CancellationToken_flow_should_not_cause_problems_if_switch_is_shutdown_after_flow_completed_normally() + { + this.AssertAllStagesStopped(() => + { + var cancel = new CancellationTokenSource(); + var task = Source.From(Enumerable.Range(1, 10)) + .Via(cancel.Token.AsFlow(cancelGracefully: true)) + .RunWith(Sink.Seq(), Materializer); + task.Wait(TimeSpan.FromSeconds(3)).Should().BeTrue(); + task.Result.ShouldAllBeEquivalentTo(Enumerable.Range(1, 10)); + cancel.Cancel(); + }, Materializer); + } + + 
#endregion } } diff --git a/src/core/Akka.Streams.Tests/Dsl/IntervalBasedRateLimiterSpec.cs b/src/core/Akka.Streams.Tests/Dsl/IntervalBasedRateLimiterSpec.cs new file mode 100644 index 00000000000..bb5c0f2fe44 --- /dev/null +++ b/src/core/Akka.Streams.Tests/Dsl/IntervalBasedRateLimiterSpec.cs @@ -0,0 +1,137 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. +// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using System.Linq; +using System.Collections.Generic; +using Akka.Streams.Dsl; +using Akka.Streams.TestKit; +using FluentAssertions; +using Xunit; + +namespace Akka.Streams.Tests.Dsl +{ + public class IntervalBasedRateLimiterSpec : Akka.TestKit.Xunit2.TestKit + { + private readonly Source _infiniteSource = Source.From(Enumerable.Range(1, int.MaxValue - 1)); + + [Fact] + public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_frequency_is_low_1_element_per_500ms() + { + TestCase(source: _infiniteSource, + numOfElements: 6, + maxBatchSize: 1, + minInterval: TimeSpan.FromMilliseconds(500)); + } + + [Fact] + public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_frequency_is_medium_10_elements_per_100ms() + { + TestCase(source: _infiniteSource, + numOfElements: 300, + maxBatchSize: 10, + minInterval: TimeSpan.FromMilliseconds(100)); + } + + [Fact] + public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_frequency_is_moderate_20_elements_per_100ms() + { + TestCase(source: _infiniteSource, + numOfElements: 600, + maxBatchSize: 20, + minInterval: TimeSpan.FromMilliseconds(100)); + } + + [Fact] + public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_frequency_is_moderate_200_elements_per_1000ms() + { + TestCase(source: _infiniteSource, + numOfElements: 600, + maxBatchSize: 200, + minInterval: TimeSpan.FromMilliseconds(1000)); + } + + [Fact] + 
public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_frequency_is_high_200_elements_per_100ms() + { + TestCase(source: _infiniteSource, + numOfElements: 6000, + maxBatchSize: 200, + minInterval: TimeSpan.FromMilliseconds(100)); + } + + [Fact] + public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_frequency_is_high_2_000_elements_per_1000ms() + { + TestCase(source: _infiniteSource, + numOfElements: 6000, + maxBatchSize: 2000, + minInterval: TimeSpan.FromMilliseconds(1000)); + } + + [Fact] + public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_frequency_is_very_high_50_000_elements_per_1000ms() + { + TestCase(source: _infiniteSource, + numOfElements: 150000, + maxBatchSize: 50000, + minInterval: TimeSpan.FromMilliseconds(1000)); + } + + [Fact] + public void IntervalBasedRateLimiter_should_limit_rate_of_messages_when_source_is_slow() + { + var slowInfiniteSource = _infiniteSource.Throttle(1, TimeSpan.FromMilliseconds(300), 1, ThrottleMode.Shaping); + + TestCase(source: slowInfiniteSource, + numOfElements: 10, + maxBatchSize: 1, + minInterval: TimeSpan.FromMilliseconds(100)); + } + + private void TestCase(Source source, + int numOfElements, + int maxBatchSize, + TimeSpan minInterval) + { + var flow = source + .Take(numOfElements) + .Via(IntervalBasedRateLimiter.Create(minInterval, maxBatchSize)) + .Select(batch => Tuple.Create(DateTime.Now.Ticks, batch)) + .RunWith(this.SinkProbe>>(), Sys.Materializer()); + + var timestamps = new List(); + var batches = new List>(); + + void CollectTimestampsAndBatches() + { + flow.Request(1); + var e = flow.ExpectEvent(); + + if (e is TestSubscriber.OnNext>> onNext) + { + timestamps.Add(onNext.Element.Item1); + batches.Add(onNext.Element.Item2); + + CollectTimestampsAndBatches(); + } + } + + CollectTimestampsAndBatches(); + + var intervals = timestamps + .Take(timestamps.Count - 1) + .Zip(timestamps.Skip(1), (first, second) => TimeSpan.FromTicks(second - first)); + + foreach 
(var interval in intervals) + interval.Should().BeGreaterOrEqualTo(minInterval); + + batches.SelectMany(x => x).ShouldBeEquivalentTo(Enumerable.Range(1, numOfElements), o => o.WithStrictOrdering()); + batches.Count.Should().BeOneOf(numOfElements / maxBatchSize, numOfElements / maxBatchSize + 1); + } + } +} diff --git a/src/core/Akka.Streams.Tests/Dsl/KeepAliveConcatSpec.cs b/src/core/Akka.Streams.Tests/Dsl/KeepAliveConcatSpec.cs new file mode 100644 index 00000000000..cc8e91bd013 --- /dev/null +++ b/src/core/Akka.Streams.Tests/Dsl/KeepAliveConcatSpec.cs @@ -0,0 +1,159 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. +// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using System.Collections.Generic; +using System.Linq; +using Akka.Streams.Dsl; +using Akka.Streams.TestKit; +using Akka.Streams.TestKit.Tests; +using FluentAssertions; +using Xunit; + +namespace Akka.Streams.Tests.Dsl +{ + public class KeepAliveConcatSpec : Akka.TestKit.Xunit2.TestKit + { + private readonly Source, NotUsed> _sampleSource = Source.From(Enumerable.Range(1, 10).Grouped(3)); + + private IEnumerable> Expand(IEnumerable lst) + { + return lst.Select(x => new[] { x }); + } + + [Fact] + public void KeepAliveConcat_should_not_emit_additional_elements_if_upstream_is_fast_enough() + { + var t = _sampleSource + .Via(new KeepAliveConcat>(5, TimeSpan.FromSeconds(1), Expand)) + .Grouped(1000) + .RunWith(Sink.First>>(), Sys.Materializer()); + + t.AwaitResult() + .SelectMany(x => x) + .ShouldBeEquivalentTo(Enumerable.Range(1, 10), o => o.WithStrictOrdering()); + } + + [Fact] + public void KeepAliveConcat_should_emit_elements_periodically_after_silent_periods() + { + var sourceWithIdleGap = Source.From(Enumerable.Range(1, 5).Grouped(3)) + .Concat + ( + Source.From(Enumerable.Range(6, 5).Grouped(3)).InitialDelay(TimeSpan.FromSeconds(2)) + ); + 
+ var t = sourceWithIdleGap + .Via(new KeepAliveConcat>(5, TimeSpan.FromSeconds(0.6), Expand)) + .Grouped(1000) + .RunWith(Sink.First>>(), Sys.Materializer()); + + t.AwaitResult() + .SelectMany(x => x) + .ShouldBeEquivalentTo(Enumerable.Range(1, 10), o => o.WithStrictOrdering()); + } + + [Fact] + public void KeepAliveConcat_should_immediately_pull_upstream() + { + var upstream = this.CreatePublisherProbe>(); + var downstream = this.CreateSubscriberProbe>(); + + Source.FromPublisher(upstream) + .Via(new KeepAliveConcat>(2, TimeSpan.FromSeconds(1), Expand)) + .RunWith(Sink.FromSubscriber(downstream), Sys.Materializer()); + + downstream.Request(1); + + upstream.SendNext(new[] { 1 }); + downstream.ExpectNext().ShouldBeEquivalentTo(new[] { 1 }); + + upstream.SendComplete(); + downstream.ExpectComplete(); + } + + [Fact] + public void KeepAliveConcat_should_immediately_pull_upstream_after_busy_period() + { + var upstream = this.CreatePublisherProbe>(); + var downstream = this.CreateSubscriberProbe>(); + + _sampleSource.Concat(Source.FromPublisher(upstream)) + .Via(new KeepAliveConcat>(2, TimeSpan.FromSeconds(1), Expand)) + .RunWith(Sink.FromSubscriber(downstream), Sys.Materializer()); + + downstream.Request(10); + + var actual = downstream.ExpectNextN(6); + var expected = Enumerable.Range(1, 3).Grouped(1).Concat(Enumerable.Range(4, 7).Grouped(3)); + actual.ShouldBeEquivalentTo(expected, o => o.WithStrictOrdering()); + + downstream.Request(1); + + upstream.SendNext(new[] { 1 }); + downstream.ExpectNext().ShouldBeEquivalentTo(new[] { 1 }); + + upstream.SendComplete(); + downstream.ExpectComplete(); + } + + [Fact] + public void KeepAliveConcat_should_work_if_timer_fires_before_initial_request_after_busy_period() + { + var upstream = this.CreatePublisherProbe>(); + var downstream = this.CreateSubscriberProbe>(); + + _sampleSource.Concat(Source.FromPublisher(upstream)) + .Via(new KeepAliveConcat>(2, TimeSpan.FromSeconds(1), Expand)) + .RunWith(Sink.FromSubscriber(downstream), 
Sys.Materializer()); + + downstream.Request(10); + + var actual = downstream.ExpectNextN(6); + var expected = Enumerable.Range(1, 3).Grouped(1).Concat(Enumerable.Range(4, 7).Grouped(3)); + actual.ShouldBeEquivalentTo(expected, o => o.WithStrictOrdering()); + + downstream.ExpectNoMsg(TimeSpan.FromSeconds(1.5)); + downstream.Request(1); + + upstream.SendComplete(); + downstream.ExpectComplete(); + } + + [Fact] + public void KeepAliveConcat_should_emit_buffered_elements_when_upstream_completed() + { + var upstream = this.CreatePublisherProbe(); + var downstream = this.CreateSubscriberProbe(); + + Source.FromPublisher(upstream) + .Via(new KeepAliveConcat(5, TimeSpan.FromSeconds(60), x => new[] { x })) + .RunWith(Sink.FromSubscriber(downstream), Sys.Materializer()); + + upstream.SendNext(1); + upstream.SendNext(2); + upstream.SendComplete(); + + downstream.Request(2); + downstream.ExpectNextN(2).ShouldBeEquivalentTo(new[] { 1, 2 }, o => o.WithStrictOrdering()); + + downstream.Request(1); + downstream.ExpectComplete(); + } + } + + public static class EnumerableExtensions + { + public static IEnumerable> Grouped(this IEnumerable enumerable, int n) + { + return enumerable + .Select((x, i) => new { X = x, I = i }) + .GroupBy(g => g.I / n) + .Select(g => g.Select(p => p.X)); + } + } +} \ No newline at end of file diff --git a/src/core/Akka.Streams.Tests/Dsl/PagedSourceSpec.cs b/src/core/Akka.Streams.Tests/Dsl/PagedSourceSpec.cs new file mode 100644 index 00000000000..40757ed7ba5 --- /dev/null +++ b/src/core/Akka.Streams.Tests/Dsl/PagedSourceSpec.cs @@ -0,0 +1,141 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. 
+// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Akka.Streams.Dsl; +using Akka.Streams.TestKit.Tests; +using Akka.Streams.Util; +using FluentAssertions; +using Xunit; + +namespace Akka.Streams.Tests.Dsl +{ + public class PagedSourceSpec + { + public class MultiplesOfTwo : Akka.TestKit.Xunit2.TestKit + { + private class MultiplesOfTwoPage + { + private readonly int? _size; + private const int _itemsPerPage = 2; + + public MultiplesOfTwoPage(int? size = null) + { + _size = size; + } + + public Task> Page(int key) + { + var indices = Enumerable.Range(key * _itemsPerPage, _itemsPerPage); + var filteredIndices = _size.HasValue ? indices.Where(x => x < _size.Value) : indices; + + return Task.FromResult(new PagedSource.Page(filteredIndices.Select(x => x * 2), new Option(key + 1))); + } + } + + [Fact] + public void PagedSource_should_return_the_items_in_the_proper_order() + { + var source = PagedSource.Create(0, new MultiplesOfTwoPage().Page); + var t = source.Take(3).RunWith(Sink.Seq(), Sys.Materializer()); + + t.AwaitResult().ShouldBeEquivalentTo(new[] { 0, 2, 4 }, o => o.WithStrictOrdering()); + } + + [Fact] + public void PagedSource_should_return_not_more_items_then_available() + { + var source = PagedSource.Create(0, new MultiplesOfTwoPage(4).Page); + var t = source.Take(10).RunWith(Sink.Seq(), Sys.Materializer()); + + t.AwaitResult().Should().HaveCount(4); + } + } + + public class IndexedStringPages : Akka.TestKit.Xunit2.TestKit + { + private readonly Source _source = PagedSource.Create + ( + 1, + i => Task.FromResult(new PagedSource.Page(Page(i), new Option(i + 1))) + ); + + private static IEnumerable Page(int key) + { + if (key == 1) + return new[] { "a", "b", "c" }; + + if (key == 2) + return new[] { "d", "e" }; + + return null; + } + + [Fact] + public void 
PagedSource_should_return_the_items_in_the_proper_order() + { + var t = _source.Take(4).RunWith(Sink.Seq(), Sys.Materializer()); + + t.AwaitResult().ShouldBeEquivalentTo(new[] { "a", "b", "c", "d" }, o => o.WithStrictOrdering()); + } + + [Fact] + public void PagedSource_should_close_stream_when_received_empty_page() + { + var t = _source.RunWith(Sink.Seq(), Sys.Materializer()); + + t.AwaitResult().ShouldBeEquivalentTo(new[] { "a", "b", "c", "d", "e" }, o => o.WithStrictOrdering()); + } + } + + public class LinkedIntPages : Akka.TestKit.Xunit2.TestKit + { + private readonly Source _source = PagedSource.Create + ( + "first", + key => + { + var t = Page(key); + var items = t.Item1; + var next = t.Item2; + + return Task.FromResult(new PagedSource.Page(items, next == "" ? Option.None : new Option(next))); + } + ); + + private static Tuple Page(string key) + { + if (key == "first") + return Tuple.Create(new[] { 1, 2 }, "second"); + + if (key == "second") + return Tuple.Create(new[] { 3, 4, 5 }, ""); + + return Tuple.Create(new[] { 6 }, ""); + } + + [Fact] + public void PagedSource_should_return_the_items_in_the_proper_order() + { + var t = _source.Take(4).RunWith(Sink.Seq(), Sys.Materializer()); + + t.AwaitResult().ShouldBeEquivalentTo(new[] { 1, 2, 3, 4 }, o => o.WithStrictOrdering()); + } + + [Fact] + public void PagedSource_should_close_stream_when_received_empty_link() + { + var t = _source.RunWith(Sink.Seq(), Sys.Materializer()); + + t.AwaitResult().ShouldBeEquivalentTo(new[] { 1, 2, 3, 4, 5 }, o => o.WithStrictOrdering()); + } + } + } +} diff --git a/src/core/Akka.Streams.Tests/Dsl/SourceSpec.cs b/src/core/Akka.Streams.Tests/Dsl/SourceSpec.cs index 56af56fb43f..cf34e00e34c 100644 --- a/src/core/Akka.Streams.Tests/Dsl/SourceSpec.cs +++ b/src/core/Akka.Streams.Tests/Dsl/SourceSpec.cs @@ -268,6 +268,41 @@ public void Composite_Source_must_combine_from_two_inputs_with_simplified_API() outProbe.ExpectComplete(); } + [Fact] + public async Task 
Composite_Source_must_combine_from_two_inputs_with_CombineMaterialized_and_take_a_materialized_value() + { + var queueSource = Source.Queue(1, OverflowStrategy.DropBuffer); + var intSequenceSource = Source.From(new[] { 1, 2, 3 }); + + var combined1 = Source.CombineMaterialized(queueSource, intSequenceSource, + i => new Concat(i), Keep.Left); // Keep.left (i.e. preserve queueSource's materialized value) + var materialized1 = combined1.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Materializer); + var queue1 = materialized1.Item1; + var sinkProbe1 = materialized1.Item2; + + sinkProbe1.Request(6); + await queue1.OfferAsync(10); + await queue1.OfferAsync(20); + await queue1.OfferAsync(30); + queue1.Complete(); // complete queueSource so that combined1 with `Concat` then pulls elements from intSequenceSource + sinkProbe1.ExpectNextN(new[] { 10, 20, 30, 1, 2, 3 }); + + // queueSource to be the second of combined source + var combined2 = Source.CombineMaterialized(intSequenceSource, queueSource, + i => new Concat(i), Keep.Right); // Keep.right (i.e. 
preserve queueSource's materialized value) + var materialized2 = combined2.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Materializer); + var queue2 = materialized2.Item1; + var sinkProbe2 = materialized2.Item2; + + sinkProbe2.Request(6); + await queue2.OfferAsync(10); + await queue2.OfferAsync(20); + await queue2.OfferAsync(30); + queue2.Complete(); + sinkProbe2.ExpectNextN(new[] { 1, 2, 3 }); //as intSequenceSource is the first in combined source, elements from intSequenceSource come first + sinkProbe2.ExpectNextN(new[] { 10, 20, 30 }); // after intSequenceSource run out elements, queueSource elements come + } + [Fact] public void Repeat_Source_must_repeat_as_long_as_it_takes() { diff --git a/src/core/Akka.Streams.Tests/Dsl/UnfoldFlowSpec.cs b/src/core/Akka.Streams.Tests/Dsl/UnfoldFlowSpec.cs new file mode 100644 index 00000000000..3140440da64 --- /dev/null +++ b/src/core/Akka.Streams.Tests/Dsl/UnfoldFlowSpec.cs @@ -0,0 +1,481 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. 
+// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using Akka.Streams.Dsl; +using Akka.Streams.TestKit; +using Akka.Streams.Util; +using FluentAssertions; +using Xunit; + +namespace Akka.Streams.Tests.Dsl +{ + public class UnfoldFlowSpec + { + private static readonly TimeSpan _timeout = TimeSpan.FromMilliseconds(300); + private static readonly int[] _outputs = new[] + { + 27, 82, 41, 124, 62, 31, 94, 47, 142, 71, 214, 107, 322, 161, 484, 242, 121, 364, 182, 91, 274, 137, + 412, 206, 103, 310, 155, 466, 233, 700, 350, 175, 526, 263, 790, 395, 1186, 593, 1780, 890, 445, 1336, + 668, 334, 167, 502, 251, 754, 377, 1132, 566, 283, 850, 425, 1276, 638, 319, 958, 479, 1438, 719, 2158, + 1079, 3238, 1619, 4858, 2429, 7288, 3644, 1822, 911, 2734, 1367, 4102, 2051, 6154, 3077, 9232, 4616, + 2308, 1154, 577, 1732, 866, 433, 1300, 650, 325, 976, 488, 244, 122, 61, 184, 92, 46, 23, 70, 35, 106, + 53, 160, 80, 40, 20, 10, 5, 16, 8, 4, 2 + }; + + public class WithSimpleFlow : Akka.TestKit.Xunit2.TestKit + { + private readonly Exception _done = new Exception("done"); + private readonly Source, TestPublisher.Probe>>> _source; + + public WithSimpleFlow() + { + var controlledFlow = Flow.FromSinkAndSource(this.SinkProbe(), this.SourceProbe>(), Keep.Both); + _source = SourceGen.UnfoldFlow(1, controlledFlow, _timeout); + } + + [Fact] + public void UnfoldFlow_should_unfold_Collatz_conjecture_with_a_sequence_of_111_elements_with_flow() + { + Tuple Map(int x) + { + if (x == 1) + throw _done; + + if (x % 2 == 0) + return Tuple.Create(x / 2, x); + + return Tuple.Create(x * 3 + 1, x); + }; + + var source = SourceGen.UnfoldFlow(27, + Flow.FromFunction>(Map) + .Recover(ex => + { + if (ex == _done) + return new Option>(Tuple.Create(1, 1)); + + return Option>.None; + }), + _timeout); + + var sink = source.RunWith(this.SinkProbe(), Sys.Materializer()); + + foreach (var output in _outputs) + { + 
sink.Request(1); + sink.ExpectNext(output); + } + + sink.Request(1); + sink.ExpectNext(1); + sink.ExpectComplete(); + } + + [Fact] + public void UnfoldFlow_should_unfold_Collatz_conjecture_with_a_sequence_of_111_elements_with_buffered_flow() + { + Tuple Map(int x) + { + if (x == 1) + throw _done; + + if (x % 2 == 0) + return Tuple.Create(x / 2, x); + + return Tuple.Create(x * 3 + 1, x); + }; + + Source BufferedSource(int buffSize) + { + return + SourceGen.UnfoldFlow(27, + Flow.FromFunction>(Map) + .Recover(ex => + { + if (ex == _done) + return new Option>(Tuple.Create(1, 1)); + + return Option>.None; + }), _timeout) + .Buffer(buffSize, OverflowStrategy.Backpressure); + } + + var sink = BufferedSource(10).RunWith(this.SinkProbe(), Sys.Materializer()); + + sink.Request(_outputs.Length); + foreach (var output in _outputs) + sink.ExpectNext(output); + + sink.Request(1); + sink.ExpectNext(1); + sink.ExpectComplete(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_instantly_when_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + var kill = new Exception("KILL!"); + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + pub.SendError(kill); + snk.ExpectError().Should().Be(kill); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_after_timeout_when_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.ExpectNoMsg(_timeout - TimeSpan.FromMilliseconds(50)); + snk.ExpectError(); + } + + [Fact] + public void 
UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_when_inner_stream_is_canceled_and_pulled_before_completion() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.Request(1); + snk.ExpectNoMsg(_timeout - TimeSpan.FromMilliseconds(50)); + snk.ExpectError(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_when_inner_stream_is_canceled_pulled_before_completion_and_finally_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + var kill = new Exception("KILL!"); + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.Request(1); + pub.SendError(kill); + snk.ExpectError().Should().Be(kill); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_after_3_elements_when_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + var kill = new Exception("KILL!"); + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + snk.Request(1); + sub.RequestNext(1); + pub.SendNext(Tuple.Create(2, 1)); + snk.ExpectNext(1); + snk.Request(1); + sub.RequestNext(2); + pub.SendNext(Tuple.Create(3, 2)); + snk.ExpectNext(2); + snk.Request(1); + sub.RequestNext(3); + pub.SendNext(Tuple.Create(4, 3)); + snk.ExpectNext(3); + pub.SendError(kill); + snk.ExpectError().Should().Be(kill); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_complete_gracefully_instantly_when_stopped() + { + var t = 
_source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + pub.SendComplete(); + snk.ExpectComplete(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_complete_gracefully_after_timeout_when_stopped() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.ExpectNoMsg(_timeout - TimeSpan.FromMilliseconds(50)); + pub.SendComplete(); + snk.ExpectComplete(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_complete_gracefully_after_3_elements_when_stopped() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + snk.Request(1); + sub.RequestNext(1); + pub.SendNext(Tuple.Create(2, 1)); + snk.ExpectNext(1); + snk.Request(1); + sub.RequestNext(2); + pub.SendNext(Tuple.Create(3, 2)); + snk.ExpectNext(2); + snk.Request(1); + sub.RequestNext(3); + pub.SendNext(Tuple.Create(4, 3)); + snk.ExpectNext(3); + pub.SendComplete(); + snk.ExpectComplete(); + } + } + + public class WithFunction : Akka.TestKit.Xunit2.TestKit + { + private readonly Source, TestPublisher.Probe>> _source; + + public WithFunction() + { + var controlledFlow = Flow.FromSinkAndSource(this.SinkProbe(), this.SourceProbe(), Keep.Both); + _source = SourceGen.UnfoldFlowWith(1, controlledFlow, n => new Option>(Tuple.Create(n + 1, n)), _timeout); + } + + [Fact] + public void 
UnfoldFlow_should_unfold_Collatz_conjecture_with_a_sequence_of_111_elements_with_function() + { + Option> Map(int x) + { + if (x == 1) + return Option>.None; + + if (x % 2 == 0) + return new Option>(Tuple.Create(x / 2, x)); + + return new Option>(Tuple.Create(x * 3 + 1, x)); + } + + var source = SourceGen.UnfoldFlowWith(27, Flow.FromFunction(x => x), Map, _timeout); + var sink = source.RunWith(this.SinkProbe(), Sys.Materializer()); + foreach (var output in _outputs) + { + sink.Request(1); + sink.ExpectNext(output); + } + sink.Request(1); + sink.ExpectComplete(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_instantly_when_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + var kill = new Exception("KILL!"); + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + pub.SendError(kill); + snk.ExpectError().Should().Be(kill); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_after_timeout_when_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.ExpectNoMsg(_timeout - TimeSpan.FromMilliseconds(50)); + snk.ExpectError(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_when_inner_stream_is_canceled_and_pulled_before_completion() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.Request(1); + snk.ExpectNoMsg(_timeout - 
TimeSpan.FromMilliseconds(50)); + snk.ExpectError(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_when_inner_stream_is_canceled_pulled_before_completion_and_finally_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + var kill = new Exception("KILL!"); + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.Request(1); + pub.SendError(kill); + snk.ExpectError().Should().Be(kill); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_fail_after_3_elements_when_aborted() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + var kill = new Exception("KILL!"); + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + snk.Request(1); + sub.RequestNext(1); + pub.SendNext(1); + snk.ExpectNext(1); + snk.Request(1); + sub.RequestNext(2); + pub.SendNext(2); + snk.ExpectNext(2); + snk.Request(1); + sub.RequestNext(3); + pub.SendNext(3); + snk.ExpectNext(3); + pub.SendError(kill); + snk.ExpectError().Should().Be(kill); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_complete_gracefully_instantly_when_stopped() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + pub.SendComplete(); + snk.ExpectComplete(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_complete_gracefully_after_timeout_when_stopped() + { + var t = _source.ToMaterialized(this.SinkProbe(), 
Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + sub.Cancel(); + snk.ExpectNoMsg(_timeout - TimeSpan.FromMilliseconds(50)); + pub.SendComplete(); + snk.ExpectComplete(); + } + + [Fact] + public void UnfoldFlow_should_increment_integers_and_handle_KillSwitch_and_complete_gracefully_after_3_elements_when_stopped() + { + var t = _source.ToMaterialized(this.SinkProbe(), Keep.Both).Run(Sys.Materializer()); + + var sub = t.Item1.Item1; + var pub = t.Item1.Item2; + var snk = t.Item2; + + sub.EnsureSubscription(); + pub.EnsureSubscription(); + snk.EnsureSubscription(); + snk.Request(1); + sub.RequestNext(1); + pub.SendNext(1); + snk.ExpectNext(1); + snk.Request(1); + sub.RequestNext(2); + pub.SendNext(2); + snk.ExpectNext(2); + snk.Request(1); + sub.RequestNext(3); + pub.SendNext(3); + snk.ExpectNext(3); + pub.SendComplete(); + snk.ExpectComplete(); + } + } + } +} diff --git a/src/core/Akka.Streams.Tests/IO/FileSourceSpec.cs b/src/core/Akka.Streams.Tests/IO/FileSourceSpec.cs index ca4ea35542d..f3baf225b49 100644 --- a/src/core/Akka.Streams.Tests/IO/FileSourceSpec.cs +++ b/src/core/Akka.Streams.Tests/IO/FileSourceSpec.cs @@ -201,6 +201,31 @@ public void FileSource_should_complete_only_when_all_contents_of_a_file_have_bee }, _materializer); } + [Fact] + public void FileSource_should_open_file_in_shared_mode_for_reading_multiple_times() + { + this.AssertAllStagesStopped(() => + { + var testFile = TestFile(); + var p1 = FileIO.FromFile(testFile).RunWith(Sink.AsPublisher(false), _materializer); + var p2 = FileIO.FromFile(testFile).RunWith(Sink.AsPublisher(false), _materializer); + + var c1 = this.CreateManualSubscriberProbe(); + var c2 = this.CreateManualSubscriberProbe(); + p1.Subscribe(c1); + p2.Subscribe(c2); + var s1 = c1.ExpectSubscription(); + var s2 = c2.ExpectSubscription(); + + s1.Request(5000); + s2.Request(5000); + + 
c1.ExpectNext(); + c2.ExpectNext(); + + }, _materializer); + } + [Fact] public void FileSource_should_onError_with_failure_and_return_a_failed_IOResult_when_trying_to_read_from_file_which_does_not_exist() { diff --git a/src/core/Akka.Streams/Attributes.cs b/src/core/Akka.Streams/Attributes.cs index 8b248c512b5..a265658a277 100644 --- a/src/core/Akka.Streams/Attributes.cs +++ b/src/core/Akka.Streams/Attributes.cs @@ -355,7 +355,7 @@ public static Attributes CreateName(string name) /// Logging a certain operation can be completely disabled by using /// /// Passing in null as any of the arguments sets the level to its default value, which is: - /// for and , and for . + /// for and , and for . /// /// TBD /// TBD diff --git a/src/core/Akka.Streams/Dsl/Internal/InternalFlowOperations.cs b/src/core/Akka.Streams/Dsl/Internal/InternalFlowOperations.cs index 0062183bca1..97bb0210232 100644 --- a/src/core/Akka.Streams/Dsl/Internal/InternalFlowOperations.cs +++ b/src/core/Akka.Streams/Dsl/Internal/InternalFlowOperations.cs @@ -416,12 +416,14 @@ public static IFlow WhereNot(this IFlow flow, Predica /// /// Cancels when returned false or downstream cancels /// - /// + /// + /// /// - /// TBD + /// TBD /// TBD /// TBD /// TBD + /// TBD /// TBD public static IFlow TakeWhile(this IFlow flow, Predicate predicate, bool inclusive) { diff --git a/src/core/Akka.Streams/Dsl/IntervalBasedRateLimiter.cs b/src/core/Akka.Streams/Dsl/IntervalBasedRateLimiter.cs new file mode 100644 index 00000000000..825a3aee827 --- /dev/null +++ b/src/core/Akka.Streams/Dsl/IntervalBasedRateLimiter.cs @@ -0,0 +1,34 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. 
+// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using System.Collections.Generic; +using Akka.Annotations; + +namespace Akka.Streams.Dsl +{ + public static class IntervalBasedRateLimiter + { + /// + /// Specialized type of rate limiter which emits batches of elements (with size limited by the parameter) + /// with a minimum time interval of . + /// + /// Because the next emit is scheduled after we downstream the current batch, the effective throughput, + /// depending on the minimal interval length, may never reach the maximum allowed one. + /// You can minimize these delays by sending bigger batches less often. + /// + /// type of element + /// minimal pause to be kept before downstream the next batch. Should be >= 10 milliseconds. + /// maximum number of elements to send in the single batch + /// + [ApiMayChange] + public static IGraph>, NotUsed> Create(TimeSpan minInterval, int maxBatchSize) + { + return Flow.Create().GroupedWithin(maxBatchSize, minInterval).Via(new DelayFlow>(minInterval)); + } + } +} diff --git a/src/core/Akka.Streams/Dsl/KeepAliveConcat.cs b/src/core/Akka.Streams/Dsl/KeepAliveConcat.cs new file mode 100644 index 00000000000..a65d6b921b4 --- /dev/null +++ b/src/core/Akka.Streams/Dsl/KeepAliveConcat.cs @@ -0,0 +1,136 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. +// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using System.Collections.Generic; +using Akka.Streams.Stage; + +namespace Akka.Streams.Dsl +{ + /// + /// Sends elements from buffer if upstream does not emit for a configured amount of time. In other words, this + /// stage attempts to maintain a base rate of emitted elements towards the downstream using elements from upstream. 
+ /// + /// If upstream emits new elements until the accumulated elements in the buffer exceed the specified minimum size + /// used as the keep alive elements, then the base rate is no longer maintained until we reach another period without + /// elements from upstream. + /// + /// The keep alive period is the keep alive failover size times the interval. + /// + /// Emits when upstream emits an element or if the upstream was idle for the configured period + /// + /// + /// Backpressures when downstream backpressures + /// + /// + /// Completes when upstream completes + /// + /// Cancels when downstream cancels + /// + /// type of element + public class KeepAliveConcat : GraphStage> + { + private readonly int _keepAliveFailoverSize; + private readonly TimeSpan _interval; + private readonly Func> _extrapolate; + + #region Logic + + private sealed class Logic : TimerGraphStageLogic, IInHandler, IOutHandler + { + private readonly KeepAliveConcat _keepAliveConcat; + private readonly Queue _buffer; + + public Logic(KeepAliveConcat keepAliveConcat) : base(keepAliveConcat.Shape) + { + _keepAliveConcat = keepAliveConcat; + _buffer = new Queue(_keepAliveConcat._keepAliveFailoverSize); + + SetHandler(_keepAliveConcat.In, this); + SetHandler(_keepAliveConcat.Out, this); + } + + public void OnPush() + { + var elem = Grab(_keepAliveConcat.In); + if (_buffer.Count < _keepAliveConcat._keepAliveFailoverSize) + { + foreach (var t in _keepAliveConcat._extrapolate(elem)) + _buffer.Enqueue(t); + } + else + _buffer.Enqueue(elem); + + + if (IsAvailable(_keepAliveConcat.Out) && _buffer.Count > 0) + Push(_keepAliveConcat.Out, _buffer.Dequeue()); + else + Pull(_keepAliveConcat.In); + } + + public void OnUpstreamFinish() + { + if (_buffer.Count == 0) + CompleteStage(); + } + + public void OnUpstreamFailure(Exception e) => FailStage(e); + + public void OnPull() + { + if (IsClosed(_keepAliveConcat.In)) + { + if (_buffer.Count == 0) + CompleteStage(); + else + Push(_keepAliveConcat.Out, 
_buffer.Dequeue()); + } + else if (_buffer.Count > _keepAliveConcat._keepAliveFailoverSize) + Push(_keepAliveConcat.Out, _buffer.Dequeue()); + else if (!HasBeenPulled(_keepAliveConcat.In)) + Pull(_keepAliveConcat.In); + } + + public void OnDownstreamFinish() => CompleteStage(); + + protected internal override void OnTimer(object timerKey) + { + if (IsAvailable(_keepAliveConcat.Out) && _buffer.Count > 0) + Push(_keepAliveConcat.Out, _buffer.Dequeue()); + } + + public override void PreStart() + { + ScheduleRepeatedly("KeepAliveConcatTimer", _keepAliveConcat._interval); + Pull(_keepAliveConcat.In); + } + } + + #endregion + + public KeepAliveConcat(int keepAliveFailoverSize, TimeSpan interval, Func> extrapolate) + { + if (keepAliveFailoverSize <= 0) + throw new ArgumentException("The buffer keep alive failover size must be greater than 0.", nameof(keepAliveFailoverSize)); + + _keepAliveFailoverSize = keepAliveFailoverSize; + _interval = interval; + _extrapolate = extrapolate; + + In = new Inlet("KeepAliveConcat.in"); + Out = new Outlet("KeepAliveConcat.out"); + Shape = new FlowShape(In, Out); + } + + protected override GraphStageLogic CreateLogic(Attributes inheritedAttributes) => new Logic(this); + + public override FlowShape Shape { get; } + + public Inlet In { get; } + public Outlet Out { get; } + } +} diff --git a/src/core/Akka.Streams/Dsl/PagedSource.cs b/src/core/Akka.Streams/Dsl/PagedSource.cs new file mode 100644 index 00000000000..50b30fcc977 --- /dev/null +++ b/src/core/Akka.Streams/Dsl/PagedSource.cs @@ -0,0 +1,69 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. 
+// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using Akka.Annotations; +using Akka.Streams.Util; + +namespace Akka.Streams.Dsl +{ + public static class PagedSource + { + /// + /// Page for . + /// + /// type of page items + /// type of page keys + public class Page + { + public IEnumerable Items { get; } + public Option NextKey { get; } + + public Page(IEnumerable items, Option nextKey) + { + Items = items; + NextKey = nextKey; + } + } + + /// + /// Defines a factory for "paged source". + /// + /// "Paged source" is a Source streaming items from a paged API. + /// The paged API is accessed with a page key and returns data. + /// This data contain items and optional information about the key of the next page. + /// + /// + /// type of page items + /// type of page keys + /// key of first page + /// maps page key to Task of page data + [ApiMayChange] + public static Source Create(TKey firstKey, Func>> pageFactory) + { + var pageSource = + Source.UnfoldAsync + ( + new Option(firstKey), + async key => + { + var page = key.HasValue ? await pageFactory(key.Value) : new Page(Enumerable.Empty(), Option.None); + + if (page.Items != null && page.Items.Any()) + return Tuple.Create(page.NextKey, page); + else + return null; + } + ); + + return pageSource.ConcatMany(page => Source.From(page.Items)); + } + } +} diff --git a/src/core/Akka.Streams/Dsl/Source.cs b/src/core/Akka.Streams/Dsl/Source.cs index f394b194de3..e8e1f224137 100644 --- a/src/core/Akka.Streams/Dsl/Source.cs +++ b/src/core/Akka.Streams/Dsl/Source.cs @@ -411,7 +411,7 @@ public static Source FromPublisher(IPublisher publisher) /// /// Start a new from the given function that produces an . /// The produced stream of elements will continue until the enumerator runs empty - /// or fails during evaluation of the method. 
+ /// or fails during evaluation of the IEnumerator<T>.MoveNext method. /// Elements are pulled out of the enumerator in accordance with the demand coming /// from the downstream transformation steps. /// @@ -725,6 +725,29 @@ public static Source Combine(Source first, return new SourceShape(c.Out); })); + /// + /// Combines two sources with fan-in strategy like or and returns with a materialized value. + /// + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + public static Source CombineMaterialized(Source first, Source second, Func, NotUsed>> strategy, Func combineMaterializers) + { + var secondPartiallyCombined = GraphDsl.Create(second, (b, secondShape) => + { + var c = b.Add(strategy(2)); + b.From(secondShape).To(c.In(1)); + return new FlowShape(c.In(0), c.Out); + }); + return first.ViaMaterialized(secondPartiallyCombined, combineMaterializers); + } /// /// Combines the elements of multiple streams into a stream of lists. @@ -768,13 +791,13 @@ public static Source ZipWithN(Func, /// there is no space available in the buffer. /// /// Acknowledgement mechanism is available. - /// returns + /// ISourceQueueWithComplete<T>.OfferAsync returns /// which completes with if element was added to buffer or sent downstream. /// It completes with if element was dropped. /// Can also complete with - when stream failed /// or when downstream is completed. /// - /// The strategy will not complete when buffer is full. + /// The strategy will not complete ISourceQueueWithComplete<T>.OfferAsync when buffer is full. 
/// /// The buffer can be disabled by using of 0 and then received messages will wait /// for downstream demand unless there is another message waiting for downstream demand, in that case diff --git a/src/core/Akka.Streams/Dsl/SourceGen.cs b/src/core/Akka.Streams/Dsl/SourceGen.cs new file mode 100644 index 00000000000..86303217f9f --- /dev/null +++ b/src/core/Akka.Streams/Dsl/SourceGen.cs @@ -0,0 +1,155 @@ +//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. +// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using Akka.Annotations; +using Akka.Streams.Stage; +using Akka.Streams.Util; + +namespace Akka.Streams.Dsl +{ + public static class SourceGen + { + /// + /// EXPERIMENTAL API + /// + /// Create a source that will unfold a value of type by + /// passing it through a flow. The flow should emit a + /// pair of the next state and output elements of type . + /// Source completes when the flow completes. + /// + /// + /// The parameter specifies waiting time after inner + /// flow provided by the user for unfold flow API cancels + /// upstream, to get also the downstream cancelation (as + /// graceful completion or failure which is propagated). + /// If inner flow fails to complete/fail downstream, stage is failed. + /// + /// + /// IMPORTANT CAVEAT: + /// The given flow must not change the number of elements passing through it(i.e.it should output + /// exactly one element for every received element). Ignoring this, will have an unpredicted result, + /// and may result in a deadlock. 
+ /// + /// + /// state type + /// output elements type + /// materialized value type + /// intial state + /// flow, through which value is passed + /// timeout + [ApiMayChange] + public static Source UnfoldFlow(TState seed, IGraph>, TMat> flow, TimeSpan timeout) + { + return UnfoldFlowGraph(new FanOut2UnfoldingStage, TState, TOut>(shape => new UnfoldFlowGraphStageLogic(shape, seed, timeout)), flow); + } + + /// + /// EXPERIMENTAL API + /// + /// Create a source that will unfold a value of type by + /// passing it through a flow. The flow should emit an output + /// value of type , that when fed to the unfolding function, + /// generates a pair of the next state and output elements of type . + /// + /// + /// The parameter specifies waiting time after inner + /// flow provided by the user for unfold flow API cancels + /// upstream, to get also the downstream cancelation(as + /// graceful completion or failure which is propagated). + /// If inner flow fails to complete/fail downstream, stage is failed. + /// + /// + /// IMPORTANT CAVEAT: + /// The given flow must not change the number of elements passing through it(i.e.it should output + /// exactly one element for every received element). Ignoring this, will have an unpredicted result, + /// and may result in a deadlock. 
+ /// + /// + /// output elements type + /// state type + /// flow output value type + /// materialized value type + /// intial state + /// flow through which value is passed + /// unfolding function + /// timeout + [ApiMayChange] + public static Source UnfoldFlowWith(TState seed, IGraph, TMat> flow, Func>> unfoldWith, TimeSpan timeout) + { + return UnfoldFlowGraph(new FanOut2UnfoldingStage(shape => new UnfoldFlowWithGraphStageLogic(shape, seed, unfoldWith, timeout)), flow); + } + + private class UnfoldFlowGraphStageLogic : UnfoldFlowGraphStageLogic, TState, TOut>, IInHandler + { + public UnfoldFlowGraphStageLogic(FanOutShape, TState, TOut> shape, TState seed, TimeSpan timeout) : base(shape, seed, timeout) + { + SetHandler(_nextElem, this); + } + + public void OnPush() + { + var t = Grab(_nextElem); + var s = t.Item1; + var e = t.Item2; + _pending = s; + Push(_output, e); + _pushedToCycle = false; + } + + public void OnUpstreamFinish() => CompleteStage(); + + public void OnUpstreamFailure(Exception e) => FailStage(e); + } + + private class UnfoldFlowWithGraphStageLogic : UnfoldFlowGraphStageLogic, IInHandler + { + private readonly Func>> _unfoldWith; + + public UnfoldFlowWithGraphStageLogic(FanOutShape shape, TState seed, Func>> unfoldWith, TimeSpan timeout) : base(shape, seed, timeout) + { + _unfoldWith = unfoldWith; + + SetHandler(_nextElem, this); + } + + public void OnPush() + { + var elem = Grab(_nextElem); + var unfolded = _unfoldWith(elem); + + if (unfolded.HasValue) + { + var s = unfolded.Value.Item1; + var e = unfolded.Value.Item2; + _pending = s; + Push(_output, e); + _pushedToCycle = false; + } + else + CompleteStage(); + } + + public void OnUpstreamFinish() => CompleteStage(); + + public void OnUpstreamFailure(Exception e) => FailStage(e); + } + + private static Source UnfoldFlowGraph(GraphStage> fanOut2Stage, IGraph, TMat> flow) + { + var graph = GraphDsl.Create(flow, (b, f) => + { + var fo2 = b.Add(fanOut2Stage); + b.From(fo2.Out0).Via(f).To(fo2.In); 
+ + return new SourceShape(fo2.Out1); + }); + + return Source.FromGraph(graph); + } + } +} diff --git a/src/core/Akka.Streams/Dsl/SourceOperations.cs b/src/core/Akka.Streams/Dsl/SourceOperations.cs index bebd0b0e161..3c8ee2e8de6 100644 --- a/src/core/Akka.Streams/Dsl/SourceOperations.cs +++ b/src/core/Akka.Streams/Dsl/SourceOperations.cs @@ -394,6 +394,7 @@ public static Source WhereNot(this Source fl /// TBD /// TBD /// TBD + /// TBD /// TBD public static Source TakeWhile(this Source flow, Predicate predicate, bool inclusive = false) { @@ -2039,6 +2040,24 @@ public static Source Concat(this Source flow return (Source)InternalFlowOperations.Concat(flow, other); } + /// + /// Combines the given to this with fan-in strategy like or and returns with a materialized value. + /// + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + /// TBD + public static Source CombineMaterialized(this Source flow, Source other, Func, NotUsed>> strategy, Func combineMaterializers) + { + return Source.CombineMaterialized(flow, other, strategy, combineMaterializers); + } + /// /// Prepend the given to this , meaning that before elements /// are generated from this , the Source's elements will be produced until it diff --git a/src/core/Akka.Streams/Dsl/SubFlowOperations.cs b/src/core/Akka.Streams/Dsl/SubFlowOperations.cs index 7d2a1744a23..4822251c851 100644 --- a/src/core/Akka.Streams/Dsl/SubFlowOperations.cs +++ b/src/core/Akka.Streams/Dsl/SubFlowOperations.cs @@ -402,8 +402,10 @@ public static SubFlow WhereNot(this Su /// /// TBD /// TBD + /// TBD /// TBD /// TBD + /// TBD /// TBD public static SubFlow TakeWhile(this SubFlow flow, Predicate predicate, bool inclusive = false) { diff --git a/src/core/Akka.Streams/Dsl/UnfoldFlow.cs b/src/core/Akka.Streams/Dsl/UnfoldFlow.cs new file mode 100644 index 00000000000..aeb4d9b89dc --- /dev/null +++ b/src/core/Akka.Streams/Dsl/UnfoldFlow.cs @@ -0,0 +1,95 @@ 
+//----------------------------------------------------------------------- +// +// Copyright (C) 2009-2018 Lightbend Inc. +// Copyright (C) 2013-2018 .NET Foundation +// +//----------------------------------------------------------------------- + +using System; +using Akka.Annotations; +using Akka.Streams.Stage; + +namespace Akka.Streams.Dsl +{ + [InternalApi] + internal abstract class UnfoldFlowGraphStageLogic : GraphStageLogic, IOutHandler + { + private readonly TimeSpan _timeout; + private readonly Outlet _feedback; + protected readonly Outlet _output; + protected readonly Inlet _nextElem; + + protected TState _pending; + protected bool _pushedToCycle; + + protected UnfoldFlowGraphStageLogic(FanOutShape shape, TState seed, TimeSpan timeout) : base(shape) + { + _timeout = timeout; + + _feedback = shape.Out0; + _output = shape.Out1; + _nextElem = shape.In; + + _pending = seed; + _pushedToCycle = false; + + SetHandler(_feedback, this); + + SetHandler(_output, onPull: () => + { + Pull(_nextElem); + if (!_pushedToCycle && IsAvailable(_feedback)) + { + Push(_feedback, _pending); + _pending = default(TState); + _pushedToCycle = true; + } + }); + } + + public void OnPull() + { + if (!_pushedToCycle && IsAvailable(_output)) + { + Push(_feedback, _pending); + _pending = default(TState); + _pushedToCycle = true; + } + } + + public void OnDownstreamFinish() + { + // Do Nothing until `timeout` to try and intercept completion as downstream, + // but cancel stream after timeout if inlet is not closed to prevent deadlock. 
+ Materializer.ScheduleOnce(_timeout, () => + { + var cb = GetAsyncCallback(() => + { + if (!IsClosed(_nextElem)) + FailStage(new InvalidOperationException($"unfoldFlow source's inner flow canceled only upstream, while downstream remain available for {_timeout}")); + }); + cb(); + }); + } + } + + [InternalApi] + internal class FanOut2UnfoldingStage : GraphStage> + { + private readonly Func, UnfoldFlowGraphStageLogic> _generateGraphStageLogic; + + public FanOut2UnfoldingStage(Func, UnfoldFlowGraphStageLogic> generateGraphStageLogic) + { + _generateGraphStageLogic = generateGraphStageLogic; + + Shape = new FanOutShape("unfoldFlow"); + } + + public override FanOutShape Shape { get; } + + protected override GraphStageLogic CreateLogic(Attributes inheritedAttributes) + { + return _generateGraphStageLogic(Shape); + } + } +} diff --git a/src/core/Akka.Streams/Implementation/IO/FilePublisher.cs b/src/core/Akka.Streams/Implementation/IO/FilePublisher.cs index 7d740e50112..0c9e99c3a46 100644 --- a/src/core/Akka.Streams/Implementation/IO/FilePublisher.cs +++ b/src/core/Akka.Streams/Implementation/IO/FilePublisher.cs @@ -107,7 +107,8 @@ protected override void PreStart() { try { - _chan = _f.Open(FileMode.Open, FileAccess.Read); + // Allow opening the same file for reading multiple times + _chan = _f.Open(FileMode.Open, FileAccess.Read, FileShare.Read); if (_startPosition > 0) _chan.Position = _startPosition; } diff --git a/src/core/Akka.Streams/Implementation/IO/TcpStages.cs b/src/core/Akka.Streams/Implementation/IO/TcpStages.cs index fe0330263f9..c6cf0ca7e5c 100644 --- a/src/core/Akka.Streams/Implementation/IO/TcpStages.cs +++ b/src/core/Akka.Streams/Implementation/IO/TcpStages.cs @@ -124,17 +124,19 @@ private void Receive(Tuple args) _listener.Tell(new Tcp.ResumeAccepting(1), StageActorRef); var thisStage = StageActorRef; - _bindingPromise.TrySetResult(new StreamTcp.ServerBinding(bound.LocalAddress, () => + var binding = new StreamTcp.ServerBinding(bound.LocalAddress, () => 
{ // Beware, sender must be explicit since stageActor.ref will be invalid to access after the stage stopped thisStage.Tell(Tcp.Unbind.Instance, thisStage); return _unbindPromise.Task; - })); + }); + + _bindingPromise.NonBlockingTrySetResult(binding); } else if (msg is Tcp.CommandFailed) { var ex = BindFailedException.Instance; - _bindingPromise.TrySetException(ex); + _bindingPromise.NonBlockingTrySetException(ex); _unbindPromise.TrySetResult(NotUsed.Instance); FailStage(ex); } @@ -162,7 +164,7 @@ private void Receive(Tuple args) public override void PostStop() { _unbindPromise.TrySetResult(NotUsed.Instance); - _bindingPromise.TrySetException( + _bindingPromise.NonBlockingTrySetException( new NoSuchElementException("Binding was unbound before it was completely finished")); } } @@ -220,7 +222,7 @@ public ConnectionSourceStage(IActorRef tcpManager, EndPoint endpoint, int backlo /// TBD public override ILogicAndMaterializedValue> CreateLogicAndMaterializedValue(Attributes inheritedAttributes) { - var bindingPromise = new TaskCompletionSource(); + var bindingPromise = TaskEx.NonBlockingTaskCompletionSource(); var logic = new ConnectionSourceStageLogic(Shape, this, bindingPromise); return new LogicAndMaterializedValue>(logic, bindingPromise.Task); } diff --git a/src/core/Akka.Streams/KillSwitch.cs b/src/core/Akka.Streams/KillSwitch.cs index 243a2047464..a6c216b7d3a 100644 --- a/src/core/Akka.Streams/KillSwitch.cs +++ b/src/core/Akka.Streams/KillSwitch.cs @@ -6,6 +6,8 @@ //----------------------------------------------------------------------- using System; +using System.Runtime.CompilerServices; +using System.Threading; using System.Threading.Tasks; using Akka.Streams.Stage; @@ -52,6 +54,98 @@ public static class KillSwitches public static IGraph, UniqueKillSwitch> SingleBidi () => UniqueBidiKillSwitchStage.Instance; + /// + /// Returns a flow, which works like a kill switch stage based on a provided . 
+ /// Since unlike cancellation tokens, kill switches expose ability to finish a stream either gracefully via + /// or abruptly via , this distinction is + /// handled by specifying parameter. + /// + /// + /// Cancellation token used to create a cancellation flow. + /// + /// When set to true, will close stream gracefully via completting the stage. + /// When set to false, will close stream by failing the stage with . + /// + /// + public static IGraph, NotUsed> AsFlow(this CancellationToken cancellationToken, bool cancelGracefully = false) + { + return new CancellableKillSwitchStage(cancellationToken, cancelGracefully); + } + + internal sealed class CancellableKillSwitchStage : GraphStage> + { + #region logic + + private sealed class Logic : InAndOutGraphStageLogic + { + private readonly CancellableKillSwitchStage _stage; + private CancellationTokenRegistration? _registration = null; + + public Logic(CancellableKillSwitchStage stage) + : base(stage.Shape) + { + _stage = stage; + SetHandler(stage.Inlet, this); + SetHandler(stage.Outlet, this); + } + + public override void PreStart() + { + if (_stage._cancellationToken.IsCancellationRequested) + { + if (_stage._cancelGracefully) + OnCancelComplete(); + else + OnCancelFail(); + } + else + { + var onCancel = _stage._cancelGracefully + ? 
GetAsyncCallback(OnCancelComplete) + : GetAsyncCallback(OnCancelFail); + + _registration = _stage._cancellationToken.Register(onCancel); + } + } + + public override void PostStop() + { + _registration?.Dispose(); + base.PostStop(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public override void OnPush() => Push(_stage.Outlet, Grab(_stage.Inlet)); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public override void OnPull() => Pull(_stage.Inlet); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void OnCancelComplete() => CompleteStage(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void OnCancelFail() => FailStage(new OperationCanceledException($"Stage cancelled due to cancellation token request.", _stage._cancellationToken)); + } + + #endregion + + private readonly CancellationToken _cancellationToken; + private readonly bool _cancelGracefully; + + public CancellableKillSwitchStage(CancellationToken cancellationToken, bool cancelGracefully) + { + _cancellationToken = cancellationToken; + _cancelGracefully = cancelGracefully; + Shape = new FlowShape(Inlet, Outlet); + } + + public Inlet Inlet { get; } = new Inlet("cancel.in"); + public Outlet Outlet { get; } = new Outlet("cancel.out"); + + public override FlowShape Shape { get; } + protected override GraphStageLogic CreateLogic(Attributes inheritedAttributes) => new Logic(this); + } + /// /// TBD /// diff --git a/src/core/Akka.Tests/Actor/ActorSystemSpec.cs b/src/core/Akka.Tests/Actor/ActorSystemSpec.cs index 0b4c0ab40ba..472f793da21 100644 --- a/src/core/Akka.Tests/Actor/ActorSystemSpec.cs +++ b/src/core/Akka.Tests/Actor/ActorSystemSpec.cs @@ -242,8 +242,7 @@ public void Reliable_deny_creation_of_actors_while_shutting_down() var nonTerminatedOrNonstartedActors = created.Cast() .Where(actor => !actor.IsTerminated && !(actor.Underlying is UnstartedCell)).ToList(); - Assert.Equal(0, - nonTerminatedOrNonstartedActors.Count); + 
Assert.Empty(nonTerminatedOrNonstartedActors); } #region Extensions tests diff --git a/src/core/Akka.Tests/Actor/ActorSystemTests.cs b/src/core/Akka.Tests/Actor/ActorSystemTests.cs index 691546211ee..1b0549040eb 100644 --- a/src/core/Akka.Tests/Actor/ActorSystemTests.cs +++ b/src/core/Akka.Tests/Actor/ActorSystemTests.cs @@ -34,7 +34,7 @@ public void ActorSystem_ActorOf_adds_a_child_to_Guardian() //assert var children = system.Provider.Guardian.Children; - Assert.True(children.Any(c => c == child)); + Assert.Contains(children, c => c == child); } [Fact] diff --git a/src/core/Akka.Tests/Actor/CoordinatedShutdownSpec.cs b/src/core/Akka.Tests/Actor/CoordinatedShutdownSpec.cs index 1e5f6cdb7be..a75eb1aef6c 100644 --- a/src/core/Akka.Tests/Actor/CoordinatedShutdownSpec.cs +++ b/src/core/Akka.Tests/Actor/CoordinatedShutdownSpec.cs @@ -50,6 +50,13 @@ private List CheckTopologicalSort(Dictionary phases) return result; } + private class CustomReason : CoordinatedShutdown.Reason + { + } + + private static CoordinatedShutdown.Reason customReason = new CustomReason(); + + [Fact] public void CoordinatedShutdown_must_sort_phases_in_topological_order() { @@ -200,7 +207,7 @@ public void CoordinatedShutdown_must_run_ordered_phases() return TaskEx.Completed; }); - co.Run().Wait(RemainingOrDefault); + co.Run(CoordinatedShutdown.UnknownReason.Instance).Wait(RemainingOrDefault); ReceiveN(4).Should().Equal(new object[] { "A", "B", "B", "C" }); } @@ -233,8 +240,9 @@ public void CoordinatedShutdown_must_run_from_given_phase() return TaskEx.Completed; }); - co.Run("b").Wait(RemainingOrDefault); + co.Run(customReason, "b").Wait(RemainingOrDefault); ReceiveN(2).Should().Equal(new object[] { "B", "C" }); + co.ShutdownReason.ShouldBeEquivalentTo(customReason); } [Fact] @@ -252,11 +260,14 @@ public void CoordinatedShutdown_must_only_run_once() return TaskEx.Completed; }); - co.Run().Wait(RemainingOrDefault); + co.ShutdownReason.Should().BeNull(); + co.Run(customReason).Wait(RemainingOrDefault); 
+ co.ShutdownReason.ShouldBeEquivalentTo(customReason); ExpectMsg("A"); - co.Run().Wait(RemainingOrDefault); + co.Run(CoordinatedShutdown.UnknownReason.Instance).Wait(RemainingOrDefault); TestActor.Tell("done"); ExpectMsg("done"); // no additional A + co.ShutdownReason.ShouldBeEquivalentTo(customReason); } [Fact] @@ -295,7 +306,7 @@ public void CoordinatedShutdown_must_continue_after_timeout_or_failure() return TaskEx.Completed; }); - co.Run().Wait(RemainingOrDefault); + co.Run(CoordinatedShutdown.UnknownReason.Instance).Wait(RemainingOrDefault); ExpectMsg("A"); ExpectMsg("A"); ExpectMsg("B"); @@ -324,7 +335,7 @@ public void CoordinatedShutdown_must_abort_if_recover_is_off() return TaskEx.Completed; }); - var result = co.Run(); + var result = co.Run(CoordinatedShutdown.UnknownReason.Instance); ExpectMsg("B"); Intercept(() => { @@ -362,7 +373,7 @@ public void CoordinatedShutdown_must_be_possible_to_add_tasks_in_later_phase_fro return TaskEx.Completed; }); - co.Run().Wait(RemainingOrDefault); + co.Run(CoordinatedShutdown.UnknownReason.Instance).Wait(RemainingOrDefault); ExpectMsg("A"); ExpectMsg("B"); } @@ -394,10 +405,11 @@ public void CoordinatedShutdown_must_be_possible_to_parse_phases_from_config() [Fact] public void CoordinatedShutdown_must_terminate_ActorSystem() { - var shutdownSystem = CoordinatedShutdown.Get(Sys).Run(); + var shutdownSystem = CoordinatedShutdown.Get(Sys).Run(customReason); shutdownSystem.Wait(TimeSpan.FromSeconds(10)).Should().BeTrue(); Sys.WhenTerminated.IsCompleted.Should().BeTrue(); + CoordinatedShutdown.Get(Sys).ShutdownReason.ShouldBeEquivalentTo(customReason); } } } diff --git a/src/core/Akka.Tests/Actor/ReceiveTimeoutSpec.cs b/src/core/Akka.Tests/Actor/ReceiveTimeoutSpec.cs index 5c10aa732d2..9bcf92fe266 100644 --- a/src/core/Akka.Tests/Actor/ReceiveTimeoutSpec.cs +++ b/src/core/Akka.Tests/Actor/ReceiveTimeoutSpec.cs @@ -8,6 +8,7 @@ using System; using System.Threading; using Akka.Actor; +using Akka.Actor.Dsl; using Akka.Event; using 
Akka.TestKit; using Akka.Util.Internal; @@ -183,6 +184,27 @@ public void An_actor_with_receive_timeout_must_get_timeout_while_receiving_NotIn Sys.Stop(timeoutActor); } + [Fact] + public void An_actor_with_receive_timeout_must_get_timeout_while_receiving_only_NotInfluenceReceiveTimeout_messages() + { + var timeoutLatch = new TestLatch(2); + + Action actor = d => + { + d.OnPreStart = c => c.SetReceiveTimeout(TimeSpan.FromSeconds(1)); + d.Receive((o, c) => + { + c.Self.Tell(new TransparentTick()); + timeoutLatch.CountDown(); + }); + d.Receive((_, __) => { }); + }; + var timeoutActor = Sys.ActorOf(Props.Create(() => new Act(actor))); + + timeoutLatch.Ready(TestKitSettings.DefaultTimeout); + Sys.Stop(timeoutActor); + } + [Fact] public void Issue469_An_actor_with_receive_timeout_must_cancel_receive_timeout_when_terminated() { diff --git a/src/core/Akka.Tests/MatchHandler/MatchExpressionBuilder_CreateArgumentValuesArray_Tests.cs b/src/core/Akka.Tests/MatchHandler/MatchExpressionBuilder_CreateArgumentValuesArray_Tests.cs index f1d1b7fae0c..6e382f750e5 100644 --- a/src/core/Akka.Tests/MatchHandler/MatchExpressionBuilder_CreateArgumentValuesArray_Tests.cs +++ b/src/core/Akka.Tests/MatchHandler/MatchExpressionBuilder_CreateArgumentValuesArray_Tests.cs @@ -26,7 +26,7 @@ public void Given_no_arguments_When_creating_Then_empty_array_is_returned() var result = builder.CreateArgumentValuesArray(emptyArguments); Assert.NotNull(result); - Assert.Equal(0, result.Length); + Assert.Empty(result); } @@ -39,7 +39,7 @@ public void Given_one_argument_When_creating_Then_array_with_the_value_is_return var result = builder.CreateArgumentValuesArray(arguments); Assert.NotNull(result); - Assert.Equal(1, result.Length); + Assert.Single(result); Assert.Same(argument.Value, result[0]); } diff --git a/src/core/Akka/Actor/ActorCell.Children.cs b/src/core/Akka/Actor/ActorCell.Children.cs index 6682b11ecf1..a99a04e4424 100644 --- a/src/core/Akka/Actor/ActorCell.Children.cs +++ 
b/src/core/Akka/Actor/ActorCell.Children.cs @@ -7,6 +7,7 @@ using System; using System.Collections.Generic; +using System.Runtime.CompilerServices; using System.Threading; using Akka.Actor.Internal; using Akka.Serialization; @@ -25,7 +26,7 @@ public partial class ActorCell /// public IChildrenContainer ChildrenContainer { - get { return _childrenContainerDoNotCallMeDirectly; } + get { return _childrenContainerDoNotCallMeDirectly; } } private IReadOnlyCollection Children @@ -86,7 +87,7 @@ private IActorRef ActorOf(Props props, string name, bool isAsync, bool isSystemS return MakeChild(props, name, isAsync, isSystemService); } - + private string GetRandomActorName() { var id = Interlocked.Increment(ref _nextRandomNameDoNotCallMeDirectly); @@ -105,46 +106,22 @@ public void Stop(IActorRef child) var repointableActorRef = child as RepointableActorRef; if (repointableActorRef == null || repointableActorRef.IsStarted) { - UpdateChildrenRefs(c => c.ShallDie(child)); + while (true) + { + var oldChildren = ChildrenContainer; + var newChildren = oldChildren.ShallDie(child); + + if (SwapChildrenRefs(oldChildren, newChildren)) break; + } } } ((IInternalActorRef)child).Stop(); } - /// - /// Swaps out the children container, by calling to produce the new container. - /// If the underlying container has been updated while was called, - /// will be called again with the new container. This will repeat until the - /// container can be swapped out, or until contains false. - /// The returned tuple should contain: - /// Item1: true if the container should be updated; false to not update and return Item3 - /// Item2: The new container (will only be used if Item1=true) - /// Item3: The return value - /// - /// A function that returns a new container. - /// The third value of the tuple that returned. 
- private TReturn UpdateChildrenRefs(Func> updater) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private bool SwapChildrenRefs(IChildrenContainer oldChildren, IChildrenContainer newChildren) { - while (true) - { - var current = ChildrenContainer; - var t = updater(current); - if (!t.Item1) return t.Item3; - if (Interlocked.CompareExchange(ref _childrenContainerDoNotCallMeDirectly, t.Item2, current) == current) return t.Item3; - } - } - - /// - /// Swaps out the children container, by calling to produce the new container. - /// If the underlying container has been updated while was called, - /// will be called again with the new container. This will repeat until the - /// container can be swapped out. - /// - /// A function that returns a new container. - /// The new updated - private IChildrenContainer UpdateChildrenRefs(Func updater) - { - return InterlockedSpin.Swap(ref _childrenContainerDoNotCallMeDirectly, updater); + return ReferenceEquals(Interlocked.CompareExchange(ref _childrenContainerDoNotCallMeDirectly, newChildren, oldChildren), oldChildren); } /// @@ -153,7 +130,13 @@ private IChildrenContainer UpdateChildrenRefs(FuncTBD public void ReserveChild(string name) { - UpdateChildrenRefs(c => c.Reserve(name)); + while (true) + { + var oldChildren = ChildrenContainer; + var newChildren = oldChildren.Reserve(name); + + if (SwapChildrenRefs(oldChildren, newChildren)) break; + } } /// @@ -162,8 +145,13 @@ public void ReserveChild(string name) /// TBD protected void UnreserveChild(string name) { - UpdateChildrenRefs(c => c.Unreserve(name)); + while (true) + { + var oldChildren = ChildrenContainer; + var newChildren = oldChildren.Unreserve(name); + if (SwapChildrenRefs(oldChildren, newChildren)) break; + } } /// @@ -173,29 +161,25 @@ protected void UnreserveChild(string name) /// TBD public ChildRestartStats InitChild(IInternalActorRef actor) { - return UpdateChildrenRefs(cc => + var name = actor.Path.Name; + while (true) { - IChildStats stats; - var name 
= actor.Path.Name; - if (cc.TryGetByName(name, out stats)) + var cc = ChildrenContainer; + if (cc.TryGetByName(name, out var old)) { - var old = stats as ChildRestartStats; - if (old != null) - { - //Do not update. Return old - return new Tuple(false, cc, old); - } - if (stats is ChildNameReserved) + switch (old) { - var crs = new ChildRestartStats(actor); - var updatedContainer = cc.Add(name, crs); - //Update (if it's still cc) and return the new crs - return new Tuple(true, updatedContainer, crs); + case ChildRestartStats restartStats: + return restartStats; + case ChildNameReserved _: + var crs = new ChildRestartStats(actor); + if (SwapChildrenRefs(cc, cc.Add(name, crs))) + return crs; + break; } } - //Do not update. Return null - return new Tuple(false, cc, null); - }); + else return null; + } } /// @@ -205,16 +189,15 @@ public ChildRestartStats InitChild(IInternalActorRef actor) /// TBD protected bool SetChildrenTerminationReason(SuspendReason reason) { - return UpdateChildrenRefs(cc => + while (true) { - var c = cc as TerminatingChildrenContainer; - if (c != null) - //The arguments says: Update; with a new reason; and return true - return new Tuple(true, c.CreateCopyWithReason(reason), true); - - //The arguments says:Do NOT update; any container will do since it wont be updated; return false - return new Tuple(false, cc, false); - }); + if (ChildrenContainer is TerminatingChildrenContainer c) + { + var n = c.CreateCopyWithReason(reason); + if (SwapChildrenRefs(c, n)) return true; + } + else return false; + } } /// @@ -222,7 +205,7 @@ protected bool SetChildrenTerminationReason(SuspendReason reason) /// protected void SetTerminated() { - UpdateChildrenRefs(c => TerminatedChildrenContainer.Instance); + Interlocked.Exchange(ref _childrenContainerDoNotCallMeDirectly, TerminatedChildrenContainer.Instance); } /// @@ -381,14 +364,28 @@ protected SuspendReason RemoveChildAndGetStateChange(IActorRef child) var terminating = ChildrenContainer as 
TerminatingChildrenContainer; if (terminating != null) { - var newContainer = UpdateChildrenRefs(c => c.Remove(child)); - if (newContainer is TerminatingChildrenContainer) return null; - return terminating.Reason; + var n = RemoveChild(child); + if (!(n is TerminatingChildrenContainer)) + return terminating.Reason; + else + return null; } - UpdateChildrenRefs(c => c.Remove(child)); + + RemoveChild(child); return null; } + private IChildrenContainer RemoveChild(IActorRef child) + { + while (true) + { + var oldChildren = ChildrenContainer; + var newChildren = oldChildren.Remove(child); + + if (SwapChildrenRefs(oldChildren, newChildren)) return newChildren; + } + } + private static string CheckName(string name) { if (name == null) throw new InvalidActorNameException("Actor name must not be null."); @@ -460,7 +457,7 @@ private IInternalActorRef MakeChild(Props props, string name, bool async, bool s if (Mailbox != null && IsFailed) { - for(var i = 1; i <= Mailbox.SuspendCount(); i++) + for (var i = 1; i <= Mailbox.SuspendCount(); i++) actor.Suspend(); } diff --git a/src/core/Akka/Actor/ActorCell.ReceiveTimeout.cs b/src/core/Akka/Actor/ActorCell.ReceiveTimeout.cs index 0d48fd63e82..e928a0587ba 100644 --- a/src/core/Akka/Actor/ActorCell.ReceiveTimeout.cs +++ b/src/core/Akka/Actor/ActorCell.ReceiveTimeout.cs @@ -10,7 +10,7 @@ namespace Akka.Actor { /// - /// TBD + /// Marker interface to indicate that a message should not reset the receive timeout. /// public interface INotInfluenceReceiveTimeout { @@ -25,7 +25,7 @@ public partial class ActorCell /// TBD /// /// TBD - public void SetReceiveTimeout(TimeSpan? timeout=null) + public void SetReceiveTimeout(TimeSpan? timeout = null) { _receiveTimeoutDuration = timeout; } @@ -47,7 +47,7 @@ public TimeSpan? 
ReceiveTimeout public void CheckReceiveTimeout() { CancelReceiveTimeout(); - if (_receiveTimeoutDuration != null && !Mailbox.HasMessages) + if (_receiveTimeoutDuration != null) { _pendingReceiveTimeout = System.Scheduler.ScheduleTellOnceCancelable(_receiveTimeoutDuration.Value, Self, Akka.Actor.ReceiveTimeout.Instance, Self); } diff --git a/src/core/Akka/Actor/ActorRef.cs b/src/core/Akka/Actor/ActorRef.cs index 9f0007f585d..824c8ff416d 100644 --- a/src/core/Akka/Actor/ActorRef.cs +++ b/src/core/Akka/Actor/ActorRef.cs @@ -89,6 +89,7 @@ public FutureActorRef(TaskCompletionSource result, Action unregister, Ac /// TBD /// TBD /// TBD + /// TBD public FutureActorRef(TaskCompletionSource result, Action unregister, ActorPath path, bool tcsWasCreatedWithRunContinuationsAsynchronouslyAvailable) { if (ActorCell.Current != null) diff --git a/src/core/Akka/Actor/ChildrenContainer/Internal/TerminatedChildrenContainer.cs b/src/core/Akka/Actor/ChildrenContainer/Internal/TerminatedChildrenContainer.cs index 5c1e222e5f4..a500e885d32 100644 --- a/src/core/Akka/Actor/ChildrenContainer/Internal/TerminatedChildrenContainer.cs +++ b/src/core/Akka/Actor/ChildrenContainer/Internal/TerminatedChildrenContainer.cs @@ -6,6 +6,7 @@ //----------------------------------------------------------------------- using System; +using System.Runtime.CompilerServices; namespace Akka.Actor.Internal { @@ -22,10 +23,15 @@ private TerminatedChildrenContainer() { //Intentionally left blank } + /// /// TBD /// - public new static IChildrenContainer Instance { get { return _instance; } } + public new static IChildrenContainer Instance + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get => _instance; + } /// /// TBD diff --git a/src/core/Akka/Actor/CoordinatedShutdown.cs b/src/core/Akka/Actor/CoordinatedShutdown.cs index 7ffc32b013d..bb0d7fd8760 100644 --- a/src/core/Akka/Actor/CoordinatedShutdown.cs +++ b/src/core/Akka/Actor/CoordinatedShutdown.cs @@ -152,6 +152,76 @@ public static 
CoordinatedShutdown Get(ActorSystem sys) public const string PhaseBeforeActorSystemTerminate = "before-actor-system-terminate"; public const string PhaseActorSystemTerminate = "actor-system-terminate"; + + + /// + /// Reason for the shutdown, which can be used by tasks in case they need to do + /// different things depending on what caused the shutdown. There are some + /// predefined reasons, but external libraries applications may also define + /// other reasons. + /// + public class Reason + { + protected Reason() + { + + } + } + + /// + /// The reason for the shutdown was unknown. Needed for backwards compatibility. + /// + public class UnknownReason : Reason + { + public static Reason Instance = new UnknownReason(); + + private UnknownReason() + { + + } + } + + /// + /// The shutdown was initiated by a CLR shutdown hook + /// + public class ClrExitReason : Reason + { + public static Reason Instance = new ClrExitReason(); + + private ClrExitReason() + { + + } + } + + + /// + /// The shutdown was initiated by Cluster downing. + /// + public class ClusterDowningReason : Reason + { + public static Reason Instance = new ClusterDowningReason(); + + private ClusterDowningReason() + { + + } + } + + + /// + /// The shutdown was initiated by Cluster leaving. 
+ /// + public class ClusterLeavingReason : Reason + { + public static Reason Instance = new ClusterLeavingReason(); + + private ClusterLeavingReason() + { + + } + } + /// /// The /// @@ -176,7 +246,7 @@ public static CoordinatedShutdown Get(ActorSystem sys) private readonly ConcurrentBag>> _clrShutdownTasks = new ConcurrentBag>>(); private readonly ConcurrentDictionary>>>> _tasks = new ConcurrentDictionary>>>>(); - private readonly AtomicBoolean _runStarted = new AtomicBoolean(false); + private readonly AtomicReference _runStarted = new AtomicReference(null); private readonly AtomicBoolean _clrHooksStarted = new AtomicBoolean(false); private readonly TaskCompletionSource _runPromise = new TaskCompletionSource(); private readonly TaskCompletionSource _hooksRunPromise = new TaskCompletionSource(); @@ -185,14 +255,14 @@ public static CoordinatedShutdown Get(ActorSystem sys) /// /// INTERNAL API - /// + /// /// Signals when CLR shutdown hooks have been completed /// internal Task ClrShutdownTask => _hooksRunPromise.Task; /// /// Add a task to a phase. It doesn't remove previously added tasks. - /// + /// /// Tasks added to the same phase are executed in parallel without any /// ordering assumptions. Next phase will not start until all tasks of /// previous phase have completed. @@ -204,8 +274,8 @@ public static CoordinatedShutdown Get(ActorSystem sys) /// Tasks should typically be registered as early as possible after system /// startup. When running the tasks that have been /// registered will be performed but tasks that are added too late will not be run. - /// - /// + /// + /// /// It is possible to add a task to a later phase from within a task in an earlier phase /// and it will be performed. /// @@ -230,7 +300,7 @@ public void AddTask(string phase, string taskName, Func> task) /// /// Add a shutdown hook that will execute when the CLR process begins /// its shutdown sequence, invoked via . 
- /// + /// /// Added hooks may run in any order concurrently, but they are run before /// the Akka.NET internal shutdown hooks execute. /// @@ -246,10 +316,10 @@ internal void AddClrShutdownHook(Func> hook) /// /// INTERNAL API - /// + /// /// Should only be called directly by the event /// in production. - /// + /// /// Safe to call multiple times, but hooks will only be run once. /// /// Returns a that will be completed once the process exits. @@ -283,6 +353,12 @@ private Task RunClrHooks() return ClrShutdownTask; } + /// + /// The for the shutdown as passed to the method. if the shutdown + /// has not been started. + /// + public Reason ShutdownReason => _runStarted.Value; + /// /// Run tasks of all phases including and after the given phase. /// @@ -290,11 +366,27 @@ private Task RunClrHooks() /// A task that is completed when all such tasks have been completed, or /// there is failure when is disabled. /// - /// It is safe to call this method multiple times. It will only run once. + /// It is safe to call this method multiple times. It will only run the shutdown sequence once. /// + [Obsolete("Use the method with 'reason' parameter instead")] public Task Run(string fromPhase = null) { - if (_runStarted.CompareAndSet(false, true)) + return Run(UnknownReason.Instance, fromPhase); + } + + /// + /// Run tasks of all phases including and after the given phase. + /// + /// Reason of the shutdown + /// Optional. The phase to start the run from. + /// A task that is completed when all such tasks have been completed, or + /// there is failure when is disabled. + /// + /// It is safe to call this method multiple times. It will only run the shutdown sequence once. 
+ /// + public Task Run(Reason reason, string fromPhase = null) + { + if (_runStarted.CompareAndSet(null, reason)) { var debugEnabled = Log.IsDebugEnabled; @@ -412,7 +504,7 @@ Task Loop(List remainingPhases) var done = Loop(runningPhases); done.ContinueWith(tr => { - if(!tr.IsFaulted && !tr.IsCanceled) + if (!tr.IsFaulted && !tr.IsCanceled) _runPromise.SetResult(tr.Result); else { @@ -511,7 +603,7 @@ void DepthFirstSearch(string u) /// /// INTERNAL API - /// + /// /// Primes the with the default phase for /// /// @@ -554,7 +646,8 @@ internal static void InitPhaseActorSystemTerminate(ActorSystem system, Config co } return Done.Instance; }); - } else if (exitClr) + } + else if (exitClr) { Environment.Exit(0); return TaskEx.Completed; @@ -599,7 +692,7 @@ internal static void InitClrHook(ActorSystem system, Config conf, CoordinatedShu coord.Log.Info("Starting coordinated shutdown from CLR termination hook."); try { - coord.Run().Wait(coord.TotalTimeout); + coord.Run(ClrExitReason.Instance).Wait(coord.TotalTimeout); } catch (Exception ex) { diff --git a/src/core/Akka/Actor/Futures.cs b/src/core/Akka/Actor/Futures.cs index 81d8002a301..b9bef521445 100644 --- a/src/core/Akka/Actor/Futures.cs +++ b/src/core/Akka/Actor/Futures.cs @@ -146,23 +146,11 @@ internal static IActorRefProvider ResolveProvider(ICanTell self) return null; } - - private const int RunContinuationsAsynchronously = 64; - private static readonly bool isRunContinuationsAsynchronouslyAvailable = Enum.IsDefined(typeof(TaskCreationOptions), RunContinuationsAsynchronously); - - + private static async Task Ask(ICanTell self, Func messageFactory, IActorRefProvider provider, TimeSpan? 
timeout, CancellationToken cancellationToken) { - TaskCompletionSource result; - if (isRunContinuationsAsynchronouslyAvailable) - { - result = new TaskCompletionSource((TaskCreationOptions)RunContinuationsAsynchronously); - } - else - { - result = new TaskCompletionSource(); - } + TaskCompletionSource result = TaskEx.NonBlockingTaskCompletionSource(); CancellationTokenSource timeoutCancellation = null; timeout = timeout ?? provider.Settings.AskTimeout; @@ -188,7 +176,7 @@ private static async Task Ask(ICanTell self, Func mes //create a new tempcontainer path ActorPath path = provider.TempPath(); - var future = new FutureActorRef(result, () => { }, path, isRunContinuationsAsynchronouslyAvailable); + var future = new FutureActorRef(result, () => { }, path, TaskEx.IsRunContinuationsAsynchronouslyAvailable); //The future actor needs to be registered in the temp container provider.RegisterTempActor(future, path); var message = messageFactory(future); diff --git a/src/core/Akka/Actor/Settings.cs b/src/core/Akka/Actor/Settings.cs index ea93e5c868e..f71a8d39ea1 100644 --- a/src/core/Akka/Actor/Settings.cs +++ b/src/core/Akka/Actor/Settings.cs @@ -177,7 +177,7 @@ private static string GetProviderClass(string provider) public bool SerializeAllCreators { get; private set; } /// - /// Gets the default timeout for calls. + /// Gets the default timeout for Futures.Ask calls. /// /// The ask timeout. public TimeSpan AskTimeout { get; private set; } diff --git a/src/core/Akka/Dispatch/SysMsg/ISystemMessage.cs b/src/core/Akka/Dispatch/SysMsg/ISystemMessage.cs index 3749d37464d..34451d67acb 100644 --- a/src/core/Akka/Dispatch/SysMsg/ISystemMessage.cs +++ b/src/core/Akka/Dispatch/SysMsg/ISystemMessage.cs @@ -258,11 +258,12 @@ internal interface IStashWhenWaitingForChildren { } /// Stash this when the actor is in a failed state. 
/// internal interface IStashWhenFailed { } - /** - * public API - */ + + // public API + //@SerialVersionUID(1L) //private[akka] case class Create(failure: Option[ActorInitializationException]) extends ISystemMessage // sent to self from Dispatcher.register + /// /// Class ISystemMessage. /// diff --git a/src/core/Akka/Event/LoggingAdapterBase.cs b/src/core/Akka/Event/LoggingAdapterBase.cs index c1199f07034..ccc3c7b7d93 100644 --- a/src/core/Akka/Event/LoggingAdapterBase.cs +++ b/src/core/Akka/Event/LoggingAdapterBase.cs @@ -135,7 +135,7 @@ protected void NotifyLog(LogLevel logLevel, object message) /// /// The message that is being logged. /// An optional list of items used to format the message. - public void Debug(string format, params object[] args) + public virtual void Debug(string format, params object[] args) { if (!IsDebugEnabled) return; @@ -155,7 +155,7 @@ public void Debug(string format, params object[] args) /// /// N/A /// N/A - public void Warn(string format, params object[] args) + public virtual void Warn(string format, params object[] args) { Warning(format, args); } @@ -165,7 +165,7 @@ public void Warn(string format, params object[] args) /// /// The message that is being logged. /// An optional list of items used to format the message. - public void Warning(string format, params object[] args) + public virtual void Warning(string format, params object[] args) { if (!IsWarningEnabled) return; @@ -186,7 +186,7 @@ public void Warning(string format, params object[] args) /// The exception associated with this message. /// The message that is being logged. /// An optional list of items used to format the message. - public void Error(Exception cause, string format, params object[] args) + public virtual void Error(Exception cause, string format, params object[] args) { if (!IsErrorEnabled) return; @@ -206,7 +206,7 @@ public void Error(Exception cause, string format, params object[] args) /// /// The message that is being logged. 
/// An optional list of items used to format the message. - public void Error(string format, params object[] args) + public virtual void Error(string format, params object[] args) { if (!IsErrorEnabled) return; @@ -226,7 +226,7 @@ public void Error(string format, params object[] args) /// /// The message that is being logged. /// An optional list of items used to format the message. - public void Info(string format, params object[] args) + public virtual void Info(string format, params object[] args) { if (!IsInfoEnabled) return; @@ -247,7 +247,7 @@ public void Info(string format, params object[] args) /// The level used to log the message. /// The message that is being logged. /// An optional list of items used to format the message. - public void Log(LogLevel logLevel, string format, params object[] args) + public virtual void Log(LogLevel logLevel, string format, params object[] args) { if (args == null || args.Length == 0) { diff --git a/src/core/Akka/Pattern/BackoffOptions.cs b/src/core/Akka/Pattern/BackoffOptions.cs index fcec1a5f650..8e1be3ef9fa 100644 --- a/src/core/Akka/Pattern/BackoffOptions.cs +++ b/src/core/Akka/Pattern/BackoffOptions.cs @@ -11,7 +11,7 @@ namespace Akka.Pattern { /// - /// Builds back-off options for creating a back-off supervisor. You can pass to . + /// Builds back-off options for creating a back-off supervisor. You can pass to . /// public static class Backoff { diff --git a/src/core/Akka/Pattern/CircuitBreaker.cs b/src/core/Akka/Pattern/CircuitBreaker.cs index 69078e4d956..259785e535a 100644 --- a/src/core/Akka/Pattern/CircuitBreaker.cs +++ b/src/core/Akka/Pattern/CircuitBreaker.cs @@ -15,29 +15,29 @@ namespace Akka.Pattern { /// - /// Provides circuit breaker functionality to provide stability when working with + /// Provides circuit breaker functionality to provide stability when working with /// "dangerous" operations, e.g. 
calls to remote systems - /// + /// /// /// /// Transitions through three states: /// /// /// In *Closed* state, - /// calls pass through until the maxFailures count is reached. - /// This causes the circuit breaker to open. Both exceptions and calls exceeding + /// calls pass through until the maxFailures count is reached. + /// This causes the circuit breaker to open. Both exceptions and calls exceeding /// callTimeout are considered failures. /// /// /// In *Open* state, - /// calls fail-fast with an exception. After resetTimeout, + /// calls fail-fast with an exception. After resetTimeout, /// circuit breaker transitions to half-open state. /// /// /// In *Half-Open* state, - /// the first call will be allowed through, if it succeeds - /// the circuit breaker will reset to closed state. If it fails, the circuit - /// breaker will re-open to open state. All calls beyond the first that execute + /// the first call will be allowed through, if it succeeds + /// the circuit breaker will reset to closed state. If it fails, the circuit + /// breaker will re-open to open state. All calls beyond the first that execute /// while the first is running will fail-fast with an exception. 
/// /// @@ -173,7 +173,7 @@ public void WithSyncCircuitBreaker(Action body) /// /// Wraps invocations of asynchronous calls that need to be protected /// If this does not complete within the time allotted, it should return default() - /// + /// /// /// Await.result( /// withCircuitBreaker(try Future.successful(body) catch { case NonFatal(t) ⇒ Future.failed(t) }), @@ -238,10 +238,7 @@ private void Transition(AtomicState fromState, AtomicState toState) Debug.WriteLine("Successful transition from {0} to {1}", fromState, toState); toState.Enter(); } - else - { - throw new IllegalStateException($"Illegal transition attempted from {fromState} to {toState}"); - } + // else some other thread already swapped state } /// diff --git a/src/core/Akka/Util/Internal/TaskEx.cs b/src/core/Akka/Util/Internal/TaskEx.cs index b94af98e95c..38baceaf3a7 100644 --- a/src/core/Akka/Util/Internal/TaskEx.cs +++ b/src/core/Akka/Util/Internal/TaskEx.cs @@ -18,6 +18,55 @@ namespace Akka.Util.Internal /// internal static class TaskEx { + private const int RunContinuationsAsynchronously = 64; + public static readonly bool IsRunContinuationsAsynchronouslyAvailable = Enum.IsDefined(typeof(TaskCreationOptions), RunContinuationsAsynchronously); + + /// + /// Creates a new which will run in asynchronous, + /// non-blocking fashion upon calling . + /// + /// This behavior is not available on all supported versions of .NET framework, in this case it + /// should be used only together with and + /// . + /// + public static TaskCompletionSource NonBlockingTaskCompletionSource() + { + if (IsRunContinuationsAsynchronouslyAvailable) + { + return new TaskCompletionSource((TaskCreationOptions)RunContinuationsAsynchronously); + } + else + { + return new TaskCompletionSource(); + } + } + + /// + /// Tries to complete given in asynchronous, non-blocking + /// fashion. For safety reasons, this method should be called only on tasks created via + /// method. 
+ /// + public static void NonBlockingTrySetResult(this TaskCompletionSource taskCompletionSource, T value) + { + if (IsRunContinuationsAsynchronouslyAvailable) + taskCompletionSource.TrySetResult(value); + else + Task.Run(() => taskCompletionSource.TrySetResult(value)); + } + + /// + /// Tries to set given + /// in asynchronous, non-blocking fashion. For safety reasons, this method should be called only + /// on tasks created via method. + /// + public static void NonBlockingTrySetException(this TaskCompletionSource taskCompletionSource, Exception exception) + { + if (IsRunContinuationsAsynchronouslyAvailable) + taskCompletionSource.TrySetException(exception); + else + Task.Run(() => taskCompletionSource.TrySetException(exception)); + } + /// /// A completed task ///