diff --git a/CHANGELOG.md b/CHANGELOG.md index f12a546143..e0bfcdb639 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Updated docker documentation [#488](https://github.com/ie3-institute/simona/issues/488) - Added support classes for transformer tap position calculation [#1543](https://github.com/ie3-institute/simona/issues/1543) - Added basic external em service [#1566](https://github.com/ie3-institute/simona/issues/1566) +- Added external result provider [#1530](https://github.com/ie3-institute/simona/issues/1530) ### Changed - Upgraded `scala2` to `scala3` [#53](https://github.com/ie3-institute/simona/issues/53) diff --git a/build.gradle b/build.gradle index 4622ad1285..c8300159a1 100644 --- a/build.gradle +++ b/build.gradle @@ -206,8 +206,6 @@ tasks.shadowJar { tasks.withType(ScalaCompile).configureEach { scalaCompileOptions.additionalParameters = scala3compilerOptions + [ "-Xplugin:" + configurations.scalaCompilerPlugin.asPath, - "-P:scapegoat:dataDir:" + project.layout.buildDirectory.get().asFile.absolutePath + "/reports/scapegoat/src/", - "-P:scapegoat:disabledInspections:TryGet" ] scalaCompileOptions.forkOptions.jvmArgs = [ '-Xss2m', @@ -219,8 +217,6 @@ tasks.withType(ScalaCompile).configureEach { compileTestScala { scalaCompileOptions.additionalParameters = scala3compilerOptions + [ "-Xplugin:" + configurations.scalaCompilerPlugin.asPath, - "-P:scapegoat:dataDir:" + project.layout.buildDirectory.get().asFile.absolutePath + "/reports/scapegoat/testsrc/", - "-P:scapegoat:disabledInspections:TryGet" ] } diff --git a/input/samples/vn_simona/vn_simona.conf b/input/samples/vn_simona/vn_simona.conf index a82d6ac695..fa51c56221 100644 --- a/input/samples/vn_simona/vn_simona.conf +++ b/input/samples/vn_simona/vn_simona.conf @@ -23,13 +23,13 @@ simona.time.schedulerReadyCheckWindow = 900 # Input Parameters ################################################################## simona.input.primary.csvParams = { - directoryPath: "input/samples/vn_simona/fullGrid" + directoryPath: "simona/input/samples/vn_simona/fullGridEm" csvSep: "," isHierarchic: false } simona.input.grid.datasource.id = "csv" simona.input.grid.datasource.csvParams = { - directoryPath: "input/samples/vn_simona/fullGrid" + directoryPath: "simona/input/samples/vn_simona/fullGridEm" csvSep: "," isHierarchic: false } @@ -44,7 +44,7 @@ simona.input.weather.datasource = { ################################################################## # Output Parameters ################################################################## -simona.output.base.dir = "output/vn_simona" +simona.output.base.dir = "simona/output/vn_simonaEM" simona.output.base.addTimestampToOutputDir = true simona.output.sink.csv { diff --git a/src/main/resources/reference.conf b/src/main/resources/reference.conf new file mode 100644 index 0000000000..2ac51454dc --- /dev/null +++ b/src/main/resources/reference.conf @@ -0,0 +1,1531 @@ +# SPDX-License-Identifier: Apache-2.0 + +pekko.actor.typed { + + # List FQCN of `org.apache.pekko.actor.typed.ExtensionId`s which shall be loaded at actor system startup. + # Should be on the format: 'extensions = ["com.example.MyExtId1", "com.example.MyExtId2"]' etc. + # See the Pekko Documentation for more info about Extensions + extensions = [] + + # List FQCN of extensions which shall be loaded at actor system startup. 
+ # Library extensions are regular extensions that are loaded at startup and are + # available for third party library authors to enable auto-loading of extensions when + # present on the classpath. This is done by appending entries: + # 'library-extensions += "Extension"' in the library `reference.conf`. + # + # Should not be set by end user applications in 'application.conf', use the extensions property for that + # + library-extensions = ${?pekko.actor.typed.library-extensions} [] + + # Receptionist is started eagerly to allow clustered receptionist to gather remote registrations early on. + library-extensions += "org.apache.pekko.actor.typed.receptionist.Receptionist$" + + # While an actor is restarted (waiting for backoff to expire and children to stop) + # incoming messages and signals are stashed, and delivered later to the newly restarted + # behavior. This property defines the capacity in number of messages of the stash + # buffer. If the capacity is exceed then additional incoming messages are dropped. + restart-stash-capacity = 1000 + + # Typed mailbox defaults to the single consumer mailbox as balancing dispatcher is not supported + default-mailbox { + mailbox-type = "org.apache.pekko.dispatch.SingleConsumerOnlyUnboundedMailbox" + } +} + +# Load typed extensions by a classic extension. +pekko.library-extensions += "org.apache.pekko.actor.typed.internal.adapter.ActorSystemAdapter$LoadTypedExtensions" + +pekko.actor { + serializers { + typed-misc = "org.apache.pekko.actor.typed.internal.MiscMessageSerializer" + service-key = "org.apache.pekko.actor.typed.internal.receptionist.ServiceKeySerializer" + } + + serialization-identifiers { + "org.apache.pekko.actor.typed.internal.MiscMessageSerializer" = 24 + "org.apache.pekko.actor.typed.internal.receptionist.ServiceKeySerializer" = 26 + } + + serialization-bindings { + "org.apache.pekko.actor.typed.ActorRef" = typed-misc + "org.apache.pekko.actor.typed.internal.adapter.ActorRefAdapter" = typed-misc + "org.apache.pekko.actor.typed.internal.receptionist.DefaultServiceKey" = service-key + } +} + +# When using Pekko Typed (having pekko-actor-typed in classpath) the +# org.apache.pekko.event.slf4j.Slf4jLogger is enabled instead of the DefaultLogger +# even though it has not been explicitly defined in `pekko.loggers` +# configuration. +# +# Slf4jLogger will be used for all Pekko classic logging via eventStream, +# including logging from Pekko internals. The Slf4jLogger is then using +# an ordinary org.slf4j.Logger to emit the log events. +# +# The Slf4jLoggingFilter is also enabled automatically. +# +# This behavior can be disabled by setting this property to `off`. +pekko.use-slf4j = on + +pekko.reliable-delivery { + producer-controller { + + # To avoid head of line blocking from serialization and transfer + # of large messages this can be enabled. + # Large messages are chunked into pieces of the given size in bytes. The + # chunked messages are sent separately and assembled on the consumer side. + # Serialization and deserialization is performed by the ProducerController and + # ConsumerController respectively instead of in the remote transport layer. + chunk-large-messages = off + + durable-queue { + # The ProducerController uses this timeout for the requests to + # the durable queue. If there is no reply within the timeout it + # will be retried. + request-timeout = 3s + + # The ProducerController retries requests to the durable queue this + # number of times before failing. 
+ retry-attempts = 10 + + # The ProducerController retries sending the first message with this interval + # until it has been confirmed. + resend-first-interval = 1s + } + } + + consumer-controller { + # Number of messages in flight between ProducerController and + # ConsumerController. The ConsumerController requests for more messages + # when half of the window has been used. + flow-control-window = 50 + + # The ConsumerController resends flow control messages to the + # ProducerController with the resend-interval-min, and increasing + # it gradually to resend-interval-max when idle. + resend-interval-min = 2s + resend-interval-max = 30s + + # If this is enabled lost messages will not be resent, but flow control is used. + # This can be more efficient since messages don't have to be + # kept in memory in the `ProducerController` until they have been + # confirmed, but the drawback is that lost messages will not be delivered. + only-flow-control = false + } + + work-pulling { + producer-controller = ${pekko.reliable-delivery.producer-controller} + producer-controller { + # Limit of how many messages that can be buffered when there + # is no demand from the consumer side. + buffer-size = 1000 + + # Ask timeout for sending message to worker until receiving Ack from worker + internal-ask-timeout = 60s + + # Chunked messages not implemented for work-pulling yet. Override to not + # propagate property from pekko.reliable-delivery.producer-controller. + chunk-large-messages = off + } + } +} + +# SPDX-License-Identifier: Apache-2.0 + +##################################### +# Pekko Actor Reference Config File # +##################################### + +# This is the reference config file that contains all the default settings. +# Make your edits/overrides in your application.conf. + +# Pekko version, checked against the runtime version of Pekko. Loaded from generated conf file. +include "version" + +pekko { + # Home directory of Pekko, modules in the deploy directory will be loaded + home = "" + + # Loggers to register at boot time (org.apache.pekko.event.Logging$DefaultLogger logs + # to STDOUT) + loggers = ["org.apache.pekko.event.Logging$DefaultLogger"] + + # Filter of log events that is used by the LoggingAdapter before + # publishing log events to the eventStream. It can perform + # fine grained filtering based on the log source. The default + # implementation filters on the `loglevel`. + # FQCN of the LoggingFilter. The Class of the FQCN must implement + # org.apache.pekko.event.LoggingFilter and have a public constructor with + # (org.apache.pekko.actor.ActorSystem.Settings, org.apache.pekko.event.EventStream) parameters. + logging-filter = "org.apache.pekko.event.DefaultLoggingFilter" + + # Specifies the default loggers dispatcher + loggers-dispatcher = "pekko.actor.default-dispatcher" + + # Loggers are created and registered synchronously during ActorSystem + # start-up, and since they are actors, this timeout is used to bound the + # waiting time + logger-startup-timeout = 5s + + # Log level used by the configured loggers (see "loggers") as soon + # as they have been started; before that, see "stdout-loglevel" + # Options: OFF, ERROR, WARNING, INFO, DEBUG + loglevel = "INFO" + + # Log level for the very basic logger activated during ActorSystem startup. + # This logger prints the log messages to stdout (System.out). + # Options: OFF, ERROR, WARNING, INFO, DEBUG + stdout-loglevel = "WARNING" + + # Log the complete configuration at INFO level when the actor system is started. 
+ # This is useful when you are uncertain of what configuration is used. + log-config-on-start = off + + # Log at info level when messages are sent to dead letters, or published to + # eventStream as `DeadLetter`, `Dropped` or `UnhandledMessage`. + # Possible values: + # on: all dead letters are logged + # off: no logging of dead letters + # n: positive integer, number of dead letters that will be logged + log-dead-letters = 10 + + # Possibility to turn off logging of dead letters while the actor system + # is shutting down. Logging is only done when enabled by 'log-dead-letters' + # setting. + log-dead-letters-during-shutdown = off + + # When log-dead-letters is enabled, this will re-enable the logging after configured duration. + # infinite: suspend the logging forever; + # or a duration (eg: 5 minutes), after which the logging will be re-enabled. + log-dead-letters-suspend-duration = 5 minutes + + # List FQCN of extensions which shall be loaded at actor system startup. + # Library extensions are regular extensions that are loaded at startup and are + # available for third party library authors to enable auto-loading of extensions when + # present on the classpath. This is done by appending entries: + # 'library-extensions += "Extension"' in the library `reference.conf`. + # + # Should not be set by end user applications in 'application.conf', use the extensions property for that + # + library-extensions = ${?pekko.library-extensions} ["org.apache.pekko.serialization.SerializationExtension$"] + + # List FQCN of extensions which shall be loaded at actor system startup. + # Should be on the format: 'extensions = ["foo", "bar"]' etc. + # See the Pekko Documentation for more info about Extensions + extensions = [] + + # Toggles whether threads created by this ActorSystem should be daemons or not + daemonic = off + + # JVM shutdown, System.exit(-1), in case of a fatal error, + # such as OutOfMemoryError + jvm-exit-on-fatal-error = on + + # Pekko installs JVM shutdown hooks by default, e.g. in CoordinatedShutdown and Artery. This property will + # not disable user-provided hooks registered using `CoordinatedShutdown#addCancellableJvmShutdownHook`. + # This property is related to `pekko.coordinated-shutdown.run-by-jvm-shutdown-hook` below. + # This property makes it possible to disable all such hooks if the application itself + # or a higher level framework such as Play prefers to install the JVM shutdown hook and + # terminate the ActorSystem itself, with or without using CoordinatedShutdown. + jvm-shutdown-hooks = on + + # Version must be the same across all modules and if they are different the startup + # will fail. It's possible but not recommended, to disable this check, and only log a warning, + # by setting this property to `off`. + fail-mixed-versions = on + + # Some modules (remoting only right now) can emit custom events to the Java Flight Recorder if running + # on JDK 11 or later. If you for some reason do not want that, it can be disabled and switched to no-ops + # with this toggle. + java-flight-recorder { + enabled = true + } + + actor { + + # Either one of "local", "remote" or "cluster" or the + # FQCN of the ActorRefProvider to be used; the below is the built-in default, + # note that "remote" and "cluster" requires the pekko-remote and pekko-cluster + # artifacts to be on the classpath. + provider = "local" + + # The guardian "/user" will use this class to obtain its supervisorStrategy. + # It needs to be a subclass of org.apache.pekko.actor.SupervisorStrategyConfigurator. 
+ # In addition to the default there is org.apache.pekko.actor.StoppingSupervisorStrategy. + guardian-supervisor-strategy = "org.apache.pekko.actor.DefaultSupervisorStrategy" + + # Timeout for Extension creation and a few other potentially blocking + # initialization tasks. + creation-timeout = 20s + + # Serializes and deserializes (non-primitive) messages to ensure immutability, + # this is only intended for testing. + serialize-messages = off + + # Serializes and deserializes creators (in Props) to ensure that they can be + # sent over the network, this is only intended for testing. Purely local deployments + # as marked with deploy.scope == LocalScope are exempt from verification. + serialize-creators = off + + # If serialize-messages or serialize-creators are enabled classes that starts with + # a prefix listed here are not verified. + no-serialization-verification-needed-class-prefix = ["org.apache.pekko."] + + # Timeout for send operations to top-level actors which are in the process + # of being started. This is only relevant if using a bounded mailbox or the + # CallingThreadDispatcher for a top-level actor. + unstarted-push-timeout = 10s + + # TypedActor deprecated since Akka 2.6.0. + typed { + # Default timeout for the deprecated TypedActor (not the new actor APIs in Akka 2.6) + # methods with non-void return type. + timeout = 5s + } + + # Mapping between ´deployment.router' short names to fully qualified class names + router.type-mapping { + from-code = "org.apache.pekko.routing.NoRouter" + round-robin-pool = "org.apache.pekko.routing.RoundRobinPool" + round-robin-group = "org.apache.pekko.routing.RoundRobinGroup" + random-pool = "org.apache.pekko.routing.RandomPool" + random-group = "org.apache.pekko.routing.RandomGroup" + balancing-pool = "org.apache.pekko.routing.BalancingPool" + smallest-mailbox-pool = "org.apache.pekko.routing.SmallestMailboxPool" + broadcast-pool = "org.apache.pekko.routing.BroadcastPool" + broadcast-group = "org.apache.pekko.routing.BroadcastGroup" + scatter-gather-pool = "org.apache.pekko.routing.ScatterGatherFirstCompletedPool" + scatter-gather-group = "org.apache.pekko.routing.ScatterGatherFirstCompletedGroup" + tail-chopping-pool = "org.apache.pekko.routing.TailChoppingPool" + tail-chopping-group = "org.apache.pekko.routing.TailChoppingGroup" + consistent-hashing-pool = "org.apache.pekko.routing.ConsistentHashingPool" + consistent-hashing-group = "org.apache.pekko.routing.ConsistentHashingGroup" + } + + deployment { + + # deployment id pattern - on the format: /parent/child etc. + default { + + # The id of the dispatcher to use for this actor. + # If undefined or empty the dispatcher specified in code + # (Props.withDispatcher) is used, or default-dispatcher if not + # specified at all. + dispatcher = "" + + # The id of the mailbox to use for this actor. + # If undefined or empty the default mailbox of the configured dispatcher + # is used or if there is no mailbox configuration the mailbox specified + # in code (Props.withMailbox) is used. + # If there is a mailbox defined in the configured dispatcher then that + # overrides this setting. + mailbox = "" + + # routing (load-balance) scheme to use + # - available: "from-code", "round-robin", "random", "smallest-mailbox", + # "scatter-gather", "broadcast" + # - or: Fully qualified class name of the router class. 
+ # The class must extend org.apache.pekko.routing.CustomRouterConfig and + # have a public constructor with com.typesafe.config.Config + # and optional org.apache.pekko.actor.DynamicAccess parameter. + # - default is "from-code"; + # Whether or not an actor is transformed to a Router is decided in code + # only (Props.withRouter). The type of router can be overridden in the + # configuration; specifying "from-code" means that the values specified + # in the code shall be used. + # In case of routing, the actors to be routed to can be specified + # in several ways: + # - nr-of-instances: will create that many children + # - routees.paths: will route messages to these paths using ActorSelection, + # i.e. will not create children + # - resizer: dynamically resizable number of routees as specified in + # resizer below + router = "from-code" + + # number of children to create in case of a router; + # this setting is ignored if routees.paths is given + nr-of-instances = 1 + + # within is the timeout used for routers containing future calls + within = 5 seconds + + # number of virtual nodes per node for consistent-hashing router + virtual-nodes-factor = 10 + + tail-chopping-router { + # interval is duration between sending message to next routee + interval = 10 milliseconds + } + + routees { + # Alternatively to giving nr-of-instances you can specify the full + # paths of those actors which should be routed to. This setting takes + # precedence over nr-of-instances + paths = [] + } + + # To use a dedicated dispatcher for the routees of the pool you can + # define the dispatcher configuration inline with the property name + # 'pool-dispatcher' in the deployment section of the router. + # For example: + # pool-dispatcher { + # fork-join-executor.parallelism-min = 5 + # fork-join-executor.parallelism-max = 5 + # } + + # Routers with dynamically resizable number of routees; this feature is + # enabled by including (parts of) this section in the deployment + resizer { + + enabled = off + + # The fewest number of routees the router should ever have. + lower-bound = 1 + + # The most number of routees the router should ever have. + # Must be greater than or equal to lower-bound. + upper-bound = 10 + + # Threshold used to evaluate if a routee is considered to be busy + # (under pressure). Implementation depends on this value (default is 1). + # 0: number of routees currently processing a message. + # 1: number of routees currently processing a message has + # some messages in mailbox. + # > 1: number of routees with at least the configured pressure-threshold + # messages in their mailbox. Note that estimating mailbox size of + # default UnboundedMailbox is O(N) operation. + pressure-threshold = 1 + + # Percentage to increase capacity whenever all routees are busy. + # For example, 0.2 would increase 20% (rounded up), i.e. if current + # capacity is 6 it will request an increase of 2 more routees. + rampup-rate = 0.2 + + # Minimum fraction of busy routees before backing off. + # For example, if this is 0.3, then we'll remove some routees only when + # less than 30% of routees are busy, i.e. if current capacity is 10 and + # 3 are busy then the capacity is unchanged, but if 2 or less are busy + # the capacity is decreased. + # Use 0.0 or negative to avoid removal of routees. + backoff-threshold = 0.3 + + # Fraction of routees to be removed when the resizer reaches the + # backoffThreshold. + # For example, 0.1 would decrease 10% (rounded up), i.e. 
if current + # capacity is 9 it will request an decrease of 1 routee. + backoff-rate = 0.1 + + # Number of messages between resize operation. + # Use 1 to resize before each message. + messages-per-resize = 10 + } + + # Routers with dynamically resizable number of routees based on + # performance metrics. + # This feature is enabled by including (parts of) this section in + # the deployment, cannot be enabled together with default resizer. + optimal-size-exploring-resizer { + + enabled = off + + # The fewest number of routees the router should ever have. + lower-bound = 1 + + # The most number of routees the router should ever have. + # Must be greater than or equal to lower-bound. + upper-bound = 10 + + # probability of doing a ramping down when all routees are busy + # during exploration. + chance-of-ramping-down-when-full = 0.2 + + # Interval between each resize attempt + action-interval = 5s + + # If the routees have not been fully utilized (i.e. all routees busy) + # for such length, the resizer will downsize the pool. + downsize-after-underutilized-for = 72h + + # Duration exploration, the ratio between the largest step size and + # current pool size. E.g. if the current pool size is 50, and the + # explore-step-size is 0.1, the maximum pool size change during + # exploration will be +- 5 + explore-step-size = 0.1 + + # Probability of doing an exploration v.s. optimization. + chance-of-exploration = 0.4 + + # When downsizing after a long streak of under-utilization, the resizer + # will downsize the pool to the highest utilization multiplied by + # a downsize ratio. This downsize ratio determines the new pools size + # in comparison to the highest utilization. + # E.g. if the highest utilization is 10, and the down size ratio + # is 0.8, the pool will be downsized to 8 + downsize-ratio = 0.8 + + # When optimizing, the resizer only considers the sizes adjacent to the + # current size. This number indicates how many adjacent sizes to consider. + optimization-range = 16 + + # The weight of the latest metric over old metrics when collecting + # performance metrics. + # E.g. if the last processing speed is 10 millis per message at pool + # size 5, and if the new processing speed collected is 6 millis per + # message at pool size 5. Given a weight of 0.3, the metrics + # representing pool size 5 will be 6 * 0.3 + 10 * 0.7, i.e. 8.8 millis + # Obviously, this number should be between 0 and 1. + weight-of-latest-metric = 0.5 + } + } + + "/IO-DNS/inet-address" { + mailbox = "unbounded" + router = "consistent-hashing-pool" + nr-of-instances = 4 + } + + "/IO-DNS/inet-address/*" { + dispatcher = "pekko.actor.default-blocking-io-dispatcher" + } + + "/IO-DNS/async-dns" { + mailbox = "unbounded" + router = "round-robin-pool" + nr-of-instances = 1 + } + } + + default-dispatcher { + # Must be one of the following + # Dispatcher, PinnedDispatcher, or a FQCN to a class inheriting + # MessageDispatcherConfigurator with a public constructor with + # both com.typesafe.config.Config parameter and + # org.apache.pekko.dispatch.DispatcherPrerequisites parameters. + # PinnedDispatcher must be used together with executor=thread-pool-executor. 
+ type = "Dispatcher" + + # Which kind of ExecutorService to use for this dispatcher + # Valid options: + # - "default-executor" requires a "default-executor" section + # - "fork-join-executor" requires a "fork-join-executor" section + # - "virtual-thread-executor" requires a "virtual-thread-executor" section + # - "thread-pool-executor" requires a "thread-pool-executor" section + # - "affinity-pool-executor" requires an "affinity-pool-executor" section + # - A FQCN of a class extending ExecutorServiceConfigurator + executor = "default-executor" + + # This will be used if you have set "executor = "default-executor"". + # If an ActorSystem is created with a given ExecutionContext, this + # ExecutionContext will be used as the default executor for all + # dispatchers in the ActorSystem configured with + # executor = "default-executor". Note that "default-executor" + # is the default value for executor, and therefore used if not + # specified otherwise. If no ExecutionContext is given, + # the executor configured in "fallback" will be used. + default-executor { + fallback = "fork-join-executor" + } + + # This will be used if you have set "executor = "affinity-pool-executor"" + # Underlying thread pool implementation is org.apache.pekko.dispatch.affinity.AffinityPool. + # This executor is classified as "ApiMayChange". + affinity-pool-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 4 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 0.8 + + # Max number of threads to cap factor-based parallelism number to. + parallelism-max = 64 + + # Each worker in the pool uses a separate bounded MPSC queue. This value + # indicates the upper bound of the queue. Whenever an attempt to enqueue + # a task is made and the queue does not have capacity to accommodate + # the task, the rejection handler created by the rejection handler specified + # in "rejection-handler" is invoked. + task-queue-size = 512 + + # FQCN of the Rejection handler used in the pool. + # Must have an empty public constructor and must + # implement org.apache.pekko.actor.affinity.RejectionHandlerFactory. + rejection-handler = "org.apache.pekko.dispatch.affinity.ThrowOnOverflowRejectionHandler" + + # Level of CPU time used, on a scale between 1 and 10, during backoff/idle. + # The tradeoff is that to have low latency more CPU time must be used to be + # able to react quickly on incoming messages or send as fast as possible after + # backoff backpressure. + # Level 1 strongly prefer low CPU consumption over low latency. + # Level 10 strongly prefer low latency over low CPU consumption. + idle-cpu-level = 5 + + # FQCN of the org.apache.pekko.dispatch.affinity.QueueSelectorFactory. + # The Class of the FQCN must have a public constructor with a + # (com.typesafe.config.Config) parameter. + # A QueueSelectorFactory create instances of org.apache.pekko.dispatch.affinity.QueueSelector, + # that is responsible for determining which task queue a Runnable should be enqueued in. 
+ queue-selector = "org.apache.pekko.dispatch.affinity.FairDistributionHashCache" + + # When using the "org.apache.pekko.dispatch.affinity.FairDistributionHashCache" queue selector + # internally the AffinityPool uses two methods to determine which task + # queue to allocate a Runnable to: + # - map based - maintains a round robin counter and a map of Runnable + # hashcodes to queues that they have been associated with. This ensures + # maximum fairness in terms of work distribution, meaning that each worker + # will get approximately equal amount of mailboxes to execute. This is suitable + # in cases where we have a small number of actors that will be scheduled on + # the pool and we want to ensure the maximum possible utilization of the + # available threads. + # - hash based - the task - queue in which the runnable should go is determined + # by using an uniformly distributed int to int hash function which uses the + # hash code of the Runnable as an input. This is preferred in situations where we + # have enough number of distinct actors to ensure statistically uniform + # distribution of work across threads or we are ready to sacrifice the + # former for the added benefit of avoiding map look-ups. + fair-work-distribution { + # The value serves as a threshold which determines the point at which the + # pool switches from the first to the second work distribution schemes. + # For example, if the value is set to 128, the pool can observe up to + # 128 unique actors and schedule their mailboxes using the map based + # approach. Once this number is reached the pool switches to hash based + # task distribution mode. If the value is set to 0, the map based + # work distribution approach is disabled and only the hash based is + # used irrespective of the number of unique actors. Valid range is + # 0 to 2048 (inclusive) + threshold = 128 + } + } + + # This will be used if you have set "executor = "fork-join-executor"" + # Underlying thread pool implementation is java.util.concurrent.ForkJoinPool + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # The parallelism factor is used to determine thread pool size using the + # following formula: ceil(available processors * factor). Resulting size + # is then bounded by the parallelism-min and parallelism-max values. + parallelism-factor = 1.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + + # Setting to "FIFO" to use queue like peeking mode which "poll" or "LIFO" to use stack + # like peeking mode which "pop". + task-peeking-mode = "FIFO" + + # This config is new in Pekko v1.1.0 and only has an effect if you are running with JDK 9 and above. + # Read the documentation on `java.util.concurrent.ForkJoinPool` to find out more. Default in hex is 0x7fff. + maximum-pool-size = 32767 + + # This config is new in Pekko v1.2.0 and only has an effect if you are running with JDK 21 and above, + # When set to `on` but underlying runtime does not support virtual threads, an Exception will throw. + # Virtualize this dispatcher as a virtual-thread-executor + # Valid values are: `on`, `off` + # + # Requirements: + # 1. JDK 21+ + # 2. 
add options to the JVM: + # --add-opens=java.base/jdk.internal.misc=ALL-UNNAMED + # --add-opens=java.base/java.lang=ALL-UNNAMED + virtualize = off + } + + # This will be used if you have set "executor = "thread-pool-executor"" + # Underlying thread pool implementation is java.util.concurrent.ThreadPoolExecutor + thread-pool-executor { + # Keep alive time for threads + keep-alive-time = 60s + + # Define a fixed thread pool size with this property. The corePoolSize + # and the maximumPoolSize of the ThreadPoolExecutor will be set to this + # value, if it is defined. Then the other pool-size properties will not + # be used. + # + # Valid values are: `off` or a positive integer. + fixed-pool-size = off + + # Min number of threads to cap factor-based corePoolSize number to + core-pool-size-min = 8 + + # The core-pool-size-factor is used to determine corePoolSize of the + # ThreadPoolExecutor using the following formula: + # ceil(available processors * factor). + # Resulting size is then bounded by the core-pool-size-min and + # core-pool-size-max values. + core-pool-size-factor = 3.0 + + # Max number of threads to cap factor-based corePoolSize number to + core-pool-size-max = 64 + + # Minimum number of threads to cap factor-based maximumPoolSize number to + max-pool-size-min = 8 + + # The max-pool-size-factor is used to determine maximumPoolSize of the + # ThreadPoolExecutor using the following formula: + # ceil(available processors * factor) + # The maximumPoolSize will not be less than corePoolSize. + # It is only used if using a bounded task queue. + max-pool-size-factor = 3.0 + + # Max number of threads to cap factor-based maximumPoolSize number to + max-pool-size-max = 64 + + # Specifies the bounded capacity of the task queue (< 1 == unbounded) + task-queue-size = -1 + + # Specifies which type of task queue will be used, can be "array" or + # "linked" (default) + task-queue-type = "linked" + + # Allow core threads to time out + allow-core-timeout = on + } + + # This will be used if you have set "executor = "virtual-thread-executor" + # This executor will execute the every task on a new virtual thread. + # Underlying thread pool implementation is java.util.concurrent.ForkJoinPool for JDK <= 22 + # If the current runtime does not support virtual thread, + # then the executor configured in "fallback" will be used. + virtual-thread-executor { + #Please set the the underlying pool with system properties below: + #jdk.virtualThreadScheduler.parallelism + #jdk.virtualThreadScheduler.maxPoolSize + #jdk.virtualThreadScheduler.minRunnable + #jdk.unparker.maxPoolSize + fallback = "fork-join-executor" + } + # How long time the dispatcher will wait for new actors until it shuts down + shutdown-timeout = 1s + + # Throughput defines the number of messages that are processed in a batch + # before the thread is returned to the pool. Set to 1 for as fair as possible. + throughput = 5 + + # Throughput deadline for Dispatcher, set to 0 or negative for no deadline + throughput-deadline-time = 0ms + + # For BalancingDispatcher: If the balancing dispatcher should attempt to + # schedule idle actors using the same dispatcher when a message comes in, + # and the dispatchers ExecutorService is not fully busy already. + attempt-teamwork = on + + # If this dispatcher requires a specific type of mailbox, specify the + # fully-qualified class name here; the actually created mailbox will + # be a subtype of this type. The empty string signifies no requirement. 
+ mailbox-requirement = "" + } + + # Default separate internal dispatcher to run Pekko internal tasks and actors on + # protecting them against starvation because of accidental blocking in user actors (which run on the + # default dispatcher) + internal-dispatcher { + type = "Dispatcher" + executor = "fork-join-executor" + throughput = 5 + fork-join-executor { + parallelism-min = 4 + parallelism-factor = 1.0 + parallelism-max = 64 + } + } + + default-blocking-io-dispatcher { + type = "Dispatcher" + executor = "thread-pool-executor" + throughput = 1 + + thread-pool-executor { + fixed-pool-size = 16 + } + } + + default-mailbox { + # FQCN of the MailboxType. The Class of the FQCN must have a public + # constructor with + # (org.apache.pekko.actor.ActorSystem.Settings, com.typesafe.config.Config) parameters. + mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox" + + # If the mailbox is bounded then it uses this setting to determine its + # capacity. The provided value must be positive. + # NOTICE: + # Up to version 2.1 the mailbox type was determined based on this setting; + # this is no longer the case, the type must explicitly be a bounded mailbox. + mailbox-capacity = 1000 + + # If the mailbox is bounded then this is the timeout for enqueueing + # in case the mailbox is full. Negative values signify infinite + # timeout, which should be avoided as it bears the risk of dead-lock. + mailbox-push-timeout-time = 10s + + # For Actor with Stash: The default capacity of the stash. + # If negative (or zero) then an unbounded stash is used (default) + # If positive then a bounded stash is used and the capacity is set using + # the property + stash-capacity = -1 + } + + mailbox { + # Mapping between message queue semantics and mailbox configurations. + # Used by org.apache.pekko.dispatch.RequiresMessageQueue[T] to enforce different + # mailbox types on actors. + # If your Actor implements RequiresMessageQueue[T], then when you create + # an instance of that actor its mailbox type will be decided by looking + # up a mailbox configuration via T in this mapping + requirements { + "org.apache.pekko.dispatch.UnboundedMessageQueueSemantics" = + pekko.actor.mailbox.unbounded-queue-based + "org.apache.pekko.dispatch.BoundedMessageQueueSemantics" = + pekko.actor.mailbox.bounded-queue-based + "org.apache.pekko.dispatch.DequeBasedMessageQueueSemantics" = + pekko.actor.mailbox.unbounded-deque-based + "org.apache.pekko.dispatch.UnboundedDequeBasedMessageQueueSemantics" = + pekko.actor.mailbox.unbounded-deque-based + "org.apache.pekko.dispatch.BoundedDequeBasedMessageQueueSemantics" = + pekko.actor.mailbox.bounded-deque-based + "org.apache.pekko.dispatch.MultipleConsumerSemantics" = + pekko.actor.mailbox.unbounded-queue-based + "org.apache.pekko.dispatch.ControlAwareMessageQueueSemantics" = + pekko.actor.mailbox.unbounded-control-aware-queue-based + "org.apache.pekko.dispatch.UnboundedControlAwareMessageQueueSemantics" = + pekko.actor.mailbox.unbounded-control-aware-queue-based + "org.apache.pekko.dispatch.BoundedControlAwareMessageQueueSemantics" = + pekko.actor.mailbox.bounded-control-aware-queue-based + "org.apache.pekko.event.LoggerMessageQueueSemantics" = + pekko.actor.mailbox.logger-queue + } + + unbounded-queue-based { + # FQCN of the MailboxType, The Class of the FQCN must have a public + # constructor with (org.apache.pekko.actor.ActorSystem.Settings, + # com.typesafe.config.Config) parameters. 
+ mailbox-type = "org.apache.pekko.dispatch.UnboundedMailbox" + } + + bounded-queue-based { + # FQCN of the MailboxType, The Class of the FQCN must have a public + # constructor with (org.apache.pekko.actor.ActorSystem.Settings, + # com.typesafe.config.Config) parameters. + mailbox-type = "org.apache.pekko.dispatch.BoundedMailbox" + } + + unbounded-deque-based { + # FQCN of the MailboxType, The Class of the FQCN must have a public + # constructor with (org.apache.pekko.actor.ActorSystem.Settings, + # com.typesafe.config.Config) parameters. + mailbox-type = "org.apache.pekko.dispatch.UnboundedDequeBasedMailbox" + } + + bounded-deque-based { + # FQCN of the MailboxType, The Class of the FQCN must have a public + # constructor with (org.apache.pekko.actor.ActorSystem.Settings, + # com.typesafe.config.Config) parameters. + mailbox-type = "org.apache.pekko.dispatch.BoundedDequeBasedMailbox" + } + + unbounded-control-aware-queue-based { + # FQCN of the MailboxType, The Class of the FQCN must have a public + # constructor with (org.apache.pekko.actor.ActorSystem.Settings, + # com.typesafe.config.Config) parameters. + mailbox-type = "org.apache.pekko.dispatch.UnboundedControlAwareMailbox" + } + + bounded-control-aware-queue-based { + # FQCN of the MailboxType, The Class of the FQCN must have a public + # constructor with (org.apache.pekko.actor.ActorSystem.Settings, + # com.typesafe.config.Config) parameters. + mailbox-type = "org.apache.pekko.dispatch.BoundedControlAwareMailbox" + } + + # The LoggerMailbox will drain all messages in the mailbox + # when the system is shutdown and deliver them to the StandardOutLogger. + # Do not change this unless you know what you are doing. + logger-queue { + mailbox-type = "org.apache.pekko.event.LoggerMailboxType" + } + } + + debug { + # enable function of Actor.loggable(), which is to log any received message + # at DEBUG level, see the “Testing Actor Systems” section of the Pekko + # Documentation at https://pekko.apache.org/docs/pekko/current/ + receive = off + + # enable DEBUG logging of all AutoReceiveMessages (Kill, PoisonPill etc.) + autoreceive = off + + # enable DEBUG logging of actor lifecycle changes + lifecycle = off + + # enable DEBUG logging of all LoggingFSMs for events, transitions and timers + fsm = off + + # enable DEBUG logging of subscription changes on the eventStream + event-stream = off + + # enable DEBUG logging of unhandled messages + unhandled = off + + # enable WARN logging of misconfigured routers + router-misconfiguration = off + } + + # SECURITY BEST-PRACTICE is to disable java serialization for its multiple + # known attack surfaces. + # + # This setting is a short-cut to + # - using DisabledJavaSerializer instead of JavaSerializer + # + # Completely disable the use of `org.apache.pekko.serialization.JavaSerialization` by the + # Pekko Serialization extension, instead DisabledJavaSerializer will + # be inserted which will fail explicitly if attempts to use java serialization are made. + # + # The log messages emitted by such serializer SHOULD be treated as potential + # attacks which the serializer prevented, as they MAY indicate an external operator + # attempting to send malicious messages intending to use java serialization as attack vector. + # The attempts are logged with the SECURITY marker. + # + # Please note that this option does not stop you from manually invoking java serialization + # + allow-java-serialization = off + + # Log warnings when the Java serialization is used to serialize messages. 
+ # Java serialization is not very performant and should not be used in production + # environments unless you don't care about performance and security. In that case + # you can turn this off. + warn-about-java-serializer-usage = on + + # To be used with the above warn-about-java-serializer-usage + # When warn-about-java-serializer-usage = on, and this warn-on-no-serialization-verification = off, + # warnings are suppressed for classes extending NoSerializationVerificationNeeded + # to reduce noise. + warn-on-no-serialization-verification = on + + # Entries for pluggable serializers and their bindings. + serializers { + java = "org.apache.pekko.serialization.JavaSerializer" + bytes = "org.apache.pekko.serialization.ByteArraySerializer" + primitive-long = "org.apache.pekko.serialization.LongSerializer" + primitive-int = "org.apache.pekko.serialization.IntSerializer" + primitive-string = "org.apache.pekko.serialization.StringSerializer" + primitive-bytestring = "org.apache.pekko.serialization.ByteStringSerializer" + primitive-boolean = "org.apache.pekko.serialization.BooleanSerializer" + } + + # Class to Serializer binding. You only need to specify the name of an + # interface or abstract base class of the messages. In case of ambiguity it + # is using the most specific configured class, or giving a warning and + # choosing the “first” one. + # + # To disable one of the default serializers, assign its class to "none", like + # "java.io.Serializable" = none + serialization-bindings { + "[B" = bytes + "java.io.Serializable" = java + + "java.lang.String" = primitive-string + "org.apache.pekko.util.ByteString$ByteString1C" = primitive-bytestring + "org.apache.pekko.util.ByteString$ByteString1" = primitive-bytestring + "org.apache.pekko.util.ByteString$ByteStrings" = primitive-bytestring + "java.lang.Long" = primitive-long + "scala.Long" = primitive-long + "java.lang.Integer" = primitive-int + "scala.Int" = primitive-int + "java.lang.Boolean" = primitive-boolean + "scala.Boolean" = primitive-boolean + } + + # Configuration namespace of serialization identifiers. + # Each serializer implementation must have an entry in the following format: + # `org.apache.pekko.actor.serialization-identifiers."FQCN" = ID` + # where `FQCN` is fully qualified class name of the serializer implementation + # and `ID` is globally unique serializer identifier number. + # Identifier values from 0 to 40 are reserved for Pekko internal usage. + serialization-identifiers { + "org.apache.pekko.serialization.JavaSerializer" = 1 + "org.apache.pekko.serialization.ByteArraySerializer" = 4 + + primitive-long = 18 + primitive-int = 19 + primitive-string = 20 + primitive-bytestring = 21 + primitive-boolean = 35 + } + + } + + serialization.protobuf { + # deprecated, use `allowed-classes` instead + whitelist-class = [ + "com.google.protobuf.GeneratedMessage", + "com.google.protobuf.GeneratedMessageV3", + "scalapb.GeneratedMessageCompanion", + "org.apache.pekko.protobufv3.internal.GeneratedMessage" + ] + + # Additional classes that are allowed even if they are not defined in `serialization-bindings`. + # It can be exact class name or name of super class or interfaces (one level). + # This is useful when a class is not used for serialization any more and therefore removed + # from `serialization-bindings`, but should still be possible to deserialize. + allowed-classes = ${pekko.serialization.protobuf.whitelist-class} + + } + + # Used to set the behavior of the scheduler. 
+ # Changing the default values may change the system behavior drastically so make + # sure you know what you're doing! See the Scheduler section of the Pekko + # Documentation for more details. + scheduler { + # The LightArrayRevolverScheduler is used as the default scheduler in the + # system. It does not execute the scheduled tasks on exact time, but on every + # tick, it will run everything that is (over)due. You can increase or decrease + # the accuracy of the execution timing by specifying smaller or larger tick + # duration. If you are scheduling a lot of tasks you should consider increasing + # the ticks per wheel. + # Note that it might take up to 1 tick to stop the Timer, so setting the + # tick-duration to a high value will make shutting down the actor system + # take longer. + # + # Requirements: + # 1. The minimum supported tick-duration on Windows is 10ms, + # 2. The minimum supported tick-duration is 1ms + tick-duration = 10ms + + # An error will be throw when the tick-duration does not meet the requirements. + # When this is set to off, the tick-duration will be adjusted to meet the requirements + # and a warning will be logged. + error-on-tick-duration-verification-failed = on + + # The timer uses a circular wheel of buckets to store the timer tasks. + # This should be set such that the majority of scheduled timeouts (for high + # scheduling frequency) will be shorter than one rotation of the wheel + # (ticks-per-wheel * ticks-duration) + # THIS MUST BE A POWER OF TWO! + ticks-per-wheel = 512 + + # This setting selects the timer implementation which shall be loaded at + # system start-up. + # The class given here must implement the org.apache.pekko.actor.Scheduler interface + # and offer a public constructor which takes three arguments: + # 1) com.typesafe.config.Config + # 2) org.apache.pekko.event.LoggingAdapter + # 3) java.util.concurrent.ThreadFactory + implementation = org.apache.pekko.actor.LightArrayRevolverScheduler + + # When shutting down the scheduler, there will typically be a thread which + # needs to be stopped, and this timeout determines how long to wait for + # that to happen. In case of timeout the shutdown of the actor system will + # proceed without running possibly still enqueued tasks. + shutdown-timeout = 5s + } + + io { + + # By default the select loops run on dedicated threads, hence using a + # PinnedDispatcher + pinned-dispatcher { + type = "PinnedDispatcher" + executor = "thread-pool-executor" + thread-pool-executor.allow-core-timeout = off + } + + tcp { + + # The number of selectors to stripe the served channels over; each of + # these will use one select loop on the selector-dispatcher. + nr-of-selectors = 1 + + # Maximum number of open channels supported by this TCP module; there is + # no intrinsic general limit, this setting is meant to enable DoS + # protection by limiting the number of concurrently connected clients. + # Also note that this is a "soft" limit; in certain cases the implementation + # will accept a few connections more or a few less than the number configured + # here. Must be an integer > 0 or "unlimited". 
+ max-channels = 256000 + + # When trying to assign a new connection to a selector and the chosen + # selector is at full capacity, retry selector choosing and assignment + # this many times before giving up + selector-association-retries = 10 + + # The maximum number of connection that are accepted in one go, + # higher numbers decrease latency, lower numbers increase fairness on + # the worker-dispatcher + batch-accept-limit = 10 + + # The number of bytes per direct buffer in the pool used to read or write + # network data from the kernel. + direct-buffer-size = 128 KiB + + # The maximal number of direct buffers kept in the direct buffer pool for + # reuse. + direct-buffer-pool-limit = 1000 + + # The duration a connection actor waits for a `Register` message from + # its commander before aborting the connection. + register-timeout = 5s + + # The maximum number of bytes delivered by a `Received` message. Before + # more data is read from the network the connection actor will try to + # do other work. + # The purpose of this setting is to impose a smaller limit than the + # configured receive buffer size. When using value 'unlimited' it will + # try to read all from the receive buffer. + max-received-message-size = unlimited + + # Enable fine grained logging of what goes on inside the implementation. + # Be aware that this may log more than once per message sent to the actors + # of the tcp implementation. + trace-logging = off + + # Fully qualified config path which holds the dispatcher configuration + # to be used for running the select() calls in the selectors + selector-dispatcher = "pekko.io.pinned-dispatcher" + + # Fully qualified config path which holds the dispatcher configuration + # for the read/write worker actors + worker-dispatcher = "pekko.actor.internal-dispatcher" + + # Fully qualified config path which holds the dispatcher configuration + # for the selector management actors + management-dispatcher = "pekko.actor.internal-dispatcher" + + # Fully qualified config path which holds the dispatcher configuration + # on which file IO tasks are scheduled + file-io-dispatcher = "pekko.actor.default-blocking-io-dispatcher" + + # The maximum number of bytes (or "unlimited") to transfer in one batch + # when using `WriteFile` command which uses `FileChannel.transferTo` to + # pipe files to a TCP socket. On some OS like Linux `FileChannel.transferTo` + # may block for a long time when network IO is faster than file IO. + # Decreasing the value may improve fairness while increasing may improve + # throughput. + file-io-transferTo-limit = 512 KiB + + # The number of times to retry the `finishConnect` call after being notified about + # OP_CONNECT. Retries are needed if the OP_CONNECT notification doesn't imply that + # `finishConnect` will succeed, which is the case on Android. + finish-connect-retries = 5 + + # On Windows connection aborts are not reliably detected unless an OP_READ is + # registered on the selector _after_ the connection has been reset. This + # workaround enables an OP_CONNECT which forces the abort to be visible on Windows. + # Enabling this setting on other platforms than Windows will cause various failures + # and undefined behavior. + # Possible values of this key are on, off and auto where auto will enable the + # workaround if Windows is detected automatically. + windows-connection-abort-workaround-enabled = off + } + + udp { + + # The number of selectors to stripe the served channels over; each of + # these will use one select loop on the selector-dispatcher. 
+ nr-of-selectors = 1 + + # Maximum number of open channels supported by this UDP module Generally + # UDP does not require a large number of channels, therefore it is + # recommended to keep this setting low. + max-channels = 4096 + + # The select loop can be used in two modes: + # - setting "infinite" will select without a timeout, hogging a thread + # - setting a positive timeout will do a bounded select call, + # enabling sharing of a single thread between multiple selectors + # (in this case you will have to use a different configuration for the + # selector-dispatcher, e.g. using "type=Dispatcher" with size 1) + # - setting it to zero means polling, i.e. calling selectNow() + select-timeout = infinite + + # When trying to assign a new connection to a selector and the chosen + # selector is at full capacity, retry selector choosing and assignment + # this many times before giving up + selector-association-retries = 10 + + # The maximum number of datagrams that are read in one go, + # higher numbers decrease latency, lower numbers increase fairness on + # the worker-dispatcher + receive-throughput = 3 + + # The number of bytes per direct buffer in the pool used to read or write + # network data from the kernel. + direct-buffer-size = 128 KiB + + # The maximal number of direct buffers kept in the direct buffer pool for + # reuse. + direct-buffer-pool-limit = 1000 + + # Enable fine grained logging of what goes on inside the implementation. + # Be aware that this may log more than once per message sent to the actors + # of the tcp implementation. + trace-logging = off + + # Fully qualified config path which holds the dispatcher configuration + # to be used for running the select() calls in the selectors + selector-dispatcher = "pekko.io.pinned-dispatcher" + + # Fully qualified config path which holds the dispatcher configuration + # for the read/write worker actors + worker-dispatcher = "pekko.actor.internal-dispatcher" + + # Fully qualified config path which holds the dispatcher configuration + # for the selector management actors + management-dispatcher = "pekko.actor.internal-dispatcher" + } + + udp-connected { + + # The number of selectors to stripe the served channels over; each of + # these will use one select loop on the selector-dispatcher. + nr-of-selectors = 1 + + # Maximum number of open channels supported by this UDP module Generally + # UDP does not require a large number of channels, therefore it is + # recommended to keep this setting low. + max-channels = 4096 + + # The select loop can be used in two modes: + # - setting "infinite" will select without a timeout, hogging a thread + # - setting a positive timeout will do a bounded select call, + # enabling sharing of a single thread between multiple selectors + # (in this case you will have to use a different configuration for the + # selector-dispatcher, e.g. using "type=Dispatcher" with size 1) + # - setting it to zero means polling, i.e. calling selectNow() + select-timeout = infinite + + # When trying to assign a new connection to a selector and the chosen + # selector is at full capacity, retry selector choosing and assignment + # this many times before giving up + selector-association-retries = 10 + + # The maximum number of datagrams that are read in one go, + # higher numbers decrease latency, lower numbers increase fairness on + # the worker-dispatcher + receive-throughput = 3 + + # The number of bytes per direct buffer in the pool used to read or write + # network data from the kernel. 
+ direct-buffer-size = 128 KiB + + # The maximal number of direct buffers kept in the direct buffer pool for + # reuse. + direct-buffer-pool-limit = 1000 + + # Enable fine grained logging of what goes on inside the implementation. + # Be aware that this may log more than once per message sent to the actors + # of the tcp implementation. + trace-logging = off + + # Fully qualified config path which holds the dispatcher configuration + # to be used for running the select() calls in the selectors + selector-dispatcher = "pekko.io.pinned-dispatcher" + + # Fully qualified config path which holds the dispatcher configuration + # for the read/write worker actors + worker-dispatcher = "pekko.actor.internal-dispatcher" + + # Fully qualified config path which holds the dispatcher configuration + # for the selector management actors + management-dispatcher = "pekko.actor.internal-dispatcher" + } + + dns { + # Fully qualified config path which holds the dispatcher configuration + # for the manager and resolver router actors. + # For actual router configuration see pekko.actor.deployment./IO-DNS/* + dispatcher = "pekko.actor.internal-dispatcher" + + # Name of the subconfig at path pekko.io.dns, see inet-address below + # + # Change to `async-dns` to use the new "native" DNS resolver, + # which is also capable of resolving SRV records. + resolver = "inet-address" + + # To-be-deprecated DNS resolver implementation which uses the Java InetAddress to resolve DNS records. + # To be replaced by `pekko.io.dns.async` which implements the DNS protocol natively and without blocking (which InetAddress does) + inet-address { + # Must implement org.apache.pekko.io.DnsProvider + provider-object = "org.apache.pekko.io.InetAddressDnsProvider" + + # To set the time to cache name resolutions + # Possible values: + # default: sun.net.InetAddressCachePolicy.get() and getNegative() + # forever: cache forever + # never: no caching + # n [time unit]: positive timeout with unit, for example 30s + positive-ttl = default + negative-ttl = default + + # How often to sweep out expired cache entries. + # Note that this interval has nothing to do with TTLs + cache-cleanup-interval = 120s + } + + async-dns { + provider-object = "org.apache.pekko.io.dns.internal.AsyncDnsProvider" + + # Set upper bound for caching successfully resolved dns entries + # if the DNS record has a smaller TTL value than the setting that + # will be used. Default is to use the record TTL with no cap. + # Possible values: + # forever: always use the minimum TTL from the found records + # never: never cache + # n [time unit] = cap the caching to this value + positive-ttl = forever + + # Set how long the fact that a DNS record could not be found is + # cached. If a new resolution is done while the fact is cached it will + # be failed and not result in an actual DNS resolution. Default is + # to never cache. + # Possible values: + # never: never cache + # forever: cache a missing DNS record forever (you probably will not want to do this) + # n [time unit] = cache for this long + negative-ttl = never + + # Configures nameservers to query during DNS resolution. + # Defaults to the nameservers that would be used by the JVM by default. + # Set to a list of IPs to override the servers, e.g. [ "8.8.8.8", "8.8.4.4" ] for Google's servers + # If multiple are defined then they are tried in order until one responds + nameservers = default + + # The time that a request is allowed to live before being discarded + # given no reply. 
The lower bound of this should always be the amount + # of time to reasonably expect a DNS server to reply within. + # If multiple name servers are provided then each gets this long to response before trying + # the next one + resolve-timeout = 5s + + # How often to sweep out expired cache entries. + # Note that this interval has nothing to do with TTLs + cache-cleanup-interval = 120s + + # Configures the list of search domains. + # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on + # other platforms, will not make any attempt to lookup the search domains). Set to a single domain, or + # a list of domains, eg, [ "example.com", "example.net" ]. + search-domains = default + + # Any hosts that have a number of dots less than this will not be looked up directly, instead, a search on + # the search domains will be tried first. This corresponds to the ndots option in /etc/resolv.conf, see + # https://linux.die.net/man/5/resolver for more info. + # Defaults to a system dependent lookup (on Unix like OSes, will attempt to parse /etc/resolv.conf, on + # other platforms, will default to 1). + ndots = default + + # The policy used to generate dns transaction ids. Options are `thread-local-random`, + # `enhanced-double-hash-random` or `secure-random`. Defaults to `enhanced-double-hash-random` which uses an + # enhanced double hashing algorithm optimized for minimizing collisions with a FIPS compliant initial seed. + # `thread-local-random` is similar to Netty and `secure-random` produces FIPS compliant random numbers every + # time but could block looking for entropy (these are short integers so are easy to brute-force, use + # `enhanced-double-hash-random` unless you really require FIPS compliant random numbers). + id-generator-policy = enhanced-double-hash-random + } + } + } + + + # CoordinatedShutdown is an extension that will perform registered + # tasks in the order that is defined by the phases. It is started + # by calling CoordinatedShutdown(system).run(). This can be triggered + # by different things, for example: + # - JVM shutdown hook will by default run CoordinatedShutdown + # - Cluster node will automatically run CoordinatedShutdown when it + # sees itself as Exiting + # - A management console or other application specific command can + # run CoordinatedShutdown + coordinated-shutdown { + # The timeout that will be used for a phase if not specified with + # 'timeout' in the phase + default-phase-timeout = 5 s + + # Terminate the ActorSystem in the last phase actor-system-terminate. + terminate-actor-system = on + + # Exit the JVM (System.exit(0)) in the last phase actor-system-terminate + # if this is set to 'on'. It is done after termination of the + # ActorSystem if terminate-actor-system=on, otherwise it is done + # immediately when the last phase is reached. + exit-jvm = off + + # Exit status to use on System.exit(int) when 'exit-jvm' is 'on'. + exit-code = 0 + + # Run the coordinated shutdown when the JVM process exits, e.g. + # via kill SIGTERM signal (SIGINT ctrl-c doesn't work). + # This property is related to `pekko.jvm-shutdown-hooks` above. + run-by-jvm-shutdown-hook = on + + # Run the coordinated shutdown when ActorSystem.terminate is called. + # Enabling this and disabling terminate-actor-system is not a supported + # combination (will throw ConfigurationException at startup). + run-by-actor-system-terminate = on + + # When Coordinated Shutdown is triggered an instance of `Reason` is + # required. 
That value can be used to override the default settings. + # Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be + # overridden depending on the reason. + reason-overrides { + # Overrides are applied using the `reason.getClass.getName`. + # Overrides the `exit-code` when the `Reason` is a cluster + # Downing or a Cluster Join Unsuccessful event + "org.apache.pekko.actor.CoordinatedShutdown$ClusterDowningReason$" { + exit-code = -1 + } + "org.apache.pekko.actor.CoordinatedShutdown$ClusterJoinUnsuccessfulReason$" { + exit-code = -1 + } + } + + #//#coordinated-shutdown-phases + # CoordinatedShutdown is enabled by default and will run the tasks that + # are added to these phases by individual Pekko modules and user logic. + # + # The phases are ordered as a DAG by defining the dependencies between the phases + # to make sure shutdown tasks are run in the right order. + # + # In general user tasks belong in the first few phases, but there may be use + # cases where you would want to hook in new phases or register tasks later in + # the DAG. + # + # Each phase is defined as a named config section with the + # following optional properties: + # - timeout=15s: Override the default-phase-timeout for this phase. + # - recover=off: If the phase fails the shutdown is aborted + # and depending phases will not be executed. + # - enabled=off: Skip all tasks registered in this phase. DO NOT use + # this to disable phases unless you are absolutely sure what the + # consequences are. Many of the built in tasks depend on other tasks + # having been executed in earlier phases and may break if those are disabled. + # depends-on=[]: Run the phase after the given phases + phases { + + # The first pre-defined phase that applications can add tasks to. + # Note that more phases can be added in the application's + # configuration by overriding this phase with an additional + # depends-on. + before-service-unbind { + } + + # Stop accepting new incoming connections. + # This is where you can register tasks that makes a server stop accepting new connections. Already + # established connections should be allowed to continue and complete if possible. + service-unbind { + depends-on = [before-service-unbind] + } + + # Wait for requests that are in progress to be completed. + # This is where you register tasks that will wait for already established connections to complete, potentially + # also first telling them that it is time to close down. + service-requests-done { + depends-on = [service-unbind] + } + + # Final shutdown of service endpoints. + # This is where you would add tasks that forcefully kill connections that are still around. + service-stop { + depends-on = [service-requests-done] + } + + # Phase for custom application tasks that are to be run + # after service shutdown and before cluster shutdown. + before-cluster-shutdown { + depends-on = [service-stop] + } + + # Graceful shutdown of the Cluster Sharding regions. + # This phase is not meant for users to add tasks to. + cluster-sharding-shutdown-region { + timeout = 10 s + depends-on = [before-cluster-shutdown] + } + + # Emit the leave command for the node that is shutting down. + # This phase is not meant for users to add tasks to. + cluster-leave { + depends-on = [cluster-sharding-shutdown-region] + } + + # Shutdown cluster singletons + # This is done as late as possible to allow the shard region shutdown triggered in + # the "cluster-sharding-shutdown-region" phase to complete before the shard coordinator is shut down. 
+ # This phase is not meant for users to add tasks to. + cluster-exiting { + timeout = 10 s + depends-on = [cluster-leave] + } + + # Wait until exiting has been completed + # This phase is not meant for users to add tasks to. + cluster-exiting-done { + depends-on = [cluster-exiting] + } + + # Shutdown the cluster extension + # This phase is not meant for users to add tasks to. + cluster-shutdown { + depends-on = [cluster-exiting-done] + } + + # Phase for custom application tasks that are to be run + # after cluster shutdown and before ActorSystem termination. + before-actor-system-terminate { + depends-on = [cluster-shutdown] + } + + # Last phase. See terminate-actor-system and exit-jvm above. + # Don't add phases that depends on this phase because the + # dispatcher and scheduler of the ActorSystem have been shutdown. + # This phase is not meant for users to add tasks to. + actor-system-terminate { + timeout = 10 s + depends-on = [before-actor-system-terminate] + } + } + #//#coordinated-shutdown-phases + } + + #//#circuit-breaker-default + # Configuration for circuit breakers created with the APIs accepting an id to + # identify or look up the circuit breaker. + # Note: Circuit breakers created without ids are not affected by this configuration. + # A child configuration section with the same name as the circuit breaker identifier + # will be used, with fallback to the `pekko.circuit-breaker.default` section. + circuit-breaker { + + # Default configuration that is used if a configuration section + # with the circuit breaker identifier is not defined. + default { + # Number of failures before opening the circuit. + max-failures = 10 + + # Duration of time after which to consider a call a failure. + call-timeout = 10s + + # Duration of time in open state after which to attempt to close + # the circuit, by first entering the half-open state. + reset-timeout = 15s + + # The upper bound of reset-timeout + max-reset-timeout = 36500d + + # Exponential backoff + # For details see https://en.wikipedia.org/wiki/Exponential_backoff + exponential-backoff = 1.0 + + # Additional random delay based on this factor is added to backoff + # For example 0.2 adds up to 20% delay + # In order to skip this additional delay set as 0 + random-factor = 0.0 + + # A allow-list of fqcn of Exceptions that the CircuitBreaker + # should not consider failures. By default all exceptions are + # considered failures. + exception-allowlist = [] + } + } + #//#circuit-breaker-default + +} \ No newline at end of file diff --git a/src/main/scala/edu/ie3/simona/agent/EnvironmentRefs.scala b/src/main/scala/edu/ie3/simona/agent/EnvironmentRefs.scala index 340c4d4502..158b33b4e9 100644 --- a/src/main/scala/edu/ie3/simona/agent/EnvironmentRefs.scala +++ b/src/main/scala/edu/ie3/simona/agent/EnvironmentRefs.scala @@ -10,6 +10,7 @@ import edu.ie3.simona.event.RuntimeEvent import edu.ie3.simona.ontology.messages.{SchedulerMessage, ServiceMessage} import edu.ie3.simona.service.em.ExtEmDataService import edu.ie3.simona.service.ev.ExtEvDataService +import edu.ie3.simona.service.results.ResultServiceProxy import org.apache.pekko.actor.typed.ActorRef /** Container class, that gather together reference to relevant entities, that @@ -21,6 +22,8 @@ import org.apache.pekko.actor.typed.ActorRef * Reference to the runtime event listener. * @param primaryServiceProxy * Reference to the primary service proxy. + * @param resultProxy + * Reference to the result service proxy. 
* @param weather * Reference to the service, that provides weather information. * @param loadProfiles @@ -34,6 +37,7 @@ final case class EnvironmentRefs( scheduler: ActorRef[SchedulerMessage], runtimeEventListener: ActorRef[RuntimeEvent], primaryServiceProxy: ActorRef[ServiceMessage], + resultProxy: ActorRef[ResultServiceProxy.Message], weather: ActorRef[ServiceMessage], loadProfiles: ActorRef[ServiceMessage], emDataService: Option[ActorRef[ExtEmDataService.Message]], diff --git a/src/main/scala/edu/ie3/simona/agent/em/EmAgent.scala b/src/main/scala/edu/ie3/simona/agent/em/EmAgent.scala index d277c0c2a5..2893f55414 100644 --- a/src/main/scala/edu/ie3/simona/agent/em/EmAgent.scala +++ b/src/main/scala/edu/ie3/simona/agent/em/EmAgent.scala @@ -63,7 +63,7 @@ object EmAgent { * agent is em-controlled, or a [[Left]] with a reference to the scheduler * that is activating this agent. * @param listener - * A collection of result event listeners. + * A listener for result events. * @param emDataService * An energy management service. */ @@ -74,8 +74,8 @@ object EmAgent { modelStrategy: String, simulationStartDate: ZonedDateTime, parent: Either[ActorRef[SchedulerMessage], ActorRef[FlexResponse]], - listener: Iterable[ActorRef[ResultEvent]], - emDataService: Option[ActorRef[ExtEmDataService.Message]] = None, + listener: ActorRef[ResultEvent], + emDataService: Option[ActorRef[ExtEmDataService.Message]], ): Behavior[Message] = Behaviors.setup[Message] { ctx => val parentData = emDataService match { @@ -200,8 +200,18 @@ object EmAgent { case (_, msg: Activation) => activate(emData, modelShell, core, msg.tick) - case (_, msg: FlexActivation) => - activate(emData, modelShell, core, msg.tick) + case (ctx, msg: FlexActivation) => + val tick = msg.tick + ctx.log.info(s"EmAgent (${modelShell.uuid}) activated for tick $tick") + + activate(emData, modelShell, core, tick) + + case (ctx, msg: FlexShiftActivation) => + val tick = msg.tick + ctx.log.info( + s"EmAgent (${modelShell.uuid}) activated by service for tick $tick" + ) + activate(emData, modelShell, core.gotoTick(tick), tick) case (ctx, msg: IssueFlexControl) => val flexOptionsCore = core.activate(msg.tick) @@ -265,9 +275,7 @@ object EmAgent { ) ) - emData.listener.foreach { - _ ! FlexOptionsResultEvent(flexResult) - } + emData.listener ! FlexOptionsResultEvent(flexResult) } emData.parent match { @@ -392,6 +400,11 @@ object EmAgent { inactiveCore, lastActiveTick = updatedCore.activeTick, )(using ctx.self) + + ctx.log.info( + s"${modelShell.uuid} -> inactive, next tick ${completion.requestAtTick}" + ) + inactive(emData, modelShell, inactiveCore) } .getOrElse { @@ -422,17 +435,15 @@ object EmAgent { } maybeResult.foreach { result => - emData.listener.foreach { - _ ! ParticipantResultEvent( - new EmResult( - lastActiveTick - .toDateTime(using emData.simulationStartDate), - modelShell.uuid, - result.p.toMegawatts.asMegaWatt, - result.q.toMegavars.asMegaVar, - ) + emData.listener ! ParticipantResultEvent( + new EmResult( + lastActiveTick + .toDateTime(using emData.simulationStartDate), + modelShell.uuid, + result.p.toMegawatts.asMegaWatt, + result.q.toMegavars.asMegaVar, ) - } + ) emData.parent.foreach { _ ! FlexResult(modelShell.uuid, result) @@ -463,13 +474,13 @@ object EmAgent { * agent is em-controlled, or a [[Left]] with a reference to the scheduler * that is activating this agent. * @param listener - * A collection of result event listeners. + * A listener for result events. 
*/ private final case class EmData( outputConfig: NotifierConfig, simulationStartDate: ZonedDateTime, parent: Either[ActorRef[SchedulerMessage], ActorRef[FlexResponse]], - listener: Iterable[ActorRef[ResultEvent]], + listener: ActorRef[ResultEvent], ) /** The existence of this data object indicates that the corresponding agent diff --git a/src/main/scala/edu/ie3/simona/agent/em/EmDataCore.scala b/src/main/scala/edu/ie3/simona/agent/em/EmDataCore.scala index d582c0a60b..1f05444ccb 100644 --- a/src/main/scala/edu/ie3/simona/agent/em/EmDataCore.scala +++ b/src/main/scala/edu/ie3/simona/agent/em/EmDataCore.scala @@ -81,6 +81,18 @@ object EmDataCore { modelToActor = modelToActor.updated(asset, actor) ) + def gotoTick(newTick: Long): Inactive = { + // remove the activations + activationQueue.headKeyOption.foreach { nextScheduledTick => + if newTick > nextScheduledTick then { + val toActivate = activationQueue.getAndRemoveSet(nextScheduledTick) + activationQueue.set(newTick, toActivate) + } + } + + this + } + /** Tries to handle an activation of the EmAgent for given tick. If the * activation for the tick is not valid, a [[CriticalFailureException]] is * thrown. If successful, an [[AwaitingFlexOptions]] data core is returned diff --git a/src/main/scala/edu/ie3/simona/agent/grid/GridAgent.scala b/src/main/scala/edu/ie3/simona/agent/grid/GridAgent.scala index bd6e9b462e..9a545ba95f 100644 --- a/src/main/scala/edu/ie3/simona/agent/grid/GridAgent.scala +++ b/src/main/scala/edu/ie3/simona/agent/grid/GridAgent.scala @@ -32,7 +32,10 @@ import edu.ie3.simona.ontology.messages.SchedulerMessage.{ Completion, ScheduleActivation, } +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.util.TickUtil.TickLong +import edu.ie3.util.TimeUtil +import edu.ie3.simona.util.TickUtil.{RichZonedDateTime, TickLong} import org.apache.pekko.actor.typed.scaladsl.AskPattern.Askable import org.apache.pekko.actor.typed.scaladsl.{ ActorContext, @@ -68,7 +71,6 @@ object GridAgent extends DBFSAlgorithm with DCMAlgorithm { def apply( environmentRefs: EnvironmentRefs, simonaConfig: SimonaConfig, - listener: Iterable[ActorRef[ResultEvent]], ): Behavior[Message] = Behaviors.withStash(100) { buffer => val cfg = simonaConfig.simona @@ -83,7 +85,6 @@ object GridAgent extends DBFSAlgorithm with DCMAlgorithm { val agentValues = GridAgentConstantData( environmentRefs, simonaConfig, - listener, resolution, simStartTime, simEndTime, @@ -229,6 +230,13 @@ object GridAgent extends DBFSAlgorithm with DCMAlgorithm { ctx.self, Some(activation.tick), ) + + // inform the result proxy that this grid agent will send new results + constantData.environmentRefs.resultProxy ! 
ExpectResult( + gridAgentBaseData.assets, + activation.tick, + ) + buffer.unstashAll(simulateGrid(gridAgentBaseData, activation.tick)) case (_, msg: Message) => @@ -279,7 +287,13 @@ object GridAgent extends DBFSAlgorithm with DCMAlgorithm { // check if congestion management is enabled if gridAgentBaseData.congestionManagementParams.detectionEnabled then { - startCongestionManagement(gridAgentBaseData, currentTick, results, ctx) + startCongestionManagement( + gridAgentBaseData, + currentTick, + nextTick, + results, + ctx, + ) } else { // clean up agent and go back to idle gotoIdle(gridAgentBaseData, nextTick, results, ctx) diff --git a/src/main/scala/edu/ie3/simona/agent/grid/GridAgentBuilder.scala b/src/main/scala/edu/ie3/simona/agent/grid/GridAgentBuilder.scala index 2bf33b60f8..b27504baee 100644 --- a/src/main/scala/edu/ie3/simona/agent/grid/GridAgentBuilder.scala +++ b/src/main/scala/edu/ie3/simona/agent/grid/GridAgentBuilder.scala @@ -308,9 +308,9 @@ object GridAgentBuilder { given ParticipantRefs = ParticipantRefs( gridAgentContext.self, - constantData.environmentRefs.primaryServiceProxy, + environmentRefs.primaryServiceProxy, + environmentRefs.resultProxy, serviceMap, - constantData.listener, ) given SimulationParameters = SimulationParameters( @@ -488,7 +488,7 @@ object GridAgentBuilder { maybeControllingEm.toRight( constantData.environmentRefs.scheduler ), - constantData.listener, + constantData.environmentRefs.resultProxy, emDataService, ), actorName(classOf[EmAgent.type], emInput.getId), diff --git a/src/main/scala/edu/ie3/simona/agent/grid/GridAgentData.scala b/src/main/scala/edu/ie3/simona/agent/grid/GridAgentData.scala index b8ed1645ee..abe97f2d5d 100644 --- a/src/main/scala/edu/ie3/simona/agent/grid/GridAgentData.scala +++ b/src/main/scala/edu/ie3/simona/agent/grid/GridAgentData.scala @@ -46,9 +46,6 @@ object GridAgentData { * the grid agent. * @param simonaConfig * Configuration of SIMONA, that is used for. - * @param listener - * A sequence of listeners, that will receive the results from the grid - * agent. * @param resolution * That is used for the power flow. If no power flow should be carried out, * this value is set to [[Long.MaxValue]]. @@ -60,14 +57,12 @@ object GridAgentData { final case class GridAgentConstantData( environmentRefs: EnvironmentRefs, simonaConfig: SimonaConfig, - listener: Iterable[ActorRef[ResultEvent]], resolution: Long, simStartTime: ZonedDateTime, simEndTime: ZonedDateTime, ) { - def notifyListeners(event: ResultEvent): Unit = { - listener.foreach(_ ! event) - } + def notifyListeners(event: ResultEvent): Unit = + environmentRefs.resultProxy ! 
event val participantConfigUtil: ParticipantConfigUtil = ConfigUtil.ParticipantConfigUtil(simonaConfig.simona.runtime.participant) @@ -323,6 +318,15 @@ object GridAgentData { ) extends GridAgentData with GridAgentDataHelper { + val assets: Seq[UUID] = { + val components = gridEnv.gridModel.gridComponents + components.nodes.map(_.uuid) ++ components.lines.map( + _.uuid + ) ++ components.switches.map(_.uuid) ++ components.transformers.map( + _.uuid + ) ++ components.transformers3w.map(_.uuid) + } + override protected val subgridGates: Vector[SubGridGate] = gridEnv.subgridGateToActorRef.keys.toVector override protected val subgridId: Int = gridEnv.gridModel.subnetNo diff --git a/src/main/scala/edu/ie3/simona/agent/grid/GridResultsSupport.scala b/src/main/scala/edu/ie3/simona/agent/grid/GridResultsSupport.scala index dba0628f6c..7e84fed7cb 100644 --- a/src/main/scala/edu/ie3/simona/agent/grid/GridResultsSupport.scala +++ b/src/main/scala/edu/ie3/simona/agent/grid/GridResultsSupport.scala @@ -58,17 +58,16 @@ private[grid] trait GridResultsSupport { def createResultModels( grid: GridModel, sweepValueStore: SweepValueStore, - )(implicit timestamp: ZonedDateTime, log: Logger): PowerFlowResultEvent = { + )(using timestamp: ZonedDateTime, log: Logger): PowerFlowResultEvent = { // no sanity check for duplicated uuid result data as we expect valid data at this point - implicit val sweepValueStoreData: Map[UUID, SweepValueStoreData] = + given sweepValueStoreData: Map[UUID, SweepValueStoreData] = sweepValueStore.sweepData .map(sweepValueStoreData => sweepValueStoreData.nodeUuid -> sweepValueStoreData ) .toMap - implicit val iNominal: ElectricCurrent = - grid.mainRefSystem.nominalCurrent + given ElectricCurrent = grid.mainRefSystem.nominalCurrent /* When creating node results, we have to consider two things: * 1) The result of a two winding transformer's hv node is calculated twice. If this grid contains the diff --git a/src/main/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithm.scala b/src/main/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithm.scala index f57ed196db..59af0398b3 100644 --- a/src/main/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithm.scala +++ b/src/main/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithm.scala @@ -48,6 +48,7 @@ trait DCMAlgorithm extends CongestionDetection { private[grid] def startCongestionManagement( gridAgentBaseData: GridAgentBaseData, currentTick: Long, + nextTick: Long, results: Option[PowerFlowResultEvent], ctx: ActorContext[Message], )(using @@ -59,7 +60,7 @@ trait DCMAlgorithm extends CongestionDetection { val congestionManagementData = results .map(res => CongestionManagementData(gridAgentBaseData, currentTick, res)) .getOrElse( - CongestionManagementData.empty(gridAgentBaseData, currentTick) + CongestionManagementData.empty(gridAgentBaseData, currentTick, nextTick) ) ctx.self ! 
StartStep diff --git a/src/main/scala/edu/ie3/simona/agent/grid/congestion/data/CongestionManagementData.scala b/src/main/scala/edu/ie3/simona/agent/grid/congestion/data/CongestionManagementData.scala index 8811557bbc..0a8ddab7cc 100644 --- a/src/main/scala/edu/ie3/simona/agent/grid/congestion/data/CongestionManagementData.scala +++ b/src/main/scala/edu/ie3/simona/agent/grid/congestion/data/CongestionManagementData.scala @@ -169,6 +169,7 @@ object CongestionManagementData { def empty( gridAgentBaseData: GridAgentBaseData, currentTick: Long, + nextTick: Long, ): CongestionManagementData = apply( gridAgentBaseData, currentTick, diff --git a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgent.scala b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgent.scala index 110a220887..c9100b695e 100644 --- a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgent.scala +++ b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgent.scala @@ -17,8 +17,11 @@ import edu.ie3.simona.model.participant.ParticipantModel.AdditionalFactoryData import edu.ie3.simona.model.participant.ParticipantModelShell import edu.ie3.simona.ontology.messages.SchedulerMessage.Completion import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage.* -import edu.ie3.simona.ontology.messages.ServiceMessage -import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.ontology.messages.{ + Activation, + SchedulerMessage, + ServiceMessage, +} import edu.ie3.simona.service.Data import edu.ie3.simona.service.Data.PrimaryDataExtra import edu.ie3.util.scala.Scope @@ -192,6 +195,9 @@ object ParticipantAgent { case (ctx, activation: ActivationRequest) => given ActorRef[Message] = ctx.self + // inform the result proxy that this grid agent will send new results + resultHandler.informProxy(modelShell.uuid, activation.tick) + val coreWithActivation = inputHandler.handleActivation(activation) val (updatedShell, updatedInputHandler, updatedGridAdapter) = @@ -212,6 +218,9 @@ object ParticipantAgent { case (ctx, msg: DataInputMessage) => given ActorRef[Message] = ctx.self + // inform the result proxy that this grid agent will send new results + resultHandler.informProxy(modelShell.uuid, msg.tick) + val inputHandlerWithData = inputHandler.handleDataInputMessage(msg) val (updatedShell, updatedInputHandler, updatedGridAdapter) = @@ -364,8 +373,7 @@ object ParticipantAgent { results.modelResults.foreach(resultHandler.maybeSend) - val newGridAdapter = - gridAdapter.storePowerValue(results.totalPower, tick) + val newGridAdapter = gridAdapter.storeResults(results, tick) (newShell, newGridAdapter) } else (shell, gridAdapter) @@ -389,7 +397,11 @@ object ParticipantAgent { case FlexActivation(tick, flexType) => val shellWithFlex = - if isCalculationRequired(shell, inputHandler) then { + if isCalculationRequired( + shell, + inputHandler, + ) || !modelShell.hasFlexOptions + then { val newShell = shell.updateFlexOptions(tick, flexType) resultHandler.maybeSend( newShell.determineFlexOptionsResult(tick, flexType) @@ -422,10 +434,7 @@ object ParticipantAgent { results.modelResults.foreach(resultHandler.maybeSend) val gridAdapterWithResult = - gridAdapter.storePowerValue( - results.totalPower, - flexControl.tick, - ) + gridAdapter.storeResults(results, flexControl.tick) val changeIndicator = shellWithOP.getChangeIndicator( flexControl.tick, @@ -456,7 +465,7 @@ object ParticipantAgent { .get (updatedShell, inputHandler.completeActivation(), updatedGridAdapter) - } else (modelShell, 
inputHandler, gridAdapter) + } else (modelShell, inputHandler, gridAdapter.clearLastResults) } /** Checks if all required messages needed for calculation have been received. @@ -499,13 +508,12 @@ object ParticipantAgent { private def isCalculationRequired( modelShell: ParticipantModelShell[?, ?], inputHandler: ParticipantInputHandler, - ): Boolean = - inputHandler.hasNewData || - inputHandler.activation.exists(activation => - modelShell - .getChangeIndicator(activation.tick - 1, None) - .changesAtTick - .contains(activation.tick) - ) + ): Boolean = inputHandler.hasNewData || + inputHandler.activation.exists(activation => + modelShell + .getChangeIndicator(activation.tick - 1, None) + .changesAtTick + .contains(activation.tick) + ) } diff --git a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgentInit.scala b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgentInit.scala index 938179e4ac..d874a19d84 100644 --- a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgentInit.scala +++ b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantAgentInit.scala @@ -35,6 +35,7 @@ import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage.* import edu.ie3.simona.ontology.messages.{SchedulerMessage, ServiceMessage} import edu.ie3.simona.scheduler.ScheduleLock.ScheduleKey import edu.ie3.simona.service.ServiceType +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.service.weather.WeatherDataType import edu.ie3.simona.service.weather.WeatherService.WeatherRegistrationData import edu.ie3.simona.util.Coordinate @@ -59,16 +60,16 @@ object ParticipantAgentInit { * Reference to the grid agent. * @param primaryServiceProxy * Reference to the primary service proxy. + * @param resultServiceProxy + * Reference to the result service proxy. * @param services * References to services by service type. - * @param resultListener - * Reference to the result listeners. */ final case class ParticipantRefs( gridAgent: ActorRef[GridAgent.Message], primaryServiceProxy: ActorRef[ServiceMessage], + resultServiceProxy: ActorRef[ResultEvent | ExpectResult], services: Map[ServiceType, ActorRef[ServiceMessage]], - resultListener: Iterable[ActorRef[ResultEvent]], ) /** Container class that holds parameters related to the simulation. 
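
Note on the hunks above and below (not part of the change set itself): the per-participant result listeners are replaced by a single result service proxy — agents announce upcoming results via ExpectResult, push the results themselves as ResultEvents, and the proxy later answers RequestResult queries (see ResultMessage.scala further down) with the collected entities. As a rough, dependency-free illustration of that bookkeeping, and under the assumption that the proxy only replies once every announced asset has reported, a sketch could look like the following; ResultCollector and all of its member names are invented for the example, and a String stands in for ResultEntity:

import java.util.UUID

// Illustrative bookkeeping model of the result service proxy, not the actual implementation.
final case class ResultCollector(
    expected: Map[Long, Set[UUID]] = Map.empty,         // tick -> assets announced via ExpectResult
    received: Map[Long, Map[UUID, String]] = Map.empty, // tick -> asset -> reported result
) {

  /** ExpectResult: an agent announces which assets will report for a tick. */
  def expect(assets: Iterable[UUID], tick: Long): ResultCollector =
    copy(expected =
      expected.updated(tick, expected.getOrElse(tick, Set.empty) ++ assets)
    )

  /** ResultEvent: one asset's result arrives for a tick. */
  def store(asset: UUID, tick: Long, result: String): ResultCollector =
    copy(received =
      received.updated(
        tick,
        received.getOrElse(tick, Map.empty).updated(asset, result),
      )
    )

  /** RequestResult: reply only once all announced assets have reported. */
  def request(assets: Seq[UUID], tick: Long): Option[Map[UUID, String]] = {
    val reported = received.getOrElse(tick, Map.empty)
    Option.when(expected.getOrElse(tick, Set.empty).subsetOf(reported.keySet))(
      assets.flatMap(a => reported.get(a).map(a -> _)).toMap
    )
  }
}

In this simplified picture a grid agent would call expect(assets, tick) on activation, participants store(...) their results as they complete, and an external RequestResult is answered as soon as request(...) yields Some.
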
@@ -421,7 +422,7 @@ object ParticipantAgentInit { simulationParams.requestVoltageDeviationTolerance, ), ParticipantResultHandler( - participantRefs.resultListener, + participantRefs.resultServiceProxy, notifierConfig, ), ) diff --git a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantGridAdapter.scala b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantGridAdapter.scala index 426d03f611..db47d17029 100644 --- a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantGridAdapter.scala +++ b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantGridAdapter.scala @@ -6,16 +6,18 @@ package edu.ie3.simona.agent.participant +import edu.ie3.datamodel.models.result.ResultEntity import edu.ie3.simona.agent.grid.GridAgent import edu.ie3.simona.agent.participant.ParticipantGridAdapter.* import edu.ie3.simona.exceptions.CriticalFailureException +import edu.ie3.simona.model.participant.ParticipantModelShell.ResultsContainer import edu.ie3.simona.service.Data.PrimaryData.ComplexPower import edu.ie3.util.scala.quantities.DefaultQuantities.{zeroMVAr, zeroMW} import edu.ie3.util.scala.quantities.{Megavars, QuantityUtil, ReactivePower} import org.apache.pekko.actor.typed.ActorRef import org.slf4j.Logger import squants.energy.Megawatts -import squants.{Dimensionless, Each, Energy, Power} +import squants.{Dimensionless, Each, Energy, Power, UnitOfMeasure} import scala.collection.immutable.SortedMap import scala.util.{Failure, Success} @@ -52,6 +54,7 @@ final case class ParticipantGridAdapter( private val expectedRequestTick: Long, private val tickToPower: SortedMap[Long, ComplexPower], avgPowerResult: Option[AvgPowerResult], + lastResults: Iterable[ResultEntity] = Seq.empty, )(private implicit val requestVoltageDeviationTolerance: Dimensionless) { /** Whether a power request is expected and has not yet arrived, thus is @@ -81,6 +84,17 @@ final case class ParticipantGridAdapter( ): ParticipantGridAdapter = copy(tickToPower = tickToPower.updated(tick, power)) + def storeResults( + result: ResultsContainer, + tick: Long, + ): ParticipantGridAdapter = + copy( + tickToPower = tickToPower.updated(tick, result.totalPower), + lastResults = result.modelResults, + ) + + def clearLastResults: ParticipantGridAdapter = copy(lastResults = Seq.empty) + /** Handles a power request by making sure an average power value has been * calculated, taking into account the new voltage value. * @@ -245,6 +259,8 @@ object ParticipantGridAdapter { ], log: Logger, ): ComplexPower = { + given UnitOfMeasure[Power] = Megawatts + val p = QuantityUtil.average[Power, Energy]( tickToPower.map { case (tick, pd) => tick -> pd.p diff --git a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantResultHandler.scala b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantResultHandler.scala index ce95d1d299..f3803131e9 100644 --- a/src/main/scala/edu/ie3/simona/agent/participant/ParticipantResultHandler.scala +++ b/src/main/scala/edu/ie3/simona/agent/participant/ParticipantResultHandler.scala @@ -20,18 +20,21 @@ import edu.ie3.simona.event.ResultEvent.{ } import edu.ie3.simona.event.notifier.NotifierConfig import edu.ie3.simona.exceptions.CriticalFailureException +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import org.apache.pekko.actor.typed.ActorRef +import java.util.UUID + /** Handles all kind of results stemming from the participant by sending them to - * the result listener, if applicable. + * the result proxy, if applicable. 
* - * @param listener - * The actor reference to the result listener. + * @param resultProxy + * The actor reference to the result resultProxy. * @param config * The result configuration. */ final case class ParticipantResultHandler( - private val listener: Iterable[ActorRef[ResultEvent]], + private val resultProxy: ActorRef[ResultEvent | ExpectResult], private val config: NotifierConfig, ) { @@ -42,18 +45,16 @@ final case class ParticipantResultHandler( */ def maybeSend(result: ResultEntity): Unit = if config.simulationResultInfo then { - listener.foreach(actor => - result match { - case thermalResult: ThermalUnitResult => - actor ! ThermalResultEvent(thermalResult) - case participantResult: SystemParticipantResult => - actor ! ParticipantResultEvent(participantResult) - case unsupported => - throw new CriticalFailureException( - s"Results of class '${unsupported.getClass.getSimpleName}' are currently not supported." - ) - } - ) + result match { + case thermalResult: ThermalUnitResult => + resultProxy ! ThermalResultEvent(thermalResult) + case participantResult: SystemParticipantResult => + resultProxy ! ParticipantResultEvent(participantResult) + case unsupported => + throw new CriticalFailureException( + s"Results of class '${unsupported.getClass.getSimpleName}' are currently not supported." + ) + } } /** Send the flex options result to all listeners, if enabled. @@ -63,9 +64,10 @@ final case class ParticipantResultHandler( */ def maybeSend(result: FlexOptionsResult): Unit = if config.flexResult then { - listener.foreach( - _ ! FlexOptionsResultEvent(result) - ) + resultProxy ! FlexOptionsResultEvent(result) } + def informProxy(uuid: UUID, tick: Long): Unit = + resultProxy ! ExpectResult(uuid, tick) + } diff --git a/src/main/scala/edu/ie3/simona/api/ExtSimAdapter.scala b/src/main/scala/edu/ie3/simona/api/ExtSimAdapter.scala index 868cec2d8c..475639797e 100644 --- a/src/main/scala/edu/ie3/simona/api/ExtSimAdapter.scala +++ b/src/main/scala/edu/ie3/simona/api/ExtSimAdapter.scala @@ -6,6 +6,7 @@ package edu.ie3.simona.api +import edu.ie3.simona.api.data.ExtSimAdapterData import edu.ie3.simona.api.ontology.ScheduleDataServiceMessage import edu.ie3.simona.api.data.ExtSimAdapterData import edu.ie3.simona.api.ontology.simulation.{ diff --git a/src/main/scala/edu/ie3/simona/config/ArgsParser.scala b/src/main/scala/edu/ie3/simona/config/ArgsParser.scala index b5211600c9..12e5dd200c 100644 --- a/src/main/scala/edu/ie3/simona/config/ArgsParser.scala +++ b/src/main/scala/edu/ie3/simona/config/ArgsParser.scala @@ -8,7 +8,7 @@ package edu.ie3.simona.config import com.typesafe.config.{ConfigFactory, Config as TypesafeConfig} import com.typesafe.scalalogging.LazyLogging -import scopt.{OptionParser as scoptOptionParser} +import scopt.OptionParser as scoptOptionParser import java.io.File import java.nio.file.Paths @@ -21,6 +21,7 @@ object ArgsParser extends LazyLogging { mainArgs: Array[String], configLocation: Option[String] = None, config: Option[TypesafeConfig] = None, + extAddress: Option[String] = None, ) // build the config parser using scopt library @@ -39,6 +40,15 @@ object ArgsParser extends LazyLogging { ) .text("Location of the simona config file") .minOccurs(1) + opt[String]("ext-address") + .action((value, args) => args.copy(extAddress = Option(value))) + .validate(value => + if value.trim.isEmpty then failure("ext-address cannot be empty") + else success + ) + .text( + "Comma separated list (no whitespaces!) 
of initial addresses used for the rest of the cluster to bootstrap" + ) } } diff --git a/src/main/scala/edu/ie3/simona/event/ResultEvent.scala b/src/main/scala/edu/ie3/simona/event/ResultEvent.scala index 9972d92f7b..e4e4968ca0 100644 --- a/src/main/scala/edu/ie3/simona/event/ResultEvent.scala +++ b/src/main/scala/edu/ie3/simona/event/ResultEvent.scala @@ -22,16 +22,19 @@ import edu.ie3.datamodel.models.result.thermal.{ ThermalHouseResult, ThermalUnitResult, } -import edu.ie3.datamodel.models.result.{CongestionResult, NodeResult} +import edu.ie3.datamodel.models.result.{ + CongestionResult, + NodeResult, + ResultEntity, +} import edu.ie3.simona.agent.grid.GridResultsSupport.PartialTransformer3wResult -import edu.ie3.simona.event.listener.ResultEventListener import tech.units.indriya.ComparableQuantity import java.time.ZonedDateTime import java.util.UUID import javax.measure.quantity.{Energy, Power, Temperature} -sealed trait ResultEvent extends Event with ResultEventListener.Request +sealed trait ResultEvent extends Event /** Calculation result events */ @@ -175,5 +178,4 @@ object ResultEvent { final case class FlexOptionsResultEvent( flexOptionsResult: FlexOptionsResult ) extends ResultEvent - } diff --git a/src/main/scala/edu/ie3/simona/event/listener/DelayedStopHelper.scala b/src/main/scala/edu/ie3/simona/event/listener/DelayedStopHelper.scala index c76924af63..7b4ccbaf94 100644 --- a/src/main/scala/edu/ie3/simona/event/listener/DelayedStopHelper.scala +++ b/src/main/scala/edu/ie3/simona/event/listener/DelayedStopHelper.scala @@ -21,7 +21,7 @@ object DelayedStopHelper { * functionality */ sealed trait StoppingMsg - extends ResultEventListener.Request + extends ResultListener.Request with RuntimeEventListener.Request /** Message indicating that [[RuntimeEventListener]] should stop. Instead of diff --git a/src/main/scala/edu/ie3/simona/event/listener/ExtResultEvent.scala b/src/main/scala/edu/ie3/simona/event/listener/ExtResultEvent.scala new file mode 100644 index 0000000000..c5b4d14ca1 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/event/listener/ExtResultEvent.scala @@ -0,0 +1,133 @@ +/* + * © 2025. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.event.listener + +import edu.ie3.simona.api.data.connection.{ + ExtResultDataConnection, + ExtResultListener, +} +import edu.ie3.simona.api.ontology.DataMessageFromExt +import edu.ie3.simona.api.ontology.results.{ + ProvideResultEntities, + RequestResultEntities, + ResultDataMessageFromExt, +} +import edu.ie3.simona.exceptions.CriticalFailureException +import edu.ie3.simona.ontology.messages.ResultMessage.* +import edu.ie3.simona.ontology.messages.SchedulerMessage.{ + Completion, + ScheduleActivation, +} +import edu.ie3.simona.ontology.messages.ServiceMessage.ScheduleServiceActivation +import edu.ie3.simona.ontology.messages.{ + Activation, + ResultMessage, + SchedulerMessage, +} +import edu.ie3.simona.util.CollectionUtils.asJava +import org.apache.pekko.actor.typed.scaladsl.Behaviors +import org.apache.pekko.actor.typed.{ActorRef, Behavior} + +import java.util +import scala.jdk.CollectionConverters.* + +object ExtResultEvent { + + type Message = ResultMessage.Response | DelayedStopHelper.StoppingMsg + + private final case class ProviderState( + scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], + connection: ExtResultDataConnection, + extMessage: Option[ResultDataMessageFromExt] = None, + ) + + def listener(connection: ExtResultListener): Behavior[Message] = + Behaviors.receivePartial[Message] { + case (_, ResultResponse(results)) => + connection.queueExtResponseMsg( + new ProvideResultEntities(results.asJava) + ) + + Behaviors.same + + case (ctx, msg: DelayedStopHelper.StoppingMsg) => + DelayedStopHelper.handleMsg((ctx, msg)) + } + + def provider( + connection: ExtResultDataConnection, + scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], + ): Behavior[Message | DataMessageFromExt | Activation] = { + val stateData = ProviderState(scheduler, resultProxy, connection) + + provider(stateData) + } + + private def provider( + stateData: ProviderState + ): Behavior[Message | DataMessageFromExt | Activation] = + Behaviors.receivePartial[Message | DataMessageFromExt | Activation] { + case (ctx, ResultResponse(results)) => + ctx.log.warn(s"Sending results to ext. Results: $results") + + // send result to external simulation + stateData.connection.queueExtResponseMsg( + new ProvideResultEntities(results.asJava) + ) + + stateData.scheduler ! Completion(ctx.self) + + Behaviors.same + + case (_, messageFromExt: ResultDataMessageFromExt) => + // save ext message + provider(stateData.copy(extMessage = Some(messageFromExt))) + + case (ctx, ScheduleServiceActivation(tick, unlockKey)) => + stateData.scheduler ! ScheduleActivation( + ctx.self, + tick, + Some(unlockKey), + ) + + Behaviors.same + + case (ctx, Activation(tick)) => + // handle ext message + + val extMsg = stateData.extMessage.getOrElse( + // this should not be possible because the external simulation schedules this provider + throw CriticalFailureException( + "ExtResultDataService was triggered without ResultDataMessageFromExt available" + ) + ) + + extMsg match { + case requestResultEntities: RequestResultEntities => + val requestedResults = + new util.ArrayList(requestResultEntities.requestedResults) + + // request results from result proxy + stateData.resultProxy ! 
RequestResult( + requestedResults.asScala.toSeq, + tick, + ctx.self, + ) + + Behaviors.same + case other => + ctx.log.warn(s"Cannot handle external result message: $other") + Behaviors.same + } + + case (ctx, msg: DelayedStopHelper.StoppingMsg) => + DelayedStopHelper.handleMsg((ctx, msg)) + + } +} diff --git a/src/main/scala/edu/ie3/simona/event/listener/ResultEventListener.scala b/src/main/scala/edu/ie3/simona/event/listener/ResultListener.scala similarity index 58% rename from src/main/scala/edu/ie3/simona/event/listener/ResultEventListener.scala rename to src/main/scala/edu/ie3/simona/event/listener/ResultListener.scala index 28866b54f6..1ce384e21f 100644 --- a/src/main/scala/edu/ie3/simona/event/listener/ResultEventListener.scala +++ b/src/main/scala/edu/ie3/simona/event/listener/ResultListener.scala @@ -6,23 +6,22 @@ package edu.ie3.simona.event.listener -import org.apache.pekko.actor.typed.scaladsl.Behaviors -import org.apache.pekko.actor.typed.{Behavior, PostStop} import edu.ie3.datamodel.io.processor.result.ResultEntityProcessor import edu.ie3.datamodel.models.result.{NodeResult, ResultEntity} -import edu.ie3.simona.agent.grid.GridResultsSupport.PartialTransformer3wResult -import edu.ie3.simona.event.ResultEvent.{ - FlexOptionsResultEvent, - ParticipantResultEvent, - PowerFlowResultEvent, - ThermalResultEvent, -} +import edu.ie3.simona.api.data.connection.ExtResultListener +import edu.ie3.simona.api.ontology.results.ProvideResultEntities +import edu.ie3.simona.event.ResultEvent import edu.ie3.simona.exceptions.{ FileHierarchyException, ProcessResultEventException, } import edu.ie3.simona.io.result.* +import edu.ie3.simona.ontology.messages.ResultMessage +import edu.ie3.simona.ontology.messages.ResultMessage.ResultResponse +import edu.ie3.simona.util.CollectionUtils.asJava import edu.ie3.simona.util.ResultFileHierarchy +import org.apache.pekko.actor.typed.scaladsl.Behaviors +import org.apache.pekko.actor.typed.{Behavior, PostStop} import org.slf4j.Logger import scala.concurrent.ExecutionContext.Implicits.global @@ -30,29 +29,26 @@ import scala.concurrent.duration.DurationInt import scala.concurrent.{Await, Future} import scala.util.{Failure, Success, Try} -object ResultEventListener extends Transformer3wResultSupport { +object ResultListener { trait Request + type Message = Request | ResultMessage.Response + private final case class SinkResponse( response: Map[Class[?], ResultEntitySink] ) extends Request private final case class InitFailed(ex: Exception) extends Request - /** [[ResultEventListener]] base data containing all information the listener - * needs + /** [[ResultListener]] base data containing all information the listener needs * * @param classToSink * a map containing the sink for each class that should be processed by the * listener */ private final case class BaseData( - classToSink: Map[Class[?], ResultEntitySink], - threeWindingResults: Map[ - Transformer3wKey, - AggregatedTransformer3wResult, - ] = Map.empty, + classToSink: Map[Class[?], ResultEntitySink] ) /** Initialize the sinks for this listener based on the provided collection @@ -147,73 +143,19 @@ object ResultEventListener extends Transformer3wResultSupport { } } - /** Handle the given result and possibly update the state data + /** Handle the given results. * - * @param resultEntity - * Result entity to handle + * @param resultEntities + * Results entity to handle. * @param baseData - * Base data - * @return - * The possibly update base data + * Base data. 
*/ - private def handleResult( - resultEntity: ResultEntity, + private def handleResults( + resultEntities: Iterable[ResultEntity], baseData: BaseData, log: Logger, - ): BaseData = { - handOverToSink(resultEntity, baseData.classToSink, log) - baseData - } - - /** Handle a partial three winding result properly by adding it to an - * [[AggregatedTransformer3wResult]] and flushing then possibly completed - * results. Finally, the base data are updated. - * - * @param result - * Result entity to handle - * @param baseData - * Base data - * @return - * The possibly update base data - */ - private def handlePartialTransformer3wResult( - result: PartialTransformer3wResult, - baseData: BaseData, - log: Logger, - ): BaseData = { - val key = Transformer3wKey(result.input, result.time) - // retrieve existing partial result or use empty one - val partialResult = - baseData.threeWindingResults.getOrElse( - key, - AggregatedTransformer3wResult.EMPTY, - ) - // add partial result - val updatedResults = partialResult.add(result).map { updatedResult => - if updatedResult.ready then { - // if result is complete, we can write it out - updatedResult.consolidate.foreach { - handOverToSink(_, baseData.classToSink, log) - } - // also remove partial result from map - baseData.threeWindingResults.removed(key) - } else { - // if result is not complete yet, just update it - baseData.threeWindingResults + (key -> updatedResult) - } - } match { - case Success(results) => results - case Failure(exception) => - log.warn( - "Failure when handling partial Transformer3w result", - exception, - ) - // on failure, we just continue with previous results - baseData.threeWindingResults - } - - baseData.copy(threeWindingResults = updatedResults) - } + ): Unit = + resultEntities.foreach(handOverToSink(_, baseData.classToSink, log)) /** Handing over the given result entity to the sink, that might be apparent * in the map @@ -236,9 +178,29 @@ object ResultEventListener extends Transformer3wResultSupport { log.error("Error while writing result event: ", exception) } + /** Method to create an external result listener. + * + * @param connection + * Result listener data connection. + * @return + * The behavior of the listener. 
+ */ + def external(connection: ExtResultListener): Behavior[Message] = + Behaviors.receivePartial[Message] { + case (_, ResultResponse(results)) => + connection.queueExtResponseMsg( + new ProvideResultEntities(results.asJava) + ) + + Behaviors.same + + case (ctx, msg: DelayedStopHelper.StoppingMsg) => + DelayedStopHelper.handleMsg((ctx, msg)) + } + def apply( resultFileHierarchy: ResultFileHierarchy - ): Behavior[Request] = Behaviors.setup[Request] { ctx => + ): Behavior[Message] = Behaviors.setup[Message] { ctx => ctx.log.debug("Starting initialization!") resultFileHierarchy.resultSinkType match { case _: ResultSinkType.Kafka => @@ -254,18 +216,18 @@ object ResultEventListener extends Transformer3wResultSupport { ctx.pipeToSelf( Future.sequence( - ResultEventListener.initializeSinks(resultFileHierarchy) + ResultListener.initializeSinks(resultFileHierarchy) ) ) { case Failure(exception: Exception) => InitFailed(exception) case Success(result) => SinkResponse(result.toMap) } - init() + init } - private def init(): Behavior[Request] = Behaviors.withStash(200) { buffer => - Behaviors.receive[Request] { + private def init: Behavior[Message] = Behaviors.withStash(200) { buffer => + Behaviors.receive[Message] { case (ctx, SinkResponse(response)) => ctx.log.debug("Initialization complete!") buffer.unstashAll(idle(BaseData(response))) @@ -281,47 +243,12 @@ object ResultEventListener extends Transformer3wResultSupport { } } - private def idle(baseData: BaseData): Behavior[Request] = Behaviors - .receivePartial[Request] { - case (ctx, ParticipantResultEvent(participantResult)) => - val updatedBaseData = handleResult(participantResult, baseData, ctx.log) - idle(updatedBaseData) - - case (ctx, ThermalResultEvent(thermalResult)) => - val updatedBaseData = handleResult(thermalResult, baseData, ctx.log) - idle(updatedBaseData) - - case ( - ctx, - PowerFlowResultEvent( - nodeResults, - switchResults, - lineResults, - transformer2wResults, - transformer3wResults, - congestionResults, - ), - ) => - val updatedBaseData = - (nodeResults ++ switchResults ++ lineResults ++ transformer2wResults ++ transformer3wResults ++ congestionResults) - .foldLeft(baseData) { - case (currentBaseData, resultEntity: ResultEntity) => - handleResult(resultEntity, currentBaseData, ctx.log) - case ( - currentBaseData, - partialTransformerResult: PartialTransformer3wResult, - ) => - handlePartialTransformer3wResult( - partialTransformerResult, - currentBaseData, - ctx.log, - ) - } - idle(updatedBaseData) + private def idle(baseData: BaseData): Behavior[Message] = Behaviors + .receivePartial[Message] { + case (ctx, ResultResponse(results)) => + handleResults(results.values.flatten, baseData, ctx.log) - case (ctx, FlexOptionsResultEvent(flexOptionsResult)) => - val updatedBaseData = handleResult(flexOptionsResult, baseData, ctx.log) - idle(updatedBaseData) + Behaviors.same case (ctx, msg: DelayedStopHelper.StoppingMsg) => DelayedStopHelper.handleMsg((ctx, msg)) @@ -329,15 +256,7 @@ object ResultEventListener extends Transformer3wResultSupport { } .receiveSignal { case (ctx, PostStop) => // wait until all I/O has finished - ctx.log.debug( - "Shutdown initiated.\n\tThe following three winding results are not comprehensive and are not " + - "handled in sinks:{}\n\tWaiting until writing result data is completed ...", - baseData.threeWindingResults.keys - .map { case Transformer3wKey(model, zdt) => - s"model '$model' at $zdt" - } - .mkString("\n\t\t"), - ) + ctx.log.debug("Shutdown initiated.") // close sinks concurrently to speed up 
closing (closing calls might be blocking) Await.ready( diff --git a/src/main/scala/edu/ie3/simona/main/EmBuilder.scala b/src/main/scala/edu/ie3/simona/main/EmBuilder.scala new file mode 100644 index 0000000000..86775f089a --- /dev/null +++ b/src/main/scala/edu/ie3/simona/main/EmBuilder.scala @@ -0,0 +1,86 @@ +/* + * © 2025. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.main + +import edu.ie3.datamodel.io.naming.FileNamingStrategy +import edu.ie3.datamodel.io.sink.CsvFileSink +import edu.ie3.datamodel.io.source.csv.CsvDataSource +import edu.ie3.datamodel.io.source.* +import edu.ie3.datamodel.models.OperationTime +import edu.ie3.datamodel.models.input.{EmInput, OperatorInput} + +import java.nio.file.Path +import java.util.UUID +import scala.jdk.CollectionConverters.{ + MapHasAsScala, + SetHasAsJava, + SetHasAsScala, +} + +object EmBuilder { + + def main(args: Array[String]): Unit = { + val path = Path.of("simona", "input", "fullGrid") + + val csvSource = new CsvDataSource(";", path, new FileNamingStrategy()) + + val typeSource = new TypeSource(csvSource) + val gridSource = new RawGridSource(typeSource, csvSource) + val emSource = new EnergyManagementSource(typeSource, csvSource) + val thermalSource = new ThermalSource(typeSource, csvSource) + val participantSource = new SystemParticipantSource( + typeSource, + thermalSource, + gridSource, + emSource, + csvSource, + ) + + val (_, otherNodes) = gridSource.getNodes.asScala.toMap.partition { + case (_, node) => node.isSlack + } + + val emSup = new EmInput( + UUID.randomUUID(), + "EM_Scada", + OperatorInput.NO_OPERATOR_ASSIGNED, + OperationTime.notLimited(), + "PROPORTIONAL", + null, + ) + + val ems = otherNodes.map { case (_, node) => + node -> new EmInput( + UUID.randomUUID(), + s"Em_${node.getId}", + OperatorInput.NO_OPERATOR_ASSIGNED, + OperationTime.notLimited(), + "PROPORTIONAL", + emSup, + ) + } + + val allEms = ems.values.toSet ++ Set(emSup) + + val fixedFeedIns = participantSource.getFixedFeedIns.asScala.map { ffi => + val node = ffi.getNode + ffi.copy().em(ems(node)).build() + } + + val loads = participantSource.getLoads.asScala.map { load => + val node = load.getNode + load.copy().em(ems(node)).build() + } + + val sink = + new CsvFileSink(path.resolve("withEm"), new FileNamingStrategy(), ";") + + sink.persistAllIgnoreNested(allEms.asJava) + sink.persistAllIgnoreNested(fixedFeedIns.asJava) + sink.persistAllIgnoreNested(loads.asJava) + } +} diff --git a/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelInit.scala b/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelInit.scala index 42db1d7c36..9a771d575a 100644 --- a/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelInit.scala +++ b/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelInit.scala @@ -125,6 +125,7 @@ object ParticipantModelInit { PrimaryDataParticipantModel.Factory( modelFactory.create(), primaryDataExtra, + modelConfig.scaling, ) } diff --git a/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelShell.scala b/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelShell.scala index 7efaa567d3..9e3b7141c1 100644 --- a/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelShell.scala +++ b/src/main/scala/edu/ie3/simona/model/participant/ParticipantModelShell.scala @@ -115,6 +115,8 @@ final case class ParticipantModelShell[ */ def operationStart: Long = 
operationInterval.start + def hasFlexOptions: Boolean = flexOptions.isDefined + /** Returns the current flex options, if present, or throws a * [[CriticalFailureException]]. Only call this if you are certain the flex * options have been set. diff --git a/src/main/scala/edu/ie3/simona/model/participant/PrimaryDataParticipantModel.scala b/src/main/scala/edu/ie3/simona/model/participant/PrimaryDataParticipantModel.scala index 88f88e4e20..3e1414d863 100644 --- a/src/main/scala/edu/ie3/simona/model/participant/PrimaryDataParticipantModel.scala +++ b/src/main/scala/edu/ie3/simona/model/participant/PrimaryDataParticipantModel.scala @@ -45,6 +45,8 @@ import scala.reflect.ClassTag * physical [[ParticipantModel]]. * @param primaryDataExtra * Extra functionality specific to the primary data class. + * @param scalingFactor + * The scaling factor from the runtime config. * @tparam PD * The type of primary data. */ @@ -56,6 +58,7 @@ final case class PrimaryDataParticipantModel[PD <: PrimaryData: ClassTag]( override val qControl: QControl, private val primaryDataResultFunc: PrimaryResultFunc, private val primaryDataExtra: PrimaryDataExtra[PD], + private val scalingFactor: Double, ) extends ParticipantModel[ PrimaryOperatingPoint[PD], PrimaryDataState[PD], @@ -87,8 +90,10 @@ final case class PrimaryDataParticipantModel[PD <: PrimaryData: ClassTag]( override def determineOperatingPoint( state: PrimaryDataState[PD] - ): (PrimaryOperatingPoint[PD], Option[Long]) = - (PrimaryOperatingPoint(state.data), None) + ): (PrimaryOperatingPoint[PD], Option[Long]) = { + val scaledData = primaryDataExtra.scale(state.data, scalingFactor) + (PrimaryOperatingPoint(scaledData), None) + } override def zeroPowerOperatingPoint: PrimaryOperatingPoint[PD] = PrimaryOperatingPoint(primaryDataExtra.zero) @@ -124,7 +129,10 @@ final case class PrimaryDataParticipantModel[PD <: PrimaryData: ClassTag]( ): (PrimaryOperatingPoint[PD], OperationChangeIndicator) = { // scale the whole primary data by the same factor that // the active power set point was scaled by - val factor = state.data.p / setPower + val factor = if setPower.value != 0.0 then { + state.data.p / setPower + } else 1.0 + val scaledData: PD = primaryDataExtra.scale(state.data, factor) (PrimaryOperatingPoint(scaledData), OperationChangeIndicator()) @@ -213,10 +221,13 @@ object PrimaryDataParticipantModel { * The physical participant model. * @param primaryDataExtra * Extra functionality specific to the primary data class. + * @param scalingFactor + * The scaling factor from the runtime config. */ final case class Factory[PD <: PrimaryData]( physicalModel: ParticipantModel[?, ?], primaryDataExtra: PrimaryDataExtra[PD], + scalingFactor: Double, ) extends ParticipantModelFactory[PrimaryDataState[PD]] { override def getRequiredSecondaryServices: Iterable[ServiceType] = @@ -248,6 +259,7 @@ object PrimaryDataParticipantModel { physicalModel.qControl, primaryResultFunc, primaryDataExtra, + scalingFactor, )(using primaryDataExtra.getClassTag) } } diff --git a/src/main/scala/edu/ie3/simona/ontology/messages/ResultMessage.scala b/src/main/scala/edu/ie3/simona/ontology/messages/ResultMessage.scala new file mode 100644 index 0000000000..dd2efb9d4f --- /dev/null +++ b/src/main/scala/edu/ie3/simona/ontology/messages/ResultMessage.scala @@ -0,0 +1,41 @@ +/* + * © 2025. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.ontology.messages + +import edu.ie3.datamodel.models.result.ResultEntity +import org.apache.pekko.actor.typed.ActorRef + +import java.util.UUID + +object ResultMessage { + + /** Message to request results. + * + * @param requestedResults + * The uuids of the input models. + * @param tick + * For which results are requested. + * @param replyTo + * The actor that should receive the results. + */ + final case class RequestResult( + requestedResults: Seq[UUID], + tick: Long, + replyTo: ActorRef[Response], + ) + + /** Trait that is extended by all responses to a [[RequestResult]]. + */ + sealed trait Response + + /** Response message that contains the requested results. + * @param results + * Map: uuid to results. + */ + final case class ResultResponse(results: Map[UUID, Iterable[ResultEntity]]) + extends Response +} diff --git a/src/main/scala/edu/ie3/simona/ontology/messages/ServiceMessage.scala b/src/main/scala/edu/ie3/simona/ontology/messages/ServiceMessage.scala index a1500a9374..9d2460fd3c 100644 --- a/src/main/scala/edu/ie3/simona/ontology/messages/ServiceMessage.scala +++ b/src/main/scala/edu/ie3/simona/ontology/messages/ServiceMessage.scala @@ -6,6 +6,7 @@ package edu.ie3.simona.ontology.messages +import edu.ie3.datamodel.models.result.ResultEntity import edu.ie3.simona.agent.em.EmAgent import edu.ie3.simona.agent.participant.ParticipantAgent import edu.ie3.simona.agent.participant.ParticipantAgent.ParticipantRequest @@ -17,8 +18,10 @@ import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage.{ } import edu.ie3.simona.scheduler.ScheduleLock.ScheduleKey import edu.ie3.simona.service.ServiceStateData.InitializeServiceStateData +import edu.ie3.util.TimeUtil import org.apache.pekko.actor.typed.ActorRef +import java.time.ZonedDateTime import java.util.UUID /** Collections of all messages, that are send to and from the different diff --git a/src/main/scala/edu/ie3/simona/ontology/messages/flex/FlexibilityMessage.scala b/src/main/scala/edu/ie3/simona/ontology/messages/flex/FlexibilityMessage.scala index 62d7321935..fe4a48090f 100644 --- a/src/main/scala/edu/ie3/simona/ontology/messages/flex/FlexibilityMessage.scala +++ b/src/main/scala/edu/ie3/simona/ontology/messages/flex/FlexibilityMessage.scala @@ -9,6 +9,7 @@ package edu.ie3.simona.ontology.messages.flex import edu.ie3.datamodel.models.input.AssetInput import edu.ie3.simona.scheduler.ScheduleLock.ScheduleKey import edu.ie3.simona.service.Data.PrimaryData.ComplexPower +import edu.ie3.simona.service.em.{EmServiceCore, ExtEmDataService} import org.apache.pekko.actor.typed.ActorRef import squants.Power @@ -102,6 +103,12 @@ object FlexibilityMessage { FlexActivation(tick, flexType) } + // shifts the activation for controlled asset agent to the given tick + final case class FlexShiftActivation( + override val tick: Long, + flexType: FlexType, + ) extends FlexRequest + /** Message that provides [[FlexOptions]] to an * [[edu.ie3.simona.agent.em.EmAgent]] after they have been requested via * [[FlexActivation]]. 
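
Note (not part of the change set itself): the new FlexShiftActivation pairs with the EmDataCore.gotoTick method added earlier in this patch — when a service activates the EM hierarchy for a tick beyond the next locally queued one, the queued activations are re-keyed to that later tick instead of firing too early. A minimal, dependency-free sketch of that re-keying, with a plain SortedMap standing in for the project's activation queue and all names invented for the example, could look like this:

import java.util.UUID
import scala.collection.immutable.SortedMap

// Illustrative model of the shift behaviour, not the actual EmDataCore implementation.
object FlexShiftSketch {

  final case class ActivationQueue(queue: SortedMap[Long, Set[UUID]]) {

    /** Mirrors the intent of gotoTick: if the target tick lies after the next
      * scheduled tick, re-key that tick's actors to the target tick.
      */
    def gotoTick(newTick: Long): ActivationQueue =
      queue.headOption match {
        case Some((nextTick, actors)) if newTick > nextTick =>
          ActivationQueue(queue.removed(nextTick).updated(newTick, actors))
        case _ => this
      }
  }

  def main(args: Array[String]): Unit = {
    val asset = UUID.randomUUID()
    val queue = ActivationQueue(SortedMap(900L -> Set(asset)))

    // A FlexShiftActivation for tick 3600 would move the pending activation to that tick.
    println(queue.gotoTick(3600L).queue)
  }
}
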
diff --git a/src/main/scala/edu/ie3/simona/ontology/messages/flex/MinMaxFixFlexibilityMessage.scala b/src/main/scala/edu/ie3/simona/ontology/messages/flex/MinMaxFixFlexibilityMessage.scala new file mode 100644 index 0000000000..f3b005f5a8 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/ontology/messages/flex/MinMaxFixFlexibilityMessage.scala @@ -0,0 +1,123 @@ +/* + * © 2024. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.ontology.messages.flex + +import edu.ie3.simona.exceptions.CriticalFailureException +import edu.ie3.util.scala.quantities.DefaultQuantities.* +import squants.Power + +import java.util.UUID + +/** Messages that communicate interval-based flexibility with minimum, reference + * and maximum power + */ +object MinMaxFixFlexibilityMessage { + + /** Message that provides flexibility options using reference, minimum and + * maximum power. It is possible that the power values are either all + * negative or all positive, meaning that feed-in or load is mandatory. + * + * @param ref + * The reference active power that the flex options provider would + * produce/consume regularly at the current tick, i.e. if it was not + * flex-controlled + * @param min + * The minimum active power that the flex options provider allows at the + * current tick + * @param max + * The maximum active power that the flex options provider allows at the + * current tick + */ + final case class MinMaxFixFlexOptions private ( + ref: Power, + min: Power, + max: Power, + fix: Power, + ) + + object MinMaxFixFlexOptions { + + implicit class RichIterable( + private val flexOptions: Iterable[MinMaxFixFlexOptions] + ) extends AnyVal { + def flexSum: (Power, Power, Power, Power) = + flexOptions.foldLeft((zeroKW, zeroKW, zeroKW, zeroKW)) { + case ( + (sumRef, sumMin, sumMax, sumFix), + MinMaxFixFlexOptions(addRef, addMin, addMax, addFix), + ) => + ( + sumRef + addRef, + sumMin + addMin, + sumMax + addMax, + sumFix + addFix, + ) + } + } + + /** Creates a + * [[edu.ie3.simona.ontology.messages.flex.MinMaxFixFlexibilityMessage.MinMaxFixFlexOptions]] + * message with sanity checks regarding the power values + * + * @param modelUuid + * The UUID of the flex options provider asset model + * @param ref + * The reference active power that the flex options provider would + * produce/consume regularly at the current tick, i.e. if it was not + * flex-controlled + * @param min + * The minimum active power that the flex options provider allows at the + * current tick + * @param max + * The maximum active power that the flex options provider allows at the + * current tick + * @return + * The + * [[edu.ie3.simona.ontology.messages.flex.MinMaxFixFlexibilityMessage.MinMaxFixFlexOptions]] + * message + */ + def apply( + modelUuid: UUID, + ref: Power, + min: Power, + max: Power, + fix: Power, + ): MinMaxFixFlexOptions = { + if min > ref then + throw new CriticalFailureException( + s"Minimum power $min is greater than reference power $ref" + ) + + if ref > max then + throw new CriticalFailureException( + s"Reference power $ref is greater than maximum power $max" + ) + + new MinMaxFixFlexOptions(ref, min, max, fix) + } + + /** Creates a + * [[edu.ie3.simona.ontology.messages.flex.MinMaxFixFlexibilityMessage.MinMaxFixFlexOptions]] + * message that does not allow any flexibility, meaning that min = ref = + * max power. 
+ * + * @param modelUuid + * The UUID of the flex provider asset model + * @param power + * The active power that the flex provider requires + * @return + * The corresponding + * [[edu.ie3.simona.ontology.messages.flex.MinMaxFixFlexibilityMessage.MinMaxFixFlexOptions]] + * message + */ + def noFlexOption( + modelUuid: UUID, + power: Power, + ): MinMaxFixFlexOptions = + MinMaxFixFlexOptions(modelUuid, power, power, power, zeroKW) + } +} diff --git a/src/main/scala/edu/ie3/simona/ontology/messages/flex/PowerLimitFlexOptions.scala b/src/main/scala/edu/ie3/simona/ontology/messages/flex/PowerLimitFlexOptions.scala index 5f352488ba..9cd4f9e25a 100644 --- a/src/main/scala/edu/ie3/simona/ontology/messages/flex/PowerLimitFlexOptions.scala +++ b/src/main/scala/edu/ie3/simona/ontology/messages/flex/PowerLimitFlexOptions.scala @@ -75,10 +75,16 @@ object PowerLimitFlexOptions extends FlexOptionsExtra[PowerLimitFlexOptions] { ): Power = flexCtrl match { case IssuePowerControl(_, setPower) => - // sanity check: setPower is in range of latest flex options - checkSetPower(flexOptions, setPower) - - setPower + if setPower < flexOptions.min then { + flexOptions.min + } else if setPower > flexOptions.max then { + flexOptions.max + } else { + // sanity check: setPower is in range of latest flex options + checkSetPower(flexOptions, setPower) + + setPower + } case IssueNoControl(_) => // no override, take reference power diff --git a/src/main/scala/edu/ie3/simona/recode/RecodeDatabaseNamingStrategy.scala b/src/main/scala/edu/ie3/simona/recode/RecodeDatabaseNamingStrategy.scala new file mode 100644 index 0000000000..74c02e0478 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/recode/RecodeDatabaseNamingStrategy.scala @@ -0,0 +1,28 @@ +/* + * © 2025. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.recode + +import edu.ie3.datamodel.io.naming.EntityPersistenceNamingStrategy +import edu.ie3.datamodel.models.result.{NodeResult, ResultEntity} + +import java.util.Optional + +class RecodeDatabaseNamingStrategy extends EntityPersistenceNamingStrategy { + + override def getResultEntityName( + resultEntityClass: Class[? <: ResultEntity] + ): Optional[String] = { + val NodeRes = classOf[NodeResult] + + resultEntityClass match { + case NodeRes => + Optional.of("bus") + case _ => + super.getResultEntityName(resultEntityClass) + } + } +} diff --git a/src/main/scala/edu/ie3/simona/recode/RecodeInfluxDBSink.java b/src/main/scala/edu/ie3/simona/recode/RecodeInfluxDBSink.java new file mode 100644 index 0000000000..7c1e78f764 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/recode/RecodeInfluxDBSink.java @@ -0,0 +1,140 @@ +/* + * © 2025. 
TU Dortmund University,
+ * Institute of Energy Systems, Energy Efficiency and Energy Economics,
+ * Research group Distribution grid planning and operation
+ */
+
+package edu.ie3.simona.recode;
+
+import edu.ie3.datamodel.exceptions.EntityProcessorException;
+import edu.ie3.datamodel.exceptions.ProcessorProviderException;
+import edu.ie3.datamodel.io.connectors.InfluxDbConnector;
+import edu.ie3.datamodel.io.processor.ProcessorProvider;
+import edu.ie3.datamodel.io.sink.OutputDataSink;
+import edu.ie3.datamodel.models.Entity;
+import edu.ie3.datamodel.models.result.ResultEntity;
+import edu.ie3.datamodel.models.timeseries.TimeSeries;
+import edu.ie3.datamodel.models.timeseries.TimeSeriesEntry;
+import edu.ie3.datamodel.models.value.Value;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import org.influxdb.dto.BatchPoints;
+import org.influxdb.dto.Point;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class RecodeInfluxDBSink implements OutputDataSink {
+  public static final Logger log = LoggerFactory.getLogger(RecodeInfluxDBSink.class);
+  private final InfluxDbConnector connector;
+  private final RecodeDatabaseNamingStrategy entityPersistenceNamingStrategy;
+  private final ProcessorProvider processorProvider;
+
+  public RecodeInfluxDBSink(
+      InfluxDbConnector connector, RecodeDatabaseNamingStrategy entityPersistenceNamingStrategy)
+      throws EntityProcessorException {
+    this.connector = connector;
+    this.entityPersistenceNamingStrategy = entityPersistenceNamingStrategy;
+    this.processorProvider =
+        new ProcessorProvider(
+            ProcessorProvider.allResultEntityProcessors(),
+            ProcessorProvider.allTimeSeriesProcessors());
+  }
+
+  @Override
+  public void shutdown() {
+    connector.shutdown();
+  }
+
+  @Override
+  public <C extends Entity> void persist(C entity) throws ProcessorProviderException {
+    Set<Point> points = extractPoints(entity);
+    if (points.size() == 1) {
+      this.write(points.iterator().next());
+    } else {
+      this.writeAll(points);
+    }
+  }
+
+  @Override
+  public <C extends Entity> void persistAll(Collection<C> entities)
+      throws ProcessorProviderException {
+    Set<Point> points = new HashSet<>();
+
+    for (C entity : entities) {
+      points.addAll(this.extractPoints(entity));
+    }
+
+    writeAll(points);
+  }
+
+  @Override
+  public <E extends TimeSeriesEntry<V>, V extends Value, R extends Value> void persistTimeSeries(
+      TimeSeries<E, V, R> timeSeries) {
+    log.warn("Persisting time series is not supported!");
+  }
+
+  private Point transformToPoint(ResultEntity entity) throws ProcessorProviderException {
+    Optional<String> measurementName =
+        entityPersistenceNamingStrategy.getResultEntityName(entity.getClass());
+
+    if (measurementName.isEmpty()) {
+      log.warn(
+          "I could not get a measurement name for class {}. I am using its simple name instead.",
+          entity.getClass().getSimpleName());
+    }
+
+    return transformToPoint(entity, measurementName.orElse(entity.getClass().getSimpleName()));
+  }
+
+  private Point transformToPoint(ResultEntity entity, String measurementName)
+      throws ProcessorProviderException {
+    LinkedHashMap<String, String> entityFieldData =
+        processorProvider.handleEntity(entity).getOrThrow();
+
+    if (entityFieldData.containsKey("p")) {
+      String value = entityFieldData.remove("p");
+      entityFieldData.put("p_mw", value);
+    }
+    if (entityFieldData.containsKey("q")) {
+      String value = entityFieldData.remove("q");
+      entityFieldData.put("q_mvar", value);
+    }
+
+    entityFieldData.remove("time");
+    return Point.measurement(transformToMeasurementName(measurementName))
+        .time(entity.getTime().toInstant().toEpochMilli(), TimeUnit.MILLISECONDS)
+        .tag("input_model", entityFieldData.remove("inputModel"))
+        .tag("run", connector.getScenarioName())
+        .fields(Collections.unmodifiableMap(entityFieldData))
+        .build();
+  }
+
+  private <C extends Entity> Set<Point> extractPoints(C entity) throws ProcessorProviderException {
+    Set<Point> points = new HashSet<>();
+    if (entity instanceof ResultEntity resultEntity) {
+      points.add(transformToPoint(resultEntity));
+    } else {
+      log.error(
+          "I don't know how to handle an entity of class {}", entity.getClass().getSimpleName());
+    }
+
+    return points;
+  }
+
+  private void write(Point point) {
+    if (point != null) {
+      connector.getSession().write(point);
+    }
+  }
+
+  private void writeAll(Collection<Point> points) {
+    if (!points.isEmpty()) {
+      BatchPoints batchPoints = BatchPoints.builder().points(points).build();
+      connector.getSession().write(batchPoints);
+    }
+  }
+
+  private static String transformToMeasurementName(String filename) {
+    return filename.trim().replaceAll("\\W", "_");
+  }
+}
diff --git a/src/main/scala/edu/ie3/simona/service/em/EmCommunicationCore.scala b/src/main/scala/edu/ie3/simona/service/em/EmCommunicationCore.scala
new file mode 100644
index 0000000000..3f5ec0672a
--- /dev/null
+++ b/src/main/scala/edu/ie3/simona/service/em/EmCommunicationCore.scala
@@ -0,0 +1,682 @@
+/*
+ * © 2025.
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.em + +import edu.ie3.datamodel.models.result.system.FlexOptionsResult +import edu.ie3.datamodel.models.value.PValue +import edu.ie3.simona.agent.em.EmAgent.Message +import edu.ie3.simona.api.data.connection.ExtEmDataConnection.EmMode +import edu.ie3.simona.api.data.model.em +import edu.ie3.simona.api.data.model.em.* +import edu.ie3.simona.api.ontology.em.* +import edu.ie3.simona.exceptions.CriticalFailureException +import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage.* +import edu.ie3.simona.ontology.messages.flex.{FlexType, FlexibilityMessage, PowerLimitFlexOptions} +import edu.ie3.simona.service.em.EmCommunicationCore.EmAgentState +import edu.ie3.simona.util.CollectionUtils.asJava +import edu.ie3.simona.util.SimonaConstants.PRE_INIT_TICK +import edu.ie3.simona.util.{ReceiveDataMap, ReceiveMultiDataMap} +import edu.ie3.util.scala.quantities.QuantityConversionUtils.* +import org.apache.pekko.actor.typed.ActorRef +import org.slf4j.Logger +import squants.Power + +import java.util.UUID +import scala.collection.mutable +import scala.jdk.CollectionConverters.* +import scala.jdk.OptionConverters.{RichOption, RichOptional} +import scala.math.max +import scala.util.Try + +object EmCommunicationCore { + + def apply(core: EmServiceCore): EmCommunicationCore = { + val uuidToAgent = core.uuidToAgent + + EmCommunicationCore( + core.mode, + core.lastFinishedTick, + uuidToAgent, + core.agentToUuid, + core.uuidToInferior, + core.uuidToParent, + core.completions, + core.nextActivation, + core.allFlexOptions, + uuidToAgent.keys.map(uuid => uuid -> EmAgentState()).toMap, + ) + } + + final case class EmAgentState( + private var receivedActivation: Boolean = false, + private val awaitedFlexOptions: mutable.Set[UUID] = mutable.Set.empty, + private var awaitedSetPoint: Boolean = false, + private var waitingForInternal: Boolean = false, + private var waitingForRelease: Boolean = false, + ) { + def setReceivedRequest(): Unit = { + receivedActivation = true + waitingForInternal = true + awaitedSetPoint = true + } + + def setWaitingForRelease(): Unit = { + waitingForRelease = true + } + + def setReceivedRelease(): Unit = { + receivedActivation = true + waitingForInternal = false + waitingForRelease = false + } + + def addSendRequest(request: UUID): Unit = { + awaitedFlexOptions.add(request) + } + + def addSendRequests(requests: Seq[UUID]): Unit = { + awaitedFlexOptions.addAll(requests) + } + + def handleReceivedFlexOption(flexOption: UUID): Unit = { + awaitedFlexOptions.remove(flexOption) + waitingForInternal = false + + if awaitedFlexOptions.isEmpty then { + waitingForInternal = true + } + } + + def handleReceivedFlexOptions(flexOptions: Seq[UUID]): Unit = { + flexOptions.foreach(awaitedFlexOptions.remove) + waitingForInternal = false + + if awaitedFlexOptions.isEmpty then { + waitingForInternal = true + } + } + + def setReceivedSetPoint(): Unit = { + awaitedSetPoint = false + waitingForInternal = true + } + + def setWaitingForInternal(value: Boolean): Unit = { + waitingForInternal = value + } + + def getAwaited: Set[UUID] = awaitedFlexOptions.toSet + + def isWaitingForActivation: Boolean = !receivedActivation + + def isWaitingForExtern: Boolean = + (awaitedFlexOptions.nonEmpty || awaitedSetPoint) && !waitingForInternal + + def isWaitingForSetPoint: Boolean = awaitedSetPoint + + def isWaitingForRelease: Boolean 
= waitingForRelease + + def isWaitingForInternal: Boolean = waitingForInternal + + def isActivated: Boolean = receivedActivation + + def clear(): Unit = { + receivedActivation = false + awaitedFlexOptions.clear + awaitedSetPoint = false + waitingForInternal = false + } + } +} + +case class EmCommunicationCore( + override val mode: EmMode, + override val lastFinishedTick: Long = PRE_INIT_TICK, + override val uuidToAgent: Map[UUID, ActorRef[Message]] = Map.empty, + override val agentToUuid: Map[ + ActorRef[FlexRequest] | ActorRef[FlexResponse], + UUID, + ] = Map.empty, + override val uuidToInferior: Map[UUID, Set[UUID]] = Map.empty, + override val uuidToParent: Map[UUID, UUID] = Map.empty, + override val completions: ReceiveDataMap[UUID, FlexCompletion] = + ReceiveDataMap.empty, + override val nextActivation: Map[UUID, Long] = Map.empty, + override val allFlexOptions: Map[UUID, FlexOptions] = Map.empty, + emStates: Map[UUID, EmAgentState] = Map.empty, + disaggregated: Map[UUID, Boolean] = Map.empty, + requestedFlexType: Map[UUID, FlexType] = Map.empty, + currentSetPoint: Map[UUID, Power] = Map.empty, + activatedAgents: Set[UUID] = Set.empty, + expectDataFrom: ReceiveMultiDataMap[UUID, EmData] = + ReceiveMultiDataMap.empty, +) extends EmServiceCore { + + override def handleExtMessage(tick: Long, extMsg: EmDataMessageFromExt)(using + log: Logger + ): (EmServiceCore, Option[EmDataResponseMessageToExt]) = extMsg match { + case requestEmCompletion: RequestEmCompletion => + // finish tick and return next tick + val extTick = requestEmCompletion.tick + + if extTick != tick then { + throw new CriticalFailureException( + s"Received completion request for tick '$extTick', while being in tick '$tick'." + ) + } else { + log.info(s"Request to finish for tick '$tick' received.") + + // deactivate agents by sending an IssueNoControl message + // activatedAgents.map(uuidToAgent).foreach(_ ! IssueNoControl(tick)) + + val nextTick: java.util.Optional[java.lang.Long] = + if activatedAgents.nonEmpty then { + requestEmCompletion.maybeNextTick + } else getMaybeNextTick.map(long2Long).toJava + + ( + copy(lastFinishedTick = tick), + Some(new EmCompletion(nextTick)), + ) + } + + case provideEmData: ProvideEmData => + log.warn(s"Handling ext message: $provideEmData") + val extTick = provideEmData.tick + + // handling of requests + val (flexRelease, flexRequest) = + provideEmData.flexRequests.asScala.partition { case (_, request) => + request.releaseControl() + } + + flexRelease.keys.foreach { uuid => + log.warn(s"Release control for: $uuid") + + val inferior = uuidToInferior(uuid) + + uuidToAgent.get(uuid).foreach { agent => + // update the em states of the inferior + inferior.flatMap(emStates.get).foreach(_.setWaitingForRelease()) + + agent ! IssueNoControl(tick) + } + } + + val requestMapping = flexRequest.keys.flatMap { uuid => + if emStates(uuid).isWaitingForActivation then { + + uuidToAgent.get(uuid).map { agent => + // update the em state + emStates(uuid).setReceivedRequest() + + agent ! 
FlexShiftActivation( + tick, + requestedFlexType.getOrElse(uuid, FlexType.PowerLimit), + ) + + log.warn(s"Inferior: ${uuidToInferior.get(uuid)}") + + val count = max( + Try { + uuidToInferior(uuid).count { id => + nextActivation(id) <= tick + } + }.getOrElse(1), + 1, + ) + + // uuid -> number of sent flex requests + uuid -> count + } + } else None + }.toMap + + val updatedDisaggregated = disaggregated ++ flexRequest.map { + case (uuid, request) => uuid -> request.disaggregated.booleanValue + }.toMap + + // handling of set points + val setPointMapping = provideEmData + .setPoints() + .asScala + .flatMap { case (receiver, setPoint) => + val agent = uuidToAgent(receiver) + log.warn(s"Receiver of set point: $agent") + + // updates the em state + emStates(receiver).setReceivedSetPoint() + + setPoint.power.toScala.flatMap( + _.getP.toScala.map(_.toSquants) + ) match { + case Some(power) => + agent ! IssuePowerControl(extTick, power) + + case None => + agent ! IssueNoControl(extTick) + } + + val count = Try { + uuidToInferior(receiver).count { id => emStates(id).isActivated } + }.getOrElse(0) + + // sender -> number of set points to send + Some(receiver -> count) + } + .toMap + + /* update internal state */ + val mapping = requestMapping ++ setPointMapping + + val updatedExpectDataFrom = expectDataFrom.addExpectedKeys(mapping) + log.warn(s"ExpectDataFrom: $updatedExpectDataFrom") + + // check if we need to wait for internal answers + val msgToExt = getMsgToExtOption + + // update state data + val newState = copy( + disaggregated = updatedDisaggregated, + expectDataFrom = updatedExpectDataFrom, + completions = completions.addExpectedKeys(mapping.keySet), + ) + + log.warn(s"EmStates: ${newState.emStates}") + log.warn(s"Message to ext: $msgToExt") + + (newState, msgToExt) + + case comMsg: EmCommunicationMessages => + log.warn(s"Handling ext message: $comMsg") + + val messages = comMsg.messages.asScala + val extTick = comMsg.tick + + val mapping = messages.flatMap { msg => + val receiver = msg.receiver + val sender = msg.sender + + msg.content match { + case _: FlexOptionRequest => + uuidToAgent.get(receiver) match { + case Some(agent) => + // update the em state + emStates(receiver).setReceivedRequest() + + agent ! FlexShiftActivation( + tick, + requestedFlexType.getOrElse(receiver, FlexType.PowerLimit), + ) + + val count = max( + Try { + uuidToInferior(receiver).count { id => + nextActivation(id) <= tick + } + }.getOrElse(1), + 1, + ) + + // uuid -> number of sent flex requests + Some(receiver -> count) + + case None => + log.warn(s"Cannot send flex request to receiver '$receiver'.") + None + } + + case flexOption: FlexOptions => + val agent = uuidToAgent(receiver) + val emState = emStates(receiver) + + // update the em state + emState.handleReceivedFlexOption(sender) + + flexOption match { + case options: em.PowerLimitFlexOptions => + // send flex options to agent + agent ! ProvideFlexOptions( + sender, + PowerLimitFlexOptions( + options.pRef.toSquants, + options.pMin.toSquants, + options.pMax.toSquants, + ), + ) + case other => + log.warn(s"Cannot handle flex option: $other") + } + + // receiver -> number of received flex options + Some(receiver -> 1) + + case flexOptions: FlexOptionsResult => + val agent = uuidToAgent(receiver) + + val emState = emStates(receiver) + + // update the em state + emState.handleReceivedFlexOption(sender) + + // send flex options to agent + agent ! 
ProvideFlexOptions( + sender, + PowerLimitFlexOptions( + flexOptions.getpRef.toSquants, + flexOptions.getpMin.toSquants, + flexOptions.getpMax.toSquants, + ), + ) + + // receiver -> number of received flex options + Some(receiver -> 1) + + case setPoint: EmSetPoint => + val agent = uuidToAgent(receiver) + log.warn(s"Receiver of set point: $agent") + + // updates the em state + emStates(receiver).setReceivedSetPoint() + + setPoint.power.toScala.flatMap( + _.getP.toScala.map(_.toSquants) + ) match { + case Some(power) => + agent ! IssuePowerControl(extTick, power) + + case None => + agent ! IssueNoControl(extTick) + } + + val count = Try { + uuidToInferior(receiver).count { id => emStates(id).isActivated } + }.getOrElse(0) + + // sender -> number of set points to send + Some(receiver -> count) + } + }.toMap + + val updatedExpectDataFrom = expectDataFrom.addExpectedKeys(mapping) + + log.warn(s"ExpectDataFrom: $updatedExpectDataFrom, Changes: $mapping") + + // check if we need to wait for internal answers + val msgToExt = getMsgToExtOption + + // update state data + val newState = copy( + expectDataFrom = updatedExpectDataFrom, + completions = completions.addExpectedKeys(mapping.keySet), + ) + + log.warn(s"EmStates: ${newState.emStates}") + log.warn(s"Message to ext: $msgToExt") + + (newState, msgToExt) + + case other => + log.warn(s"Deprecated message received! Message: $other") + + (this, None) + } + + override def handleFlexResponse( + tick: Long, + flexResponse: FlexResponse, + receiver: Either[UUID, ActorRef[FlexResponse]], + )(using log: Logger): (EmServiceCore, Option[EmDataResponseMessageToExt]) = { + val receiverUuid = receiver match { + case Left(value) => + value + case Right(ref) => + agentToUuid(ref) + } + + flexResponse match { + case scheduleFlexActivation@ScheduleFlexActivation( + modelUuid, + _, + scheduleKey, + ) => + log.warn(s"$scheduleFlexActivation not handled!") + (this, None) + + case ProvideFlexOptions(sender, flexOptions) => + // flex option to ext + val (resultToExt, pRef) = flexOptions match { + case PowerLimitFlexOptions(ref, min, max) => + val flexOptionResult = new em.PowerLimitFlexOptions( + receiverUuid, + sender, + ref.toQuantity, + min.toQuantity, + max.toQuantity, + ) + + if disaggregated.contains(receiverUuid) then { + uuidToInferior(receiverUuid) + .flatMap(allFlexOptions.get) + .foreach { result => + val model = result match { + case options: em.EnergyBoundariesFlexOptions => + options.model + case options: em.PowerLimitFlexOptions => + options.model + } + + flexOptionResult.addDisaggregated(model, result) + } + } + + (flexOptionResult, ref) + + case other => + throw CriticalFailureException( + s"Flex option type '$other' is currently not supported!" 
+ ) + } + + // wrap the result, if sender and receiver are not the same, since we want to use ext communication + val msg = if receiverUuid != sender then { + new EmCommunicationMessage(receiverUuid, sender, resultToExt) + } else resultToExt + + val updated = expectDataFrom.addData(sender, msg) + + if updated.isComplete || updated.hasCompleted then { + val (data, updatedExpectDataFrom) = updated.getFinished + + // should no longer wait for internal data + data.keys.foreach(emStates(_).setWaitingForInternal(false)) + log.warn(s"Updated EmStates: $emStates") + + ( + copy( + allFlexOptions = allFlexOptions.updated(sender, resultToExt), + currentSetPoint = currentSetPoint.updated(sender, pRef), + expectDataFrom = updatedExpectDataFrom, + ), + Some(new EmResultResponse(data.asJava)), + ) + } else { + ( + copy( + allFlexOptions = allFlexOptions.updated(sender, resultToExt), + currentSetPoint = currentSetPoint.updated(sender, pRef), + expectDataFrom = updated, + ), + None, + ) + } + + + case completion@FlexCompletion( + sender, + requestAtNextActivation, + requestAtTick, + ) => + // the completion can be sent directly to the receiver, since it's not used by the external communication + uuidToAgent(receiverUuid) ! completion + emStates(sender).setWaitingForInternal(false) + + val updatedData = completions.addData(sender, completion) + + if updatedData.isComplete then { + emStates.foreach(_._2.clear()) + log.warn(s"Cleared EmStates: $emStates") + + // the next activations + val additionalActivation = updatedData.receivedData.flatMap { + case (uuid, msg) => + msg.requestAtTick.map(uuid -> _) + } + + ( + copy( + lastFinishedTick = tick, + completions = ReceiveDataMap.empty, + requestedFlexType = Map.empty, + allFlexOptions = Map.empty, + currentSetPoint = Map.empty, + activatedAgents = Set.empty, + expectDataFrom = ReceiveMultiDataMap.empty, + nextActivation = nextActivation ++ additionalActivation, + ), + Some(new EmCompletion(getMaybeNextTick.map(long2Long).toJava)), + ) + } else { + val msgToExt = getMsgToExtOption + log.warn(s"Not finished! Expected: ${updatedData.getExpectedKeys}") + log.warn(s"EmStates: $emStates") + log.warn(s"Message to ext: $msgToExt") + + (copy(completions = updatedData), msgToExt) + } + + + // not supported + case other => + log.warn(s"Flex response $other is not supported!") + + (this, None) + } + } + + override def handleFlexRequest( + flexRequest: FlexRequest, + receiver: ActorRef[FlexRequest], + )(using log: Logger): (EmServiceCore, Option[EmDataResponseMessageToExt]) = { + val receiverUuid = agentToUuid(receiver) // the controlled em + val sender = uuidToParent(receiverUuid) // the controlling em + + val updated = flexRequest match { + case FlexActivation(tick, flexType) => + // update the em state => waiting for external flex option provision + emStates(sender).addSendRequest(receiverUuid) + + // send request to ext + expectDataFrom.addData( + sender, + new EmCommunicationMessage( + receiverUuid, + sender, + new FlexOptionRequest( + receiverUuid, + disaggregated.getOrElse(sender, false), + ), + ), + ) + + case control: IssueFlexControl => + val state = emStates(receiverUuid) + + if state.isWaitingForRelease then { + // we are waiting for release, therefore, we are not sending data to ext + + state.setReceivedRelease() + receiver ! 
control + + // since we don't expect data, we simply return this store + expectDataFrom + + } else { + state.setWaitingForInternal(false) + + // send set point to ext + log.warn( + s"Receiver $receiverUuid got flex control message from $sender" + ) + + val power = control match { + case IssueNoControl(tick) => + log.warn(s"Set points: $currentSetPoint") + new PValue(currentSetPoint(receiverUuid).toQuantity) + + case IssuePowerControl(tick, setPower) => + new PValue(setPower.toQuantity) + + case other => + throw new CriticalFailureException( + s"Flex control $other is not supported!" + ) + } + + expectDataFrom.addData( + sender, + new EmCommunicationMessage( + receiverUuid, + sender, + new EmSetPoint(receiverUuid, power), + ), + ) + } + + case other => + log.warn(s"$other is not supported!") + expectDataFrom + } + + if updated.isComplete then { + val data = updated.receivedData + + // should no longer wait for internal data + data.keys.foreach { uuid => emStates(uuid).setWaitingForInternal(false) } + log.warn(s"Updated EmStates: $emStates") + + ( + copy(expectDataFrom = ReceiveMultiDataMap.empty), + Some(new EmResultResponse(data.asJava)), + ) + } else { + val msgToExt = getMsgToExtOption + log.warn(s"Not finished! Expected: ${updated.getExpectedKeys}") + log.warn(s"EmStates: $emStates") + log.warn(s"Message to ext: $msgToExt") + + (copy(expectDataFrom = updated), msgToExt) + } + } + + private def getMsgToExtOption(using + log: Logger + ): Option[EmDataResponseMessageToExt] = { + if emStates.exists(_._2.isWaitingForInternal) then { + None + } else { + val awaited = emStates.filter((_, x) => x.isWaitingForExtern).map { + case (uuid, state) => uuid -> state.getAwaited + } + + log.info(s"Waiting for external data: $awaited") + + if awaited.isEmpty then None + else Some(new EmResultResponse(Map.empty.asJava)) + } + } +} diff --git a/src/main/scala/edu/ie3/simona/service/em/EmServiceBaseCore.scala b/src/main/scala/edu/ie3/simona/service/em/EmServiceBaseCore.scala index b1db55d428..775936cc2b 100644 --- a/src/main/scala/edu/ie3/simona/service/em/EmServiceBaseCore.scala +++ b/src/main/scala/edu/ie3/simona/service/em/EmServiceBaseCore.scala @@ -7,45 +7,48 @@ package edu.ie3.simona.service.em import edu.ie3.simona.agent.em.EmAgent -import edu.ie3.simona.api.data.model.em.ExtendedFlexOptionsResult +import edu.ie3.simona.api.data.connection.ExtEmDataConnection.EmMode +import edu.ie3.simona.api.data.model.em +import edu.ie3.simona.api.data.model.em.{EmSetPoint, FlexOptions} import edu.ie3.simona.api.ontology.em.* import edu.ie3.simona.exceptions.CriticalFailureException -import edu.ie3.simona.ontology.messages.ServiceMessage.EmServiceRegistration import edu.ie3.simona.ontology.messages.flex.FlexType.PowerLimit import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage.* import edu.ie3.simona.ontology.messages.flex.PowerLimitFlexOptions +import edu.ie3.simona.util.CollectionUtils.asJava import edu.ie3.simona.util.ReceiveDataMap import edu.ie3.simona.util.SimonaConstants.PRE_INIT_TICK -import edu.ie3.simona.util.TickUtil.TickLong import org.apache.pekko.actor.typed.ActorRef import org.slf4j.Logger -import java.time.ZonedDateTime import java.util.UUID -import scala.jdk.CollectionConverters.{ - ListHasAsScala, - MapHasAsJava, - MapHasAsScala, - SetHasAsScala, -} +import scala.jdk.CollectionConverters.MapHasAsScala /** Basic service core for an [[ExtEmDataService]]. + * @param mode + * The em mode of the data connection. * @param lastFinishedTick * The last tick that was completed. 
* @param uuidToAgent * Map: uuid to em agent reference. - * @param flexOptions - * ReceiveDataMap: uuid to flex option result. - * @param allFlexOptions - * Map: uuid to flex option result. - * @param completions - * ReceiveDataMap: uuid to completions. - * @param structure + * @param agentToUuid + * Map: em agent reference to uuid. + * @param uuidToInferior * A map that contains information about uuids of inferior em agents. This * information is used to determine the disaggregated flex options. + * @param uuidToParent + * A map: uuid to parent uuid. + * @param completions + * ReceiveDataMap: uuid to completions. + * @param nextActivation + * A map: uuid to next activation tick. + * @param allFlexOptions + * Map: uuid to flex option result. + * @param flexOptions + * ReceiveDataMap: uuid to flex option result. * @param disaggregated * A map: uuid of em agent to boolean. It defines for which em agent we - * should return disaggregated flex optios. + * should return disaggregated flex options. * @param sendOptionsToExt * True, if flex options should be sent to the external simulation. * @param canHandleSetPoints @@ -56,83 +59,33 @@ import scala.jdk.CollectionConverters.{ * Option for em set points that needs to be handled at a later time. */ final case class EmServiceBaseCore( + override val mode: EmMode, override val lastFinishedTick: Long = PRE_INIT_TICK, override val uuidToAgent: Map[UUID, ActorRef[EmAgent.Message]] = Map.empty, - flexOptions: ReceiveDataMap[UUID, ExtendedFlexOptionsResult] = - ReceiveDataMap.empty, - override val allFlexOptions: Map[UUID, ExtendedFlexOptionsResult] = - Map.empty, + override val agentToUuid: Map[ + ActorRef[FlexRequest] | ActorRef[FlexResponse], + UUID, + ] = Map.empty, + override val uuidToInferior: Map[UUID, Set[UUID]] = Map.empty, + override val uuidToParent: Map[UUID, UUID] = Map.empty, override val completions: ReceiveDataMap[UUID, FlexCompletion] = ReceiveDataMap.empty, - structure: Map[UUID, Set[UUID]] = Map.empty, + override val nextActivation: Map[UUID, Long] = Map.empty, + override val allFlexOptions: Map[UUID, FlexOptions] = Map.empty, + flexOptions: ReceiveDataMap[UUID, FlexOptions] = ReceiveDataMap.empty, disaggregated: Map[UUID, Boolean] = Map.empty, sendOptionsToExt: Boolean = false, canHandleSetPoints: Boolean = false, - setPointOption: Option[ProvideEmSetPointData] = None, + setPointOption: Option[Map[UUID, EmSetPoint]] = None, ) extends EmServiceCore { - override def handleRegistration( - emServiceRegistration: EmServiceRegistration - ): EmServiceBaseCore = { - val ref = emServiceRegistration.requestingActor - val modelUuid = emServiceRegistration.inputUuid - val parentUuid = emServiceRegistration.parentUuid - - val updatedStructure = parentUuid match { - case Some(parent) => - structure.get(parent) match { - case Some(subEms) => - val allSubEms = subEms + modelUuid - structure ++ Map(parent -> allSubEms) - case None => - structure ++ Map(parent -> Set(modelUuid)) - } - - case _ => - // since the given em agent has no parent, no changes to the parent structure are needed - // the actual em agent is added to the structure later - structure - } - - copy( - uuidToAgent = uuidToAgent + (modelUuid -> ref), - completions = completions.addExpectedKeys(Set(modelUuid)), - structure = updatedStructure ++ Map(modelUuid -> Set.empty[UUID]), - ) - } - override def handleExtMessage(tick: Long, extMsg: EmDataMessageFromExt)(using log: Logger ): (EmServiceBaseCore, Option[EmDataResponseMessageToExt]) = extMsg match { - case requestEmFlexResults: 
RequestEmFlexResults => - val tick = requestEmFlexResults.tick - val emEntities = requestEmFlexResults.emEntities.asScala - val disaggregatedFlex = requestEmFlexResults.disaggregated - - val requests = emEntities.flatMap { entity => - uuidToAgent.get(entity).map { ref => - ref ! FlexActivation(tick, PowerLimit) - - entity -> disaggregatedFlex - } - }.toMap - - ( - copy( - flexOptions = ReceiveDataMap(emEntities.toSet), - disaggregated = disaggregated ++ requests, - sendOptionsToExt = true, - ), - None, - ) - case provideEmData: ProvideEmData => - if !provideEmData.flexOptions.isEmpty || !provideEmData - .setPoints() - .isEmpty - then { + if !provideEmData.flexOptions.isEmpty then { log.warn( - s"We received the following data '$provideEmData'. The base service can currently not handle the provided flex options and set points." + s"We received the following data '$provideEmData'. The base service can currently not handle the provided flex options." ) } @@ -146,43 +99,45 @@ final case class EmServiceBaseCore( } }.toMap - ( - copy( - flexOptions = ReceiveDataMap(flexRequests.keySet), - disaggregated = disaggregated ++ flexRequests, - sendOptionsToExt = true, - ), - None, + val updatedState = copy( + flexOptions = ReceiveDataMap(flexRequests.keySet), + disaggregated = disaggregated ++ flexRequests, + sendOptionsToExt = true, ) - case provideEmSetPoints: ProvideEmSetPointData => - if canHandleSetPoints then { - handleSetPoint(tick, provideEmSetPoints, log) + // handle set points + val setPoints = provideEmData.setPoints().asScala.toMap - (this, None) - } else { - val tick = provideEmSetPoints.tick - val emEntities = provideEmSetPoints.emSetPoints.keySet.asScala - - emEntities.foreach { entity => - uuidToAgent.get(entity) match { - case Some(ref) => - // activate the necessary em agent, this is needed, because an em agent needs to know - // its current flex option to properly handle the given set point - ref ! FlexActivation(tick, PowerLimit) - case None => - log.warn(s"Received entity: $entity") + if setPoints.nonEmpty then { + + if canHandleSetPoints then { + handleSetPoint(tick, setPoints, log) + + (updatedState, None) + } else { + val entities = setPoints.keySet + + entities.foreach { entity => + uuidToAgent.get(entity) match { + case Some(ref) => + // activate the necessary em agent, this is needed, because an em agent needs to know + // its current flex option to properly handle the given set point + ref ! FlexActivation(tick, PowerLimit) + case None => + log.warn(s"Received entity: $entity") + } } + + ( + updatedState.copy( + flexOptions = updatedState.flexOptions.addExpectedKeys(entities), + setPointOption = Some(setPoints), + ), + None, + ) } - ( - copy( - flexOptions = ReceiveDataMap(emEntities.toSet), - setPointOption = Some(provideEmSetPoints), - ), - None, - ) - } + } else (updatedState, None) case _ => throw new CriticalFailureException( @@ -195,18 +150,25 @@ final case class EmServiceBaseCore( flexResponse: FlexResponse, receiver: Either[UUID, ActorRef[FlexResponse]], )(using - startTime: ZonedDateTime, - log: Logger, + log: Logger ): (EmServiceBaseCore, Option[EmDataResponseMessageToExt]) = { - receiver.foreach(_ ! flexResponse) + + val receiverUuid = receiver match { + case Right(ref) => + ref ! 
flexResponse + agentToUuid(ref) + case Left(uuid) => + uuid + } flexResponse match { case provideFlexOptions: ProvideFlexOptions => val (updated, updatedAdditional) = - handleFlexOptions(tick, provideFlexOptions) + handleFlexOptions(tick, receiverUuid, provideFlexOptions) if updated.isComplete then { // we received all flex options + val data = updated.receivedData data.foreach { case (uuid, flexOption) => @@ -214,7 +176,7 @@ final case class EmServiceBaseCore( // we add the disaggregated flex options addDisaggregatingFlexOptions( flexOption, - structure.getOrElse(uuid, Set.empty), + uuidToInferior.getOrElse(uuid, Set.empty), ) } } @@ -226,8 +188,12 @@ final case class EmServiceBaseCore( ) if sendOptionsToExt then { + val dataToSend = data.map { case (uuid, option) => + uuid -> List(option) + } + // we have received an option request, that will now be answered - (updatedCore, Some(new FlexOptionsResponse(data.asJava))) + (updatedCore, Some(new FlexOptionsResponse(dataToSend.asJava))) } else { setPointOption match { @@ -243,6 +209,8 @@ final case class EmServiceBaseCore( } } else { + log.warn(s"Missing flex options for: ${updated.getExpectedKeys}") + ( copy( flexOptions = updated, @@ -253,23 +221,46 @@ final case class EmServiceBaseCore( } case completion: FlexCompletion => - val (updated, extMsgOption, finished) = + val (updated, extMsgOption, nextTick, finished) = handleCompletion(tick, completion) if finished then { + // the next activations + val updatedNextActivation = + nextActivation ++ updated.receivedData.flatMap { case (uuid, msg) => + msg.requestAtTick.map(uuid -> _) + } + + val expectedCompletions = nextTick match { + case Some(t) => + val keys = updatedNextActivation.filter { case (_, activation) => + activation == t + }.keySet + log.warn(s"Keys: $keys") + ReceiveDataMap[UUID, FlexCompletion](keys) + case None => + updated + } + + log.warn(s"$updated") + ( copy( lastFinishedTick = tick, - completions = updated, - allFlexOptions = Map.empty, + completions = expectedCompletions, disaggregated = Map.empty, sendOptionsToExt = false, canHandleSetPoints = false, + nextActivation = updatedNextActivation, ), extMsgOption, ) - } else (copy(completions = updated), extMsgOption) + } else { + log.warn(s"$updated") + + (copy(completions = updated), extMsgOption) + } case _ => (this, None) @@ -280,8 +271,7 @@ final case class EmServiceBaseCore( flexRequest: FlexRequest, receiver: ActorRef[FlexRequest], )(using - startTime: ZonedDateTime, - log: Logger, + log: Logger ): (EmServiceBaseCore, Option[EmDataResponseMessageToExt]) = { log.debug(s"$receiver: $flexRequest") receiver ! flexRequest @@ -292,26 +282,27 @@ final case class EmServiceBaseCore( /** Method to handle flex options. * @param tick * Current tick of the service. + * @param receiver + * The receiver of the flex options. * @param provideFlexOptions * The provided flex options. - * @param startTime - * The start time of the simulation. 
* @return * An updated service core and a map: uuid to flex options */ private def handleFlexOptions( tick: Long, + receiver: UUID, provideFlexOptions: ProvideFlexOptions, - )(using startTime: ZonedDateTime): ( - ReceiveDataMap[UUID, ExtendedFlexOptionsResult], - Map[UUID, ExtendedFlexOptionsResult], + ): ( + ReceiveDataMap[UUID, FlexOptions], + Map[UUID, FlexOptions], ) = provideFlexOptions match { case ProvideFlexOptions( modelUuid: UUID, PowerLimitFlexOptions(ref, min, max), ) => - val result = new ExtendedFlexOptionsResult( - tick.toDateTime, + val result = new em.PowerLimitFlexOptions( + receiver, modelUuid, min.toQuantity, ref.toQuantity, @@ -319,11 +310,15 @@ final case class EmServiceBaseCore( ) if flexOptions.expects(modelUuid) then { + println(s"Received expected: $modelUuid") + ( flexOptions.addData(modelUuid, result), - allFlexOptions, + allFlexOptions.updated(modelUuid, result), ) } else { + println(s"Received unexpected: $modelUuid") + ( flexOptions, allFlexOptions.updated(modelUuid, result), @@ -338,5 +333,15 @@ final case class EmServiceBaseCore( object EmServiceBaseCore { - def empty: EmServiceBaseCore = EmServiceBaseCore() + def apply(core: EmServiceCore): EmServiceBaseCore = EmServiceBaseCore( + core.mode, + core.lastFinishedTick, + core.uuidToAgent, + core.agentToUuid, + core.uuidToInferior, + core.uuidToParent, + core.completions, + core.nextActivation, + core.allFlexOptions, + ) } diff --git a/src/main/scala/edu/ie3/simona/service/em/EmServiceCore.scala b/src/main/scala/edu/ie3/simona/service/em/EmServiceCore.scala index 4c5752f24b..bb14b8d122 100644 --- a/src/main/scala/edu/ie3/simona/service/em/EmServiceCore.scala +++ b/src/main/scala/edu/ie3/simona/service/em/EmServiceCore.scala @@ -6,10 +6,10 @@ package edu.ie3.simona.service.em -import edu.ie3.datamodel.models.result.system.FlexOptionsResult import edu.ie3.datamodel.models.value.{PValue, SValue} import edu.ie3.simona.agent.em.EmAgent -import edu.ie3.simona.api.data.model.em.ExtendedFlexOptionsResult +import edu.ie3.simona.api.data.connection.ExtEmDataConnection.EmMode +import edu.ie3.simona.api.data.model.em.{EmSetPoint, FlexOptions} import edu.ie3.simona.api.ontology.em.* import edu.ie3.simona.ontology.messages.ServiceMessage.{ EmFlexMessage, @@ -29,13 +29,14 @@ import tech.units.indriya.ComparableQuantity import java.time.ZonedDateTime import java.util.UUID import javax.measure.quantity.Power as PsdmPower -import scala.jdk.CollectionConverters.MapHasAsScala import scala.jdk.OptionConverters.{RichOption, RichOptional} /** Trait for all em service cores. */ trait EmServiceCore { + val mode: EmMode + /** The last tick that was completed. */ val lastFinishedTick: Long @@ -44,29 +45,34 @@ trait EmServiceCore { */ val uuidToAgent: Map[UUID, ActorRef[EmAgent.Message]] + val agentToUuid: Map[ActorRef[FlexRequest] | ActorRef[FlexResponse], UUID] + + val uuidToInferior: Map[UUID, Set[UUID]] + + val uuidToParent: Map[UUID, UUID] + /** Map: uuid to flex option result. */ - val allFlexOptions: Map[UUID, FlexOptionsResult] + val allFlexOptions: Map[UUID, FlexOptions] /** ReceiveDataMap: uuid to completions. */ val completions: ReceiveDataMap[UUID, FlexCompletion] + val nextActivation: Map[UUID, Long] + /** Extension to convert a squants power value to a psdm power value. */ extension (value: Power) { def toQuantity: ComparableQuantity[PsdmPower] = value.toMegawatts.asMegaWatt } - /** Method to handle a registration message. - * @param emServiceRegistration - * The registration to handle. 
- * @return - * An updated service core. - */ - def handleRegistration( - emServiceRegistration: EmServiceRegistration - ): EmServiceCore + def toInternal: InternalCore = InternalCore(this) + + def toExternal: EmServiceCore = mode match { + case EmMode.BASE => EmServiceBaseCore(this) + case EmMode.EM_COMMUNICATION => EmCommunicationCore(this) + } /** Method to handle the received message from the external simulation. * @param tick @@ -145,43 +151,42 @@ trait EmServiceCore { /** Method to handle the set points provided by the external simulation. * @param tick * Current tick of the service. - * @param provideEmSetPoints + * @param setPoints * The set points to handle. * @param log * Logger for logging messages. */ final def handleSetPoint( tick: Long, - provideEmSetPoints: ProvideEmSetPointData, + setPoints: Map[UUID, EmSetPoint], log: Logger, ): Unit = { - log.info(s"Handling of: $provideEmSetPoints") - - provideEmSetPoints.emSetPoints.asScala - .foreach { case (agent, setPoint) => - uuidToAgent.get(agent) match { - case Some(receiver) => - val (pOption, qOption) = setPoint.power.toScala match { - case Some(sValue: SValue) => - (sValue.getP.toScala, sValue.getQ.toScala) - case Some(pValue: PValue) => - (pValue.getP.toScala, None) - case None => - (None, None) - } - - (pOption, qOption) match { - case (Some(activePower), _) => - receiver ! IssuePowerControl(tick, activePower.toSquants) - - case (None, _) => - receiver ! IssueNoControl(tick) - } - - case None => - log.warn(s"No em agent with uuid '$agent' registered!") - } + log.info(s"Handling of set points: $setPoints") + + setPoints.foreach { case (agent, setPoint) => + uuidToAgent.get(agent) match { + case Some(receiver) => + val (pOption, qOption) = setPoint.power.toScala match { + case Some(sValue: SValue) => + (sValue.getP.toScala, sValue.getQ.toScala) + case Some(pValue: PValue) => + (pValue.getP.toScala, None) + case None => + (None, None) + } + + (pOption, qOption) match { + case (Some(activePower), _) => + receiver ! IssuePowerControl(tick, activePower.toSquants) + + case (None, _) => + receiver ! IssueNoControl(tick) + } + + case None => + log.warn(s"No em agent with uuid '$agent' registered!") } + } } /** Method to handle flex responses from the em agents. @@ -191,8 +196,6 @@ trait EmServiceCore { * From the agent to handle. * @param receiver * The receiver of the agent. - * @param startTime - * The start time of the simulation. * @param log * Logger for logging messages. * @return @@ -203,18 +206,13 @@ trait EmServiceCore { tick: Long, flexResponse: FlexResponse, receiver: Either[UUID, ActorRef[FlexResponse]], - )(using - startTime: ZonedDateTime, - log: Logger, - ): (EmServiceCore, Option[EmDataResponseMessageToExt]) + )(using log: Logger): (EmServiceCore, Option[EmDataResponseMessageToExt]) /** Method to handle flex requests to the em agents. * @param flexRequest * That is sent to an agents. * @param receiver * Of the flex request. - * @param startTime - * The start time of the simulation. * @param log * Logger for logging messages. * @return @@ -224,10 +222,7 @@ trait EmServiceCore { def handleFlexRequest( flexRequest: FlexRequest, receiver: ActorRef[FlexRequest], - )(using - startTime: ZonedDateTime, - log: Logger, - ): (EmServiceCore, Option[EmDataResponseMessageToExt]) + )(using log: Logger): (EmServiceCore, Option[EmDataResponseMessageToExt]) /** Method to add disaggregated flex options to given data. * @param flexOption @@ -236,7 +231,7 @@ trait EmServiceCore { * To derive the needed disaggregated data. 
*/ final def addDisaggregatingFlexOptions( - flexOption: ExtendedFlexOptionsResult, + flexOption: FlexOptions, inferiorAgents: Set[UUID], ): Unit = { inferiorAgents.foreach { inferior => @@ -257,6 +252,7 @@ trait EmServiceCore { final def handleCompletion(tick: Long, completion: FlexCompletion): ( ReceiveDataMap[UUID, FlexCompletion], Option[EmDataResponseMessageToExt], + Option[Long], Boolean, ) = { val updated = completions.addData(completion.modelUuid, completion) @@ -264,27 +260,25 @@ trait EmServiceCore { if updated.isComplete then { val allKeys = updated.receivedData.keySet - val extMsgOption = if tick != INIT_SIM_TICK then { + val (extMsgOption, nextTickOption) = if tick != INIT_SIM_TICK then { // send completion message to external simulation, if we aren't in the INIT_SIM_TICK - Some(new EmCompletion(getMaybeNextTick)) - } else None + val option = getMaybeNextTick + + (Some(new EmCompletion(option.map(long2Long).toJava)), option) + } else (None, None) // every em agent has sent a completion message - (ReceiveDataMap(allKeys), extMsgOption, true) + (updated, extMsgOption, nextTickOption, true) - } else (updated, None, false) + } else (updated, None, None, false) } /** Method to calculate the next tick option. * @return * An option for the next activation tick. */ - private final def getMaybeNextTick: java.util.Optional[java.lang.Long] = - completions.receivedData - .flatMap { case (_, completion) => - completion.requestAtTick - } - .minOption - .map(long2Long) - .toJava + final def getMaybeNextTick: Option[Long] = + completions.receivedData.flatMap { case (_, completion) => + completion.requestAtTick + }.minOption } diff --git a/src/main/scala/edu/ie3/simona/service/em/ExtEmDataService.scala b/src/main/scala/edu/ie3/simona/service/em/ExtEmDataService.scala index ac077ad4f3..c7d1f674e0 100644 --- a/src/main/scala/edu/ie3/simona/service/em/ExtEmDataService.scala +++ b/src/main/scala/edu/ie3/simona/service/em/ExtEmDataService.scala @@ -22,7 +22,10 @@ import edu.ie3.simona.service.ServiceStateData.{ ServiceBaseStateData, } import edu.ie3.simona.service.{ExtDataSupport, SimonaService} -import edu.ie3.simona.util.SimonaConstants.INIT_SIM_TICK +import edu.ie3.simona.util.SimonaConstants.{ + FIRST_TICK_IN_SIMULATION, + INIT_SIM_TICK, +} import org.apache.pekko.actor.typed.ActorRef import org.apache.pekko.actor.typed.scaladsl.{ActorContext, Behaviors} import org.slf4j.{Logger, LoggerFactory} @@ -95,6 +98,7 @@ object ExtEmDataService extends SimonaService with ExtDataSupport { startTime: ZonedDateTime, serviceCore: EmServiceCore, tick: Long = INIT_SIM_TICK, + simulateUntil: Long = FIRST_TICK_IN_SIMULATION, extEmDataMessage: Option[EmDataMessageFromExt] = None, ) extends ServiceBaseStateData @@ -134,10 +138,7 @@ object ExtEmDataService extends SimonaService with ExtDataSupport { )(using log: Logger): Try[(ExtEmDataStateData, Option[Long])] = initServiceData match { case InitExtEmData(extEmDataConnection, startTime) => - val serviceCore = extEmDataConnection.mode match { - case EmMode.BASE => - EmServiceBaseCore.empty - } + val serviceCore = InternalCore(extEmDataConnection.mode) val emDataInitializedStateData = ExtEmDataStateData(extEmDataConnection, startTime, serviceCore) @@ -163,8 +164,8 @@ object ExtEmDataService extends SimonaService with ExtDataSupport { ): Try[ExtEmDataStateData] = registrationMessage match { case emServiceRegistration: EmServiceRegistration => - val updatedCore = - serviceStateData.serviceCore.handleRegistration(emServiceRegistration) + val updatedCore = 
serviceStateData.serviceCore.toInternal + .handleRegistration(emServiceRegistration) if emServiceRegistration.parentEm.isEmpty then { emServiceRegistration.requestingActor ! FlexActivation( @@ -208,27 +209,59 @@ object ExtEmDataService extends SimonaService with ExtDataSupport { (updatedStateData, Some(tick)) } else { - val extMsg = serviceStateData.extEmDataMessage.getOrElse( - throw ServiceException( - "ExtEmDataService was triggered without ExtEmDataMessage available" - ) - ) + log.warn(s"Tick ($tick): ServiceCore -> ${serviceStateData.serviceCore.getClass}, msg -> ${serviceStateData.extEmDataMessage}") - val (updatedCore, msgToExt) = - serviceStateData.serviceCore.handleExtMessage(tick, extMsg)(using - ctx.log - ) + val ((updatedCore, msgToExt), until) = ( + serviceStateData.extEmDataMessage, + serviceStateData.serviceCore, + ) match { + case (Some(simulationUntil: EmSimulationUntil), core) => + ((core.toInternal, None), Some(simulationUntil.tick)) - msgToExt.foreach(serviceStateData.extEmDataConnection.queueExtResponseMsg) + case (Some(extMsg), core: EmCommunicationCore) => + (core.handleExtMessage(tick, extMsg), None) - ( - serviceStateData.copy( - tick = tick, - serviceCore = updatedCore, - extEmDataMessage = None, - ), - None, - ) + case (Some(extMsg), core: EmServiceBaseCore) => + (core.handleExtMessage(tick, extMsg), None) + + case (_, core: InternalCore) if serviceStateData.simulateUntil > tick => + log.warn(s"Received external message with internal core!") + + ((core, None), Some(serviceStateData.simulateUntil)) + + case (Some(extMsg), core) => + (core.toExternal.handleExtMessage(tick, extMsg), None) + + case (None, _) => + throw ServiceException( + "ExtEmDataService was triggered without ExtEmDataMessage available" + ) + } + + until match { + case Some(lastInternalTick) => + ( + serviceStateData.copy( + tick = tick, + serviceCore = updatedCore, + simulateUntil = lastInternalTick, + extEmDataMessage = None, + ), + updatedCore.getMaybeNextTick, + ) + + case None => + msgToExt.foreach(serviceStateData.extEmDataConnection.queueExtResponseMsg) + + ( + serviceStateData.copy( + tick = tick, + serviceCore = updatedCore, + extEmDataMessage = None, + ), + None, + ) + } } } diff --git a/src/main/scala/edu/ie3/simona/service/em/InternalCore.scala b/src/main/scala/edu/ie3/simona/service/em/InternalCore.scala new file mode 100644 index 0000000000..b48c8f9f6e --- /dev/null +++ b/src/main/scala/edu/ie3/simona/service/em/InternalCore.scala @@ -0,0 +1,174 @@ +/* + * © 2025. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.em + +import edu.ie3.simona.agent.em.EmAgent +import edu.ie3.simona.api.data.connection.ExtEmDataConnection.EmMode +import edu.ie3.simona.api.data.model.em.{EmSetPoint, FlexOptions} +import edu.ie3.simona.api.ontology.em.{ + EmDataMessageFromExt, + EmDataResponseMessageToExt, +} +import edu.ie3.simona.ontology.messages.ServiceMessage.EmServiceRegistration +import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage +import edu.ie3.simona.ontology.messages.flex.FlexibilityMessage.{ + FlexCompletion, + FlexRequest, + FlexResponse, + IssueNoControl, +} +import edu.ie3.simona.util.ReceiveDataMap +import edu.ie3.simona.util.SimonaConstants.PRE_INIT_TICK +import org.apache.pekko.actor.typed.ActorRef +import org.slf4j.Logger + +import java.util.UUID + +case class InternalCore( + override val mode: EmMode, + override val lastFinishedTick: Long = PRE_INIT_TICK, + override val uuidToAgent: Map[UUID, ActorRef[EmAgent.Message]] = Map.empty, + override val agentToUuid: Map[ + ActorRef[FlexRequest] | ActorRef[FlexResponse], + UUID, + ] = Map.empty, + override val uuidToInferior: Map[UUID, Set[UUID]] = Map.empty, + override val uuidToParent: Map[UUID, UUID] = Map.empty, + override val completions: ReceiveDataMap[UUID, FlexCompletion] = + ReceiveDataMap.empty, + override val nextActivation: Map[UUID, Long] = Map.empty, + override val allFlexOptions: Map[UUID, FlexOptions] = Map.empty, + disaggregated: Map[UUID, Boolean] = Map.empty, + flexOptions: ReceiveDataMap[UUID, FlexOptions] = ReceiveDataMap.empty, + sendOptionsToExt: Boolean = false, + canHandleSetPoints: Boolean = false, + setPointOption: Option[Map[UUID, EmSetPoint]] = None, +) extends EmServiceCore { + + /** Method to handle a registration message. + * + * @param emServiceRegistration + * The registration to handle. + * @return + * An updated service core. + */ + def handleRegistration( + emServiceRegistration: EmServiceRegistration + ): EmServiceCore = { + val uuid = emServiceRegistration.inputUuid + val ref = emServiceRegistration.requestingActor + + val (updatedInferior, updatedUuidToParent) = + emServiceRegistration.parentUuid match { + case Some(parent) => + val inferior = uuidToInferior.get(parent) match { + case Some(inferiorUuids) => + inferiorUuids ++ Seq(uuid) + case None => + Set(uuid) + } + + ( + uuidToInferior.updated(parent, inferior), + uuidToParent.updated(uuid, parent), + ) + case None => + (uuidToInferior, uuidToParent) + } + + copy( + uuidToAgent = uuidToAgent.updated(uuid, ref), + agentToUuid = agentToUuid.updated(ref, uuid), + uuidToInferior = updatedInferior, + uuidToParent = updatedUuidToParent, + nextActivation = nextActivation.updated(uuid, 0), + ) + } + + override def handleExtMessage(tick: Long, extMsg: EmDataMessageFromExt)(using + log: Logger + ): (EmServiceCore, Option[EmDataResponseMessageToExt]) = { + log.warn("Handling of external message not possible!") + + (this, None) + } + + override def handleFlexResponse( + tick: Long, + flexResponse: FlexResponse, + receiver: Either[UUID, ActorRef[FlexResponse]], + )(using log: Logger): (EmServiceCore, Option[EmDataResponseMessageToExt]) = { + flexResponse match { + case FlexibilityMessage.ProvideFlexOptions(modelUuid, flexOptions) => + receiver match { + case Left(uuid) => uuidToAgent(uuid) ! IssueNoControl(tick) + case Right(ref) => ref ! 
flexResponse + } + + (this, None) + + case FlexCompletion(modelUuid, requestAtNextActivation, requestAtTick) => + (receiver, requestAtTick) match { + case (Left(_), Some(nextTick)) => + ( + copy( + lastFinishedTick = tick, + nextActivation = nextActivation.updated(modelUuid, nextTick), + ), + None, + ) + + case (Left(_), None) => + (copy(lastFinishedTick = tick), None) + + case (Right(ref), Some(nextTick)) => + ref ! flexResponse + + ( + copy(nextActivation = + nextActivation.updated(modelUuid, nextTick) + ), + None, + ) + + case (Right(ref), None) => + ref ! flexResponse + + (this, None) + } + } + } + + override def handleFlexRequest( + flexRequest: FlexRequest, + receiver: ActorRef[FlexRequest], + )(using log: Logger): (EmServiceCore, Option[EmDataResponseMessageToExt]) = { + receiver ! flexRequest + + (this, None) + } +} + +object InternalCore { + def apply(core: EmServiceCore): InternalCore = core match { + case internal: InternalCore => + internal + case external => + InternalCore( + core.mode, + core.lastFinishedTick, + core.uuidToAgent, + core.agentToUuid, + core.uuidToInferior, + core.uuidToParent, + core.completions, + core.nextActivation, + core.allFlexOptions, + ) + } + +} diff --git a/src/main/scala/edu/ie3/simona/service/primary/ExtPrimaryDataService.scala b/src/main/scala/edu/ie3/simona/service/primary/ExtPrimaryDataService.scala new file mode 100644 index 0000000000..b3fa5a2c6a --- /dev/null +++ b/src/main/scala/edu/ie3/simona/service/primary/ExtPrimaryDataService.scala @@ -0,0 +1,244 @@ +/* + * © 2024. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.primary + +import edu.ie3.simona.agent.participant.ParticipantAgent +import edu.ie3.simona.agent.participant.ParticipantAgent.{ + DataProvision, + PrimaryRegistrationSuccessfulMessage, +} +import edu.ie3.simona.api.data.connection.ExtPrimaryDataConnection +import edu.ie3.simona.api.ontology.DataMessageFromExt +import edu.ie3.simona.api.ontology.primary.{ + PrimaryDataMessageFromExt, + ProvidePrimaryData, +} +import edu.ie3.simona.exceptions.WeatherServiceException.InvalidRegistrationRequestException +import edu.ie3.simona.exceptions.{InitializationException, ServiceException} +import edu.ie3.simona.ontology.messages.ServiceMessage.{ + PrimaryServiceRegistrationMessage, + ServiceRegistrationMessage, + ServiceResponseMessage, +} +import edu.ie3.simona.service.Data.PrimaryData +import edu.ie3.simona.service.Data.PrimaryData.RichValue +import edu.ie3.simona.service.ServiceStateData.{ + InitializeServiceStateData, + ServiceBaseStateData, +} +import edu.ie3.simona.service.{ExtDataSupport, ServiceStateData, SimonaService} +import org.apache.pekko.actor.typed.ActorRef +import org.apache.pekko.actor.typed.scaladsl.ActorContext +import org.slf4j.Logger + +import java.util.UUID +import scala.jdk.CollectionConverters.MapHasAsScala +import scala.jdk.OptionConverters.RichOptional +import scala.util.{Failure, Success, Try} + +object ExtPrimaryDataService extends SimonaService with ExtDataSupport { + + override type S = ExtPrimaryDataStateData + + final case class ExtPrimaryDataStateData( + extPrimaryData: ExtPrimaryDataConnection, + subscribers: List[UUID] = List.empty, + uuidToActorRef: Map[UUID, ActorRef[ParticipantAgent.Request]] = + Map.empty, // subscribers in SIMONA + extPrimaryDataMessage: Option[PrimaryDataMessageFromExt] = None, + maybeNextTick: Option[Long] = None, + ) extends 
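The handleRegistration method of InternalCore above mainly maintains the parent/inferior hierarchy maps. A minimal sketch of that bookkeeping, reduced to plain maps with hypothetical stand-in types (no actor refs):

```scala
// Illustrative sketch (hypothetical, simplified): how registering a child EM
// agent under a parent accumulates the hierarchy maps.
import java.util.UUID

final case class Hierarchy(
    uuidToInferior: Map[UUID, Set[UUID]] = Map.empty,
    uuidToParent: Map[UUID, UUID] = Map.empty,
) {
  def register(uuid: UUID, parent: Option[UUID]): Hierarchy = parent match {
    case Some(p) =>
      val inferior = uuidToInferior.getOrElse(p, Set.empty) + uuid
      copy(
        uuidToInferior = uuidToInferior.updated(p, inferior),
        uuidToParent = uuidToParent.updated(uuid, p),
      )
    case None => this
  }
}

@main def demoHierarchy(): Unit = {
  val parent = UUID.randomUUID()
  val childA = UUID.randomUUID()
  val childB = UUID.randomUUID()

  val h = Hierarchy()
    .register(parent, None)
    .register(childA, Some(parent))
    .register(childB, Some(parent))

  // the parent now maps to both children, each child maps back to the parent
  println(h.uuidToInferior(parent) == Set(childA, childB)) // true
  println(h.uuidToParent(childA) == parent)                // true
}
```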
ServiceBaseStateData + + case class InitExtPrimaryData( + extPrimaryData: ExtPrimaryDataConnection + ) extends InitializeServiceStateData + + override def init( + initServiceData: ServiceStateData.InitializeServiceStateData + )(using log: Logger): Try[(ExtPrimaryDataStateData, Option[Long])] = + initServiceData match { + case InitExtPrimaryData(extPrimaryData) => + val primaryDataInitializedStateData = ExtPrimaryDataStateData( + extPrimaryData + ) + Success( + primaryDataInitializedStateData, + None, + ) + + case invalidData => + Failure( + new InitializationException( + s"Provided init data '${invalidData.getClass.getSimpleName}' for ExtPrimaryService are invalid!" + ) + ) + } + + override protected def handleRegistrationRequest( + registrationMessage: ServiceRegistrationMessage + )(using + serviceStateData: ExtPrimaryDataStateData, + ctx: ActorContext[Message], + ): Try[ExtPrimaryDataStateData] = registrationMessage match { + case PrimaryServiceRegistrationMessage( + requestingActor, + modelUuid, + ) => + Success(handleRegistrationRequest(requestingActor, modelUuid)) + case invalidMessage => + Failure( + InvalidRegistrationRequestException( + s"A primary service provider is not able to handle registration request '$invalidMessage'." + ) + ) + } + + private def handleRegistrationRequest( + agentToBeRegistered: ActorRef[ParticipantAgent.Request], + agentUUID: UUID, + )(using + serviceStateData: ExtPrimaryDataStateData, + ctx: ActorContext[Message], + ): ExtPrimaryDataStateData = { + serviceStateData.uuidToActorRef.get(agentUUID) match { + case None => + // checks if a value class was specified for the agent + val valueClass = serviceStateData.extPrimaryData + .getValueClass(agentUUID) + .toScala + .getOrElse( + throw InvalidRegistrationRequestException( + s"A primary service provider is not able to handle registration request, because there was no value class specified for the agent with id: '$agentUUID'." + ) + ) + + agentToBeRegistered ! 
PrimaryRegistrationSuccessfulMessage( + ctx.self, + 0L, + PrimaryData.getPrimaryDataExtra(valueClass), + ) + ctx.log.info(s"Successful registration for $agentUUID") + + serviceStateData.copy( + subscribers = serviceStateData.subscribers :+ agentUUID, + uuidToActorRef = + serviceStateData.uuidToActorRef + (agentUUID -> agentToBeRegistered), + ) + + case Some(_) => + // actor is already registered, do nothing + ctx.log.warn( + "Sending actor {} is already registered", + agentToBeRegistered, + ) + serviceStateData + } + } + + /** Send out the information to all registered recipients + * + * @param tick + * current tick data should be announced for + * @param serviceStateData + * the current state data of this service + * @return + * the service state data that should be used in the next state (normally + * with updated values) together with the completion message that is sent + * in response to the trigger that was sent to start this announcement + */ + override protected def announceInformation( + tick: Long + )(using + serviceStateData: ExtPrimaryDataStateData, + ctx: ActorContext[Message], + ): (ExtPrimaryDataStateData, Option[Long]) = { // We got activated for this tick, so we expect incoming primary data + serviceStateData.extPrimaryDataMessage.getOrElse( + throw ServiceException( + "ExtPrimaryDataService was triggered without ExtPrimaryDataMessage available" + ) + ) match { + case providedPrimaryData: ProvidePrimaryData => + processDataAndAnnounce(tick, providedPrimaryData) + } + } + + private def processDataAndAnnounce( + tick: Long, + primaryDataMessage: ProvidePrimaryData, + )(using + serviceStateData: ExtPrimaryDataStateData, + ctx: ActorContext[Message], + ): ( + ExtPrimaryDataStateData, + Option[Long], + ) = { + ctx.log.debug( + s"Got activation to distribute primaryData = $primaryDataMessage" + ) + val actorToPrimaryData = primaryDataMessage.primaryData.asScala.flatMap { + case (agent, primaryDataPerAgent) => + serviceStateData.uuidToActorRef + .get(agent) + .map((_, primaryDataPerAgent)) + .orElse { + ctx.log.warn( + "A corresponding actor ref for UUID {} could not be found", + agent, + ) + None + } + } + + val maybeNextTick = primaryDataMessage.maybeNextTick.toScala.map(Long2long) + + // Distribute Primary Data + if actorToPrimaryData.nonEmpty then { + actorToPrimaryData.foreach { case (actor, value) => + value.toPrimaryData match { + case Success(primaryData) => + actor ! DataProvision( + tick, + ctx.self, + primaryData, + maybeNextTick, + ) + case Failure(exception) => + /* Processing of data failed */ + ctx.log.warn( + "Unable to convert received value to primary data. Skipped that data." 
+ + "\nException: {}", + exception, + ) + } + } + } + + ( + serviceStateData.copy(extPrimaryDataMessage = None), + None, + ) + } + + override protected def handleDataMessage( + extMsg: DataMessageFromExt + )(using + serviceStateData: ExtPrimaryDataStateData + ): ExtPrimaryDataStateData = { + extMsg match { + case extPrimaryDataMessage: PrimaryDataMessageFromExt => + serviceStateData.copy( + extPrimaryDataMessage = Some(extPrimaryDataMessage) + ) + } + } + + override protected def handleDataResponseMessage( + extResponseMsg: ServiceResponseMessage + )(implicit + serviceStateData: ExtPrimaryDataStateData + ): ExtPrimaryDataStateData = serviceStateData +} diff --git a/src/main/scala/edu/ie3/simona/service/primary/PrimaryServiceProxy.scala b/src/main/scala/edu/ie3/simona/service/primary/PrimaryServiceProxy.scala index 3c53b74f5a..8c7a50c9b1 100644 --- a/src/main/scala/edu/ie3/simona/service/primary/PrimaryServiceProxy.scala +++ b/src/main/scala/edu/ie3/simona/service/primary/PrimaryServiceProxy.scala @@ -27,11 +27,17 @@ import edu.ie3.datamodel.io.source.{ TimeSeriesMetaInformationSource, } import edu.ie3.datamodel.models.value.Value +import edu.ie3.simona.api.data.connection.ExtPrimaryDataConnection import edu.ie3.simona.agent.participant.ParticipantAgent import edu.ie3.simona.agent.participant.ParticipantAgent.RegistrationFailedMessage import edu.ie3.simona.config.ConfigParams.{SqlParams, TimeStampedCsvParams} import edu.ie3.simona.config.InputConfig.Primary as PrimaryConfig import edu.ie3.simona.exceptions.InitializationException +import edu.ie3.simona.exceptions.{ + InitializationException, + InvalidConfigParameterException, +} +import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} import edu.ie3.simona.ontology.messages.SchedulerMessage.{ Completion, ScheduleActivation, @@ -88,6 +94,9 @@ object PrimaryServiceProxy { final case class InitPrimaryServiceProxyStateData( primaryConfig: PrimaryConfig, simulationStart: ZonedDateTime, + extSimulationData: Seq[ + (ExtPrimaryDataConnection, ActorRef[ServiceMessage]) + ], ) extends InitializeServiceStateData /** Holding the state of an initialized proxy. @@ -106,6 +115,7 @@ object PrimaryServiceProxy { timeSeriesToSourceRef: Map[UUID, SourceRef], simulationStart: ZonedDateTime, primaryConfig: PrimaryConfig, + extSubscribersToService: Map[UUID, ActorRef[ServiceMessage]] = Map.empty, ) extends ServiceStateData /** Giving reference to the target time series and source worker. @@ -153,6 +163,7 @@ object PrimaryServiceProxy { prepareStateData( initStateData.primaryConfig, initStateData.simulationStart, + initStateData.extSimulationData, )(using ctx.log) match { case Success(stateData) => scheduler ! 
Completion(ctx.self) @@ -185,6 +196,9 @@ object PrimaryServiceProxy { private[service] def prepareStateData( primaryConfig: PrimaryConfig, simulationStart: ZonedDateTime, + extSimulationData: Seq[ + (ExtPrimaryDataConnection, ActorRef[ServiceMessage]) + ], )(using log: Logger): Try[PrimaryServiceStateData] = { val sourceOption = Seq( primaryConfig.sqlParams, @@ -232,12 +246,28 @@ object PrimaryServiceProxy { } } .toMap - PrimaryServiceStateData( - modelToTimeSeries, - timeSeriesToSourceRef, - simulationStart, - primaryConfig, - ) + if extSimulationData.nonEmpty then { + val extSubscribersToService = extSimulationData.flatMap { + case (connection, ref) => + connection.getPrimaryDataAssets.asScala.map(id => id -> ref) + } + + // Ask ExtPrimaryDataService which UUIDs should be substituted + PrimaryServiceStateData( + modelToTimeSeries, + timeSeriesToSourceRef, + simulationStart, + primaryConfig, + extSubscribersToService.toMap, + ) + } else { + PrimaryServiceStateData( + modelToTimeSeries, + timeSeriesToSourceRef, + simulationStart, + primaryConfig, + ) + } } } @@ -312,26 +342,40 @@ object PrimaryServiceProxy { PrimaryServiceRegistrationMessage(requestingActor, modelUuid), ) => /* Try to register for this model */ - stateData.modelToTimeSeries.get(modelUuid) match { - case Some(timeSeriesUuid) => - /* There is a time series apparent for this model, try to get a worker for it */ - val updatedStateData = handleCoveredModel( - modelUuid, - timeSeriesUuid, - stateData, - requestingActor, - )(using scheduler, ctx) + stateData.extSubscribersToService.get(modelUuid) match { + case Some(_) => + /* There is external data apparent for this model */ + handleExternalModel(modelUuid, stateData, requestingActor) - onMessage(updatedStateData) + Behaviors.same case None => ctx.log.debug( - s"There is no time series apparent for the model with uuid '{}'.", + s"There is no external data apparent for the model with uuid '{}'.", modelUuid, ) - requestingActor ! RegistrationFailedMessage(ctx.self) - Behaviors.same + stateData.modelToTimeSeries.get(modelUuid) match { + case Some(timeSeriesUuid) => + /* There is a time series apparent for this model, try to get a worker for it */ + val updatedStateData = handleCoveredModel( + modelUuid, + timeSeriesUuid, + stateData, + requestingActor, + )(using scheduler, ctx) + + onMessage(updatedStateData) + + case None => + ctx.log.debug( + s"There is no time series apparent for the model with uuid '{}'.", + modelUuid, + ) + requestingActor ! RegistrationFailedMessage(ctx.self) + + Behaviors.same + } } case (ctx, unknown) => ctx.log.error( @@ -400,6 +444,19 @@ object PrimaryServiceProxy { } } + protected def handleExternalModel( + modelUuid: UUID, + stateData: PrimaryServiceStateData, + requestingActor: ActorRef[ParticipantAgent.Request], + ): Unit = { + stateData.extSubscribersToService.foreach { case (_, ref) => + ref ! PrimaryServiceRegistrationMessage( + requestingActor, + modelUuid, + ) + } + } + /** Instantiate a new [[PrimaryServiceWorker]] and send initialization * information * diff --git a/src/main/scala/edu/ie3/simona/service/results/ExtResultProvider.scala b/src/main/scala/edu/ie3/simona/service/results/ExtResultProvider.scala new file mode 100644 index 0000000000..7bae8d2531 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/service/results/ExtResultProvider.scala @@ -0,0 +1,147 @@ +/* + * © 2025. 
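The registration flow added to PrimaryServiceProxy above first checks whether an external provider claims the model UUID and only then falls back to locally known time series. A minimal sketch under simplified, hypothetical types (in the real proxy the request is forwarded to the registered external primary data services rather than answered directly):

```scala
// Illustrative sketch only (simplified stand-ins, not the SIMONA actors):
// registration is matched against externally provided assets first, then
// against locally known time series, mirroring the proxy logic above.
import java.util.UUID

sealed trait Registration
case object DelegatedToExternalService extends Registration
final case class CoveredByTimeSeries(timeSeries: UUID) extends Registration
case object Failed extends Registration

def handleRegistration(
    modelUuid: UUID,
    extSubscribers: Set[UUID],
    modelToTimeSeries: Map[UUID, UUID],
): Registration =
  if (extSubscribers.contains(modelUuid)) DelegatedToExternalService
  else
    modelToTimeSeries.get(modelUuid) match {
      case Some(ts) => CoveredByTimeSeries(ts)
      case None     => Failed
    }

@main def demoRegistration(): Unit = {
  val extModel = UUID.randomUUID()
  val csvModel = UUID.randomUUID()
  val tsUuid = UUID.randomUUID()

  println(handleRegistration(extModel, Set(extModel), Map(csvModel -> tsUuid)))
  println(handleRegistration(csvModel, Set(extModel), Map(csvModel -> tsUuid)))
  println(handleRegistration(UUID.randomUUID(), Set(extModel), Map.empty))
}
```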
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.results + +import edu.ie3.simona.api.data.connection.ExtResultDataConnection +import edu.ie3.simona.api.ontology.DataMessageFromExt +import edu.ie3.simona.api.ontology.results.{ + ProvideResultEntities, + RequestResultEntities, + ResultDataMessageFromExt, +} +import edu.ie3.simona.event.listener.DelayedStopHelper +import edu.ie3.simona.exceptions.CriticalFailureException +import edu.ie3.simona.ontology.messages.SchedulerMessage.{ + Completion, + ScheduleActivation, +} +import edu.ie3.simona.ontology.messages.ServiceMessage.ScheduleServiceActivation +import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.ontology.messages.ResultMessage +import edu.ie3.simona.ontology.messages.ResultMessage.{ + RequestResult, + ResultResponse, +} +import org.apache.pekko.actor.typed.scaladsl.Behaviors +import org.apache.pekko.actor.typed.{ActorRef, Behavior} + +import java.util +import edu.ie3.simona.util.CollectionUtils.asJava + +import scala.jdk.CollectionConverters.* + +object ExtResultProvider { + + type Message = ResultMessage.Response | DelayedStopHelper.StoppingMsg + + /** State data for a result [[provider]]. + * + * @param scheduler + * Reference to the scheduler. + * @param resultProxy + * The result service proxy. + * @param connection + * Result data connection to the external simulation. + * @param extMessage + * Option for the current message from the external simulation. + */ + private final case class ProviderState( + scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], + connection: ExtResultDataConnection, + extMessage: Option[ResultDataMessageFromExt] = None, + ) + + /** Method to create an external result provider. In contrast to the listener, + * the result provider will only provide those results that were requested. + * + * @param connection + * Result data connection to the external simulation. + * @param scheduler + * Reference to the scheduler. + * @param resultProxy + * The result service proxy. + * @return + * The behavior of the result provider. + */ + def apply( + connection: ExtResultDataConnection, + scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], + ): Behavior[Message | DataMessageFromExt | Activation] = { + val stateData = ProviderState(scheduler, resultProxy, connection) + + provider(stateData) + } + + /** Definition of the behavior of the result provider. + * + * @param stateData + * The state data of the provider. + * @return + * The behavior of the result provider. + */ + private def provider( + stateData: ProviderState + ): Behavior[Message | DataMessageFromExt | Activation] = + Behaviors.receivePartial[Message | DataMessageFromExt | Activation] { + case (ctx, ResultResponse(results)) => + // send result to external simulation + stateData.connection.queueExtResponseMsg( + new ProvideResultEntities(results.asJava) + ) + + stateData.scheduler ! Completion(ctx.self) + + Behaviors.same + + case (_, messageFromExt: ResultDataMessageFromExt) => + // save ext message + provider(stateData.copy(extMessage = Some(messageFromExt))) + + case (ctx, ScheduleServiceActivation(tick, unlockKey)) => + stateData.scheduler ! 
ScheduleActivation( + ctx.self, + tick, + Some(unlockKey), + ) + + Behaviors.same + + case (ctx, Activation(tick)) => + // handle ext message + + val extMsg = stateData.extMessage.getOrElse( + // this should not be possible because the external simulation schedules this provider + throw CriticalFailureException( + "ExtResultDataService was triggered without ResultDataMessageFromExt available" + ) + ) + + extMsg match { + case requestResultEntities: RequestResultEntities => + val requestedResults = + new util.ArrayList(requestResultEntities.requestedResults) + + // request results from result proxy + stateData.resultProxy ! RequestResult( + requestedResults.asScala.toSeq, + tick, + ctx.self, + ) + + Behaviors.same + case other => + ctx.log.warn(s"Cannot handle external result message: $other") + Behaviors.same + } + + case (ctx, msg: DelayedStopHelper.StoppingMsg) => + DelayedStopHelper.handleMsg((ctx, msg)) + + } +} diff --git a/src/main/scala/edu/ie3/simona/service/results/ResultServiceProxy.scala b/src/main/scala/edu/ie3/simona/service/results/ResultServiceProxy.scala new file mode 100644 index 0000000000..b194e554c9 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/service/results/ResultServiceProxy.scala @@ -0,0 +1,287 @@ +/* + * © 2025. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.results + +import edu.ie3.datamodel.models.result.ResultEntity +import edu.ie3.datamodel.models.result.connector.Transformer3WResult +import edu.ie3.simona.agent.grid.GridResultsSupport.PartialTransformer3wResult +import edu.ie3.simona.event.ResultEvent +import edu.ie3.simona.event.ResultEvent.* +import edu.ie3.simona.event.listener.DelayedStopHelper +import edu.ie3.simona.ontology.messages.ResultMessage.{ + RequestResult, + ResultResponse, +} +import edu.ie3.simona.service.ServiceStateData.ServiceBaseStateData +import edu.ie3.simona.service.results.Transformer3wResultSupport.{ + AggregatedTransformer3wResult, + Transformer3wKey, +} +import edu.ie3.simona.util.SimonaConstants.INIT_SIM_TICK +import edu.ie3.simona.util.TickUtil.RichZonedDateTime +import org.apache.pekko.actor.typed.scaladsl.{Behaviors, StashBuffer} +import org.apache.pekko.actor.typed.{ActorRef, Behavior, PostStop} +import org.slf4j.Logger + +import java.time.ZonedDateTime +import java.util.UUID +import scala.util.{Failure, Success} + +object ResultServiceProxy { + + type Message = ResultEvent | RequestResult | ExpectResult | + DelayedStopHelper.StoppingMsg + + final case class ExpectResult(assets: UUID | Seq[UUID], tick: Long) + + private final case class ResultServiceStateData( + listeners: Seq[ActorRef[ResultResponse]], + simStartTime: ZonedDateTime, + currentTick: Long = INIT_SIM_TICK, + threeWindingResults: Map[ + Transformer3wKey, + AggregatedTransformer3wResult, + ] = Map.empty, + gridResults: Map[UUID, Iterable[ResultEntity]] = Map.empty, + results: Map[UUID, Iterable[ResultEntity]] = Map.empty, + waitingForResults: Map[UUID, Long] = Map.empty, + ) extends ServiceBaseStateData { + def notifyListener(results: Map[UUID, Iterable[ResultEntity]]): Unit = + if results.nonEmpty then listeners.foreach(_ ! ResultResponse(results)) + + def notifyListener(result: ResultEntity): Unit = + listeners.foreach( + _ ! 
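ExtResultProvider above realizes a simple round trip: the external simulation queues a request and schedules an activation, the activation forwards the requested UUIDs to the result proxy, and the proxy's response is queued back to the external simulation. A minimal sketch with simplified, hypothetical message types (plain functions instead of actors):

```scala
// Illustrative sketch (hypothetical, simplified): the round trip implemented
// by ExtResultProvider above, reduced to plain functions.
// 1) the external simulation queues a request and schedules an activation,
// 2) on activation the provider forwards the requested UUIDs to the proxy,
// 3) the proxy's response is converted back into an external reply.
import java.util.UUID

final case class RequestFromExt(requestedResults: Seq[UUID])
final case class RequestToProxy(requestedResults: Seq[UUID], tick: Long)
final case class ResponseFromProxy(results: Map[UUID, Seq[String]])
final case class ReplyToExt(results: Map[UUID, Seq[String]])

def onActivation(tick: Long, queued: Option[RequestFromExt]): RequestToProxy =
  queued match {
    case Some(req) => RequestToProxy(req.requestedResults, tick)
    case None =>
      throw new IllegalStateException("activated without an external request")
  }

def onProxyResponse(response: ResponseFromProxy): ReplyToExt =
  ReplyToExt(response.results)

@main def demoResultRoundTrip(): Unit = {
  val asset = UUID.randomUUID()
  val toProxy = onActivation(3600L, Some(RequestFromExt(Seq(asset))))
  val reply = onProxyResponse(ResponseFromProxy(Map(asset -> Seq("NodeResult@3600"))))
  println(toProxy)
  println(reply.results(asset))
}
```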
ResultResponse(Map(result.getInputModel -> List(result))) + ) + + def isWaiting(uuids: Iterable[UUID], tick: Long): Boolean = { + uuids.exists { uuid => + waitingForResults.get(uuid) match { + case Some(nextTick) if nextTick <= tick => true + case _ => false + } + } + } + + def updateTick(tick: Long): ResultServiceStateData = + copy(currentTick = tick) + + def waitForResult(expectResult: ExpectResult): ResultServiceStateData = + expectResult.assets match { + case uuid: UUID => + copy(waitingForResults = + waitingForResults.updated(uuid, expectResult.tick) + ) + case uuids: Seq[UUID] => + val tick = expectResult.tick + + copy(waitingForResults = + waitingForResults ++ uuids.map(uuid => uuid -> tick).toMap + ) + } + + def addResult(result: ResultEntity): ResultServiceStateData = { + val uuid = result.getInputModel + val tick = result.getTime.toTick(using simStartTime) + + val updatedWaitingForResults = + if waitingForResults.get(uuid).contains(tick) then { + waitingForResults.removed(uuid) + } else waitingForResults + + val updatedResults = results.get(uuid) match { + case Some(values) => + val updatedValues = values + .map { value => value.getClass -> value } + .toMap + .updated(result.getClass, result) + .values + + results.updated(uuid, updatedValues) + + case None => + results.updated(uuid, Iterable(result)) + } + + copy( + results = updatedResults, + waitingForResults = updatedWaitingForResults, + ) + } + + def getResults(uuids: Seq[UUID]): Map[UUID, Iterable[ResultEntity]] = { + uuids.flatMap { uuid => + gridResults.get(uuid) match { + case Some(values) => + Some(uuid -> values) + case None => + results.get(uuid).map { res => uuid -> res } + } + }.toMap + } + + } + + def apply( + listeners: Seq[ActorRef[ResultResponse]], + simStartTime: ZonedDateTime, + bufferSize: Int = 10000, + ): Behavior[Message] = Behaviors.withStash(bufferSize) { buffer => + idle(ResultServiceStateData(listeners, simStartTime))(using buffer) + } + + private def idle( + stateData: ResultServiceStateData + )(using + buffer: StashBuffer[Message] + ): Behavior[Message] = Behaviors + .receivePartial[Message] { + case (_, expectResult: ExpectResult) => + idle(stateData.waitForResult(expectResult)) + + case (ctx, resultEvent: ResultEvent) => + // ctx.log.warn(s"Received results: $resultEvent") + + // handles the event and updates the state data + val updatedStateData = + handleResultEvent(resultEvent, stateData)(using ctx.log) + + // un-stash received requests + buffer.unstashAll(idle(updatedStateData)) + case (_, requestResultMessage: RequestResult) => + val requestedResults = requestResultMessage.requestedResults + val tick = requestResultMessage.tick + + if stateData.isWaiting(requestedResults, tick) then { + // ctx.log.warn(s"Cannot answer request: $requestedResults") + + buffer.stash(requestResultMessage) + } else { + + requestResultMessage.replyTo ! 
ResultResponse( + stateData.getResults(requestedResults) + ) + } + + Behaviors.same + + case (ctx, msg: DelayedStopHelper.StoppingMsg) => + DelayedStopHelper.handleMsg((ctx, msg)) + } + .receiveSignal { case (ctx, PostStop) => + ctx.log.debug( + "Shutdown initiated.\n\tThe following three winding results are not comprehensive and are not " + + "handled in sinks:{}\n\tWaiting until writing result data is completed ...", + stateData.threeWindingResults.keys + .map { case Transformer3wKey(model, zdt) => + s"model '$model' at $zdt" + } + .mkString("\n\t\t"), + ) + + Behaviors.same + } + + private def handleResultEvent( + resultEvent: ResultEvent, + stateData: ResultServiceStateData, + )(using log: Logger): ResultServiceStateData = resultEvent match { + case PowerFlowResultEvent( + nodeResults, + switchResults, + lineResults, + transformer2wResults, + partialTransformer3wResults, + congestionResults, + ) => + // handling of three winding transformers + val (updatedResults, transformer3wResults) = + handleThreeWindingTransformers( + partialTransformer3wResults, + stateData.threeWindingResults, + ) + + val gridResults = + (transformer3wResults ++ nodeResults ++ switchResults ++ lineResults ++ transformer2wResults ++ congestionResults) + .groupBy(_.getInputModel) + + // notify listener + stateData.notifyListener(gridResults) + + stateData.copy( + gridResults = stateData.gridResults ++ gridResults, + threeWindingResults = updatedResults, + waitingForResults = + stateData.waitingForResults.removedAll(gridResults.keys), + ) + + case ParticipantResultEvent(systemParticipantResult) => + // notify listener + stateData.notifyListener(systemParticipantResult) + + stateData.addResult(systemParticipantResult) + + case ThermalResultEvent(thermalResult) => + // notify listener + stateData.notifyListener(thermalResult) + + stateData.addResult(thermalResult) + + case FlexOptionsResultEvent(flexOptionsResult) => + // notify listener + stateData.notifyListener(flexOptionsResult) + + stateData.addResult(flexOptionsResult) + } + + private def handleThreeWindingTransformers( + transformer3wResults: Iterable[PartialTransformer3wResult], + threeWindingResults: Map[Transformer3wKey, AggregatedTransformer3wResult], + )(using log: Logger) = transformer3wResults.foldLeft( + threeWindingResults, + Seq.empty[Transformer3WResult], + ) { case ((allPartialResults, allResults), result) => + val key = Transformer3wKey(result.input, result.time) + // retrieve existing partial result or use empty one + val partialResult = + allPartialResults.getOrElse( + key, + AggregatedTransformer3wResult.EMPTY, + ) + // add partial result + partialResult.add(result).map { updatedResult => + if updatedResult.ready then { + // if result is complete, we can write it out + updatedResult.consolidate match { + case Failure(exception) => + log.warn( + "Failure when handling partial Transformer3w result", + exception, + ) + // on failure, we just continue with previous results + (allPartialResults, allResults) + case Success(res) => + (allPartialResults.removed(key), allResults.appended(res)) + } + + } else { + // if result is not complete yet, just update it + (allPartialResults + (key -> updatedResult), allResults) + } + } match { + case Success(results) => results + case Failure(exception) => + log.warn( + "Failure when handling partial Transformer3w result", + exception, + ) + // on failure, we just continue with previous results + (allPartialResults, allResults) + } + } + +} diff --git 
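The ExpectResult/RequestResult pair above gates when a request may be answered: as long as any requested asset is still awaited for a tick at or before the request, the request is deferred (stashed) until the result arrives. A minimal sketch of that idea with simplified, hypothetical types:

```scala
// Illustrative sketch (simplified, not the actual actor): a request is only
// answered once no requested asset is still awaited for a tick at or before
// the requested tick; otherwise it would be stashed.
import java.util.UUID

final case class ProxyState(
    waitingFor: Map[UUID, Long] = Map.empty,
    results: Map[UUID, Seq[String]] = Map.empty,
) {
  def expect(asset: UUID, tick: Long): ProxyState =
    copy(waitingFor = waitingFor.updated(asset, tick))

  def addResult(asset: UUID, tick: Long, result: String): ProxyState =
    copy(
      waitingFor =
        if (waitingFor.get(asset).contains(tick)) waitingFor.removed(asset)
        else waitingFor,
      results = results.updated(asset, results.getOrElse(asset, Seq.empty) :+ result),
    )

  def answer(assets: Seq[UUID], tick: Long): Option[Map[UUID, Seq[String]]] = {
    val stillWaiting = assets.exists(a => waitingFor.get(a).exists(_ <= tick))
    if (stillWaiting) None // the actor would stash the request here
    else Some(assets.flatMap(a => results.get(a).map(a -> _)).toMap)
  }
}

@main def demoProxy(): Unit = {
  val asset = UUID.randomUUID()
  val s0 = ProxyState().expect(asset, 3600L)
  println(s0.answer(Seq(asset), 3600L)) // None -> request deferred
  val s1 = s0.addResult(asset, 3600L, "EmResult@3600")
  println(s1.answer(Seq(asset), 3600L)) // Some(Map(asset -> List(EmResult@3600)))
}
```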
a/src/main/scala/edu/ie3/simona/event/listener/Transformer3wResultSupport.scala b/src/main/scala/edu/ie3/simona/service/results/Transformer3wResultSupport.scala similarity index 97% rename from src/main/scala/edu/ie3/simona/event/listener/Transformer3wResultSupport.scala rename to src/main/scala/edu/ie3/simona/service/results/Transformer3wResultSupport.scala index 1065475d06..c6f9787823 100644 --- a/src/main/scala/edu/ie3/simona/event/listener/Transformer3wResultSupport.scala +++ b/src/main/scala/edu/ie3/simona/service/results/Transformer3wResultSupport.scala @@ -4,19 +4,19 @@ * Research group Distribution grid planning and operation */ -package edu.ie3.simona.event.listener +package edu.ie3.simona.service.results import edu.ie3.datamodel.models.result.connector.Transformer3WResult import edu.ie3.simona.agent.grid.GridResultsSupport.PartialTransformer3wResult -import tech.units.indriya.quantity.Quantities import edu.ie3.util.quantities.PowerSystemUnits +import tech.units.indriya.quantity.Quantities import tech.units.indriya.unit.Units import java.time.ZonedDateTime import java.util.UUID import scala.util.{Failure, Success, Try} -private[listener] trait Transformer3wResultSupport { +private[results] object Transformer3wResultSupport { /** Case class to serve as a map key for unfulfilled three winding results * diff --git a/src/main/scala/edu/ie3/simona/service/weather/WeatherService.scala b/src/main/scala/edu/ie3/simona/service/weather/WeatherService.scala index d414eacd86..b952ce7c6a 100644 --- a/src/main/scala/edu/ie3/simona/service/weather/WeatherService.scala +++ b/src/main/scala/edu/ie3/simona/service/weather/WeatherService.scala @@ -6,6 +6,7 @@ package edu.ie3.simona.service.weather +import edu.ie3.datamodel.models.input.NodeInput import edu.ie3.simona.agent.participant.ParticipantAgent import edu.ie3.simona.agent.participant.ParticipantAgent.{ DataProvision, diff --git a/src/main/scala/edu/ie3/simona/sim/SimonaSim.scala b/src/main/scala/edu/ie3/simona/sim/SimonaSim.scala index 2817369494..44715ebebf 100644 --- a/src/main/scala/edu/ie3/simona/sim/SimonaSim.scala +++ b/src/main/scala/edu/ie3/simona/sim/SimonaSim.scala @@ -26,8 +26,8 @@ import java.nio.file.Path * overall simulation has been successful or not. For specific status * information, the user needs to pass in and subscribe to the corresponding * listener e.g. 
[[edu.ie3.simona.event.listener.RuntimeEventListener]] for - * simulation status or [[edu.ie3.simona.event.listener.ResultEventListener]] - * for result events + * simulation status or [[edu.ie3.simona.event.listener.ResultListener]] for + * result events * * @since 01.07.20 */ @@ -70,8 +70,15 @@ object SimonaSim { ): Behavior[Request] = Behaviors .receivePartial[Request] { case (ctx, Start(_)) => - val resultEventListeners = simonaSetup.resultEventListener(ctx) val runtimeEventListener = simonaSetup.runtimeEventListener(ctx) + val resultEventListeners = simonaSetup.resultEventListener(ctx) + + // result proxy + val resultProxy = simonaSetup.resultServiceProxy( + ctx, + resultEventListeners, + simonaSetup.simonaConfig.simona.time.simStartTime, + ) val timeAdvancer = simonaSetup.timeAdvancer(ctx, ctx.self, runtimeEventListener) @@ -86,7 +93,11 @@ object SimonaSim { simonaSetup.simonaConfig.simona.input.extSimDir.map(Path.of(_)) val extSimulationData = - simonaSetup.extSimulations(ctx, scheduler, extSimDir) + simonaSetup.extSimulations(ctx, scheduler, resultProxy, extSimDir) + + val allResultEventListeners = + resultEventListeners ++ extSimulationData.resultListeners + val resultProviders = extSimulationData.resultProviders /* start services */ // primary service proxy @@ -104,6 +115,7 @@ object SimonaSim { scheduler, runtimeEventListener, primaryServiceProxy, + resultProxy, weatherService, loadProfileService, extSimulationData.emDataService, @@ -111,11 +123,7 @@ object SimonaSim { ) /* start grid agents */ - val gridAgents = simonaSetup.gridAgents( - ctx, - environmentRefs, - resultEventListeners, - ) + val gridAgents = simonaSetup.gridAgents(ctx, environmentRefs) val otherActors = Iterable[ActorRef[?]]( timeAdvancer, @@ -126,8 +134,10 @@ object SimonaSim { gridAgents ++ extSimulationData.allServiceRefs /* watch all actors */ - resultEventListeners.foreach(ctx.watch) + allResultEventListeners.foreach(ctx.watch) + resultProviders.foreach(ctx.watch) ctx.watch(runtimeEventListener) + ctx.watch(resultProxy) otherActors.foreach(ctx.watch) extSimulationData.extSimAdapters.foreach(ctx.watch) @@ -137,7 +147,11 @@ object SimonaSim { // Start simulation timeAdvancer ! 
TimeAdvancer.Start - val delayedActors = resultEventListeners.appended(runtimeEventListener) + val delayedActors = + allResultEventListeners + .appendedAll(resultProviders) + .appended(runtimeEventListener) + .appended(resultProxy) idle( ActorData( diff --git a/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetup.scala b/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetup.scala index 9dfae566ee..eb570b3db1 100644 --- a/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetup.scala +++ b/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetup.scala @@ -9,17 +9,14 @@ package edu.ie3.simona.sim.setup import com.typesafe.config.Config import edu.ie3.datamodel.models.input.container.JointGridContainer import edu.ie3.simona.api.data.ExtSimAdapterData -import edu.ie3.simona.api.data.connection.{ - ExtEmDataConnection, - ExtEvDataConnection, - ExtInputDataConnection, - ExtPrimaryDataConnection, -} +import edu.ie3.simona.api.data.connection.* import edu.ie3.simona.api.ontology.DataMessageFromExt import edu.ie3.simona.api.ontology.simulation.ControlResponseMessageFromExt import edu.ie3.simona.api.simulation.ExtSimulation import edu.ie3.simona.api.{ExtLinkInterface, ExtSimAdapter} +import edu.ie3.simona.event.listener.{ExtResultEvent, ResultListener} import edu.ie3.simona.exceptions.ServiceException +import edu.ie3.simona.ontology.messages.ResultMessage.RequestResult import edu.ie3.simona.ontology.messages.{SchedulerMessage, ServiceMessage} import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.ServiceStateData.InitializeServiceStateData @@ -27,6 +24,9 @@ import edu.ie3.simona.service.em.ExtEmDataService import edu.ie3.simona.service.em.ExtEmDataService.InitExtEmData import edu.ie3.simona.service.ev.ExtEvDataService import edu.ie3.simona.service.ev.ExtEvDataService.InitExtEvData +import edu.ie3.simona.service.primary.ExtPrimaryDataService +import edu.ie3.simona.service.primary.ExtPrimaryDataService.InitExtPrimaryData +import edu.ie3.simona.service.results.ExtResultProvider import edu.ie3.simona.util.SimonaConstants.PRE_INIT_TICK import org.apache.pekko.actor.typed.ActorRef import org.apache.pekko.actor.typed.scaladsl.ActorContext @@ -55,6 +55,8 @@ object ExtSimSetup { * The actor context of this actor system. * @param scheduler * The scheduler of simona. + * @param resultProxy + * The result service proxy. * @param startTime * The start time of the simulation. 
* @return @@ -70,6 +72,7 @@ object ExtSimSetup { )(using context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], startTime: ZonedDateTime, ): ExtSimSetupData = extLinks.zipWithIndex.foldLeft(ExtSimSetupData.apply) { case (extSimSetupData, (extLink, index)) => @@ -139,6 +142,7 @@ object ExtSimSetup { context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], extSimAdapterData: ExtSimAdapterData, + resultProxy: ActorRef[RequestResult], startTime: ZonedDateTime, ): ExtSimSetupData = { given extSimAdapter: ActorRef[ControlResponseMessageFromExt] = @@ -154,6 +158,20 @@ object ExtSimSetup { val updatedSetupData = connections.foldLeft(extSimSetupData) { case (setupData, connection) => connection match { + case extPrimaryDataConnection: ExtPrimaryDataConnection => + val serviceRef = context.spawn( + ExtPrimaryDataService(scheduler), + "ExtPrimaryDataService", + ) + + setupService( + extPrimaryDataConnection, + serviceRef, + InitExtPrimaryData.apply, + ) + + setupData.update(extPrimaryDataConnection, serviceRef) + case extEmDataConnection: ExtEmDataConnection => if setupData.emDataService.nonEmpty then { throw ServiceException( @@ -178,7 +196,7 @@ object ExtSimSetup { InitExtEmData(_, startTime), ) - extSimSetupData.update(extEmDataConnection, serviceRef) + setupData.copy(emDataService = Some(serviceRef)) } case extEvDataConnection: ExtEvDataConnection => @@ -199,7 +217,32 @@ object ExtSimSetup { InitExtEvData.apply, ) - extSimSetupData.update(extEvDataConnection, serviceRef) + setupData.update(extEvDataConnection, serviceRef) + + case extResultDataConnection: ExtResultDataConnection => + val extResultProvider = context.spawn( + ExtResultProvider( + extResultDataConnection, + scheduler, + resultProxy, + ), + s"ExtResultProvider_$index", + ) + + extResultDataConnection.setActorRefs( + extResultProvider, + extSimAdapter, + ) + + setupData.update(extResultDataConnection, extResultProvider) + + case extResultListener: ExtResultListener => + val extResultEventListener = context.spawn( + ResultListener.external(extResultListener), + s"ExtResultListener_$index", + ) + + setupData.update(extResultListener, extResultEventListener) case otherConnection => log.warn( diff --git a/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetupData.scala b/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetupData.scala index 59d2fcf896..ab7a710bf5 100644 --- a/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetupData.scala +++ b/src/main/scala/edu/ie3/simona/sim/setup/ExtSimSetupData.scala @@ -8,9 +8,13 @@ package edu.ie3.simona.sim.setup import edu.ie3.simona.api.ExtSimAdapter import edu.ie3.simona.api.data.connection.* +import edu.ie3.simona.event.listener.ResultListener +import edu.ie3.simona.event.listener.ExtResultEvent import edu.ie3.simona.ontology.messages.ServiceMessage import edu.ie3.simona.service.em.ExtEmDataService import edu.ie3.simona.service.ev.ExtEvDataService +import edu.ie3.simona.service.results.ExtResultProvider +import edu.ie3.simona.service.em.ExtEmDataService import org.apache.pekko.actor.typed.ActorRef /** Case class that holds information regarding the external data connections as @@ -24,8 +28,10 @@ import org.apache.pekko.actor.typed.ActorRef * Option for an external em data service. * @param evDataService * Option for an external ev data service. - * @param extResultListeners - * Seq: external result data connections to result data providers. + * @param resultListeners + * Seq: external result listeners. 
+ * @param resultProviders + * Seq: external result providers. */ final case class ExtSimSetupData( extSimAdapters: Iterable[ActorRef[ExtSimAdapter.Request]], @@ -34,7 +40,8 @@ final case class ExtSimSetupData( ], emDataService: Option[ActorRef[ExtEmDataService.Message]], evDataService: Option[ActorRef[ExtEvDataService.Message]], - extResultListeners: Seq[ActorRef[ServiceMessage]], + resultListeners: Seq[ActorRef[ResultListener.Message]], + resultProviders: Seq[ActorRef[ExtResultProvider.Message]], ) { private[setup] def update( @@ -52,18 +59,21 @@ final case class ExtSimSetupData( serviceRef: ActorRef[ServiceMessage], ) => update(primaryConnection, serviceRef) - case ( - _: ExtEmDataConnection, - serviceRef: ActorRef[ExtEmDataService.Message], - ) => - copy(emDataService = Some(serviceRef)) case ( _: ExtEvDataConnection, serviceRef: ActorRef[ExtEvDataService.Message], ) => copy(evDataService = Some(serviceRef)) - case (_: ExtResultDataConnection, serviceRef: ActorRef[ServiceMessage]) => - copy(extResultListeners = extResultListeners ++ Seq(serviceRef)) + case ( + _: ExtResultDataConnection, + providerRef: ActorRef[ExtResultProvider.Message], + ) => + copy(resultProviders = resultProviders ++ Seq(providerRef)) + case ( + _: ExtResultListener, + listenerRef: ActorRef[ResultListener.Message], + ) => + copy(resultListeners = resultListeners ++ Seq(listenerRef)) case (_, _) => this } @@ -82,7 +92,9 @@ final case class ExtSimSetupData( Seq( emDataService, evDataService, - ).flatten ++ extResultListeners ++ primaryDataServices.map(_._2) + ).flatten ++ resultListeners ++ resultProviders ++ primaryDataServices.map( + _._2 + ) } object ExtSimSetupData { @@ -95,5 +107,6 @@ object ExtSimSetupData { None, None, Seq.empty, + Seq.empty, ) } diff --git a/src/main/scala/edu/ie3/simona/sim/setup/SimonaSetup.scala b/src/main/scala/edu/ie3/simona/sim/setup/SimonaSetup.scala index feb1c9a1d7..110c72f1a9 100644 --- a/src/main/scala/edu/ie3/simona/sim/setup/SimonaSetup.scala +++ b/src/main/scala/edu/ie3/simona/sim/setup/SimonaSetup.scala @@ -16,18 +16,24 @@ import edu.ie3.datamodel.models.input.thermal.ThermalBusInput import edu.ie3.simona.agent.EnvironmentRefs import edu.ie3.simona.agent.grid.GridAgent import edu.ie3.simona.config.SimonaConfig -import edu.ie3.simona.event.listener.{ResultEventListener, RuntimeEventListener} +import edu.ie3.simona.event.listener.{ResultListener, RuntimeEventListener} import edu.ie3.simona.event.{ResultEvent, RuntimeEvent} import edu.ie3.simona.io.grid.GridProvider +import edu.ie3.simona.ontology.messages.ResultMessage.{ + RequestResult, + ResultResponse, +} import edu.ie3.simona.ontology.messages.{SchedulerMessage, ServiceMessage} import edu.ie3.simona.scheduler.TimeAdvancer import edu.ie3.simona.scheduler.core.Core.CoreFactory import edu.ie3.simona.scheduler.core.RegularSchedulerCore +import edu.ie3.simona.service.results.ResultServiceProxy import edu.ie3.simona.sim.SimonaSim import org.apache.pekko.actor.typed.ActorRef import org.apache.pekko.actor.typed.scaladsl.ActorContext import java.nio.file.Path +import java.time.ZonedDateTime /** Trait that can be used to set up a customized simona simulation by providing * implementations for all setup information required by a @@ -86,7 +92,7 @@ trait SimonaSetup { */ def resultEventListener( context: ActorContext[?] - ): Seq[ActorRef[ResultEventListener.Request]] + ): Seq[ActorRef[ResultListener.Message]] /** Creates a primary service proxy. The proxy is the first instance to ask * for primary data. 
If necessary, it delegates the registration request to @@ -108,6 +114,27 @@ trait SimonaSetup { extSimSetupData: ExtSimSetupData, ): ActorRef[ServiceMessage] + + /** Creates a result service proxy. The proxy will receive information about + * the results that should be expected for the current tick and all result + * events that are sent by the agents. The proxy is responsible for + * processing the result events and passing the processed data to the + * different result listeners and providers. + * + * @param context + * Actor context to use. + * @param listeners + * The internal result event listeners. + * @param simStartTime + * The start time of the simulation. + * @return + * An actor reference to the service. + */ + def resultServiceProxy( + context: ActorContext[?], + listeners: Seq[ActorRef[ResultResponse]], + simStartTime: ZonedDateTime, + ): ActorRef[ResultServiceProxy.Message] + /** Creates a weather service. * * @param context @@ -145,6 +172,8 @@ trait SimonaSetup { * Actor context to use. * @param scheduler * Actor reference to the scheduler to use. + * @param resultProxy + * Actor reference to the result service proxy. + * @param extSimPath * Option for a directory with external simulations. * @return @@ -153,6 +182,7 @@ trait SimonaSetup { def extSimulations( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], extSimPath: Option[Path], ): ExtSimSetupData @@ -197,8 +227,6 @@ trait SimonaSetup { * Actor context to use. * @param environmentRefs * EnvironmentRefs to use. - * @param resultEventListeners - * Listeners that await events from system participants. * @return * A mapping from actor reference to it's according initialization data to * be used when setting up the agents. @@ -206,7 +234,6 @@ trait SimonaSetup { def gridAgents( context: ActorContext[?], environmentRefs: EnvironmentRefs, - resultEventListeners: Seq[ActorRef[ResultEvent]], ): Iterable[ActorRef[GridAgent.Message]] /** SIMONA links sub grids connected by a three winding transformer a bit diff --git a/src/main/scala/edu/ie3/simona/sim/setup/SimonaStandaloneSetup.scala b/src/main/scala/edu/ie3/simona/sim/setup/SimonaStandaloneSetup.scala index 767a77bc28..40502b7e1d 100644 --- a/src/main/scala/edu/ie3/simona/sim/setup/SimonaStandaloneSetup.scala +++ b/src/main/scala/edu/ie3/simona/sim/setup/SimonaStandaloneSetup.scala @@ -15,9 +15,13 @@ import edu.ie3.simona.agent.EnvironmentRefs import edu.ie3.simona.agent.grid.GridAgent import edu.ie3.simona.agent.grid.GridAgentMessages.CreateGridAgent import edu.ie3.simona.config.{GridConfigParser, SimonaConfig} -import edu.ie3.simona.event.listener.{ResultEventListener, RuntimeEventListener} -import edu.ie3.simona.event.{ResultEvent, RuntimeEvent} +import edu.ie3.simona.event.RuntimeEvent +import edu.ie3.simona.event.listener.{ResultListener, RuntimeEventListener} import edu.ie3.simona.exceptions.agent.GridAgentInitializationException +import edu.ie3.simona.ontology.messages.ResultMessage.{ + RequestResult, + ResultResponse, +} import edu.ie3.simona.ontology.messages.{SchedulerMessage, ServiceMessage} import edu.ie3.simona.scheduler.core.Core.CoreFactory import edu.ie3.simona.scheduler.core.RegularSchedulerCore @@ -26,6 +30,7 @@ import edu.ie3.simona.service.load.LoadProfileService import edu.ie3.simona.service.load.LoadProfileService.InitLoadProfileServiceStateData import edu.ie3.simona.service.primary.PrimaryServiceProxy import edu.ie3.simona.service.primary.PrimaryServiceProxy.InitPrimaryServiceProxyStateData +import 
edu.ie3.simona.service.results.ResultServiceProxy import edu.ie3.simona.service.weather.WeatherService import edu.ie3.simona.service.weather.WeatherService.InitWeatherServiceStateData import edu.ie3.simona.sim.SimonaSim @@ -38,6 +43,7 @@ import org.apache.pekko.actor.typed.ActorRef import org.apache.pekko.actor.typed.scaladsl.ActorContext import java.nio.file.Path +import java.time.ZonedDateTime import java.util.UUID import java.util.concurrent.LinkedBlockingQueue import scala.jdk.CollectionConverters.* @@ -61,7 +67,6 @@ class SimonaStandaloneSetup( override def gridAgents( context: ActorContext[?], environmentRefs: EnvironmentRefs, - resultEventListeners: Seq[ActorRef[ResultEvent]], ): Iterable[ActorRef[GridAgent.Message]] = { /* get the grid */ @@ -76,7 +81,6 @@ class SimonaStandaloneSetup( subGridTopologyGraph, context, environmentRefs, - resultEventListeners, ) val keys = ScheduleLock.multiKey( @@ -147,6 +151,7 @@ class SimonaStandaloneSetup( InitPrimaryServiceProxyStateData( simonaConfig.simona.input.primary, simulationStart, + extSimSetupData.primaryDataServices, ), ), "primaryServiceProxyAgent", @@ -155,6 +160,16 @@ class SimonaStandaloneSetup( primaryServiceProxy } + override def resultServiceProxy( + context: ActorContext[?], + listeners: Seq[ActorRef[ResultResponse]], + simStartTime: ZonedDateTime, + ): ActorRef[ResultServiceProxy.Message] = + context.spawn( + ResultServiceProxy(listeners, simStartTime), + "resultEventProxyAgent", + ) + override def weatherService( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], @@ -202,6 +217,7 @@ class SimonaStandaloneSetup( override def extSimulations( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], extSimPath: Option[Path], ): ExtSimSetupData = { val jars = ExtSimLoader.scanInputFolder(extSimPath) @@ -210,6 +226,7 @@ class SimonaStandaloneSetup( setupExtSim(extLinks, args, typeSafeConfig, grid)(using context, scheduler, + resultProxy, simonaConfig.simona.time.simStartTime, ) } @@ -259,15 +276,13 @@ class SimonaStandaloneSetup( override def resultEventListener( context: ActorContext[?] 
- ): Seq[ActorRef[ResultEventListener.Request]] = { + ): Seq[ActorRef[ResultListener.Message]] = { // append ResultEventListener as well to write raw output files Seq( context .spawn( - ResultEventListener( - resultFileHierarchy - ), - ResultEventListener.getClass.getSimpleName, + ResultListener(resultFileHierarchy), + ResultListener.getClass.getSimpleName, ) ) } @@ -276,7 +291,6 @@ class SimonaStandaloneSetup( subGridTopologyGraph: SubGridTopologyGraph, context: ActorContext[?], environmentRefs: EnvironmentRefs, - resultEventListeners: Seq[ActorRef[ResultEvent]], ): Map[Int, ActorRef[GridAgent.Message]] = { subGridTopologyGraph .vertexSet() @@ -287,7 +301,6 @@ class SimonaStandaloneSetup( GridAgent( environmentRefs, simonaConfig, - resultEventListeners, ), subGridContainer.getSubnet.toString, ) diff --git a/src/main/scala/edu/ie3/simona/util/CollectionUtils.scala b/src/main/scala/edu/ie3/simona/util/CollectionUtils.scala index e84e14efa1..31a8b585a9 100644 --- a/src/main/scala/edu/ie3/simona/util/CollectionUtils.scala +++ b/src/main/scala/edu/ie3/simona/util/CollectionUtils.scala @@ -10,9 +10,19 @@ import squants.Quantity import scala.annotation.tailrec import scala.collection.immutable.HashSet +import scala.math.Ordering.Double +import scala.jdk.CollectionConverters.{SeqHasAsJava, MapHasAsJava} object CollectionUtils { + extension [K, V](scalaMap: Map[K, Iterable[V]]) { + def asJava: java.util.Map[K, java.util.List[V]] = { + scalaMap.map { case (key, value) => + key -> value.toList.asJava + }.asJava + } + } + /** fast implementation to test if a list contains duplicates. See * https://stackoverflow.com/questions/3871491/functional-programming-does-a-list-only-contain-unique-items * for details diff --git a/src/main/scala/edu/ie3/simona/util/ReceiveHierarchicalDataMap.scala b/src/main/scala/edu/ie3/simona/util/ReceiveHierarchicalDataMap.scala new file mode 100644 index 0000000000..76ab01c9b1 --- /dev/null +++ b/src/main/scala/edu/ie3/simona/util/ReceiveHierarchicalDataMap.scala @@ -0,0 +1,130 @@ +/* + * © 2025. 
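The asJava extension added to CollectionUtils above converts a Scala Map[K, Iterable[V]] into a java.util.Map[K, java.util.List[V]], e.g. before results are handed to the Java-based external API in ExtResultProvider. A usage sketch, with the conversion written as a plain function so the example stays self-contained:

```scala
// Usage sketch for the conversion performed by the asJava extension above,
// shown as a plain function to keep the example self-contained.
import scala.jdk.CollectionConverters.{MapHasAsJava, SeqHasAsJava}

def toJavaMap[K, V](scalaMap: Map[K, Iterable[V]]): java.util.Map[K, java.util.List[V]] =
  scalaMap.map { case (key, value) => key -> value.toList.asJava }.asJava

@main def demoToJavaMap(): Unit = {
  val results: Map[String, Iterable[Int]] = Map("node" -> Seq(1, 2), "line" -> Seq(3))
  println(toJavaMap(results).get("node")) // [1, 2]
}
```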
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.util + +final case class ReceiveHierarchicalDataMap[K, V]( + private val allKeys: Set[K], + private val expectedKeys: Set[K], + structure: Map[K, Set[K]], + receivedData: Map[K, V], +) { + + def hasCompletedKeys: Boolean = structure.keySet.exists(isComplete) + + def isComplete: Boolean = expectedKeys.isEmpty + + private def isComplete(key: K): Boolean = structure + .get(key) + .map(_.intersect(expectedKeys)) + .forall(_.forall(receivedData.contains)) + + def updateStructure( + key: Option[K], + subKey: K, + ): ReceiveHierarchicalDataMap[K, V] = { + val (updatedStructure, updatedKeys): (Map[K, Set[K]], Set[K]) = key match { + case Some(parent) => + structure.get(parent) match { + case Some(subKeys) => + val allSubKeys = subKeys + subKey + + ( + structure ++ Map(parent -> allSubKeys), + allKeys + subKey, + ) + case None => + ( + structure ++ Map(parent -> Set(subKey)), + allKeys ++ List(parent, subKey), + ) + } + + case None if !structure.contains(subKey) => + ( + structure ++ Map(subKey -> Set.empty), + allKeys + subKey, + ) + case _ => + // we already added the subkey as parent + // therefore, no changes are needed + (structure, allKeys) + } + + copy( + structure = updatedStructure, + allKeys = updatedKeys, + ) + } + + def addSubKeysToExpectedKeys(keys: Set[K]): ReceiveHierarchicalDataMap[K, V] = + copy(expectedKeys = expectedKeys ++ keys.flatMap(structure.get).flatten) + + def addData( + key: K, + value: V, + ): ReceiveHierarchicalDataMap[K, V] = { + + if !allKeys.contains(key) then + throw new RuntimeException( + s"Received value $value for key $key, but no data has been expected for this key." + ) + + copy( + expectedKeys = expectedKeys.excl(key), + receivedData = receivedData.updated(key, value), + ) + } + + def getExpectedKeys: Set[K] = expectedKeys + + def getFinishedData: (Map[K, V], ReceiveHierarchicalDataMap[K, V]) = { + val dataMap = if expectedKeys.nonEmpty then { + structure.keySet + .filter(isComplete) + .flatMap(key => structure(key)) + .map(key => key -> receivedData(key)) + .toMap + } else receivedData + + val updated = receivedData.removedAll(dataMap.keys) + + (dataMap, copy(receivedData = updated)) + } + + def getFinishedDataHierarchical + : (Map[K, Set[K]], Map[K, V], ReceiveHierarchicalDataMap[K, V]) = { + val (dataMap, updated) = getFinishedData + + val structureMap = structure.keySet + .filter(isComplete) + .map(parent => parent -> structure(parent)) + .toMap + + (structureMap, dataMap, updated) + } + +} + +object ReceiveHierarchicalDataMap { + + def apply[K, V](expected: Set[K]): ReceiveHierarchicalDataMap[K, V] = + ReceiveHierarchicalDataMap( + Set.empty, + expected, + Map.empty, + Map.empty, + ) + + def empty[K, V]: ReceiveHierarchicalDataMap[K, V] = + ReceiveHierarchicalDataMap( + Set.empty, + Set.empty, + Map.empty, + Map.empty, + ) + +} diff --git a/src/main/scala/edu/ie3/simona/util/ReceiveMultiDataMap.scala b/src/main/scala/edu/ie3/simona/util/ReceiveMultiDataMap.scala new file mode 100644 index 0000000000..7a075b158f --- /dev/null +++ b/src/main/scala/edu/ie3/simona/util/ReceiveMultiDataMap.scala @@ -0,0 +1,102 @@ +/* + * © 2025. 
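A usage sketch for ReceiveHierarchicalDataMap, assuming the class exactly as introduced above: a parent key is registered, two sub-keys are attached to it, the sub-keys are marked as expected, and completion is only reported once both values have arrived.

```scala
// Usage sketch (assumes the ReceiveHierarchicalDataMap defined in this diff).
import edu.ie3.simona.util.ReceiveHierarchicalDataMap
import java.util.UUID

@main def demoHierarchicalMap(): Unit = {
  val parent = UUID.randomUUID()
  val childA = UUID.randomUUID()
  val childB = UUID.randomUUID()

  val awaiting = ReceiveHierarchicalDataMap
    .empty[UUID, String]
    .updateStructure(None, parent)         // register the parent itself
    .updateStructure(Some(parent), childA) // attach sub-keys to the parent
    .updateStructure(Some(parent), childB)
    .addSubKeysToExpectedKeys(Set(parent)) // expect data for both sub-keys

  val partial = awaiting.addData(childA, "flex options A")
  println(partial.isComplete)       // false, childB is still expected
  println(partial.hasCompletedKeys) // false, not all of the parent's sub-keys arrived

  val complete = partial.addData(childB, "flex options B")
  println(complete.isComplete)      // true

  val (finished, remaining) = complete.getFinishedData
  println(finished.keySet == Set(childA, childB)) // true
  println(remaining.getFinishedData._1.isEmpty)   // true, data was handed out
}
```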
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.util + +import edu.ie3.simona.util.ReceiveMultiDataMap.log +import org.slf4j.{Logger, LoggerFactory} + +final case class ReceiveMultiDataMap[K, V]( + private val expectedKeys: Map[K, Int], + finishedKeys: Set[K], + receivedData: Map[K, Seq[V]], +) { + def isComplete: Boolean = expectedKeys.isEmpty + + def hasCompleted: Boolean = finishedKeys.nonEmpty + + def nonComplete: Boolean = expectedKeys.nonEmpty + + def expects(key: K): Boolean = expectedKeys.contains(key) + + def getExpected(key: K): Int = expectedKeys.getOrElse(key, 0) + + def getFinished: (Map[K, Seq[V]], ReceiveMultiDataMap[K, V]) = { + val data = finishedKeys.map { key => key -> receivedData(key) }.toMap + + ( + data, + copy( + receivedData = receivedData.removedAll(finishedKeys), + finishedKeys = Set.empty, + ), + ) + } + + def addData[A]( + key: K, + value: V, + ): ReceiveMultiDataMap[K, V] = { + if !expectedKeys.contains(key) && !receivedData.contains(key) then { + throw new RuntimeException( + s"Received value $value for key $key, but no data has been expected or received for this key." + ) + } else { + val count = expectedKeys(key) - 1 + + val newValue = receivedData.get(key) match { + case Some(values) => + values.appended(value) + case None => + Seq(value) + } + + if count == 0 then { + copy( + expectedKeys = expectedKeys.removed(key), + finishedKeys = finishedKeys + key, + receivedData = receivedData.updated(key, newValue), + ) + } else { + copy( + expectedKeys = expectedKeys.updated(key, count), + receivedData = receivedData.updated(key, newValue), + ) + } + } + } + + def addExpectedKeys(keys: Map[K, Int]): ReceiveMultiDataMap[K, V] = { + val (add, remove) = keys.partition(_._2 > 0) + val updated = (expectedKeys ++ add).removedAll(remove.keys) + copy(expectedKeys = updated) + } + + def getExpectedKeys: Set[K] = expectedKeys.keySet + +} + +object ReceiveMultiDataMap { + + private val log: Logger = LoggerFactory.getLogger("ReceiveMultiDataMap") + + def apply[K, V]( + expectedKeys: Map[K, Int] + ): ReceiveMultiDataMap[K, V] = + ReceiveMultiDataMap( + expectedKeys = expectedKeys, + finishedKeys = Set.empty, + receivedData = Map.empty, + ) + + def empty[K, V]: ReceiveMultiDataMap[K, V] = + ReceiveMultiDataMap( + expectedKeys = Map.empty, + finishedKeys = Set.empty, + receivedData = Map.empty, + ) + +} diff --git a/src/main/scala/edu/ie3/util/scala/collection/mutable/PriorityMultiBiSet.scala b/src/main/scala/edu/ie3/util/scala/collection/mutable/PriorityMultiBiSet.scala index 85267883d2..a28433d129 100644 --- a/src/main/scala/edu/ie3/util/scala/collection/mutable/PriorityMultiBiSet.scala +++ b/src/main/scala/edu/ie3/util/scala/collection/mutable/PriorityMultiBiSet.scala @@ -83,6 +83,9 @@ final case class PriorityMultiBiSet[K, V]( } } + def set(key: K, values: Set[V]): Unit = + values.foreach(set(key, _)) + /** Removes the given value, if it exists. 
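A usage sketch for ReceiveMultiDataMap, assuming the class exactly as introduced above: two values are expected for the same key, and the key only shows up as finished once both have been received.

```scala
// Usage sketch (assumes the ReceiveMultiDataMap defined in this diff).
import edu.ie3.simona.util.ReceiveMultiDataMap
import java.util.UUID

@main def demoMultiDataMap(): Unit = {
  val em = UUID.randomUUID()

  val awaiting = ReceiveMultiDataMap[UUID, String](Map(em -> 2))

  val partial = awaiting.addData(em, "set point @ 0")
  println(partial.hasCompleted)    // false, one of two values is still missing
  println(partial.getExpected(em)) // 1

  val complete = partial.addData(em, "set point @ 900")
  println(complete.isComplete)     // true
  println(complete.hasCompleted)   // true

  val (finished, rest) = complete.getFinished
  println(finished(em))        // List(set point @ 0, set point @ 900)
  println(rest.hasCompleted)   // false, finished keys were handed out
}
```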
* * @param value diff --git a/src/main/scala/edu/ie3/util/scala/quantities/QuantityUtil.scala b/src/main/scala/edu/ie3/util/scala/quantities/QuantityUtil.scala index d2edeb36b8..3090112c87 100644 --- a/src/main/scala/edu/ie3/util/scala/quantities/QuantityUtil.scala +++ b/src/main/scala/edu/ie3/util/scala/quantities/QuantityUtil.scala @@ -73,7 +73,7 @@ object QuantityUtil { values: Map[Long, Q], windowStart: Long, windowEnd: Long, - ): Try[Q] = { + )(using defaultUnit: UnitOfMeasure[Q]): Try[Q] = { if windowStart == windowEnd then Failure( new IllegalArgumentException("Cannot average over trivial time window.") @@ -114,7 +114,7 @@ object QuantityUtil { values: Map[Long, Q], windowStart: Long, windowEnd: Long, - ): QI = { + )(using defaultUnit: UnitOfMeasure[Q]): QI = { /** Case class to hold current state of integration * @@ -136,11 +136,7 @@ object QuantityUtil { /* Determine the unit from the first best value */ val unit = sortedValues.values.headOption .map(_.unit) - .getOrElse( - throw new QuantityException( - "Unable to determine unit for dummy starting value." - ) - ) + .getOrElse(defaultUnit) val zeroValue = unit(0d) /* the first relevant value for integration is placed before or at windowStart */ diff --git a/src/test/resources/logback-test.xml b/src/test/resources/logback-test.xml index 79b160e703..a153d47f5b 100644 --- a/src/test/resources/logback-test.xml +++ b/src/test/resources/logback-test.xml @@ -52,7 +52,7 @@ - + diff --git a/src/test/scala/edu/ie3/simona/agent/em/EmAgentIT.scala b/src/test/scala/edu/ie3/simona/agent/em/EmAgentIT.scala index 23c1522744..901d628e4a 100644 --- a/src/test/scala/edu/ie3/simona/agent/em/EmAgentIT.scala +++ b/src/test/scala/edu/ie3/simona/agent/em/EmAgentIT.scala @@ -31,10 +31,12 @@ import edu.ie3.simona.ontology.messages.ServiceMessage.{ SecondaryServiceRegistrationMessage, } import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.ontology.messages.ResultMessage.RequestResult import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.Data.SecondaryData.WeatherData import edu.ie3.simona.service.ServiceType import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.service.weather.WeatherService.WeatherRegistrationData import edu.ie3.simona.service.weather.{WeatherDataType, WeatherService} import edu.ie3.simona.test.common.TestSpawnerTyped @@ -105,7 +107,8 @@ class EmAgentIT "having load, pv and storage agents connected" should { "be initialized correctly and run through some activations" in { val gridAgent = TestProbe[GridAgent.Message]("GridAgent") - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultServiceProxy = + TestProbe[ResultEvent | ExpectResult]("ResultServiceProxy") val primaryServiceProxy = TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") val weatherService = TestProbe[WeatherService.Message]("WeatherService") @@ -114,8 +117,8 @@ class EmAgentIT given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryServiceProxy.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> weatherService.ref), - resultListener = Iterable(resultListener.ref), ) val keys = ScheduleLock @@ -133,7 +136,8 @@ class EmAgentIT "PRIORITIZED", simulationStartDate, parent = Left(scheduler.ref), - listener = Iterable(resultListener.ref), + listener = resultServiceProxy.ref, + None, ), "EmAgent", ) @@ -244,14 
+248,14 @@ class EmAgentIT Some(7200), ) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 0L.toDateTime emResult.getP should equalWithTolerance(-0.00057340027.asMegaWatt) emResult.getQ should equalWithTolerance(-0.0018318880807.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(7200))) /* TICK 7200 @@ -275,14 +279,14 @@ class EmAgentIT Some(14400), ) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 7200.toDateTime emResult.getP should equalWithTolerance(0.asMegaWatt) emResult.getQ should equalWithTolerance(-0.00113292701968.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(13246))) /* TICK 13246 @@ -294,14 +298,14 @@ class EmAgentIT */ emAgentActivation ! Activation(13246) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 13246.toDateTime emResult.getP should equalWithTolerance(-0.00344685673.asMegaWatt) emResult.getQ should equalWithTolerance(-0.001132927.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(14400))) /* TICK 14400 @@ -328,14 +332,14 @@ class EmAgentIT emAgentActivation ! 
Activation(14400) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 14400.toDateTime emResult.getP should equalWithTolerance(0.asMegaWatt) emResult.getQ should equalWithTolerance(0.000065375.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(21600))) } } @@ -343,7 +347,8 @@ class EmAgentIT "having load, pv and heat pump agents connected" should { "be initialized correctly and run through some activations" in { val gridAgent = TestProbe[GridAgent.Message]("GridAgent") - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultServiceProxy = + TestProbe[ResultEvent | ExpectResult]("ResultServiceProxy") val primaryServiceProxy = TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") val weatherService = TestProbe[WeatherService.Message]("WeatherService") @@ -352,8 +357,8 @@ class EmAgentIT given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryServiceProxy.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> weatherService.ref), - resultListener = Iterable(resultListener.ref), ) val keys = ScheduleLock @@ -371,7 +376,8 @@ class EmAgentIT "PRIORITIZED", simulationStartDate, parent = Left(scheduler.ref), - listener = Iterable(resultListener.ref), + listener = resultServiceProxy.ref, + None, ), "EmAgent1", ) @@ -502,14 +508,14 @@ class EmAgentIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 0.toDateTime emResult.getP should equalWithTolerance(-0.0055734002706.asMegaWatt) emResult.getQ should equalWithTolerance(-0.0018318880807.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(7200))) /* TICK 7200 @@ -535,14 +541,14 @@ class EmAgentIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 7200.toDateTime emResult.getP should equalWithTolerance(0.001403143271.asMegaWatt) emResult.getQ should equalWithTolerance(-0.00014809252.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(10800))) /* TICK 10800 @@ -568,14 +574,14 @@ class EmAgentIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 10800.toDateTime emResult.getP should equalWithTolerance(0.0011098586291.asMegaWatt) emResult.getQ should equalWithTolerance(-0.000244490516.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(11000))) /* TICK 11000 @@ -603,14 +609,14 @@ class EmAgentIT ) } - 
resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 11000.toDateTime emResult.getP should equalWithTolerance(0.0050603789402.asMegaWatt) emResult.getQ should equalWithTolerance(0.0010539827178.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(11500))) /* TICK 11500 @@ -638,14 +644,14 @@ class EmAgentIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 11500.toDateTime emResult.getP should equalWithTolerance(0.00013505248.asMegaWatt) emResult.getQ should equalWithTolerance(0.000044389603878.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(28800))) } } @@ -653,7 +659,8 @@ class EmAgentIT "having a pv and a load agent connected" should { "have correct values also for agents with limited operation time" in { val gridAgent = TestProbe[GridAgent.Message]("GridAgent") - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultServiceProxy = + TestProbe[ResultEvent | ExpectResult]("ResultServiceProxy") val primaryServiceProxy = TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") val weatherService = TestProbe[WeatherService.Message]("WeatherService") @@ -662,8 +669,8 @@ class EmAgentIT given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryServiceProxy.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> weatherService.ref), - resultListener = Iterable(resultListener.ref), ) val keys = ScheduleLock @@ -681,7 +688,8 @@ class EmAgentIT "PRIORITIZED", simulationStartDate, parent = Left(scheduler.ref), - listener = Iterable(resultListener.ref), + listener = resultServiceProxy.ref, + None, ), "EmAgentReactivePower", ) @@ -783,14 +791,14 @@ class EmAgentIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 0.toDateTime emResult.getP should equalWithTolerance(0.000268603.asMegaWatt) emResult.getQ should equalWithTolerance(0.0000882855367.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(3600))) /* TICK 3600 @@ -814,14 +822,14 @@ class EmAgentIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 3600.toDateTime emResult.getP should equalWithTolerance(0.000268603.asMegaWatt) emResult.getQ should equalWithTolerance(0.0000882855367.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(7200))) /* TICK 7200 @@ -845,14 +853,14 @@ class EmAgentIT emAgentActivation ! 
Activation(7200) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 7200.toDateTime emResult.getP should equalWithTolerance(-0.008423564.asMegaWatt) emResult.getQ should equalWithTolerance(-0.0027686916118.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(10800))) /* TICK 10800 @@ -861,14 +869,14 @@ class EmAgentIT -> expect P and Q values of PV */ emAgentActivation ! Activation(10800) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 10800.toDateTime emResult.getP should equalWithTolerance(-0.008692167.asMegaWatt) emResult.getQ should equalWithTolerance(-0.00285697715.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(14400))) /* TICK 14400 @@ -877,14 +885,14 @@ class EmAgentIT -> expect P: 0 W Q: 0 var */ emAgentActivation ! Activation(14400) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 14400.toDateTime emResult.getP should equalWithTolerance(0.asMegaWatt) emResult.getQ should equalWithTolerance(0.asMegaVar) } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, None)) } } diff --git a/src/test/scala/edu/ie3/simona/agent/em/EmAgentSpec.scala b/src/test/scala/edu/ie3/simona/agent/em/EmAgentSpec.scala index 80bd7813cf..71df0986c5 100644 --- a/src/test/scala/edu/ie3/simona/agent/em/EmAgentSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/em/EmAgentSpec.scala @@ -67,7 +67,7 @@ class EmAgentSpec "A self-optimizing EM agent" should { "be initialized correctly and run through some activations" in { - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultProxy = TestProbe[ResultEvent]("ResultProxy") val scheduler = TestProbe[SchedulerMessage]("Scheduler") val emAgent = spawn( @@ -78,7 +78,8 @@ class EmAgentSpec "PRIORITIZED", simulationStart, parent = Left(scheduler.ref), - listener = Iterable(resultListener.ref), + listener = resultProxy.ref, + None, ) ) @@ -116,7 +117,7 @@ class EmAgentSpec ) // expect no results for init - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // expect completion from EmAgent scheduler.expectMessage( Completion(emAgent, Some(0)) @@ -178,7 +179,7 @@ class EmAgentSpec ) // expect correct results - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(flexResult) => flexResult.getInputModel shouldBe emInput.getUuid flexResult.getTime shouldBe 0.toDateTime @@ -187,7 +188,7 @@ class EmAgentSpec flexResult.getpMax() should equalWithTolerance(.006.asMegaWatt) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => 
emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe simulationStart @@ -232,7 +233,7 @@ class EmAgentSpec emAgent ! FlexCompletion(modelUuid = evcsInput.getUuid) // expect correct results - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(flexResult) => flexResult.getInputModel shouldBe emInput.getUuid flexResult.getTime shouldBe 300.toDateTime @@ -241,7 +242,7 @@ class EmAgentSpec flexResult.getpMax() should equalWithTolerance(-.005.asMegaWatt) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 300.toDateTime @@ -255,7 +256,7 @@ class EmAgentSpec } "revoke triggers correctly" in { - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultProxy = TestProbe[ResultEvent]("ResultProxy") val scheduler = TestProbe[SchedulerMessage]("Scheduler") val emAgent = spawn( @@ -266,7 +267,8 @@ class EmAgentSpec "PRIORITIZED", simulationStart, parent = Left(scheduler.ref), - listener = Iterable(resultListener.ref), + listener = resultProxy.ref, + None, ) ) @@ -344,7 +346,7 @@ class EmAgentSpec ) // expect correct results - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(flexResult) => flexResult.getInputModel shouldBe emInput.getUuid flexResult.getTime shouldBe 0.toDateTime @@ -353,7 +355,7 @@ class EmAgentSpec flexResult.getpMax() should equalWithTolerance(.006.asMegaWatt) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe simulationStart @@ -414,7 +416,7 @@ class EmAgentSpec ) // expect correct results - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(flexResult) => flexResult.getInputModel shouldBe emInput.getUuid flexResult.getTime shouldBe 300.toDateTime @@ -423,7 +425,7 @@ class EmAgentSpec flexResult.getpMax() should equalWithTolerance(.008.asMegaWatt) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 300.toDateTime @@ -438,7 +440,7 @@ class EmAgentSpec } "handle ChangingFlexOptions indicator correctly" in { - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultProxy = TestProbe[ResultEvent]("ResultProxy") val scheduler = TestProbe[SchedulerMessage]("Scheduler") val emAgent = spawn( @@ -449,7 +451,8 @@ class EmAgentSpec "PRIORITIZED", simulationStart, parent = Left(scheduler.ref), - listener = Iterable(resultListener.ref), + listener = resultProxy.ref, + None, ) ) @@ -528,7 +531,7 @@ class EmAgentSpec requestAtTick = Some(600), ) - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(flexResult) => flexResult.getInputModel shouldBe emInput.getUuid flexResult.getTime shouldBe 0.toDateTime @@ -537,7 +540,7 
@@ class EmAgentSpec flexResult.getpMax() should equalWithTolerance(.006.asMegaWatt) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 0.toDateTime @@ -607,7 +610,7 @@ class EmAgentSpec ) // expect correct results - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(flexResult) => flexResult.getInputModel shouldBe emInput.getUuid flexResult.getTime shouldBe 300.toDateTime @@ -616,7 +619,7 @@ class EmAgentSpec flexResult.getpMax() should equalWithTolerance(.008.asMegaWatt) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 300.toDateTime @@ -632,8 +635,7 @@ class EmAgentSpec "An EM-controlled EM agent" should { "be initialized correctly and run through some activations" in { - val resultListener = TestProbe[ResultEvent]("ResultListener") - + val resultProxy = TestProbe[ResultEvent]("ResultProxy") val parentEmAgent = TestProbe[FlexResponse]("ParentEmAgent") val emAgent = spawn( @@ -644,7 +646,8 @@ class EmAgentSpec "PRIORITIZED", simulationStart, parent = Right(parentEmAgent.ref), - listener = Iterable(resultListener.ref), + listener = resultProxy.ref, + None, ) ) @@ -685,7 +688,7 @@ class EmAgentSpec ) // expect no results for init - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // expect completion from EmAgent parentEmAgent.expectMessage( FlexCompletion( @@ -723,7 +726,7 @@ class EmAgentSpec ), ) - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(flexResult) => flexResult.getInputModel shouldBe emInput.getUuid flexResult.getTime shouldBe 0.toDateTime @@ -780,7 +783,7 @@ class EmAgentSpec ) // expect correct results - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 0.toDateTime @@ -830,7 +833,7 @@ class EmAgentSpec ) // expect correct results - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(emResult: EmResult) => emResult.getInputModel shouldBe emInput.getUuid emResult.getTime shouldBe 150.toDateTime diff --git a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmCenGridSpec.scala b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmCenGridSpec.scala index f1fbc69c05..35bf9e3b4b 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmCenGridSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmCenGridSpec.scala @@ -25,6 +25,8 @@ import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.load.LoadProfileService import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import 
edu.ie3.simona.service.weather.WeatherService import edu.ie3.simona.test.common.model.grid.DbfsTestGrid import edu.ie3.simona.test.common.{ConfigTestData, TestSpawnerTyped} @@ -37,6 +39,7 @@ import org.apache.pekko.actor.testkit.typed.scaladsl.{ import squants.electro.Kilovolts import squants.energy.Megawatts +import java.util.UUID import scala.language.postfixOps /** Test to ensure the functions that a [[GridAgent]] in center position should @@ -60,6 +63,7 @@ class DBFSAlgorithmCenGridSpec ) private val primaryService = TestProbe[PrimaryServiceProxy.Message]("primaryService") + private val resultProxy = TestProbe[ResultServiceProxy.Message]("resultProxy") private val weatherService = TestProbe[WeatherService.Message]("weatherService") private val loadProfileService = @@ -85,24 +89,17 @@ class DBFSAlgorithmCenGridSpec scheduler = scheduler.ref, runtimeEventListener = runtimeEvents.ref, primaryServiceProxy = primaryService.ref, + resultProxy = resultProxy.ref, weather = weatherService.ref, loadProfiles = loadProfileService.ref, emDataService = None, evDataService = None, ) - val resultListener: TestProbe[ResultEvent] = TestProbe("resultListener") - "A GridAgent actor in center position with async test" should { val centerGridAgent = - testKit.spawn( - GridAgent( - environmentRefs, - simonaConfig, - listener = Iterable(resultListener.ref), - ) - ) + testKit.spawn(GridAgent(environmentRefs, simonaConfig)) s"initialize itself when it receives an init activation" in { @@ -157,6 +154,17 @@ class DBFSAlgorithmCenGridSpec // send the start grid simulation trigger centerGridAgent ! Activation(3600) + resultProxy.expectMessageType[ExpectResult] match { + case ExpectResult(assets, tick) => + assets match { + case uuids: Seq[UUID] => + uuids.toSet shouldBe assetsHv.toSet + case uuid: UUID => + fail(s"Received uuid $uuid, but expected grid asset uuids.") + } + tick shouldBe 3600 + } + /* We expect one grid power request message per inferior grid */ val firstPowerRequestSender11 = inferiorGrid11.expectGridPowerRequest() @@ -442,7 +450,7 @@ class DBFSAlgorithmCenGridSpec // after all grids have received a FinishGridSimulationTrigger, the scheduler should receive a Completion scheduler.expectMessageType[Completion].newTick shouldBe Some(7200) - val resultMessage = resultListener.expectMessageType[ResultEvent] + val resultMessage = resultProxy.expectMessageType[ResultEvent] resultMessage match { case powerFlowResultEvent: PowerFlowResultEvent => // we expect results for 4 nodes, 5 lines and 2 transformer2ws diff --git a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmFailedPowerFlowSpec.scala b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmFailedPowerFlowSpec.scala index 40e102d0e9..2b07656af0 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmFailedPowerFlowSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmFailedPowerFlowSpec.scala @@ -14,7 +14,7 @@ import edu.ie3.simona.agent.grid.GridAgentMessages.Responses.{ ExchangePower, ExchangeVoltage, } -import edu.ie3.simona.event.{ResultEvent, RuntimeEvent} +import edu.ie3.simona.event.RuntimeEvent import edu.ie3.simona.model.grid.{RefSystem, VoltageLimits} import edu.ie3.simona.ontology.messages.SchedulerMessage.{ Completion, @@ -24,6 +24,8 @@ import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.load.LoadProfileService import edu.ie3.simona.service.primary.PrimaryServiceProxy +import 
edu.ie3.simona.service.results.ResultServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.service.weather.WeatherService import edu.ie3.simona.test.common.model.grid.DbfsTestGrid import edu.ie3.simona.test.common.{ConfigTestData, TestSpawnerTyped} @@ -38,6 +40,7 @@ import org.apache.pekko.actor.typed.scaladsl.adapter.TypedActorRefOps import squants.electro.Kilovolts import squants.energy.Megawatts +import java.util.UUID import scala.concurrent.duration.DurationInt import scala.language.postfixOps @@ -54,6 +57,7 @@ class DBFSAlgorithmFailedPowerFlowSpec ) private val primaryService = TestProbe[PrimaryServiceProxy.Message]("primaryService") + private val resultProxy = TestProbe[ResultServiceProxy.Message]("resultProxy") private val weatherService = TestProbe[WeatherService.Message]("weatherService") private val loadProfileService = @@ -71,27 +75,20 @@ class DBFSAlgorithmFailedPowerFlowSpec scheduler = scheduler.ref, runtimeEventListener = runtimeEvents.ref, primaryServiceProxy = primaryService.ref, + resultProxy = resultProxy.ref, weather = weatherService.ref, loadProfiles = loadProfileService.ref, emDataService = None, evDataService = None, ) - val resultListener: TestProbe[ResultEvent] = TestProbe("resultListener") - "A GridAgent actor in center position with async test" should { // since the grid agent is stopped after a failed power flow // we need to initialize the agent for each test def initAndGoToSimulateGrid: ActorRef[GridAgent.Message] = { val centerGridAgent = - testKit.spawn( - GridAgent( - environmentRefs, - simonaConfig, - listener = Iterable(resultListener.ref), - ) - ) + testKit.spawn(GridAgent(environmentRefs, simonaConfig)) // this subnet has 1 superior grid (ehv) and 3 inferior grids (mv). Map the gates to test probes accordingly val subGridGateToActorRef = hvSubGridGatesPF.map { @@ -144,6 +141,18 @@ class DBFSAlgorithmFailedPowerFlowSpec // send the start grid simulation trigger centerGridAgent ! Activation(3600) + + resultProxy.expectMessageType[ExpectResult] match { + case ExpectResult(assets, tick) => + assets match { + case uuids: Seq[UUID] => + uuids.toSet shouldBe assetsHvPF.toSet + case uuid: UUID => + fail(s"Received uuid $uuid, but expected grid asset uuids.") + } + tick shouldBe 3600 + } + // we expect a request for grid power values here for sweepNo $sweepNo val powerRequestSender = inferiorGridAgent.expectGridPowerRequest() @@ -214,7 +223,7 @@ class DBFSAlgorithmFailedPowerFlowSpec // after all grids have received a FinishGridSimulationTrigger, the scheduler should receive a Completion scheduler.expectMessageType[Completion].newTick shouldBe Some(7200) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // PowerFlowFailed events are only sent by the slack subgrid runtimeEvents.expectNoMessage() @@ -228,6 +237,17 @@ class DBFSAlgorithmFailedPowerFlowSpec // send the start grid simulation trigger centerGridAgent ! 
Activation(3600) + resultProxy.expectMessageType[ExpectResult] match { + case ExpectResult(assets, tick) => + assets match { + case uuids: Seq[UUID] => + uuids.toSet shouldBe assetsHvPF.toSet + case uuid: UUID => + fail(s"Received uuid $uuid, but expected grid asset uuids.") + } + tick shouldBe 3600 + } + // we expect a request for grid power values here for sweepNo 0 val powerRequestSender = inferiorGridAgent.expectGridPowerRequest() @@ -286,7 +306,7 @@ class DBFSAlgorithmFailedPowerFlowSpec // after all grids have received a FinishGridSimulationTrigger, the scheduler should receive a Completion scheduler.expectMessageType[Completion].newTick shouldBe Some(7200) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // PowerFlowFailed events are only sent by the slack subgrid runtimeEvents.expectNoMessage() @@ -303,7 +323,6 @@ class DBFSAlgorithmFailedPowerFlowSpec GridAgent( environmentRefs, simonaConfig, // stopOnFailure is enabled - listener = Iterable(resultListener.ref), ) ) @@ -342,6 +361,17 @@ class DBFSAlgorithmFailedPowerFlowSpec // send the start grid simulation trigger slackGridAgent ! Activation(3600) + resultProxy.expectMessageType[ExpectResult] match { + case ExpectResult(assets, tick) => + assets match { + case uuids: Seq[UUID] => + uuids.toSet shouldBe assetsEhv.toSet + case uuid: UUID => + fail(s"Received uuid $uuid, but expected grid asset uuids.") + } + tick shouldBe 3600 + } + val powerRequestSender = hvGridAgent.expectGridPowerRequest() // normally the inferior grid agents ask for the slack voltage as well to run their power flow calculation @@ -377,7 +407,7 @@ class DBFSAlgorithmFailedPowerFlowSpec hvGridAgent.gaProbe.expectNoMessage() scheduler.expectNoMessage() - resultListener.expectNoMessage() + resultProxy.expectNoMessage() } } diff --git a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmParticipantSpec.scala b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmParticipantSpec.scala index 078fa0c8ab..dac8b8b58c 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmParticipantSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmParticipantSpec.scala @@ -26,6 +26,7 @@ import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.load.LoadProfileService import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy import edu.ie3.simona.service.weather.WeatherService import edu.ie3.simona.test.common.model.grid.DbfsTestGridWithParticipants import edu.ie3.simona.test.common.{ConfigTestData, TestSpawnerTyped} @@ -53,6 +54,7 @@ class DBFSAlgorithmParticipantSpec TestProbe("runtimeEvents") private val primaryService = TestProbe[PrimaryServiceProxy.Message]("primaryService") + private val resultProxy = TestProbe[ResultServiceProxy.Message]("resultProxy") private val weatherService = TestProbe[WeatherService.Message]("weatherService") private val loadProfileService = @@ -62,15 +64,13 @@ class DBFSAlgorithmParticipantSpec scheduler = scheduler.ref, runtimeEventListener = runtimeEvents.ref, primaryServiceProxy = primaryService.ref, + resultProxy = resultProxy.ref, weather = weatherService.ref, loadProfiles = loadProfileService.ref, emDataService = None, evDataService = None, ) - protected val resultListener: TestProbe[ResultEvent] = - TestProbe("resultListener") - private val superiorGridAgent = SuperiorGA( TestProbe("superiorGridAgent_1000"), Seq(supNodeA.getUuid), @@ -81,7 +81,6 @@ 
class DBFSAlgorithmParticipantSpec GridAgent( environmentRefs, simonaConfig, - Iterable(resultListener.ref), ) ) diff --git a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmSupGridSpec.scala b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmSupGridSpec.scala index 5b85249d1b..f5226052f2 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmSupGridSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/DBFSAlgorithmSupGridSpec.scala @@ -23,6 +23,8 @@ import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.load.LoadProfileService import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.service.weather.WeatherService import edu.ie3.simona.test.common.model.grid.DbfsTestGrid import edu.ie3.simona.test.common.{ConfigTestData, TestSpawnerTyped, UnitSpec} @@ -55,6 +57,7 @@ class DBFSAlgorithmSupGridSpec private val runtimeEvents = TestProbe[RuntimeEvent]("runtimeEvents") private val primaryService = TestProbe[PrimaryServiceProxy.Message]("primaryService") + private val resultProxy = TestProbe[ResultServiceProxy.Message]("resultProxy") private val weatherService = TestProbe[WeatherService.Message]("weatherService") private val loadProfileService = @@ -65,20 +68,18 @@ class DBFSAlgorithmSupGridSpec scheduler = scheduler.ref, runtimeEventListener = runtimeEvents.ref, primaryServiceProxy = primaryService.ref, + resultProxy = resultProxy.ref, weather = weatherService.ref, loadProfiles = loadProfileService.ref, emDataService = None, evDataService = None, ) - val resultListener: TestProbe[ResultEvent] = TestProbe("resultListener") - "A GridAgent actor in superior position with async test" should { val superiorGridAgentFSM: ActorRef[GridAgent.Message] = testKit.spawn( GridAgent( environmentRefs, simonaConfig, - listener = Iterable(resultListener.ref), ) ) @@ -162,8 +163,9 @@ class DBFSAlgorithmSupGridSpec case Completion(_, Some(3600)) => // we expect another completion message when the agent is in SimulateGrid again case Completion(_, Some(7200)) => + resultProxy.expectMessageType[ExpectResult] // agent should be in Idle again and listener should contain power flow result data - val resultMessage = resultListener.expectMessageType[ResultEvent] + val resultMessage = resultProxy.expectMessageType[ResultEvent] resultMessage match { case powerFlowResultEvent: PowerFlowResultEvent => @@ -282,9 +284,9 @@ class DBFSAlgorithmSupGridSpec // when we received a FinishGridSimulationTrigger (as inferior grid agent) // we expect another completion message then as well (scheduler view) case Completion(_, Some(7200)) => + resultProxy.expectMessageType[ExpectResult] // after doing cleanup stuff, our agent should go back to idle again and listener should contain power flow result data - val resultMessage = - resultListener.expectMessageType[ResultEvent] + val resultMessage = resultProxy.expectMessageType[ResultEvent] resultMessage match { case powerFlowResultEvent: PowerFlowResultEvent => diff --git a/src/test/scala/edu/ie3/simona/agent/grid/GridAgentSetupSpec.scala b/src/test/scala/edu/ie3/simona/agent/grid/GridAgentSetupSpec.scala index 39d0b02811..1c9f2657f8 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/GridAgentSetupSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/GridAgentSetupSpec.scala @@ -39,7 +39,6 @@ class GridAgentSetupSpec 
gridContainer.getSubGridTopologyGraph, ctx, mock[EnvironmentRefs], - Seq.empty, ) Behaviors.stopped @@ -66,7 +65,6 @@ class GridAgentSetupSpec threeWindingTestGrid.getSubGridTopologyGraph, ctx, mock[EnvironmentRefs], - Seq.empty, ) Behaviors.stopped diff --git a/src/test/scala/edu/ie3/simona/agent/grid/ThermalGridIT.scala b/src/test/scala/edu/ie3/simona/agent/grid/ThermalGridIT.scala index bc0e214e8b..5f6547d1f4 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/ThermalGridIT.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/ThermalGridIT.scala @@ -35,10 +35,12 @@ import edu.ie3.simona.ontology.messages.ServiceMessage.{ SecondaryServiceRegistrationMessage, } import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.ontology.messages.ResultMessage.RequestResult import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.Data.SecondaryData.WeatherData import edu.ie3.simona.service.ServiceType import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.service.weather.WeatherService.WeatherRegistrationData import edu.ie3.simona.service.weather.{WeatherDataType, WeatherService} import edu.ie3.simona.test.common.TestSpawnerTyped @@ -117,7 +119,8 @@ class ThermalGridIT ) val gridAgent = TestProbe[GridAgent.Message]("GridAgent") - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultServiceProxy = + TestProbe[ResultEvent | ExpectResult]("ResultProxy") val scheduler: TestProbe[SchedulerMessage] = TestProbe("scheduler") val primaryServiceProxy = TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") @@ -126,8 +129,8 @@ class ThermalGridIT given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryServiceProxy.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> weatherService.ref), - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -186,7 +189,7 @@ class ThermalGridIT val weatherDependentAgents = Seq(hpAgent) scheduler.expectMessage(Completion(heatPumpAgent, Some(0))) - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() /* TICK 0 Start of Simulation @@ -212,7 +215,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -246,7 +249,7 @@ class ThermalGridIT energy should equalWithTolerance(0.asMegaWattHour) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(heatPumpAgent, Some(3416))) /* TICK 3416 @@ -259,7 +262,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -294,7 +297,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(heatPumpAgent, Some(3600))) /* TICK 3600 @@ -319,7 +322,7 @@ class ThermalGridIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(hpResult) => hpResult.getInputModel shouldBe typicalHpInputModel.getUuid 
hpResult.getTime shouldBe 3600.toDateTime @@ -332,7 +335,7 @@ class ThermalGridIT // Since this activation is caused by new weather data, we don't expect any // message for house or heat storage since there is no change of their operating // point nor one of it reached any boundary. - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(heatPumpAgent, Some(4412))) /* TICK 4412 @@ -345,7 +348,7 @@ class ThermalGridIT Range(0, 2) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -370,7 +373,7 @@ class ThermalGridIT indoorTemp should equalWithTolerance(19.99.asDegreeCelsius) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(heatPumpAgent, Some(21600))) /* TICK 21600 @@ -396,7 +399,7 @@ class ThermalGridIT ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(hpResult) => hpResult.getInputModel shouldBe typicalHpInputModel.getUuid hpResult.getTime shouldBe 21600.toDateTime @@ -407,7 +410,7 @@ class ThermalGridIT // Since this activation is caused by new weather data, we don't expect any // message for house or heat storage since there is no change of their operating // point nor one of it reached any boundary. - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(heatPumpAgent, Some(23288))) /* TICK 23288 @@ -420,7 +423,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -455,7 +458,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(heatPumpAgent, Some(25000))) /* TICK 25000 @@ -480,7 +483,8 @@ class ThermalGridIT Some(28000), ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(hpResult) => hpResult.getInputModel shouldBe typicalHpInputModel.getUuid hpResult.getTime shouldBe 25000.toDateTime @@ -491,8 +495,8 @@ class ThermalGridIT // Since this activation is caused by new weather data, we don't expect any // message for house or heat storage since there is no change of their operating // point nor one of it reached any boundary. 
- resultListener.expectNoMessage() - scheduler.expectMessage(Completion(heatPumpAgent, Some(26704))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(heatPumpAgent, Some(26887))) /* TICK 26704 Heat storage is empty @@ -504,7 +508,7 @@ class ThermalGridIT Range(0, 2) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -529,7 +533,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(heatPumpAgent, Some(28000))) /* TICK 28000 @@ -553,7 +557,7 @@ class ThermalGridIT Some(151200), ) } - resultListener.expectMessageType[ParticipantResultEvent] match { + resultServiceProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(hpResult) => hpResult.getInputModel shouldBe typicalHpInputModel.getUuid hpResult.getTime shouldBe 28000.toDateTime @@ -564,8 +568,8 @@ class ThermalGridIT // Since this activation is caused by new weather data, we don't expect any // message for house or storage since there is no change of their operating // point nor one of it reached any boundary. - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(heatPumpAgent, Some(31837))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(heatPumpAgent, Some(32043))) /* TICK 31837 House will reach the upperTemperatureBoundary @@ -577,7 +581,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -612,8 +616,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(heatPumpAgent, Some(35253))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(heatPumpAgent, Some(35459))) /* TICK 35253 Storage will be fully charged, but meanwhile the house cooled a bit @@ -625,7 +629,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -660,8 +664,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(heatPumpAgent, Some(35788))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(heatPumpAgent, Some(35995))) /* TICK 35788 Neither house nor heat storage have any demand @@ -673,7 +677,7 @@ class ThermalGridIT Range(0, 2) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -699,8 +703,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(heatPumpAgent, Some(74421))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(heatPumpAgent, Some(74629))) } } @@ -719,7 +723,8 @@ class ThermalGridIT ) val gridAgent = TestProbe[GridAgent.Message]("GridAgent") - val resultListener: TestProbe[ResultEvent] = TestProbe("resultListener") + val resultServiceProxy: TestProbe[ResultEvent | ExpectResult] = + 
TestProbe("resultServiceProxy") val scheduler: TestProbe[SchedulerMessage] = TestProbe("scheduler") val primaryServiceProxy = TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") @@ -728,8 +733,8 @@ class ThermalGridIT given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryServiceProxy.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> weatherService.ref), - resultListener = Iterable(resultListener.ref), ) val keys = ScheduleLock @@ -747,7 +752,8 @@ class ThermalGridIT "PRIORITIZED", simulationStartWithPv, parent = Left(scheduler.ref), - listener = Iterable(resultListener.ref), + listener = resultServiceProxy.ref, + None, ), "EmAgent", ) @@ -844,7 +850,7 @@ class ThermalGridIT weatherService.ref, 0L, ) - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(0))) val weatherDependentAgents = Seq(hpAgent.toClassic, pvAgent.toClassic) @@ -874,7 +880,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -913,7 +919,7 @@ class ThermalGridIT energy should equalWithTolerance(0.asMegaWattHour) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(1800))) /* TICK 1800 @@ -941,7 +947,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -975,7 +981,7 @@ class ThermalGridIT energy should equalWithTolerance(0.asMegaWattHour) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(5216))) /* TICK 5216 @@ -989,7 +995,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1029,7 +1035,7 @@ class ThermalGridIT energy should equalWithTolerance(0.01044.asMegaWattHour) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(5400))) /* TICK 5400 @@ -1057,7 +1063,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1087,8 +1093,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(6731))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(6824))) /* TICK 6731 The house reaches target temperature @@ -1101,7 +1107,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1141,7 +1147,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(9200))) /* TICK 9200 @@ -1169,7 +1175,7 @@ class 
ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1199,8 +1205,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(10531))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(10551))) /* TICK 10531 Storage is full, now heating the house till target temperature. @@ -1213,7 +1219,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1253,7 +1259,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(11638))) /* TICK 11638 @@ -1267,7 +1273,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1297,7 +1303,7 @@ class ThermalGridIT indoorTemp should equalWithTolerance(19.99.asDegreeCelsius) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(12000))) /* TICK 12000 @@ -1326,7 +1332,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1356,7 +1362,7 @@ class ThermalGridIT indoorTemp should equalWithTolerance(19.96.asDegreeCelsius) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(12139))) /* TICK 12139 @@ -1370,7 +1376,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1400,7 +1406,7 @@ class ThermalGridIT indoorTemp should equalWithTolerance(20.asDegreeCelsius) } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(12500))) /* TICK 12500 @@ -1429,7 +1435,7 @@ class ThermalGridIT Range(0, 2) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => participantResult match { @@ -1445,8 +1451,8 @@ class ThermalGridIT emResult._4 should equalWithTolerance(0.asMegaVar) } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(24412))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(24413))) /* TICK 24412 House reaches lower boundary, since we don't have surplus energy from pv, we would use the energy from storage to heat the house. 
@@ -1459,7 +1465,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1499,7 +1505,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(25200))) /* TICK 25200 @@ -1527,7 +1533,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1557,7 +1563,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(27500))) /* TICK 27500 @@ -1585,7 +1591,7 @@ class ThermalGridIT Range(0, 2) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => participantResult match { @@ -1601,8 +1607,8 @@ class ThermalGridIT emResult._4 should equalWithTolerance(-0.00002100176.asMegaVar) } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(30872))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(30923))) /* TICK 30872 House reaches target temperature, since Hp is running we now charge the storage. @@ -1615,7 +1621,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1657,7 +1663,7 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() + resultServiceProxy.expectNoMessage() scheduler.expectMessage(Completion(emAgentActivation, Some(31000))) /* TICK 31000 @@ -1685,7 +1691,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1715,8 +1721,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(40942))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(40964))) /* TICK 40942 House reach lower temperature boundary @@ -1729,7 +1735,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1769,8 +1775,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(43698))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(43858))) /* TICK 43698 Storage is empty now. 
@@ -1787,7 +1793,7 @@ class ThermalGridIT Range(0, 4) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1827,8 +1833,8 @@ class ThermalGridIT case _ => fail("Unexpected thermal unit result") } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(46631))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(46635))) /* TICK 46631 House will reach the lower temperature boundary @@ -1841,7 +1847,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1871,8 +1877,8 @@ class ThermalGridIT indoorTemp should equalWithTolerance(18.asDegreeCelsius) } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(56274))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(56278))) /* TICK 56274 House will reach target temperature @@ -1885,7 +1891,7 @@ class ThermalGridIT Range(0, 3) .map { _ => - resultListener.expectMessageType[ResultEvent] + resultServiceProxy.expectMessageType[ResultEvent] } .foreach { case ParticipantResultEvent(participantResult) => @@ -1915,8 +1921,8 @@ class ThermalGridIT indoorTemp should equalWithTolerance(20.asDegreeCelsius) } } - resultListener.expectNoMessage() - scheduler.expectMessage(Completion(emAgentActivation, Some(66274))) + resultServiceProxy.expectNoMessage() + scheduler.expectMessage(Completion(emAgentActivation, Some(66279))) } } } diff --git a/src/test/scala/edu/ie3/simona/agent/grid/congestion/CongestionTestBaseData.scala b/src/test/scala/edu/ie3/simona/agent/grid/congestion/CongestionTestBaseData.scala index d6e856bce5..613bce682c 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/congestion/CongestionTestBaseData.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/congestion/CongestionTestBaseData.scala @@ -19,6 +19,7 @@ import edu.ie3.simona.model.grid.RefSystem import edu.ie3.simona.ontology.messages.SchedulerMessage import edu.ie3.simona.service.load.LoadProfileService import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy import edu.ie3.simona.service.weather.WeatherService import edu.ie3.simona.test.common.result.CongestedComponentsTestData import edu.ie3.simona.test.common.{ConfigTestData, TestSpawnerTyped} @@ -66,6 +67,9 @@ trait CongestionTestBaseData TestProbe( "primaryService" ) + protected val resultProxy: TestProbe[ResultServiceProxy.Message] = TestProbe( + "resultServiceProxy" + ) protected val weatherService: TestProbe[WeatherService.Message] = TestProbe( "weatherService" ) @@ -78,21 +82,17 @@ trait CongestionTestBaseData scheduler = scheduler.ref, runtimeEventListener = runtimeEvents.ref, primaryServiceProxy = primaryService.ref, + resultProxy = resultProxy.ref, weather = weatherService.ref, loadProfiles = loadProfileService.ref, emDataService = None, evDataService = None, ) - protected val resultListener: TestProbe[ResultEvent] = TestProbe( - "resultListener" - ) - protected implicit val constantData: GridAgentConstantData = GridAgentConstantData( environmentRefs, simonaConfig, - Iterable(resultListener.ref), 3600, startTime, endTime, diff --git 
a/src/test/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithmSpec.scala b/src/test/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithmSpec.scala index fde0c5b5ca..9e925102ee 100644 --- a/src/test/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithmSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/grid/congestion/DCMAlgorithmSpec.scala @@ -88,7 +88,7 @@ class DCMAlgorithmSpec ) // we should receive an empty result event - resultListener.expectMessageType[PowerFlowResultEvent] match { + resultProxy.expectMessageType[PowerFlowResultEvent] match { case PowerFlowResultEvent( nodeResults, _, diff --git a/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentInitSpec.scala b/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentInitSpec.scala index 489f7b2693..f90e3feb4a 100644 --- a/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentInitSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentInitSpec.scala @@ -36,6 +36,7 @@ import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.Data.PrimaryData.ActivePowerExtra import edu.ie3.simona.service.ServiceType import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.service.weather.WeatherDataType import edu.ie3.simona.service.weather.WeatherService.WeatherRegistrationData import edu.ie3.simona.test.common.input.{LoadInputTestData, PvInputTestData} @@ -94,13 +95,13 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[PrimaryServiceProxy.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map.empty, - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -143,13 +144,13 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[Any]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map.empty, - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -201,13 +202,13 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[Any]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map.empty, - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -264,13 +265,13 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[Any]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() given ParticipantRefs 
= ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map.empty, - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -351,14 +352,14 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[Any]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() val service = createTestProbe[Any]() given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> service.ref), - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -419,14 +420,14 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[Any]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() val service = createTestProbe[Any]() given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> service.ref), - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -482,14 +483,14 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[Any]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() val service = createTestProbe[Any]() given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> service.ref), - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) @@ -563,14 +564,14 @@ class ParticipantAgentInitSpec val gridAgent = createTestProbe[GridAgent.Message]() val primaryService = createTestProbe[Any]() - val resultListener = createTestProbe[ResultEvent]() + val resultServiceProxy = createTestProbe[ResultEvent | ExpectResult]() val service = createTestProbe[Any]() given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryService.ref, + resultServiceProxy = resultServiceProxy.ref, services = Map(ServiceType.WeatherService -> service.ref), - resultListener = Iterable(resultListener.ref), ) val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) diff --git a/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentSpec.scala b/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentSpec.scala index f511824c62..cc7f992c09 100644 --- a/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentSpec.scala +++ b/src/test/scala/edu/ie3/simona/agent/participant/ParticipantAgentSpec.scala @@ -44,6 +44,7 @@ import edu.ie3.simona.ontology.messages.{ ServiceMessage, } import edu.ie3.simona.service.Data.PrimaryData.{ActivePower, ActivePowerExtra} +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.test.common.UnitSpec import 
edu.ie3.simona.util.TickUtil.TickLong import edu.ie3.util.TimeUtil @@ -95,7 +96,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val scheduler = createTestProbe[SchedulerMessage]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val responseReceiver = createTestProbe[MockResponseMessage]() // no additional activation ticks @@ -117,10 +118,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Left(scheduler.ref)) ) @@ -134,7 +132,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(8 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -209,7 +207,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -245,7 +243,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val scheduler = createTestProbe[SchedulerMessage]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val responseReceiver = createTestProbe[MockResponseMessage]() // with additional activation ticks @@ -273,10 +271,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Left(scheduler.ref)) ) @@ -290,7 +285,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(8 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -323,13 +318,13 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { q should approximate(Kilovars(0.968644209676)) } - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() participantAgent ! 
GridSimulationFinished(12 * 3600, 24 * 3600) // calculation should start now - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -353,7 +348,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -393,7 +388,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val scheduler = createTestProbe[SchedulerMessage]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val responseReceiver = createTestProbe[MockResponseMessage]() val service = createTestProbe[ServiceMessage]() @@ -422,10 +417,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Left(scheduler.ref) ) @@ -439,7 +431,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(0) // nothing should happen, still waiting for secondary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() participantAgent ! DataProvision( @@ -450,7 +442,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { ) // outside of operation interval, 0 MW - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe simulationStartDate @@ -476,7 +468,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { Some(12 * 3600), ) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() // TICK 8 * 3600: Start of operation interval @@ -489,7 +481,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(8 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -526,7 +518,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! GridSimulationFinished(12 * 3600, 24 * 3600) // nothing should happen, still waiting for secondary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() participantAgent ! 
DataProvision( @@ -537,7 +529,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { ) // calculation should start now - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -567,7 +559,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { ) // no-op activation, thus no result expected - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // new data is expected at 18 hours scheduler.expectMessage( @@ -582,7 +574,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(18 * 3600) // nothing should happen, still waiting for secondary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() participantAgent ! DataProvision( @@ -593,7 +585,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { ) // calculation should start now - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (18 * 3600).toDateTime @@ -612,7 +604,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -644,7 +636,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! GridSimulationFinished(24 * 3600, 36 * 3600) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() } @@ -657,7 +649,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val scheduler = createTestProbe[SchedulerMessage]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val service = createTestProbe[ServiceMessage]() // no additional activation ticks @@ -666,6 +658,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val modelFactory = PrimaryDataParticipantModel.Factory( physicalModel, ActivePowerExtra, + 1.0, ) val participantAgent = spawn( @@ -684,10 +677,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Left(scheduler.ref) ) @@ -698,7 +688,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(0) // nothing should happen, still waiting for primary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() participantAgent ! 
DataProvision( @@ -709,7 +699,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { ) // outside of operation interval, 0 MW - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe simulationStartDate @@ -732,14 +722,14 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { Some(12 * 3600), ) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() // TICK 8 * 3600: Start of operation interval participantAgent ! Activation(8 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -773,7 +763,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! GridSimulationFinished(12 * 3600, 24 * 3600) // nothing should happen, still waiting for primary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() participantAgent ! DataProvision( @@ -784,7 +774,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { ) // calculation should start now - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -802,7 +792,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(18 * 3600) // nothing should happen, still waiting for primary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() participantAgent ! DataProvision( @@ -813,7 +803,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { ) // calculation should start now - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (18 * 3600).toDateTime @@ -829,7 +819,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! Activation(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -858,7 +848,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! 
GridSimulationFinished(24 * 3600, 36 * 3600) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() scheduler.expectNoMessage() } @@ -875,7 +865,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val em = createTestProbe[FlexResponse]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val responseReceiver = createTestProbe[MockResponseMessage]() // no additional activation ticks @@ -897,10 +887,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Right(em.ref) ) @@ -927,7 +914,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(3)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -938,7 +925,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssuePowerControl(8 * 3600, Kilowatts(3)) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -999,7 +986,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(0)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -1010,7 +997,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! 
IssueNoControl(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -1051,7 +1038,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val em = createTestProbe[FlexResponse]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val responseReceiver = createTestProbe[MockResponseMessage]() // with additional activation ticks @@ -1084,10 +1071,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Right(em.ref) ) @@ -1108,7 +1092,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(3)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -1119,7 +1103,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssuePowerControl(8 * 3600, Kilowatts(3)) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -1161,7 +1145,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { q should approximate(Kilovars(0.48432210483)) } - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() participantAgent ! GridSimulationFinished(12 * 3600, 24 * 3600) @@ -1178,7 +1162,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(3)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -1189,7 +1173,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! 
IssueNoControl(12 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -1227,7 +1211,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(0)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -1238,7 +1222,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssueNoControl(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -1286,7 +1270,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val em = createTestProbe[FlexResponse]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val responseReceiver = createTestProbe[MockResponseMessage]() val service = createTestProbe[ServiceMessage]() @@ -1320,10 +1304,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Right(em.ref)) ) @@ -1335,7 +1316,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! FlexActivation(0) // nothing should happen, still waiting for secondary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() participantAgent ! DataProvision( @@ -1356,7 +1337,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(0)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe simulationStartDate @@ -1368,7 +1349,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! 
IssueNoControl(0) // outside of operation interval, 0 MW - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe simulationStartDate @@ -1404,7 +1385,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { Some(12 * 3600), ) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() // TICK 8 * 3600: Start of operation interval @@ -1428,7 +1409,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(4)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -1439,7 +1420,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssuePowerControl(8 * 3600, Kilowatts(3)) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -1486,7 +1467,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! GridSimulationFinished(12 * 3600, 24 * 3600) // nothing should happen, still waiting for secondary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() participantAgent ! DataProvision( @@ -1508,7 +1489,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(5)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -1519,7 +1500,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssueNoControl(12 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -1549,7 +1530,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! FlexActivation(18 * 3600) // nothing should happen, still waiting for secondary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() participantAgent ! DataProvision( @@ -1571,7 +1552,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(8)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (18 * 3600).toDateTime @@ -1582,7 +1563,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! 
IssueNoControl(18 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (18 * 3600).toDateTime @@ -1623,7 +1604,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(0)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -1634,7 +1615,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssueNoControl(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -1673,7 +1654,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! GridSimulationFinished(24 * 3600, 36 * 3600) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() } @@ -1686,7 +1667,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val em = createTestProbe[FlexResponse]() val gridAgent = createTestProbe[GridAgent.Message]() - val resultListener = createTestProbe[ResultEvent]() + val resultProxy = createTestProbe[ResultEvent | ExpectResult]() val service = createTestProbe[ServiceMessage]() // no additional activation ticks @@ -1695,6 +1676,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { val modelFactory = PrimaryDataParticipantModel.Factory( physicalModel, ActivePowerExtra, + 1.0, ) val participantAgent = spawn( @@ -1713,10 +1695,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { expectedRequestTick = 12 * 3600, requestVoltageDeviationTolerance = Each(1e-14), ), - ParticipantResultHandler( - Iterable(resultListener.ref), - notifierConfig, - ), + ParticipantResultHandler(resultProxy.ref, notifierConfig), )(using Right(em.ref) ) @@ -1727,7 +1706,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! FlexActivation(0) // nothing should happen, still waiting for primary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() participantAgent ! DataProvision( @@ -1748,7 +1727,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(0)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe simulationStartDate @@ -1760,7 +1739,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! 
IssueNoControl(0) // outside of operation interval, 0 MW - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe simulationStartDate @@ -1793,7 +1772,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { Some(12 * 3600), ) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() // TICK 8 * 3600: Start of operation interval @@ -1811,7 +1790,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(3)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -1822,7 +1801,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssuePowerControl(8 * 3600, Kilowatts(3)) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (8 * 3600).toDateTime @@ -1866,7 +1845,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! GridSimulationFinished(12 * 3600, 24 * 3600) // nothing should happen, still waiting for primary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() participantAgent ! DataProvision( @@ -1888,7 +1867,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(6)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -1899,7 +1878,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssueNoControl(12 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (12 * 3600).toDateTime @@ -1926,7 +1905,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! FlexActivation(18 * 3600) // nothing should happen, still waiting for primary data... - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() participantAgent ! DataProvision( @@ -1948,7 +1927,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(3)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (18 * 3600).toDateTime @@ -1959,7 +1938,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! 
IssueNoControl(18 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (18 * 3600).toDateTime @@ -1996,7 +1975,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { max should approximate(Kilowatts(0)) } - resultListener.expectMessageType[FlexOptionsResultEvent] match { + resultProxy.expectMessageType[FlexOptionsResultEvent] match { case FlexOptionsResultEvent(result: FlexOptionsResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -2007,7 +1986,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! IssueNoControl(20 * 3600) - resultListener.expectMessageType[ParticipantResultEvent] match { + resultProxy.expectMessageType[ParticipantResultEvent] match { case ParticipantResultEvent(result: MockResult) => result.getInputModel shouldBe MockParticipantModel.uuid result.getTime shouldBe (20 * 3600).toDateTime @@ -2043,7 +2022,7 @@ class ParticipantAgentSpec extends ScalaTestWithActorTestKit with UnitSpec { participantAgent ! GridSimulationFinished(24 * 3600, 36 * 3600) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() em.expectNoMessage() } diff --git a/src/test/scala/edu/ie3/simona/event/listener/ResultEventListenerSpec.scala b/src/test/scala/edu/ie3/simona/event/listener/ResultListenerSpec.scala similarity index 75% rename from src/test/scala/edu/ie3/simona/event/listener/ResultEventListenerSpec.scala rename to src/test/scala/edu/ie3/simona/event/listener/ResultListenerSpec.scala index 742c3b4d5d..6a3704d2a1 100644 --- a/src/test/scala/edu/ie3/simona/event/listener/ResultEventListenerSpec.scala +++ b/src/test/scala/edu/ie3/simona/event/listener/ResultListenerSpec.scala @@ -23,6 +23,10 @@ import edu.ie3.simona.event.ResultEvent.{ import edu.ie3.simona.io.result.ResultSinkType.Csv import edu.ie3.simona.io.result.{ResultEntitySink, ResultSinkType} import edu.ie3.simona.logging.LogbackConfiguration +import edu.ie3.simona.service.results.{ + ThreeWindingResultTestData, + Transformer3wResultSupport, +} import edu.ie3.simona.test.common.result.PowerFlowResultData import edu.ie3.simona.test.common.{IOTestCommons, UnitSpec} import edu.ie3.simona.util.ResultFileHierarchy @@ -34,6 +38,7 @@ import org.apache.pekko.actor.testkit.typed.scaladsl.{ ScalaTestWithActorTestKit, } import org.apache.pekko.testkit.TestKit.awaitCond +import edu.ie3.simona.ontology.messages.ResultMessage.ResultResponse import java.io.{File, FileInputStream} import java.util.UUID @@ -44,7 +49,7 @@ import scala.concurrent.{Await, Future} import scala.io.Source import scala.language.postfixOps -class ResultEventListenerSpec +class ResultListenerSpec extends ScalaTestWithActorTestKit( ActorTestKit.ApplicationTestConfig.withValue( "org.apache.pekko.actor.testkit.typed.filter-leeway", @@ -54,8 +59,7 @@ class ResultEventListenerSpec with UnitSpec with IOTestCommons with PowerFlowResultData - with ThreeWindingResultTestData - with Transformer3wResultSupport { + with ThreeWindingResultTestData { val simulationName = "testSim" val resultEntitiesToBeWritten: Set[Class[? 
<: ResultEntity]] = Set( classOf[PvResult], @@ -97,7 +101,7 @@ class ResultEventListenerSpec Symbol("initializeSinks") ) - ResultEventListener invokePrivate initializeSinks(resultFileHierarchy) + ResultListener invokePrivate initializeSinks(resultFileHierarchy) } private def getFileLinesLength(file: File) = { @@ -141,7 +145,7 @@ class ResultEventListenerSpec resultFileHierarchy(2, ".ttt", Set(classOf[Transformer3WResult])) val deathWatch = createTestProbe("deathWatch") val listener = spawn( - ResultEventListener( + ResultListener( fileHierarchy ) ) @@ -152,16 +156,16 @@ class ResultEventListenerSpec } "handling ordinary results" should { - "process a valid participants result correctly" in { + "process participants results correctly" in { val specificOutputFileHierarchy = resultFileHierarchy(3, ".csv") val listenerRef = spawn( - ResultEventListener( + ResultListener( specificOutputFileHierarchy ) ) - listenerRef ! ParticipantResultEvent(dummyPvResult) + listenerRef ! ResultResponse(dummyPvResult) val outputFile = specificOutputFileHierarchy.rawOutputDataFilePaths .getOrElse( @@ -203,22 +207,20 @@ class ResultEventListenerSpec resultFileSource.close() } - "process a valid power flow result correctly" in { + "process grid results correctly" in { val specificOutputFileHierarchy = resultFileHierarchy(4, ".csv") - val listenerRef = spawn( - ResultEventListener( - specificOutputFileHierarchy + val listenerRef = + spawn(ResultListener(specificOutputFileHierarchy)) + + listenerRef ! ResultResponse( + Iterable( + dummyNodeResult, + dummySwitchResult, + dummyLineResult, + dummyTrafo2wResult, ) ) - listenerRef ! PowerFlowResultEvent( - Iterable(dummyNodeResult), - Iterable(dummySwitchResult), - Iterable(dummyLineResult), - Iterable(dummyTrafo2wResult), - Iterable.empty[PartialTransformer3wResult], - ) - val outputFiles = Map( dummyNodeResultString -> specificOutputFileHierarchy.rawOutputDataFilePaths .getOrElse( @@ -286,103 +288,18 @@ class ResultEventListenerSpec } } - "handling three winding transformer results" should { - def powerflow3wResult( - partialResult: PartialTransformer3wResult - ): PowerFlowResultEvent = - PowerFlowResultEvent( - Iterable.empty[NodeResult], - Iterable.empty[SwitchResult], - Iterable.empty[LineResult], - Iterable.empty[Transformer2WResult], - Iterable(partialResult), - ) - - "correctly reacts on received results" in { - val fileHierarchy = - resultFileHierarchy(5, ".csv", Set(classOf[Transformer3WResult])) - val listener = spawn( - ResultEventListener( - fileHierarchy - ) - ) - - val outputFile = fileHierarchy.rawOutputDataFilePaths - .getOrElse( - classOf[Transformer3WResult], - fail( - s"Cannot get filepath for raw result file of class '${classOf[Transformer3WResult].getSimpleName}' from outputFileHierarchy!'" - ), - ) - .toFile - - /* The result file is created at start up and only contains a headline. */ - awaitCond( - outputFile.exists(), - interval = 500.millis, - max = timeoutDuration, - ) - getFileLinesLength(outputFile) shouldBe 1 - - /* Face the listener with data, as long as they are not comprehensive */ - listener ! powerflow3wResult(resultA) - - listener ! powerflow3wResult(resultC) - - /* Also add unrelated result for different input model */ - val otherResultA = resultA.copy(input = UUID.randomUUID()) - listener ! powerflow3wResult(otherResultA) - - /* Add result A again, which should lead to a failure internally, - but everything should still continue normally - */ - listener ! 
powerflow3wResult(resultA) - - /* Make sure, that there still is no content in file */ - getFileLinesLength(outputFile) shouldBe 1 - - /* Complete awaited result */ - listener ! powerflow3wResult(resultB) - - // stop listener so that result is flushed out - listener ! DelayedStopHelper.FlushAndStop - - /* Await that the result is written */ - awaitCond( - getFileLinesLength(outputFile) == 2, - interval = 500.millis, - max = timeoutDuration, - ) - /* Check the result */ - val resultFileSource = Source.fromFile(outputFile) - val resultFileLines = resultFileSource.getLines().toSeq - - resultFileLines.size shouldBe 2 - val resultLine = resultFileLines.lastOption.getOrElse( - fail( - "Cannot get csv row that should have been written out by the listener!" - ) - ) - - resultLine shouldBe "2.0,1.0,4.0,3.0,6.0,5.0,40d02538-d8dd-421c-8e68-400f1da170c7,-5," + TimeUtil.withDefaults - .toString(time) - - resultFileSource.close() - } - } - "shutting down" should { "shutdown and compress the data when requested to do so without any errors" in { val specificOutputFileHierarchy = resultFileHierarchy(6, ".csv.gz", compressResults = true) val listenerRef = spawn( - ResultEventListener( + ResultListener( specificOutputFileHierarchy ) ) ResultSinkType.Csv(fileFormat = ".csv.gz", delimiter = ",") - listenerRef ! ParticipantResultEvent(dummyPvResult) + listenerRef ! ResultResponse(dummyPvResult) val outputFile = new File( ".gz$".r.replaceAllIn( diff --git a/src/test/scala/edu/ie3/simona/io/result/ResultEntityKafkaSpec.scala b/src/test/scala/edu/ie3/simona/io/result/ResultEntityKafkaSpec.scala index bc6edb13dd..7e186d8379 100644 --- a/src/test/scala/edu/ie3/simona/io/result/ResultEntityKafkaSpec.scala +++ b/src/test/scala/edu/ie3/simona/io/result/ResultEntityKafkaSpec.scala @@ -6,10 +6,11 @@ package edu.ie3.simona.io.result -import org.apache.pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import edu.ie3.datamodel.models.result.NodeResult +import edu.ie3.simona.api.ontology.results.ProvideResultEntities import edu.ie3.simona.event.ResultEvent.PowerFlowResultEvent -import edu.ie3.simona.event.listener.ResultEventListener +import edu.ie3.simona.ontology.messages.ResultMessage.ResultResponse +import edu.ie3.simona.event.listener.ResultListener import edu.ie3.simona.io.result.plain.PlainResult.PlainNodeResult import edu.ie3.simona.io.result.plain.PlainWriter import edu.ie3.simona.logging.LogbackConfiguration @@ -23,6 +24,7 @@ import org.apache.kafka.clients.consumer.KafkaConsumer import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.serialization.{Deserializer, Serdes} import org.apache.kafka.common.utils.Bytes +import org.apache.pekko.actor.testkit.typed.scaladsl.ScalaTestWithActorTestKit import org.scalatest.GivenWhenThen import org.scalatest.concurrent.Eventually import org.scalatest.wordspec.AnyWordSpecLike @@ -91,7 +93,7 @@ class ResultEntityKafkaSpec // build the listener val listenerRef = spawn( - ResultEventListener( + ResultListener( ResultFileHierarchy( "out", "simName", @@ -132,12 +134,12 @@ class ResultEntityKafkaSpec ) When("receiving the NodeResults") - listenerRef ! PowerFlowResultEvent( - Iterable(nodeRes1, nodeRes2, nodeRes3), - Iterable.empty, - Iterable.empty, - Iterable.empty, - Iterable.empty, + listenerRef ! 
ResultResponse( + Map( + nodeRes1.getInputModel -> Iterable(nodeRes1), + nodeRes2.getInputModel -> Iterable(nodeRes2), + nodeRes3.getInputModel -> Iterable(nodeRes3), + ) ) Then("records can be fetched from Kafka") diff --git a/src/test/scala/edu/ie3/simona/model/participant/evcs/EvcsModelIT.scala b/src/test/scala/edu/ie3/simona/model/participant/evcs/EvcsModelIT.scala index 24c8e0e6a1..4cc9b3e53d 100644 --- a/src/test/scala/edu/ie3/simona/model/participant/evcs/EvcsModelIT.scala +++ b/src/test/scala/edu/ie3/simona/model/participant/evcs/EvcsModelIT.scala @@ -16,8 +16,8 @@ import edu.ie3.simona.agent.participant.ParticipantAgentInit.{ } import edu.ie3.simona.api.data.connection.ExtEvDataConnection import edu.ie3.simona.api.data.model.ev.EvModel -import edu.ie3.simona.api.ontology.ev.* import edu.ie3.simona.api.ontology.ScheduleDataServiceMessage +import edu.ie3.simona.api.ontology.ev.* import edu.ie3.simona.config.RuntimeConfig.EvcsRuntimeConfig import edu.ie3.simona.event.ResultEvent import edu.ie3.simona.event.ResultEvent.ParticipantResultEvent @@ -31,11 +31,13 @@ import edu.ie3.simona.ontology.messages.ServiceMessage.{ PrimaryServiceRegistrationMessage, } import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.ontology.messages.ResultMessage.RequestResult import edu.ie3.simona.scheduler.ScheduleLock import edu.ie3.simona.service.ServiceType import edu.ie3.simona.service.ev.ExtEvDataService import edu.ie3.simona.service.ev.ExtEvDataService.InitExtEvData import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult import edu.ie3.simona.test.common.input.EvcsInputTestData import edu.ie3.simona.test.common.{TestSpawnerTyped, UnitSpec} import edu.ie3.simona.util.SimonaConstants.{INIT_SIM_TICK, PRE_INIT_TICK} @@ -88,7 +90,8 @@ class EvcsModelIT "handle a few requests and arrivals as expected" in { val gridAgent = TestProbe[GridAgent.Message]("GridAgent") - val resultListener = TestProbe[ResultEvent]("ResultListener") + val resultProxy = + TestProbe[ResultEvent | ExpectResult]("ResultServiceProxy") val primaryServiceProxy = TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") val scheduler = TestProbe[SchedulerMessage]("Scheduler") @@ -124,8 +127,8 @@ class EvcsModelIT given ParticipantRefs = ParticipantRefs( gridAgent = gridAgent.ref, primaryServiceProxy = primaryServiceProxy.ref, + resultServiceProxy = resultProxy.ref, services = Map(ServiceType.EvMovementService -> evService), - resultListener = Iterable(resultListener.ref), ) val evcsKey = @@ -207,7 +210,7 @@ class EvcsModelIT scheduler.expectMessage(Completion(evService, None)) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // Send arrivals extEvData.provideArrivingEvs( @@ -227,7 +230,7 @@ class EvcsModelIT evcsActivation ! Activation(0) - resultListener + resultProxy .receiveMessages(3) .map { case ParticipantResultEvent(result) => result @@ -271,12 +274,12 @@ class EvcsModelIT scheduler.expectMessage(Completion(evService, None)) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // EVCS activation without arrivals evcsActivation ! Activation(1800) - resultListener + resultProxy .receiveMessages(2) .map { case ParticipantResultEvent(result) => result @@ -303,7 +306,7 @@ class EvcsModelIT evcsActivation ! Activation(3600) - resultListener + resultProxy .receiveMessages(2) .map { case ParticipantResultEvent(result) => result @@ -374,7 +377,7 @@ class EvcsModelIT evcsActivation ! 
Activation(9000) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // Next data at 10800 scheduler.expectMessage(Completion(evcsActivation, Some(10800))) @@ -394,7 +397,7 @@ class EvcsModelIT scheduler.expectMessage(Completion(evService, None)) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // Send arrivals extEvData.provideArrivingEvs( @@ -414,7 +417,7 @@ class EvcsModelIT evcsActivation ! Activation(10800) - resultListener + resultProxy .receiveMessages(2) .map { case ParticipantResultEvent(result) => result @@ -456,7 +459,7 @@ class EvcsModelIT // EVCS activation evcsActivation ! Activation(12600) - resultListener + resultProxy .receiveMessages(2) .map { case ParticipantResultEvent(result) => result @@ -527,7 +530,7 @@ class EvcsModelIT evcsActivation ! Activation(14400) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // evB is departing at 18000 scheduler.expectMessage(Completion(evcsActivation, Some(18000))) @@ -580,7 +583,7 @@ class EvcsModelIT evcsActivation ! Activation(18000) - resultListener.expectNoMessage() + resultProxy.expectNoMessage() // No future arrivals planned, next activation: end of simulation scheduler.expectMessage(Completion(evcsActivation, Some(48 * 3600))) diff --git a/src/test/scala/edu/ie3/simona/service/em/ExtEmBaseIT.scala b/src/test/scala/edu/ie3/simona/service/em/ExtEmBaseIT.scala new file mode 100644 index 0000000000..0bca7295d3 --- /dev/null +++ b/src/test/scala/edu/ie3/simona/service/em/ExtEmBaseIT.scala @@ -0,0 +1,642 @@ +/* + * © 2025. TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.em + +import edu.ie3.simona.agent.em.EmAgent +import edu.ie3.simona.agent.grid.GridAgent +import edu.ie3.simona.agent.participant.ParticipantAgent.{ + DataProvision, + RegistrationFailedMessage, + RegistrationSuccessfulMessage, +} +import edu.ie3.simona.agent.participant.ParticipantAgentInit +import edu.ie3.simona.agent.participant.ParticipantAgentInit.ParticipantRefs +import edu.ie3.simona.api.data.connection.ExtEmDataConnection +import edu.ie3.simona.api.data.connection.ExtEmDataConnection.EmMode +import edu.ie3.simona.api.data.model.em.EmSetPoint +import edu.ie3.simona.api.ontology.em.{ + EmCompletion, + FlexOptionsResponse, + RequestEmFlexResults, +} +import edu.ie3.simona.api.ontology.ScheduleDataServiceMessage +import edu.ie3.simona.api.ontology.simulation.ControlResponseMessageFromExt +import edu.ie3.simona.config.RuntimeConfig.{ + LoadRuntimeConfig, + PvRuntimeConfig, + StorageRuntimeConfig, +} +import edu.ie3.simona.event.ResultEvent +import edu.ie3.simona.model.InputModelContainer.SimpleInputContainer +import edu.ie3.simona.ontology.messages.SchedulerMessage.{ + Completion, + ScheduleActivation, +} +import edu.ie3.simona.ontology.messages.ServiceMessage.{ + Create, + PrimaryServiceRegistrationMessage, + SecondaryServiceRegistrationMessage, +} +import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.scheduler.ScheduleLock +import edu.ie3.simona.service.Data.SecondaryData.WeatherData +import edu.ie3.simona.service.ServiceType +import edu.ie3.simona.service.em.ExtEmDataService.InitExtEmData +import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.weather.WeatherService +import edu.ie3.simona.service.weather.WeatherService.Coordinate +import edu.ie3.simona.test.common.TestSpawnerTyped +import 
edu.ie3.simona.test.common.input.EmCommunicationTestData +import edu.ie3.simona.test.matchers.QuantityMatchers +import edu.ie3.simona.util.SimonaConstants.{INIT_SIM_TICK, PRE_INIT_TICK} +import edu.ie3.util.quantities.QuantityUtils.* +import edu.ie3.util.scala.quantities.WattsPerSquareMeter +import org.apache.pekko.actor.testkit.typed.scaladsl.{ + ScalaTestWithActorTestKit, + TestProbe, +} +import org.apache.pekko.actor.typed.ActorRef +import org.scalatest.wordspec.AnyWordSpecLike +import org.slf4j.{Logger, LoggerFactory} +import squants.motion.MetersPerSecond +import squants.thermal.Celsius + +import java.util.{Optional, UUID} +import scala.concurrent.duration.{DurationInt, FiniteDuration} +import scala.jdk.CollectionConverters.{ + MapHasAsJava, + MapHasAsScala, + SeqHasAsJava, +} + +class ExtEmBaseIT + extends ScalaTestWithActorTestKit + with AnyWordSpecLike + with EmCommunicationTestData + with QuantityMatchers + with TestSpawnerTyped { + + protected val messageTimeout: FiniteDuration = 30.seconds + + protected val log: Logger = LoggerFactory.getLogger("ExtEmCommunicationIT") + + private val emSupUuid = + UUID.fromString("858f3d3d-4189-49cd-9fe5-3cd49b88dc70") + private val emNode3Uuid = + UUID.fromString("fd1a8de9-722a-4304-8799-e1e976d9979c") + private val emNode4Uuid = + UUID.fromString("ff0b995a-86ff-4f4d-987e-e475a64f2180") + + private val gridAgent = TestProbe[GridAgent.Message]("GridAgent") + private val resultListener = TestProbe[ResultEvent]("ResultListener") + private val primaryServiceProxy = + TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") + private val weatherService = + TestProbe[WeatherService.Message]("WeatherService") + + private given ParticipantRefs = ParticipantRefs( + gridAgent = gridAgent.ref, + primaryServiceProxy = primaryServiceProxy.ref, + services = Map(ServiceType.WeatherService -> weatherService.ref), + resultListener = Iterable(resultListener.ref), + ) + + "An ExtEmDataService in base mode" should { + val scheduler = TestProbe[SchedulerMessage]("scheduler") + val extSimAdapter = + TestProbe[ControlResponseMessageFromExt]("extSimAdapter") + + val service = spawn(ExtEmDataService(scheduler.ref)) + + val connection = new ExtEmDataConnection( + List(emSupUuid, emNode3Uuid, emNode4Uuid).asJava, + EmMode.BASE, + ) + connection.setActorRefs(service, extSimAdapter.ref) + + val emAgentSup = spawn( + EmAgent( + emSup, + modelConfig, + outputConfig, + "PROPORTIONAL", + simulationStart, + parent = Left(scheduler.ref), + listener = Iterable(resultListener.ref), + Some(service), + ) + ) + + val emAgentNode3 = spawn( + EmAgent( + emNode3, + modelConfig, + outputConfig, + "PRIORITIZED", + simulationStart, + parent = Right(emAgentSup), + listener = Iterable(resultListener.ref), + Some(service), + ) + ) + + val emAgentNode4 = spawn( + EmAgent( + emNode4, + modelConfig, + outputConfig, + "PRIORITIZED", + simulationStart, + parent = Right(emAgentSup), + listener = Iterable(resultListener.ref), + Some(service), + ) + ) + + val keys = ScheduleLock + .multiKey(TSpawner, scheduler.ref, PRE_INIT_TICK, 4) + .iterator + val lockActivation = + scheduler.expectMessageType[ScheduleActivation].actor + lockActivation ! 
Activation(PRE_INIT_TICK) + + val pvAgentNode3 = spawn( + ParticipantAgentInit( + SimpleInputContainer(pvNode3), + PvRuntimeConfig(), + outputConfig, + Right(emAgentNode3), + keys.next(), + ) + ) + + val storageAgentNode3 = spawn( + ParticipantAgentInit( + SimpleInputContainer(storageInput), + StorageRuntimeConfig(), + outputConfig, + Right(emAgentNode3), + keys.next(), + ) + ) + + val pvAgentNode4 = spawn( + ParticipantAgentInit( + SimpleInputContainer(pvNode4), + PvRuntimeConfig(), + outputConfig, + Right(emAgentNode4), + keys.next(), + ) + ) + + val loadAgentNode4 = spawn( + ParticipantAgentInit( + SimpleInputContainer(loadInput), + LoadRuntimeConfig(), + outputConfig, + Right(emAgentNode4), + keys.next(), + ) + ) + + /* PRE_INIT */ + val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) + scheduler + .expectMessageType[ScheduleActivation] // lock activation scheduled + + service ! Create( + InitExtEmData(connection, simulationStart), + key, + ) + + scheduler.expectMessage( + ScheduleActivation(service, INIT_SIM_TICK, Some(key)) + ) + + // we expect a completion for the participant locks + scheduler.expectMessage(Completion(lockActivation)) + + /* INIT */ + + // activate the service for init tick + service ! Activation(INIT_SIM_TICK) + scheduler.expectMessage(Completion(service)) + + primaryServiceProxy.receiveMessages( + 4, + messageTimeout, + ) should contain allOf ( + PrimaryServiceRegistrationMessage( + pvAgentNode3, + pvNode3.getUuid, + ), + PrimaryServiceRegistrationMessage( + storageAgentNode3, + storageInput.getUuid, + ), + PrimaryServiceRegistrationMessage( + pvAgentNode4, + pvNode4.getUuid, + ), + PrimaryServiceRegistrationMessage( + loadAgentNode4, + loadInput.getUuid, + ) + ) + + // pv agent 3 + pvAgentNode3 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + // deal with weather service registration + weatherService.expectMessage( + SecondaryServiceRegistrationMessage( + pvAgentNode3, + Coordinate(pvNode3.getNode), + ) + ) + + pvAgentNode3 ! RegistrationSuccessfulMessage(weatherService.ref, 0L) + + // pv agent 4 + pvAgentNode4 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + weatherService.expectMessage( + SecondaryServiceRegistrationMessage( + pvAgentNode4, + Coordinate(pvNode4.getNode), + ) + ) + pvAgentNode4 ! RegistrationSuccessfulMessage(weatherService.ref, 0L) + + // storage + storageAgentNode3 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + // load + loadAgentNode4 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + "work correctly when requesting flex options" in { + /* TICK: 0 */ + + val weatherData0 = DataProvision( + 0L, + weatherService.ref, + WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ), + Some(900L), + ) + + pvAgentNode3 ! weatherData0 + pvAgentNode4 ! weatherData0 + + // we request the em option for the superior em agent + connection.sendExtMsg( + new RequestEmFlexResults(0L, List(emSupUuid).asJava, false) + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! 
Activation(0L) + + val receivedFlexOptions0 = connection + .receiveWithType(classOf[FlexOptionsResponse]) + .receiverToFlexOptions() + .asScala + + receivedFlexOptions0.size shouldBe 1 + val flexOptions0 = receivedFlexOptions0(emSupUuid) + flexOptions0.getSender shouldBe emSupUuid + flexOptions0.getReceiver shouldBe emSupUuid + flexOptions0.getpMin should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions0.getpRef should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions0.getpMax should equalWithTolerance( + 0.006200000413468004.asMegaWatt + ) + + // we return a new set point + val setPoints0 = Map(emSupUuid -> new EmSetPoint(emSupUuid)) + + connection.sendSetPoints(0L, setPoints0.asJava, Optional.of(900L), log) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(0L) + + connection.receiveWithType(classOf[EmCompletion]) + + /* TICK: 900 */ + + val weatherData900 = DataProvision( + 900L, + weatherService.ref, + WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ), + Some(1800L), + ) + + pvAgentNode3 ! weatherData900 + pvAgentNode4 ! weatherData900 + + // we request the em option for the superior em agent + connection.sendExtMsg( + new RequestEmFlexResults(900L, List(emSupUuid).asJava, false) + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(900L) + + val receivedFlexOptions900 = connection + .receiveWithType(classOf[FlexOptionsResponse]) + .receiverToFlexOptions() + .asScala + + receivedFlexOptions900.size shouldBe 1 + val flexOptions900 = receivedFlexOptions900(emSupUuid) + flexOptions900.getSender shouldBe emSupUuid + flexOptions900.getReceiver shouldBe emSupUuid + flexOptions900.getpMin should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions900.getpRef should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions900.getpMax should equalWithTolerance( + 0.006200000413468004.asMegaWatt + ) + + // we return a new set point + val setPoints900 = Map( + emSupUuid -> new EmSetPoint(emSupUuid, 0.006200000413468004.asMegaWatt) + ) + + connection.sendSetPoints( + 900L, + setPoints900.asJava, + Optional.of(1800L), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(900L) + + connection.receiveWithType(classOf[EmCompletion]) + } + + "work correctly when requesting disaggregated flex options" in { + /* TICK: 1800 */ + + val weatherData1800 = DataProvision( + 1800L, + weatherService.ref, + WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ), + Some(2700L), + ) + + pvAgentNode3 ! weatherData1800 + pvAgentNode4 ! weatherData1800 + + // we request the em option for the superior em agent + connection.sendExtMsg( + new RequestEmFlexResults(1800L, List(emSupUuid).asJava, true) + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! 
Activation(1800L) + + val receivedFlexOptions1800 = connection + .receiveWithType(classOf[FlexOptionsResponse]) + .receiverToFlexOptions() + .asScala + + receivedFlexOptions1800.size shouldBe 1 + val flexOptions1800 = receivedFlexOptions1800(emSupUuid) + flexOptions1800.getSender shouldBe emSupUuid + flexOptions1800.getReceiver shouldBe emSupUuid + flexOptions1800.getpMin should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions1800.getpRef should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions1800.getpMax should equalWithTolerance( + 0.006200000413468004.asMegaWatt + ) + + flexOptions1800.hasDisaggregated shouldBe true + val disaggregated1800 = flexOptions1800.getDisaggregated.asScala + + val node3Flex1800 = disaggregated1800(emNode3Uuid) + node3Flex1800.getpMin should equalWithTolerance( + 0.asMegaWatt + ) + node3Flex1800.getpRef should equalWithTolerance( + 0.asMegaWatt + ) + node3Flex1800.getpMax should equalWithTolerance( + 0.004.asMegaWatt + ) + + val node4Flex1800 = disaggregated1800(emNode4Uuid) + node4Flex1800.getpMin should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + node4Flex1800.getpRef should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + node4Flex1800.getpMax should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + + // we return a new set point + val setPoints1800 = Map(emSupUuid -> new EmSetPoint(emSupUuid)) + + connection.sendSetPoints( + 1800L, + setPoints1800.asJava, + Optional.of(2700L), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(1800L) + + connection.receiveWithType(classOf[EmCompletion]) + + /* TICK: 2700 */ + + val weatherData2700 = DataProvision( + 2700L, + weatherService.ref, + WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ), + Some(3600L), + ) + + pvAgentNode3 ! weatherData2700 + pvAgentNode4 ! weatherData2700 + + // we request the em option for the superior em agent + connection.sendExtMsg( + new RequestEmFlexResults(2700L, List(emSupUuid).asJava, true) + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! 
Activation(2700L) + + val receivedFlexOptions2700 = connection + .receiveWithType(classOf[FlexOptionsResponse]) + .receiverToFlexOptions() + .asScala + + receivedFlexOptions2700.size shouldBe 1 + val flexOptions2700 = receivedFlexOptions2700(emSupUuid) + flexOptions2700.getSender shouldBe emSupUuid + flexOptions2700.getReceiver shouldBe emSupUuid + flexOptions2700.getpMin should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions2700.getpRef should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + flexOptions2700.getpMax should equalWithTolerance( + 0.006200000413468004.asMegaWatt + ) + + flexOptions2700.hasDisaggregated shouldBe true + val disaggregated2700 = flexOptions2700.getDisaggregated.asScala + + val node3Flex2700 = disaggregated2700(emNode3Uuid) + node3Flex2700.getpMin should equalWithTolerance( + 0.asMegaWatt + ) + node3Flex2700.getpRef should equalWithTolerance( + 0.asMegaWatt + ) + node3Flex2700.getpMax should equalWithTolerance( + 0.004.asMegaWatt + ) + + val node4Flex2700 = disaggregated2700(emNode4Uuid) + node4Flex2700.getpMin should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + node4Flex2700.getpRef should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + node4Flex2700.getpMax should equalWithTolerance( + 0.002200000413468004.asMegaWatt + ) + + // we return a new set point + val setPoints2700 = Map( + emSupUuid -> new EmSetPoint(emSupUuid, 0.006200000413468004.asMegaWatt) + ) + + connection.sendSetPoints( + 2700L, + setPoints2700.asJava, + Optional.of(3600L), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(2700L) + + connection.receiveWithType(classOf[EmCompletion]) + } + + "work correctly without requesting flex options" in { + /* TICK: 3600 */ + + val weatherData3600 = DataProvision( + 3600L, + weatherService.ref, + WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ), + Some(4500L), + ) + + pvAgentNode3 ! weatherData3600 + pvAgentNode4 ! weatherData3600 + + // we send a new set point + val setPoints3600 = Map( + emSupUuid -> new EmSetPoint(emSupUuid, 0.002200000413468004.asMegaWatt) + ) + + connection.sendSetPoints( + 3600L, + setPoints3600.asJava, + Optional.of(4500L), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(3600L) + + connection.receiveWithType(classOf[EmCompletion]) + + /* TICK: 4500 */ + + val weatherData4500 = DataProvision( + 4500L, + weatherService.ref, + WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ), + Some(5400L), + ) + + pvAgentNode3 ! weatherData4500 + pvAgentNode4 ! weatherData4500 + + // we send a new set point + val setPoints4500 = Map( + emSupUuid -> new EmSetPoint(emSupUuid, 0.006200000413468004.asMegaWatt) + ) + + connection.sendSetPoints( + 4500L, + setPoints4500.asJava, + Optional.of(5400L), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(4500L) + + connection.receiveWithType(classOf[EmCompletion]) + + } + } +} diff --git a/src/test/scala/edu/ie3/simona/service/em/ExtEmCommunicationIT.scala b/src/test/scala/edu/ie3/simona/service/em/ExtEmCommunicationIT.scala new file mode 100644 index 0000000000..2a5f90363c --- /dev/null +++ b/src/test/scala/edu/ie3/simona/service/em/ExtEmCommunicationIT.scala @@ -0,0 +1,583 @@ +/* + * © 2025. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.em + +import edu.ie3.simona.agent.em.EmAgent +import edu.ie3.simona.agent.grid.GridAgent +import edu.ie3.simona.agent.participant.ParticipantAgent.{ + DataProvision, + RegistrationFailedMessage, + RegistrationSuccessfulMessage, +} +import edu.ie3.simona.agent.participant.ParticipantAgentInit.ParticipantRefs +import edu.ie3.simona.agent.participant.{ParticipantAgent, ParticipantAgentInit} +import edu.ie3.simona.api.data.connection.ExtEmDataConnection +import edu.ie3.simona.api.data.connection.ExtEmDataConnection.EmMode +import edu.ie3.simona.api.data.model.em.{ + EmSetPoint, + FlexOptionRequest, + FlexOptionRequestResult, + FlexOptions, +} +import edu.ie3.simona.api.ontology.ScheduleDataServiceMessage +import edu.ie3.simona.api.ontology.em.{ + EmCompletion, + EmResultResponse, + FlexOptionsResponse, +} +import edu.ie3.simona.api.ontology.simulation.ControlResponseMessageFromExt +import edu.ie3.simona.config.RuntimeConfig.{ + LoadRuntimeConfig, + PvRuntimeConfig, + StorageRuntimeConfig, +} +import edu.ie3.simona.event.ResultEvent +import edu.ie3.simona.model.InputModelContainer.SimpleInputContainer +import edu.ie3.simona.ontology.messages.SchedulerMessage.{ + Completion, + ScheduleActivation, +} +import edu.ie3.simona.ontology.messages.ServiceMessage.{ + Create, + PrimaryServiceRegistrationMessage, + SecondaryServiceRegistrationMessage, +} +import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.scheduler.ScheduleLock +import edu.ie3.simona.service.Data.SecondaryData.WeatherData +import edu.ie3.simona.service.ServiceType +import edu.ie3.simona.service.em.ExtEmDataService.InitExtEmData +import edu.ie3.simona.service.primary.PrimaryServiceProxy +import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult +import edu.ie3.simona.service.weather.WeatherService +import edu.ie3.simona.service.weather.WeatherService.Coordinate +import edu.ie3.simona.test.common.TestSpawnerTyped +import edu.ie3.simona.test.common.input.EmCommunicationTestData +import edu.ie3.simona.test.matchers.QuantityMatchers +import edu.ie3.simona.util.SimonaConstants.{INIT_SIM_TICK, PRE_INIT_TICK} +import edu.ie3.util.quantities.QuantityUtils.* +import edu.ie3.util.scala.quantities.WattsPerSquareMeter +import org.apache.pekko.actor.testkit.typed.scaladsl.{ + ScalaTestWithActorTestKit, + TestProbe, +} +import org.apache.pekko.actor.typed.ActorRef +import org.scalatest.OptionValues.convertOptionToValuable +import org.scalatest.wordspec.AnyWordSpecLike +import org.slf4j.{Logger, LoggerFactory} +import squants.motion.MetersPerSecond +import squants.thermal.Celsius +import tech.units.indriya.ComparableQuantity + +import java.util.{Optional, UUID} +import javax.measure.quantity.Power +import scala.concurrent.duration.{DurationInt, FiniteDuration} +import scala.jdk.CollectionConverters.{ + MapHasAsJava, + MapHasAsScala, + SeqHasAsJava, +} +import scala.jdk.OptionConverters.RichOptional + +class ExtEmCommunicationIT + extends ScalaTestWithActorTestKit + with AnyWordSpecLike + with EmCommunicationTestData + with QuantityMatchers + with TestSpawnerTyped { + + protected val messageTimeout: FiniteDuration = 30.seconds + + protected val log: Logger = LoggerFactory.getLogger("ExtEmCommunicationIT") + + private val emSupUuid = + UUID.fromString("858f3d3d-4189-49cd-9fe5-3cd49b88dc70") + private val 
emNode3Uuid = + UUID.fromString("fd1a8de9-722a-4304-8799-e1e976d9979c") + private val emNode4Uuid = + UUID.fromString("ff0b995a-86ff-4f4d-987e-e475a64f2180") + + private val connection = new ExtEmDataConnection( + List(emSupUuid, emNode3Uuid, emNode4Uuid).asJava, + EmMode.EM_COMMUNICATION, + ) + + private val scheduler = TestProbe[SchedulerMessage]("scheduler") + private val extSimAdapter = + TestProbe[ControlResponseMessageFromExt]("extSimAdapter") + private val gridAgent = TestProbe[GridAgent.Message]("GridAgent") + private val resultListener = + TestProbe[ResultEvent | ExpectResult]("ResultListener") + private val primaryServiceProxy = + TestProbe[PrimaryServiceProxy.Message]("PrimaryServiceProxy") + private val weatherService = + TestProbe[WeatherService.Message]("WeatherService") + + private given ParticipantRefs = ParticipantRefs( + gridAgent = gridAgent.ref, + primaryServiceProxy = primaryServiceProxy.ref, + resultServiceProxy = resultListener.ref, + services = Map(ServiceType.WeatherService -> weatherService.ref), + ) + + "An ExtEmDataService in communication mode" should { + given service: ActorRef[ExtEmDataService.Message] = + spawn(ExtEmDataService(scheduler.ref)) + connection.setActorRefs(service, extSimAdapter.ref) + + "with participant agents work correctly" in { + val emAgentSup = spawn( + EmAgent( + emSup, + modelConfig, + outputConfig, + "PROPORTIONAL", + simulationStart, + parent = Left(scheduler.ref), + listener = resultListener.ref, + Some(service), + ) + ) + + val emAgentNode3 = spawn( + EmAgent( + emNode3, + modelConfig, + outputConfig, + "PRIORITIZED", + simulationStart, + parent = Right(emAgentSup), + listener = resultListener.ref, + Some(service), + ) + ) + + val emAgentNode4 = spawn( + EmAgent( + emNode4, + modelConfig, + outputConfig, + "PRIORITIZED", + simulationStart, + parent = Right(emAgentSup), + listener = resultListener.ref, + Some(service), + ) + ) + + val keys = ScheduleLock + .multiKey(TSpawner, scheduler.ref, PRE_INIT_TICK, 4) + .iterator + val lockActivation = + scheduler.expectMessageType[ScheduleActivation].actor + lockActivation ! Activation(PRE_INIT_TICK) + + val pvAgentNode3 = spawn( + ParticipantAgentInit( + SimpleInputContainer(pvNode3), + PvRuntimeConfig(), + outputConfig, + Right(emAgentNode3), + keys.next(), + ), + "PvAgentNode3", + ) + + val storageAgentNode3 = spawn( + ParticipantAgentInit( + SimpleInputContainer(storageInput), + StorageRuntimeConfig(), + outputConfig, + Right(emAgentNode3), + keys.next(), + ), + "storageAgentNode3", + ) + + val pvAgentNode4 = spawn( + ParticipantAgentInit( + SimpleInputContainer(pvNode4), + PvRuntimeConfig(), + outputConfig, + Right(emAgentNode4), + keys.next(), + ), + "PvAgentNode4", + ) + + val loadAgentNode4 = spawn( + ParticipantAgentInit( + SimpleInputContainer(loadInput), + LoadRuntimeConfig(), + outputConfig, + Right(emAgentNode4), + keys.next(), + ), + "LoadAgentNode4", + ) + + /* PRE_INIT */ + val key = ScheduleLock.singleKey(TSpawner, scheduler.ref, PRE_INIT_TICK) + scheduler + .expectMessageType[ScheduleActivation] // lock activation scheduled + + service ! Create( + InitExtEmData(connection, simulationStart), + key, + ) + + scheduler.expectMessage( + ScheduleActivation(service, INIT_SIM_TICK, Some(key)) + ) + + // we expect a completion for the participant locks + scheduler.expectMessage(Completion(lockActivation)) + + /* INIT */ + + // activate the service for init tick + service ! 
Activation(INIT_SIM_TICK) + scheduler.expectMessage(Completion(service)) + + primaryServiceProxy.receiveMessages( + 4, + messageTimeout, + ) should contain allOf ( + PrimaryServiceRegistrationMessage( + pvAgentNode3, + pvNode3.getUuid, + ), + PrimaryServiceRegistrationMessage( + storageAgentNode3, + storageInput.getUuid, + ), + PrimaryServiceRegistrationMessage( + pvAgentNode4, + pvNode4.getUuid, + ), + PrimaryServiceRegistrationMessage( + loadAgentNode4, + loadInput.getUuid, + ) + ) + + // pv agent 3 + pvAgentNode3 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + // deal with weather service registration + weatherService.expectMessage( + SecondaryServiceRegistrationMessage( + pvAgentNode3, + Coordinate(pvNode3.getNode), + ) + ) + + pvAgentNode3 ! RegistrationSuccessfulMessage(weatherService.ref, 0L) + + // pv agent 4 + pvAgentNode4 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + weatherService.expectMessage( + SecondaryServiceRegistrationMessage( + pvAgentNode4, + Coordinate(pvNode4.getNode), + ) + ) + pvAgentNode4 ! RegistrationSuccessfulMessage(weatherService.ref, 0L) + + // storage + storageAgentNode3 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + // load + loadAgentNode4 ! RegistrationFailedMessage(primaryServiceProxy.ref) + + given pvAgents: Seq[ActorRef[ParticipantAgent.Request]] = + Seq(pvAgentNode3, pvAgentNode4) + + /* TICK: 0 */ + + val weatherData0 = WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ) + + communicate( + 0, + 900, + weatherData0, + Map( + emSupUuid -> new FlexOptions( + emSupUuid, + emSupUuid, + 0.002200000413468004.asMegaWatt, + 0.002200000413468004.asMegaWatt, + 0.006200000413468004.asMegaWatt, + ), + emNode3Uuid -> new FlexOptions( + emSupUuid, + emNode3Uuid, + 0.asMegaWatt, + 0.asMegaWatt, + 0.004.asMegaWatt, + ), + emNode4Uuid -> new FlexOptions( + emSupUuid, + emNode4Uuid, + 0.002200000413468004.asMegaWatt, + 0.002200000413468004.asMegaWatt, + 0.002200000413468004.asMegaWatt, + ), + ), + Map( + emSupUuid -> 2.200000413468004.asKiloWatt, + emNode3Uuid -> 0.asKiloWatt, + emNode4Uuid -> 2.200000413468004.asKiloWatt, + ), + ) + + /* TICK: 900 */ + + val weatherData900 = WeatherData( + WattsPerSquareMeter(0), + WattsPerSquareMeter(0), + Celsius(0d), + MetersPerSecond(0d), + ) + + communicate( + 900, + 1800, + weatherData900, + Map( + emSupUuid -> new FlexOptions( + emSupUuid, + emSupUuid, + 0.002200000413468004.asMegaWatt, + 0.002200000413468004.asMegaWatt, + 0.006200000413468004.asMegaWatt, + ), + emNode3Uuid -> new FlexOptions( + emSupUuid, + emNode3Uuid, + 0.asMegaWatt, + 0.asMegaWatt, + 0.004.asMegaWatt, + ), + emNode4Uuid -> new FlexOptions( + emSupUuid, + emNode4Uuid, + 0.002200000413468004.asMegaWatt, + 0.002200000413468004.asMegaWatt, + 0.002200000413468004.asMegaWatt, + ), + ), + Map( + emSupUuid -> 2.200000413468004.asKiloWatt, + emNode3Uuid -> 0.asKiloWatt, + emNode4Uuid -> 2.200000413468004.asKiloWatt, + ), + ) + + } + + // helper methods + + def communicate( + tick: Long, + nextTick: Long, + weatherData: WeatherData, + flexOptions: Map[UUID, FlexOptions], + setPoints: Map[UUID, ComparableQuantity[Power]], + )(using + service: ActorRef[ExtEmDataService.Message], + pvAgents: Seq[ActorRef[ParticipantAgent.Request]], + ): Unit = { + + /* start communication */ + val inferiorEms = Set(emNode3Uuid, emNode4Uuid) + + // we first send a flex option request to the superior em agent + connection.sendFlexRequests( + tick, + Map( + emSupUuid -> new FlexOptionRequest(emSupUuid, 
Optional.empty) + ).asJava, + Optional.of(nextTick), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(tick) + + // we expect to receive a request per inferior em agent + val requestsToInferior = connection + .receiveWithType(classOf[EmResultResponse]) + .emResults + .asScala + + requestsToInferior.size shouldBe 1 + requestsToInferior(emSupUuid) shouldBe new FlexOptionRequestResult( + simulationStart.plusSeconds(tick), + emSupUuid, + List(emNode3Uuid, emNode4Uuid).asJava, + ) + + // we send a request to each inferior em agent + connection.sendFlexRequests( + tick, + Map( + emNode3Uuid -> new FlexOptionRequest( + emNode3Uuid, + Optional.of(emSupUuid), + ), + emNode4Uuid -> new FlexOptionRequest( + emNode4Uuid, + Optional.of(emSupUuid), + ), + ).asJava, + Optional.of(nextTick), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(tick) + + val data = DataProvision( + tick, + weatherService.ref, + weatherData, + Some(nextTick), + ) + pvAgents.foreach(_ ! data) + + // we expect to receive flex options from the inferior em agents + val flexOptionResponseInferior = connection + .receiveWithType(classOf[FlexOptionsResponse]) + .receiverToFlexOptions() + .asScala + + if flexOptionResponseInferior.size != 2 then { + flexOptionResponseInferior.addAll( + connection + .receiveWithType(classOf[FlexOptionsResponse]) + .receiverToFlexOptions() + .asScala + ) + } + + flexOptionResponseInferior.keySet shouldBe inferiorEms + + flexOptionResponseInferior.foreach { case (receiver, results) => + val expectedOptions = flexOptions(receiver) + + results.getReceiver shouldBe expectedOptions.receiver + results.getSender shouldBe expectedOptions.sender + results.getpMin() should equalWithTolerance(expectedOptions.pMin) + results.getpRef() should equalWithTolerance(expectedOptions.pRef) + results.getpMax() should equalWithTolerance(expectedOptions.pMax) + } + + // we send the flex options to the superior em agent + connection.sendFlexOptions( + tick, + Map(emSupUuid -> inferiorEms.map(flexOptions).toList.asJava).asJava, + Optional.of(nextTick), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(tick) + + // we expect the total flex options of the grid from the superior em agent + val totalFlexOptions = connection + .receiveWithType(classOf[FlexOptionsResponse]) + .receiverToFlexOptions() + .asScala + + totalFlexOptions.keySet shouldBe Set(emSupUuid) + + totalFlexOptions.foreach { case (receiver, result) => + val expectedOptions = flexOptions(receiver) + + result.getReceiver shouldBe expectedOptions.receiver + result.getSender shouldBe expectedOptions.sender + result.getpMin() should equalWithTolerance(expectedOptions.pMin) + result.getpRef() should equalWithTolerance(expectedOptions.pRef) + result.getpMax() should equalWithTolerance(expectedOptions.pMax) + } + + // after we received all options we will send a message, to keep the current set point + connection.sendSetPoints( + tick, + Map( + emSupUuid -> new EmSetPoint(emSupUuid, setPoints(emSupUuid)) + ).asJava, + Optional.of(nextTick), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! 
Activation(tick) + + // we expect a new set point for each inferior em agent + val inferiorSetPoints = connection + .receiveWithType(classOf[EmResultResponse]) + .emResults + .asScala + .filter { case (uuid, _) => uuid == emNode3Uuid || uuid == emNode4Uuid } + + if inferiorSetPoints.size != 2 then { + inferiorSetPoints.addAll( + connection + .receiveWithType(classOf[EmResultResponse]) + .emResults + .asScala + .filter { case (uuid, _) => + uuid == emNode3Uuid || uuid == emNode4Uuid + } + ) + } + + inferiorSetPoints.keySet shouldBe inferiorEms + + inferiorSetPoints.foreach { case (receiver, results) => + results.size shouldBe 1 + + results.getFirst match { + case setPoint: EmSetPoint => + setPoint.power + .flatMap(_.getP) + .toScala + .value should equalWithTolerance( + setPoints(receiver) + ) + } + } + + def toSetPoint(uuid: UUID): (UUID, EmSetPoint) = + uuid -> new EmSetPoint(uuid, setPoints(uuid)) + + // we send the new set points to the inferior em agents + connection.sendSetPoints( + tick, + inferiorEms.map(toSetPoint).toMap.asJava, + Optional.of(nextTick), + log, + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(service)) + service ! Activation(tick) + + // we expect a finish message + connection.receiveWithType(classOf[EmCompletion]) + } + + } + +} diff --git a/src/test/scala/edu/ie3/simona/service/ev/ExtEvDataServiceSpec.scala b/src/test/scala/edu/ie3/simona/service/ev/ExtEvDataServiceSpec.scala index 23074f1841..5e1d3286c0 100644 --- a/src/test/scala/edu/ie3/simona/service/ev/ExtEvDataServiceSpec.scala +++ b/src/test/scala/edu/ie3/simona/service/ev/ExtEvDataServiceSpec.scala @@ -707,7 +707,8 @@ class ExtEvDataServiceSpec // ev service should receive movements msg at this moment // scheduler should receive schedule msg - extSimAdapter.expectMessageType[ScheduleDataServiceMessage] + extSimAdapter + .expectMessageType[ScheduleDataServiceMessage] // we trigger ev service evService ! Activation(0L) diff --git a/src/test/scala/edu/ie3/simona/service/primary/ExtPrimaryDataServiceSpec.scala b/src/test/scala/edu/ie3/simona/service/primary/ExtPrimaryDataServiceSpec.scala new file mode 100644 index 0000000000..d9e4077cd2 --- /dev/null +++ b/src/test/scala/edu/ie3/simona/service/primary/ExtPrimaryDataServiceSpec.scala @@ -0,0 +1,146 @@ +/* + * © 2021. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.primary + +import com.typesafe.scalalogging.LazyLogging +import edu.ie3.datamodel.models.value.Value +import edu.ie3.simona.agent.participant.ParticipantAgent.{ + DataProvision, + RegistrationSuccessfulMessage, +} +import edu.ie3.simona.api.data.connection.ExtPrimaryDataConnection +import edu.ie3.simona.api.ontology.simulation.ControlResponseMessageFromExt +import edu.ie3.simona.ontology.messages.SchedulerMessage.{ + Completion, + ScheduleActivation, +} +import edu.ie3.simona.ontology.messages.ServiceMessage.{ + Create, + PrimaryServiceRegistrationMessage, + SecondaryServiceRegistrationMessage, +} +import edu.ie3.simona.ontology.messages.{ + Activation, + SchedulerMessage, + ServiceMessage, +} +import edu.ie3.simona.scheduler.ScheduleLock +import edu.ie3.simona.service.Data.PrimaryData +import edu.ie3.simona.service.primary.ExtPrimaryDataService.InitExtPrimaryData +import edu.ie3.simona.service.weather.WeatherService.Coordinate +import edu.ie3.simona.test.common.TestSpawnerTyped +import edu.ie3.simona.util.SimonaConstants.INIT_SIM_TICK +import org.apache.pekko.actor.testkit.typed.scaladsl.{ + ScalaTestWithActorTestKit, + TestProbe, +} +import org.apache.pekko.actor.typed.scaladsl.adapter.TypedActorRefOps +import org.scalatest.PrivateMethodTester +import org.scalatest.matchers.should +import org.scalatest.wordspec.AnyWordSpecLike + +import java.util.UUID +import scala.concurrent.duration.DurationInt +import scala.jdk.CollectionConverters.* + +class ExtPrimaryDataServiceSpec + extends ScalaTestWithActorTestKit + with AnyWordSpecLike + with should.Matchers + with PrivateMethodTester + with LazyLogging + with TestSpawnerTyped { + + private val scheduler = TestProbe[SchedulerMessage]("scheduler") + private val extSimAdapter = + TestProbe[ControlResponseMessageFromExt]("extSimAdapter") + + private val extPrimaryDataConnection = new ExtPrimaryDataConnection( + Map.empty[UUID, Class[? <: Value]].asJava + ) + + "An uninitialized external primary data service" must { + + "send correct completion message after initialisation" in { + val primaryDataService = spawn(ExtPrimaryDataService(scheduler.ref)) + + val key = + ScheduleLock.singleKey(TSpawner, scheduler.ref, INIT_SIM_TICK) + scheduler + .expectMessageType[ScheduleActivation] // lock activation scheduled + + extPrimaryDataConnection.setActorRefs( + primaryDataService, + extSimAdapter.ref, + ) + + primaryDataService ! Create( + InitExtPrimaryData(extPrimaryDataConnection), + key, + ) + + scheduler.expectMessage( + ScheduleActivation(primaryDataService, INIT_SIM_TICK, Some(key)) + ) + + primaryDataService ! Activation(INIT_SIM_TICK) + scheduler.expectMessage(Completion(primaryDataService)) + } + } + + "An external primary service actor" should { + val primaryDataService = spawn(ExtPrimaryDataService(scheduler.ref)) + val systemParticipant = TestProbe[Any]("dummySystemParticipant") + + "refuse registration for wrong registration request" in { + val schedulerProbe = TestProbe[SchedulerMessage]("schedulerProbe") + + // we need to create another service, since we want to continue using the other in later tests + val service = spawn(ExtPrimaryDataService(schedulerProbe.ref)) + + val key = + ScheduleLock.singleKey(TSpawner, schedulerProbe.ref, INIT_SIM_TICK) + + primaryDataService ! Create( + InitExtPrimaryData(extPrimaryDataConnection), + key, + ) + + service ! 
Activation(INIT_SIM_TICK) + + service ! SecondaryServiceRegistrationMessage( + systemParticipant.ref, + Coordinate(51.4843281, 7.4116482), + ) + + val deathWatch = createTestProbe("deathWatch") + deathWatch.expectTerminated(service.ref) + } + + "correctly register a forwarded request" ignore { + primaryDataService ! PrimaryServiceRegistrationMessage( + systemParticipant.ref, + UUID.randomUUID(), + ) + + /* Wait for request approval */ + systemParticipant.expectMessage( + RegistrationSuccessfulMessage( + primaryDataService, + 0L, + ) + ) + + /* We cannot directly check, if the requesting actor is among the subscribers, therefore we ask the actor to + * provide data to all subscribed actors and check, if the subscribed probe gets one */ + primaryDataService ! Activation(0) + scheduler.expectMessage(Completion(primaryDataService)) + + systemParticipant.expectMessageType[DataProvision[PrimaryData]] + } + } +} diff --git a/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySpec.scala b/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySpec.scala index 68bf753c94..94ceacd42c 100644 --- a/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySpec.scala +++ b/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySpec.scala @@ -11,6 +11,8 @@ import edu.ie3.datamodel.io.naming.FileNamingStrategy import edu.ie3.datamodel.io.naming.timeseries.ColumnScheme import edu.ie3.datamodel.io.source.TimeSeriesMappingSource import edu.ie3.datamodel.io.source.csv.CsvTimeSeriesMappingSource +import edu.ie3.datamodel.models.value.{PValue, SValue, Value} +import edu.ie3.simona.api.data.connection.ExtPrimaryDataConnection import edu.ie3.datamodel.models.value.SValue import edu.ie3.simona.agent.participant.ParticipantAgent import edu.ie3.simona.agent.participant.ParticipantAgent.RegistrationFailedMessage @@ -66,6 +68,7 @@ import org.slf4j.{Logger, LoggerFactory} import java.nio.file.{Path, Paths} import java.time.ZonedDateTime import java.util.UUID +import scala.jdk.CollectionConverters.* import scala.language.implicitConversions import scala.util.{Failure, Success} @@ -138,11 +141,26 @@ class PrimaryServiceProxySpec when(m.self).thenReturn(service.ref) m } + private val validExtPrimaryDataService = spawn( + ExtPrimaryDataService(scheduler.ref) + ) + + private val extEntityId = + UUID.fromString("07bbe1aa-1f39-4dfb-b41b-339dec816ec4") + + private val valueMap: Map[UUID, Class[? 
<: Value]] = Map( + extEntityId -> classOf[PValue] + ) + + private val extPrimaryDataConnection = new ExtPrimaryDataConnection( + valueMap.asJava + ) val initStateData: InitPrimaryServiceProxyStateData = InitPrimaryServiceProxyStateData( validPrimaryConfig, simulationStart, + Seq.empty, ) val proxy: ActorRef[PrimaryServiceProxy.Message] = testKit.spawn(PrimaryServiceProxy(scheduler.ref, initStateData)) @@ -159,6 +177,7 @@ class PrimaryServiceProxySpec PrimaryServiceProxy.prepareStateData( maliciousConfig, simulationStart, + Seq.empty, ) match { case Success(emptyStateData) => emptyStateData.modelToTimeSeries shouldBe Map.empty @@ -182,6 +201,7 @@ class PrimaryServiceProxySpec PrimaryServiceProxy.prepareStateData( maliciousConfig, simulationStart, + Seq.empty, ) match { case Success(_) => fail("Building state data with missing config should fail") @@ -195,6 +215,7 @@ class PrimaryServiceProxySpec PrimaryServiceProxy.prepareStateData( validPrimaryConfig, simulationStart, + Seq.empty, ) match { case Success( PrimaryServiceStateData( @@ -202,6 +223,7 @@ class PrimaryServiceProxySpec timeSeriesToSourceRef, simulationStart, primaryConfig, + _, ) ) => modelToTimeSeries shouldBe Map( @@ -239,6 +261,28 @@ class PrimaryServiceProxySpec ) } } + + "build proxy correctly when there is an external simulation" in { + PrimaryServiceProxy.prepareStateData( + validPrimaryConfig, + simulationStart, + Seq((extPrimaryDataConnection, validExtPrimaryDataService)), + ) match { + case Success( + PrimaryServiceStateData( + _, + _, + _, + _, + extSubscribersToService, + ) + ) => + extSubscribersToService shouldBe Map( + extEntityId -> validExtPrimaryDataService + ) + } + } + } "Sending initialization information to an uninitialized actor" should { @@ -436,6 +480,7 @@ class PrimaryServiceProxySpec timeSeriesToSourceRef, simulationStart, primaryConfig, + _, ) => modelToTimeSeries shouldBe proxyStateData.modelToTimeSeries timeSeriesToSourceRef shouldBe Map( diff --git a/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySqlIT.scala b/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySqlIT.scala index 3534b6ae11..a4d6d24720 100644 --- a/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySqlIT.scala +++ b/src/test/scala/edu/ie3/simona/service/primary/PrimaryServiceProxySqlIT.scala @@ -94,6 +94,7 @@ class PrimaryServiceProxySqlIT sqlParams = Some(sqlParams), ), simulationStart, + Seq.empty, ) testKit.spawn( diff --git a/src/test/scala/edu/ie3/simona/service/results/ExtResultProviderSpec.scala b/src/test/scala/edu/ie3/simona/service/results/ExtResultProviderSpec.scala new file mode 100644 index 0000000000..bbca74d124 --- /dev/null +++ b/src/test/scala/edu/ie3/simona/service/results/ExtResultProviderSpec.scala @@ -0,0 +1,95 @@ +/* + * © 2025. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.service.results + +import edu.ie3.simona.api.data.connection.ExtResultDataConnection +import edu.ie3.simona.api.ontology.ScheduleDataServiceMessage +import edu.ie3.simona.api.ontology.results.{ + ProvideResultEntities, + RequestResultEntities, +} +import edu.ie3.simona.api.ontology.simulation.ControlResponseMessageFromExt +import edu.ie3.simona.ontology.messages.ResultMessage.{ + RequestResult, + ResultResponse, +} +import edu.ie3.simona.ontology.messages.SchedulerMessage.Completion +import edu.ie3.simona.ontology.messages.{Activation, SchedulerMessage} +import edu.ie3.simona.test.common.UnitSpec +import edu.ie3.simona.test.common.result.PowerFlowResultData +import org.apache.pekko.actor.testkit.typed.scaladsl.{ + ScalaTestWithActorTestKit, + TestProbe, +} + +import scala.jdk.CollectionConverters.{ + ListHasAsScala, + SeqHasAsJava, + SetHasAsScala, +} + +class ExtResultProviderSpec + extends ScalaTestWithActorTestKit + with UnitSpec + with PowerFlowResultData { + + private val scheduler = TestProbe[SchedulerMessage]("scheduler") + private val resultProxy = TestProbe[RequestResult]("resultProxy") + + "The ExtResultProvider" should { + + "handle result responses correctly" in { + val connection = new ExtResultDataConnection(List(dummyInputModel).asJava) + val extSimAdapter = + TestProbe[ControlResponseMessageFromExt]("extSimAdapter") + val provider = + spawn(ExtResultProvider(connection, scheduler.ref, resultProxy.ref)) + connection.setActorRefs(provider.ref, extSimAdapter.ref) + + provider ! ResultResponse( + Map(dummyInputModel -> List(dummyNodeResult, dummyPvResult)) + ) + + val results = + connection.receiveWithType(classOf[ProvideResultEntities]).results + results.keySet.asScala shouldBe Set(dummyInputModel) + results.get(dummyInputModel).asScala shouldBe List( + dummyNodeResult, + dummyPvResult, + ) + + scheduler.expectMessage(Completion(provider.ref)) + } + + "handle result data message from external" in { + val connection = new ExtResultDataConnection(List(dummyInputModel).asJava) + val extSimAdapter = + TestProbe[ControlResponseMessageFromExt]("extSimAdapter") + val provider = + spawn(ExtResultProvider(connection, scheduler.ref, resultProxy.ref)) + connection.setActorRefs(provider.ref, extSimAdapter.ref) + + // requesting results from the result provider + connection.sendExtMsg( + new RequestResultEntities(3600L, List(dummyInputModel).asJava) + ) + + extSimAdapter.expectMessage(new ScheduleDataServiceMessage(provider.ref)) + provider ! Activation(3600L) + + resultProxy.expectMessage( + RequestResult(Seq(dummyInputModel), 3600L, provider.ref) + ) + + provider ! ResultResponse(Map(dummyInputModel -> List(dummyPvResult))) + + scheduler.expectMessage(Completion(provider.ref)) + } + + } + +} diff --git a/src/test/scala/edu/ie3/simona/service/results/ResultServiceProxySpec.scala b/src/test/scala/edu/ie3/simona/service/results/ResultServiceProxySpec.scala new file mode 100644 index 0000000000..17eca14dbd --- /dev/null +++ b/src/test/scala/edu/ie3/simona/service/results/ResultServiceProxySpec.scala @@ -0,0 +1,270 @@ +/* + * © 2025. 
TU Dortmund University,
+ * Institute of Energy Systems, Energy Efficiency and Energy Economics,
+ * Research group Distribution grid planning and operation
+ */
+
+package edu.ie3.simona.service.results
+
+import edu.ie3.simona.event.ResultEvent.{
+  ParticipantResultEvent,
+  PowerFlowResultEvent,
+}
+import edu.ie3.simona.ontology.messages.ResultMessage
+import edu.ie3.simona.ontology.messages.ResultMessage.{
+  RequestResult,
+  ResultResponse,
+}
+import edu.ie3.simona.service.results.ResultServiceProxy.ExpectResult
+import edu.ie3.simona.test.common.result.PowerFlowResultData
+import edu.ie3.simona.test.common.{ConfigTestData, UnitSpec}
+import org.apache.pekko.actor.testkit.typed.scaladsl.{
+  ScalaTestWithActorTestKit,
+  TestProbe,
+}
+
+class ResultServiceProxySpec
+    extends ScalaTestWithActorTestKit
+    with UnitSpec
+    with ConfigTestData
+    with PowerFlowResultData
+    with ThreeWindingResultTestData {
+
+  "The ResultServiceProxy" should {
+
+    "answer request for results correctly without waiting for results" in {
+      val resultProvider = TestProbe[ResultMessage.Response]("listener")
+
+      val resultProxy = spawn(ResultServiceProxy(Seq.empty, startTime, 10))
+
+      resultProxy ! RequestResult(
+        Seq(dummyInputModel, inputModel),
+        3600L,
+        resultProvider.ref,
+      )
+
+      // no results, since the result proxy was not told to expect any results for this tick
+      resultProvider
+        .expectMessageType[ResultResponse]
+        .results shouldBe Map.empty
+    }
+
+    "answer request for results correctly with waiting for some results" in {
+      val resultProvider = TestProbe[ResultMessage.Response]("listener")
+
+      val resultProxy = spawn(ResultServiceProxy(Seq.empty, startTime, 10))
+
+      // tells the proxy to wait for the results of dummyInputModel for tick 3600L
+      resultProxy ! ExpectResult(Seq(dummyInputModel), 3600L)
+
+      resultProxy ! RequestResult(
+        Seq(dummyInputModel, inputModel),
+        3600L,
+        resultProvider.ref,
+      )
+
+      // still waiting for results
+      resultProvider.expectNoMessage()
+
+      resultProxy ! PowerFlowResultEvent(
+        Seq(dummyNodeResult),
+        Seq(dummySwitchResult),
+        Seq(dummyLineResult),
+        Seq(dummyTrafo2wResult),
+        Seq(resultA),
+      )
+
+      // no result for the three winding transformer, because the proxy is not told to wait for it and its partial results were not all received beforehand
+      resultProvider.expectMessageType[ResultResponse].results shouldBe Map(
+        dummyInputModel -> List(
+          dummyNodeResult,
+          dummySwitchResult,
+          dummyLineResult,
+          dummyTrafo2wResult,
+        )
+      )
+    }
+
+    "answer request for results correctly with waiting for some results with different receive order" in {
+      val resultProvider = TestProbe[ResultMessage.Response]("listener")
+
+      val resultProxy = spawn(ResultServiceProxy(Seq.empty, startTime, 10))
+
+      // tells the proxy to wait for the results of dummyInputModel for tick 3600L
+      resultProxy ! ExpectResult(Seq(dummyInputModel), 3600L)
+
+      resultProxy ! RequestResult(
+        Seq(dummyInputModel, inputModel),
+        3600L,
+        resultProvider.ref,
+      )
+
+      // receiving the three winding results for ports B and C beforehand
+      resultProxy ! PowerFlowResultEvent(
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq(resultB, resultC),
+      )
+
+      // still waiting for results
+      resultProvider.expectNoMessage()
+
+      resultProxy ! PowerFlowResultEvent(
+        Seq(dummyNodeResult),
+        Seq(dummySwitchResult),
+        Seq(dummyLineResult),
+        Seq(dummyTrafo2wResult),
+        Seq(resultA),
+      )
+
+      // receives the three winding result, because all partial results are present
+      resultProvider.expectMessageType[ResultResponse].results shouldBe Map(
+        dummyInputModel -> List(
+          dummyNodeResult,
+          dummySwitchResult,
+          dummyLineResult,
+          dummyTrafo2wResult,
+        ),
+        inputModel -> List(expected),
+      )
+    }
+
+    "answer request for results correctly with waiting for all results" in {
+      val resultProvider = TestProbe[ResultMessage.Response]("listener")
+
+      val resultProxy = spawn(ResultServiceProxy(Seq.empty, startTime, 10))
+
+      // tells the proxy to wait for the results of dummyInputModel for tick 3600L
+      resultProxy ! ExpectResult(Seq(dummyInputModel), 3600L)
+
+      // tells the proxy to also wait for the results of inputModel for tick 3600L
+      resultProxy ! ExpectResult(Seq(inputModel), 3600L)
+
+      resultProxy ! RequestResult(
+        Seq(dummyInputModel, inputModel),
+        3600L,
+        resultProvider.ref,
+      )
+
+      // still waiting for results
+      resultProvider.expectNoMessage()
+
+      resultProxy ! PowerFlowResultEvent(
+        Seq(dummyNodeResult),
+        Seq(dummySwitchResult),
+        Seq(dummyLineResult),
+        Seq(dummyTrafo2wResult),
+        Seq(resultA),
+      )
+
+      // still waiting for results
+      resultProvider.expectNoMessage()
+
+      // receiving the three winding results for ports B and C
+      resultProxy ! PowerFlowResultEvent(
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq(resultB, resultC),
+      )
+
+      // the three winding transformer result is included as well, because all of its partial results have been received
+      resultProvider.expectMessageType[ResultResponse].results shouldBe Map(
+        dummyInputModel -> List(
+          dummyNodeResult,
+          dummySwitchResult,
+          dummyLineResult,
+          dummyTrafo2wResult,
+        ),
+        inputModel -> List(expected),
+      )
+    }
+
+    "correctly handle grid result events" in {
+      val listener = TestProbe[ResultResponse]("listener")
+
+      val resultProxy =
+        spawn(ResultServiceProxy(Seq(listener.ref), startTime, 10))
+
+      resultProxy ! PowerFlowResultEvent(
+        Seq(dummyNodeResult),
+        Seq(dummySwitchResult),
+        Seq(dummyLineResult),
+        Seq(dummyTrafo2wResult),
+        Seq.empty,
+      )
+
+      // all results share the same uuid, therefore they are all grouped under this uuid
+      listener.expectMessageType[ResultResponse].results shouldBe Map(
+        dummyInputModel -> List(
+          dummyNodeResult,
+          dummySwitchResult,
+          dummyLineResult,
+          dummyTrafo2wResult,
+        )
+      )
+    }
+
+    "correctly handle three winding transformer result events" in {
+      val listener = TestProbe[ResultResponse]("listener")
+
+      val resultProxy =
+        spawn(ResultServiceProxy(Seq(listener.ref), startTime, 10))
+
+      // sending result for port A
+      resultProxy ! PowerFlowResultEvent(
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq(resultA),
+      )
+
+      // no message, because the three winding result is not complete
+      listener.expectNoMessage()
+
+      // sending result for port C
+      resultProxy ! PowerFlowResultEvent(
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq.empty,
+        Seq(resultC),
+      )
+
+      // no message, because the three winding result is not complete
+      listener.expectNoMessage()
+
+      // sending result for port B
+      resultProxy ! 
PowerFlowResultEvent( + Seq.empty, + Seq.empty, + Seq.empty, + Seq.empty, + Seq(resultB), + ) + + listener.expectMessageType[ResultResponse].results shouldBe Map( + inputModel -> List(expected) + ) + } + + "correctly handle participant result events" in { + val listener = TestProbe[ResultResponse]("listener") + + val resultProxy = + spawn(ResultServiceProxy(Seq(listener.ref), startTime, 10)) + + resultProxy ! ParticipantResultEvent(dummyPvResult) + + listener.expectMessageType[ResultResponse].results shouldBe Map( + dummyPvResult.getInputModel -> List(dummyPvResult) + ) + } + + } + +} diff --git a/src/test/scala/edu/ie3/simona/event/listener/ThreeWindingResultHandlingSpec.scala b/src/test/scala/edu/ie3/simona/service/results/ThreeWindingResultHandlingSpec.scala similarity index 95% rename from src/test/scala/edu/ie3/simona/event/listener/ThreeWindingResultHandlingSpec.scala rename to src/test/scala/edu/ie3/simona/service/results/ThreeWindingResultHandlingSpec.scala index 227b4ce9f5..1615462104 100644 --- a/src/test/scala/edu/ie3/simona/event/listener/ThreeWindingResultHandlingSpec.scala +++ b/src/test/scala/edu/ie3/simona/service/results/ThreeWindingResultHandlingSpec.scala @@ -4,9 +4,11 @@ * Research group Distribution grid planning and operation */ -package edu.ie3.simona.event.listener +package edu.ie3.simona.service.results import edu.ie3.simona.agent.grid.GridResultsSupport.PartialTransformer3wResult +import edu.ie3.simona.service.results.Transformer3wResultSupport +import edu.ie3.simona.service.results.Transformer3wResultSupport.AggregatedTransformer3wResult import edu.ie3.simona.test.common.UnitSpec import edu.ie3.util.TimeUtil import org.scalatest.prop.TableDrivenPropertyChecks @@ -19,8 +21,7 @@ import scala.util.{Failure, Success} class ThreeWindingResultHandlingSpec extends UnitSpec with TableDrivenPropertyChecks - with ThreeWindingResultTestData - with Transformer3wResultSupport { + with ThreeWindingResultTestData { "Handling three winding results" when { "assembling joint values" should { val mockAResult = PartialTransformer3wResult.PortA( diff --git a/src/test/scala/edu/ie3/simona/event/listener/ThreeWindingResultTestData.scala b/src/test/scala/edu/ie3/simona/service/results/ThreeWindingResultTestData.scala similarity index 97% rename from src/test/scala/edu/ie3/simona/event/listener/ThreeWindingResultTestData.scala rename to src/test/scala/edu/ie3/simona/service/results/ThreeWindingResultTestData.scala index de3ae98ec7..92011d19eb 100644 --- a/src/test/scala/edu/ie3/simona/event/listener/ThreeWindingResultTestData.scala +++ b/src/test/scala/edu/ie3/simona/service/results/ThreeWindingResultTestData.scala @@ -4,7 +4,7 @@ * Research group Distribution grid planning and operation */ -package edu.ie3.simona.event.listener +package edu.ie3.simona.service.results import edu.ie3.datamodel.models.StandardUnits import edu.ie3.datamodel.models.result.connector.Transformer3WResult diff --git a/src/test/scala/edu/ie3/simona/sim/SimonaSimSpec.scala b/src/test/scala/edu/ie3/simona/sim/SimonaSimSpec.scala index 1a21f0487a..3fbb7bfff0 100644 --- a/src/test/scala/edu/ie3/simona/sim/SimonaSimSpec.scala +++ b/src/test/scala/edu/ie3/simona/sim/SimonaSimSpec.scala @@ -12,15 +12,20 @@ import edu.ie3.simona.api.ExtSimAdapter import edu.ie3.simona.config.SimonaConfig import edu.ie3.simona.event.listener.{ DelayedStopHelper, - ResultEventListener, + ResultListener, RuntimeEventListener, } import edu.ie3.simona.event.{ResultEvent, RuntimeEvent} import edu.ie3.simona.main.RunSimona.SimonaEnded +import 
edu.ie3.simona.ontology.messages.ResultMessage.{ + RequestResult, + ResultResponse, +} import edu.ie3.simona.ontology.messages.{SchedulerMessage, ServiceMessage} import edu.ie3.simona.scheduler.TimeAdvancer import edu.ie3.simona.scheduler.core.Core.CoreFactory import edu.ie3.simona.scheduler.core.RegularSchedulerCore +import edu.ie3.simona.service.results.ResultServiceProxy import edu.ie3.simona.sim.SimonaSim.SimulationEnded import edu.ie3.simona.sim.SimonaSimSpec.* import edu.ie3.simona.sim.setup.{ExtSimSetupData, SimonaSetup} @@ -33,6 +38,7 @@ import org.apache.pekko.actor.typed.scaladsl.{ActorContext, Behaviors} import org.apache.pekko.actor.typed.{ActorRef, Behavior} import java.nio.file.Path +import java.time.ZonedDateTime import java.util.UUID class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { @@ -46,7 +52,7 @@ class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { val runtimeListener = TestProbe[RuntimeEventListener.Request]("runtimeEventListener") val resultListener = - TestProbe[ResultEventListener.Request]("resultEventListener") + TestProbe[ResultListener.Message]("resultEventListener") val timeAdvancer = TestProbe[TimeAdvancer.Request]("timeAdvancer") val extSimAdapter = TestProbe[ExtSimAdapter.Request]("extSimAdapter") @@ -60,6 +66,7 @@ class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { override def extSimulations( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], extSimPath: Option[Path], ): ExtSimSetupData = { // We cannot return a TestProbe ref here, @@ -74,6 +81,7 @@ class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { None, None, Seq.empty, + Seq.empty, ) } } @@ -113,7 +121,7 @@ class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { val runtimeListener = TestProbe[RuntimeEventListener.Request]("runtimeEventListener") val resultListener = - TestProbe[ResultEventListener.Request]("resultEventListener") + TestProbe[ResultListener.Message]("resultEventListener") val timeAdvancer = TestProbe[TimeAdvancer.Request]("timeAdvancer") val receiveThrowingActor = @@ -179,7 +187,7 @@ class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { val runtimeListener = TestProbe[RuntimeEventListener.Request]("runtimeEventListener") val resultListener = - TestProbe[ResultEventListener.Request]("resultEventListener") + TestProbe[ResultListener.Message]("resultEventListener") val timeAdvancer = TestProbe[TimeAdvancer.Request]("timeAdvancer") val receiveStoppingActor = @@ -242,7 +250,7 @@ class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { "RuntimeEventListener stops unexpectedly" in { val starter = TestProbe[SimonaEnded]("starter") val resultListener = - TestProbe[ResultEventListener.Request]("resultEventListener") + TestProbe[ResultListener.Message]("resultEventListener") val timeAdvancer = TestProbe[TimeAdvancer.Request]("timeAdvancer") val receiveThrowingActor = @@ -309,7 +317,7 @@ class SimonaSimSpec extends ScalaTestWithActorTestKit with UnitSpec { override def resultEventListener( context: ActorContext[?] 
- ): Seq[ActorRef[ResultEventListener.Request]] = + ): Seq[ActorRef[ResultListener.Message]] = throwTestException() } ), @@ -398,7 +406,7 @@ object SimonaSimSpec { */ class MockSetup( runtimeEventProbe: Option[ActorRef[RuntimeEventListener.Request]] = None, - resultEventProbe: Option[ActorRef[ResultEventListener.Request]] = None, + resultEventProbe: Option[ActorRef[ResultListener.Message]] = None, timeAdvancerProbe: Option[ActorRef[TimeAdvancer.Request]] = None, ) extends SimonaSetup with ConfigTestData { @@ -417,7 +425,7 @@ object SimonaSimSpec { override def resultEventListener( context: ActorContext[?] - ): Seq[ActorRef[ResultEventListener.Request]] = Seq( + ): Seq[ActorRef[ResultListener.Message]] = Seq( context.spawn( stoppableForwardMessage(resultEventProbe), uniqueName("resultEventForwarder"), @@ -431,6 +439,13 @@ object SimonaSimSpec { ): ActorRef[ServiceMessage] = context.spawn(empty, uniqueName("primaryService")) + override def resultServiceProxy( + context: ActorContext[?], + listeners: Seq[ActorRef[ResultResponse]], + simStartTime: ZonedDateTime, + ): ActorRef[ResultServiceProxy.Message] = + context.spawn(stoppableForwardMessage(None), uniqueName("resultService")) + override def weatherService( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], @@ -463,12 +478,12 @@ object SimonaSimSpec { override def gridAgents( context: ActorContext[?], environmentRefs: EnvironmentRefs, - resultEventListeners: Seq[ActorRef[ResultEvent]], ): Iterable[ActorRef[GridAgent.Message]] = Iterable.empty override def extSimulations( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], extSimPath: Option[Path], ): ExtSimSetupData = ExtSimSetupData.apply diff --git a/src/test/scala/edu/ie3/simona/sim/setup/ExtSimSetupDataSpec.scala b/src/test/scala/edu/ie3/simona/sim/setup/ExtSimSetupDataSpec.scala index a624f30a0e..2e1030d3e1 100644 --- a/src/test/scala/edu/ie3/simona/sim/setup/ExtSimSetupDataSpec.scala +++ b/src/test/scala/edu/ie3/simona/sim/setup/ExtSimSetupDataSpec.scala @@ -50,7 +50,8 @@ class ExtSimSetupDataSpec extends ScalaTestWithActorTestKit with UnitSpec { updated.primaryDataServices shouldBe Seq((connection, primaryRef)) updated.emDataService shouldBe None updated.evDataService shouldBe None - updated.extResultListeners shouldBe empty + updated.resultListeners shouldBe empty + updated.resultProviders shouldBe empty } "be updated with multiple ExtPrimaryDataConnection correctly" in { @@ -75,7 +76,8 @@ class ExtSimSetupDataSpec extends ScalaTestWithActorTestKit with UnitSpec { ) updated.emDataService shouldBe None updated.evDataService shouldBe None - updated.extResultListeners shouldBe empty + updated.resultListeners shouldBe empty + updated.resultProviders shouldBe empty } "be updated with an ExtInputDataConnection correctly" in { @@ -115,11 +117,12 @@ class ExtSimSetupDataSpec extends ScalaTestWithActorTestKit with UnitSpec { forAll(cases) { (connection, serviceRef, expected) => val updated = extSimSetupData.update(connection, serviceRef) - updated.extSimAdapters shouldBe expected.extSimAdapters - updated.primaryDataServices shouldBe expected.primaryDataServices + updated.extSimAdapters shouldBe empty + updated.primaryDataServices shouldBe empty updated.emDataService shouldBe expected.emDataService updated.evDataService shouldBe expected.evDataService - updated.extResultListeners shouldBe empty + updated.resultListeners shouldBe empty + updated.resultProviders shouldBe empty } } @@ -127,15 +130,18 @@ class ExtSimSetupDataSpec 
extends ScalaTestWithActorTestKit with UnitSpec { val extSimSetupData = ExtSimSetupData.apply val resultConnection = new ExtResultDataConnection(emptyUuidList) - val resultRef = TestProbe[ServiceMessage]("result_service").ref + val resultServiceProxyRef = + TestProbe[ServiceMessage]("resultServiceProxy").ref - val updated = extSimSetupData.update(resultConnection, resultRef) + val updated = + extSimSetupData.update(resultConnection, resultServiceProxyRef) updated.extSimAdapters shouldBe empty updated.primaryDataServices shouldBe empty updated.emDataService shouldBe None updated.evDataService shouldBe None - updated.extResultListeners shouldBe Seq(resultRef) + updated.resultListeners shouldBe empty + updated.resultProviders shouldBe Seq(resultServiceProxyRef) } "be updated with multiple different connections correctly" in { @@ -152,13 +158,13 @@ class ExtSimSetupDataSpec extends ScalaTestWithActorTestKit with UnitSpec { val emRef = TestProbe[ExtEmDataService.Message]("em_service").ref val resultConnection = new ExtResultDataConnection(emptyUuidList) - val resultRef = TestProbe[ServiceMessage]("result_service").ref + val resultServiceProxyRef = + TestProbe[ServiceMessage]("resultServiceProxy").ref val updated = extSimSetupData .update(primaryConnection, primaryRef) - .update(emConnection, emRef) .update(evConnection, evRef) - .update(resultConnection, resultRef) + .update(resultConnection, resultServiceProxyRef) updated.extSimAdapters shouldBe empty updated.primaryDataServices shouldBe Seq( @@ -169,7 +175,8 @@ class ExtSimSetupDataSpec extends ScalaTestWithActorTestKit with UnitSpec { ) updated.emDataService shouldBe Some(emRef) updated.evDataService shouldBe Some(evRef) - updated.extResultListeners shouldBe Seq(resultRef) + updated.resultListeners shouldBe empty + updated.resultProviders shouldBe Seq(resultServiceProxyRef) } "return evDataService correctly" in { @@ -206,5 +213,4 @@ class ExtSimSetupDataSpec extends ScalaTestWithActorTestKit with UnitSpec { } } } - } diff --git a/src/test/scala/edu/ie3/simona/sim/setup/SimonaSetupSpec.scala b/src/test/scala/edu/ie3/simona/sim/setup/SimonaSetupSpec.scala index 6819e6adc5..f9a01eb4f8 100644 --- a/src/test/scala/edu/ie3/simona/sim/setup/SimonaSetupSpec.scala +++ b/src/test/scala/edu/ie3/simona/sim/setup/SimonaSetupSpec.scala @@ -14,12 +14,17 @@ import edu.ie3.datamodel.models.input.connector.{ import edu.ie3.simona.agent.EnvironmentRefs import edu.ie3.simona.agent.grid.GridAgent import edu.ie3.simona.config.SimonaConfig -import edu.ie3.simona.event.listener.{ResultEventListener, RuntimeEventListener} +import edu.ie3.simona.event.listener.{ResultListener, RuntimeEventListener} import edu.ie3.simona.event.{ResultEvent, RuntimeEvent} import edu.ie3.simona.ontology.messages.{SchedulerMessage, ServiceMessage} +import edu.ie3.simona.ontology.messages.ResultMessage.{ + RequestResult, + ResultResponse, +} import edu.ie3.simona.scheduler.TimeAdvancer import edu.ie3.simona.scheduler.core.Core.CoreFactory import edu.ie3.simona.scheduler.core.RegularSchedulerCore +import edu.ie3.simona.service.results.ResultServiceProxy import edu.ie3.simona.sim.SimonaSim import edu.ie3.simona.test.common.model.grid.SubGridGateMokka import edu.ie3.simona.test.common.{ConfigTestData, UnitSpec} @@ -27,6 +32,7 @@ import org.apache.pekko.actor.typed.ActorRef import org.apache.pekko.actor.typed.scaladsl.ActorContext import java.nio.file.Path +import java.time.ZonedDateTime import java.util.UUID class SimonaSetupSpec @@ -49,7 +55,7 @@ class SimonaSetupSpec override def 
resultEventListener( context: ActorContext[?] - ): Seq[ActorRef[ResultEventListener.Request]] = + ): Seq[ActorRef[ResultListener.Message]] = throw new NotImplementedException("This is a dummy setup") override def primaryServiceProxy( @@ -60,6 +66,14 @@ class SimonaSetupSpec "This is a dummy setup" ) + override def resultServiceProxy( + context: ActorContext[?], + listeners: Seq[ActorRef[ResultResponse]], + simStartTime: ZonedDateTime, + ): ActorRef[ResultServiceProxy.Message] = throw new NotImplementedException( + "This is a dummy setup" + ) + override def weatherService( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], @@ -77,6 +91,7 @@ class SimonaSetupSpec override def extSimulations( context: ActorContext[?], scheduler: ActorRef[SchedulerMessage], + resultProxy: ActorRef[RequestResult], extSimPath: Option[Path], ): ExtSimSetupData = throw new NotImplementedException( "This is a dummy setup" @@ -101,7 +116,6 @@ class SimonaSetupSpec override def gridAgents( context: ActorContext[?], environmentRefs: EnvironmentRefs, - resultEventListeners: Seq[ActorRef[ResultEvent]], ): Iterable[ActorRef[GridAgent.Message]] = throw new NotImplementedException("This is a dummy setup") diff --git a/src/test/scala/edu/ie3/simona/test/common/ConfigTestData.scala b/src/test/scala/edu/ie3/simona/test/common/ConfigTestData.scala index 246548e7d2..c77fe63717 100644 --- a/src/test/scala/edu/ie3/simona/test/common/ConfigTestData.scala +++ b/src/test/scala/edu/ie3/simona/test/common/ConfigTestData.scala @@ -9,6 +9,8 @@ package edu.ie3.simona.test.common import com.typesafe.config.{Config, ConfigFactory} import edu.ie3.simona.config.SimonaConfig +import java.time.ZonedDateTime + /** Simple (empty) configuration data. Furthermore, it would make sense to * implement another class which reads a config and provides config based * values in the future. @@ -151,4 +153,5 @@ trait ConfigTestData { ) protected val simonaConfig: SimonaConfig = SimonaConfig(typesafeConfig) + protected val startTime: ZonedDateTime = simonaConfig.simona.time.simStartTime } diff --git a/src/test/scala/edu/ie3/simona/test/common/UnitSpec.scala b/src/test/scala/edu/ie3/simona/test/common/UnitSpec.scala index 5fe4e65f4a..52adb12213 100644 --- a/src/test/scala/edu/ie3/simona/test/common/UnitSpec.scala +++ b/src/test/scala/edu/ie3/simona/test/common/UnitSpec.scala @@ -35,7 +35,6 @@ trait UnitSpec with SquantsMatchers with DoubleMatchers with AnyWordSpecLike - with LogCapturing with OptionValues with Inside with Inspectors diff --git a/src/test/scala/edu/ie3/simona/test/common/input/EmCommunicationTestData.scala b/src/test/scala/edu/ie3/simona/test/common/input/EmCommunicationTestData.scala new file mode 100644 index 0000000000..fd585d133b --- /dev/null +++ b/src/test/scala/edu/ie3/simona/test/common/input/EmCommunicationTestData.scala @@ -0,0 +1,166 @@ +/* + * © 2025. 
TU Dortmund University, + * Institute of Energy Systems, Energy Efficiency and Energy Economics, + * Research group Distribution grid planning and operation + */ + +package edu.ie3.simona.test.common.input + +import edu.ie3.datamodel.models.input.system.`type`.StorageTypeInput +import edu.ie3.datamodel.models.input.system.characteristic.ReactivePowerCharacteristic +import edu.ie3.datamodel.models.input.system.{LoadInput, PvInput, StorageInput} +import edu.ie3.datamodel.models.input.{EmInput, NodeInput} +import edu.ie3.datamodel.models.profile.BdewStandardLoadProfile +import edu.ie3.datamodel.models.voltagelevels.GermanVoltageLevelUtils +import edu.ie3.simona.agent.participant.ParticipantAgentInit.SimulationParameters +import edu.ie3.simona.config.RuntimeConfig.EmRuntimeConfig +import edu.ie3.simona.config.SimonaConfig +import edu.ie3.simona.event.notifier.NotifierConfig +import edu.ie3.simona.test.common.DefaultTestData +import edu.ie3.util.TimeUtil +import edu.ie3.util.geo.GeoUtils +import edu.ie3.util.quantities.QuantityUtils.* +import squants.Each + +import java.time.ZonedDateTime +import java.util.UUID + +trait EmCommunicationTestData extends DefaultTestData { + + protected given simulationStart: ZonedDateTime = + TimeUtil.withDefaults.toZonedDateTime("2020-01-01T00:00:00Z") + protected given simulationEnd: ZonedDateTime = + simulationStart.plusHours(2) + + protected val simonaConfig: SimonaConfig = createSimonaConfig() + + protected val outputConfig: NotifierConfig = NotifierConfig( + simulationResultInfo = true, + powerRequestReply = false, + flexResult = true, // also test FlexOptionsResult if EM-controlled + ) + + protected given SimulationParameters = SimulationParameters( + expectedPowerRequestTick = Long.MaxValue, + requestVoltageDeviationTolerance = Each(1e-14d), + simulationStart = simulationStart, + simulationEnd = simulationEnd, + ) + + protected val modelConfig: EmRuntimeConfig = EmRuntimeConfig( + calculateMissingReactivePowerWithModel = false, + scaling = 1, + uuids = List.empty, + aggregateFlex = "SELF_OPT_EXCL_REG", + curtailRegenerative = false, + ) + + val node3 = new NodeInput( + UUID.fromString("33f29587-f63e-45b7-960b-037bda37a3cb"), + "Node_3", + 1.0.asPu, + false, + GeoUtils.buildPoint(51.4843281, 7.4116482), + GermanVoltageLevelUtils.LV, + 2, + ) + + val node4 = new NodeInput( + UUID.fromString("401f37f8-6f2c-4564-bc78-6736cb9cbf8d"), + "Node_4", + 1.0.asPu, + false, + GeoUtils.buildPoint(51.4843281, 7.4116482), + GermanVoltageLevelUtils.LV, + 2, + ) + + val emSup = new EmInput( + UUID.fromString("858f3d3d-4189-49cd-9fe5-3cd49b88dc70"), + "EM_SUP", + "PROPORTIONAL", + null, + ) + + val emNode3 = new EmInput( + UUID.fromString("fd1a8de9-722a-4304-8799-e1e976d9979c"), + "emNode3", + "PRIORITIZED", + emSup, + ) + + val emNode4 = new EmInput( + UUID.fromString("ff0b995a-86ff-4f4d-987e-e475a64f2180"), + "emNode4", + "PRIORITIZED", + emSup, + ) + + val pvNode3 = new PvInput( + UUID.fromString("9d7cd8e2-d859-4f4f-9c01-abba06ef2e2c"), + "PV_Node_3", + node3, + ReactivePowerCharacteristic.parse("cosPhiFixed:{(0.0,0.9)}"), + emNode3, + 0.20000000298023224, + -14.803051948547363.asDegreeGeom, + 96.0.asPercent, + 42.391395568847656.asDegreeGeom, + 0.8999999761581421, + 1.0, + false, + 10.0.asKiloVoltAmpere, + 0.8999999761581421, + ) + + val pvNode4 = new PvInput( + UUID.fromString("a1eb7fc1-3bee-4b65-a387-ef3046644bf0"), + "PV_Node_4", + node4, + ReactivePowerCharacteristic.parse("cosPhiFixed:{(0.0,0.9)}"), + emNode4, + 0.20000000298023224, + 
-8.999500274658203.asDegreeGeom, + 98.0.asPercent, + 37.14517593383789.asDegreeGeom, + 0.8999999761581421, + 1.0, + false, + 10.0.asKiloVoltAmpere, + 0.8999999761581421, + ) + + val storageType = new StorageTypeInput( + UUID.fromString("95d4c980-d9e1-4813-9f2a-b0942488a570"), + "Typ_1", + 0.0.asEuro, + 0.65.asEuroPerKiloWattHour, + 16.0.asKiloWattHour, + 4.166666666666667.asKiloVoltAmpere, + 0.96, + 4.0.asKiloWatt, + 1.0.asPercentPerHour, + 93.0.asPercent, + ) + + val storageInput: StorageInput = new StorageInput( + UUID.fromString("a2a92cfd-3492-465f-9587-e789f4620af8"), + "Storage_Node_3", + node3, + ReactivePowerCharacteristic.parse("cosPhiFixed:{(0.0,0.98)}"), + emNode3, + storageType, + ) + + val loadInput: LoadInput = new LoadInput( + UUID.fromString("283a1252-a774-4b04-bfcf-fe8879065982"), + "Load_Node_4", + node4, + ReactivePowerCharacteristic.parse("cosPhiFixed:{(0.0,1.0)}"), + emNode4, + BdewStandardLoadProfile.H0, + 4000.0.asKiloWattHour, + 2.3157899379730225.asKiloVoltAmpere, + 0.949999988079071, + ) +} diff --git a/src/test/scala/edu/ie3/simona/test/common/model/grid/DbfsTestGrid.scala b/src/test/scala/edu/ie3/simona/test/common/model/grid/DbfsTestGrid.scala index 48d46928a3..c40233138f 100644 --- a/src/test/scala/edu/ie3/simona/test/common/model/grid/DbfsTestGrid.scala +++ b/src/test/scala/edu/ie3/simona/test/common/model/grid/DbfsTestGrid.scala @@ -413,4 +413,13 @@ trait DbfsTestGrid extends SubGridGateMokka { subGridGates, ) } + + protected val assetsHv: Seq[UUID] = + hvGridContainer.allEntitiesAsList.asScala.map(_.getUuid).toSeq + protected val assetsHvPF: Seq[UUID] = + hvGridContainerPF.allEntitiesAsList.asScala.map(_.getUuid).toSeq + + protected val assetsEhv: Seq[UUID] = + ehvGridContainer.allEntitiesAsList.asScala.map(_.getUuid).toSeq + } diff --git a/src/test/scala/edu/ie3/simona/test/common/result/CongestedComponentsTestData.scala b/src/test/scala/edu/ie3/simona/test/common/result/CongestedComponentsTestData.scala index 1f938f4da3..fceb3acf1e 100644 --- a/src/test/scala/edu/ie3/simona/test/common/result/CongestedComponentsTestData.scala +++ b/src/test/scala/edu/ie3/simona/test/common/result/CongestedComponentsTestData.scala @@ -20,7 +20,6 @@ import edu.ie3.simona.model.grid.* import edu.ie3.simona.test.common.ConfigTestData import edu.ie3.simona.test.common.input.NodeInputTestData import edu.ie3.simona.test.common.model.grid.DbfsTestGrid -import edu.ie3.util.TimeUtil import edu.ie3.util.quantities.QuantityUtils.* import squants.electro.Kilovolts import squants.energy.Megawatts @@ -34,10 +33,6 @@ trait CongestedComponentsTestData with NodeInputTestData with DbfsTestGrid { - val startTime: ZonedDateTime = TimeUtil.withDefaults.toZonedDateTime( - simonaConfig.simona.time.startDateTime - ) - val endTime: ZonedDateTime = startTime.plusHours(2) val trafoType3W = new Transformer3WTypeInput( diff --git a/src/test/scala/edu/ie3/simona/test/common/result/PowerFlowResultData.scala b/src/test/scala/edu/ie3/simona/test/common/result/PowerFlowResultData.scala index f4ea226071..b9a71382e6 100644 --- a/src/test/scala/edu/ie3/simona/test/common/result/PowerFlowResultData.scala +++ b/src/test/scala/edu/ie3/simona/test/common/result/PowerFlowResultData.scala @@ -7,9 +7,8 @@ package edu.ie3.simona.test.common.result import java.util.UUID - import edu.ie3.datamodel.models.StandardUnits -import edu.ie3.datamodel.models.result.NodeResult +import edu.ie3.datamodel.models.result.{NodeResult, ResultEntity} import edu.ie3.datamodel.models.result.connector.{ LineResult, SwitchResult, @@ -25,9 
+24,15 @@ trait PowerFlowResultData { private val dummyTime = TimeUtil.withDefaults.toZonedDateTime("2020-01-30T17:26:44Z") - private val dummyInputModel = + protected val dummyInputModel: UUID = UUID.fromString("e5ac84d3-c7a5-4870-a42d-837920aec9bb") + given Conversion[ResultEntity, Map[UUID, Iterable[ResultEntity]]] = + (res: ResultEntity) => Map(res.getInputModel -> Iterable(res)) + + given Conversion[Iterable[ResultEntity], Map[UUID, Iterable[ResultEntity]]] = + (res: Iterable[ResultEntity]) => res.groupBy(_.getInputModel) + val dummyPvResult = new PvResult( dummyTime, dummyInputModel,