diff --git a/rclcpp/CMakeLists.txt b/rclcpp/CMakeLists.txt index 6fd6c4586b..a294911c49 100644 --- a/rclcpp/CMakeLists.txt +++ b/rclcpp/CMakeLists.txt @@ -66,6 +66,8 @@ set(${PROJECT_NAME}_SRCS src/rclcpp/executors/static_executor_entities_collector.cpp src/rclcpp/executors/static_single_threaded_executor.cpp src/rclcpp/expand_topic_or_service_name.cpp + src/rclcpp/experimental/executors/events_executor/events_executor.cpp + src/rclcpp/experimental/timers_manager.cpp src/rclcpp/future_return_code.cpp src/rclcpp/generic_publisher.cpp src/rclcpp/generic_subscription.cpp diff --git a/rclcpp/include/rclcpp/executors/executor_notify_waitable.hpp b/rclcpp/include/rclcpp/executors/executor_notify_waitable.hpp index 88158952d9..eee8e59793 100644 --- a/rclcpp/include/rclcpp/executors/executor_notify_waitable.hpp +++ b/rclcpp/include/rclcpp/executors/executor_notify_waitable.hpp @@ -88,6 +88,25 @@ class ExecutorNotifyWaitable : public rclcpp::Waitable std::shared_ptr take_data() override; + /// Take the data from an entity ID so that it can be consumed with `execute`. + /** + * \param[in] id ID of the entity to take data from. + * \return If available, data to be used, otherwise nullptr + * \sa rclcpp::Waitable::take_data_by_entity_id + */ + RCLCPP_PUBLIC + std::shared_ptr + take_data_by_entity_id(size_t id) override; + + /// Set a callback to be called whenever the waitable becomes ready. + /** + * \param[in] callback callback to set + * \sa rclcpp::Waitable::set_on_ready_callback + */ + RCLCPP_PUBLIC + void + set_on_ready_callback(std::function callback) override; + /// Add a guard condition to be waited on. /** * \param[in] guard_condition The guard condition to add. @@ -96,13 +115,21 @@ class ExecutorNotifyWaitable : public rclcpp::Waitable void add_guard_condition(rclcpp::GuardCondition::WeakPtr guard_condition); + /// Unset any callback registered via set_on_ready_callback. + /** + * \sa rclcpp::Waitable::clear_on_ready_callback + */ + RCLCPP_PUBLIC + void + clear_on_ready_callback() override; + /// Remove a guard condition from being waited on. /** - * \param[in] guard_condition The guard condition to remove. + * \param[in] weak_guard_condition The guard condition to remove. */ RCLCPP_PUBLIC void - remove_guard_condition(rclcpp::GuardCondition::WeakPtr guard_condition); + remove_guard_condition(rclcpp::GuardCondition::WeakPtr weak_guard_condition); /// Get the number of ready guard_conditions /** @@ -118,6 +145,8 @@ class ExecutorNotifyWaitable : public rclcpp::Waitable std::mutex guard_condition_mutex_; + std::function on_ready_callback_; + /// The collection of guard conditions to be waited on. std::set> notify_guard_conditions_; diff --git a/rclcpp/include/rclcpp/experimental/executors/events_executor/events_executor.hpp b/rclcpp/include/rclcpp/experimental/executors/events_executor/events_executor.hpp new file mode 100644 index 0000000000..dd5b1ebe63 --- /dev/null +++ b/rclcpp/include/rclcpp/experimental/executors/events_executor/events_executor.hpp @@ -0,0 +1,294 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_EXECUTOR_HPP_ +#define RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_EXECUTOR_HPP_ + +#include +#include +#include +#include + +#include "rclcpp/executor.hpp" +#include "rclcpp/executors/executor_entities_collection.hpp" +#include "rclcpp/executors/executor_entities_collector.hpp" +#include "rclcpp/experimental/executors/events_executor/events_executor_event_types.hpp" +#include "rclcpp/experimental/executors/events_executor/events_queue.hpp" +#include "rclcpp/experimental/executors/events_executor/simple_events_queue.hpp" +#include "rclcpp/experimental/timers_manager.hpp" +#include "rclcpp/node.hpp" + +namespace rclcpp +{ +namespace experimental +{ +namespace executors +{ + +/// Events executor implementation +/** + * This executor uses an events queue and a timers manager to execute entities from its + * associated nodes and callback groups. + * ROS 2 entities allow to set callback functions that are invoked when the entity is triggered + * or has work to do. The events-executor sets these callbacks such that they push an + * event into its queue. + * + * This executor tries to reduce as much as possible the amount of maintenance operations. + * This allows to use customized `EventsQueue` classes to achieve different goals such + * as very low CPU usage, bounded memory requirement, determinism, etc. + * + * The executor uses a weak ownership model and it locks entities only while executing + * their related events. + * + * To run this executor: + * rclcpp::experimental::executors::EventsExecutor executor; + * executor.add_node(node); + * executor.spin(); + * executor.remove_node(node); + */ +class EventsExecutor : public rclcpp::Executor +{ + friend class EventsExecutorEntitiesCollector; + +public: + RCLCPP_SMART_PTR_DEFINITIONS(EventsExecutor) + + /// Default constructor. See the default constructor for Executor. + /** + * \param[in] events_queue The queue used to store events. + * \param[in] execute_timers_separate_thread If true, timers are executed in a separate + * thread. If false, timers are executed in the same thread as all other entities. + * \param[in] options Options used to configure the executor. + */ + RCLCPP_PUBLIC + explicit EventsExecutor( + rclcpp::experimental::executors::EventsQueue::UniquePtr events_queue = std::make_unique< + rclcpp::experimental::executors::SimpleEventsQueue>(), + bool execute_timers_separate_thread = false, + const rclcpp::ExecutorOptions & options = rclcpp::ExecutorOptions()); + + /// Default destructor. + RCLCPP_PUBLIC + virtual ~EventsExecutor(); + + /// Events executor implementation of spin. + /** + * This function will block until work comes in, execute it, and keep blocking. + * It will only be interrupted by a CTRL-C (managed by the global signal handler). + * \throws std::runtime_error when spin() called while already spinning + */ + RCLCPP_PUBLIC + void + spin() override; + + /// Events executor implementation of spin some + /** + * This non-blocking function will execute the timers and events + * that were ready when this API was called, until timeout or no + * more work available. New ready-timers/events arrived while + * executing work, won't be taken into account here. + * + * Example: + * while(condition) { + * spin_some(); + * sleep(); // User should have some sync work or + * // sleep to avoid a 100% CPU usage + * } + */ + RCLCPP_PUBLIC + void + spin_some(std::chrono::nanoseconds max_duration = std::chrono::nanoseconds(0)) override; + + /// Events executor implementation of spin all + /** + * This non-blocking function will execute timers and events + * until timeout or no more work available. If new ready-timers/events + * arrive while executing work available, they will be executed + * as long as the timeout hasn't expired. + * + * Example: + * while(condition) { + * spin_all(); + * sleep(); // User should have some sync work or + * // sleep to avoid a 100% CPU usage + * } + */ + RCLCPP_PUBLIC + void + spin_all(std::chrono::nanoseconds max_duration) override; + + /// Add a node to the executor. + /** + * \sa rclcpp::Executor::add_node + */ + RCLCPP_PUBLIC + void + add_node( + rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, + bool notify = true) override; + + /// Convenience function which takes Node and forwards NodeBaseInterface. + /** + * \sa rclcpp::EventsExecutor::add_node + */ + RCLCPP_PUBLIC + void + add_node(std::shared_ptr node_ptr, bool notify = true) override; + + /// Remove a node from the executor. + /** + * \sa rclcpp::Executor::remove_node + */ + RCLCPP_PUBLIC + void + remove_node( + rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, + bool notify = true) override; + + /// Convenience function which takes Node and forwards NodeBaseInterface. + /** + * \sa rclcpp::Executor::remove_node + */ + RCLCPP_PUBLIC + void + remove_node(std::shared_ptr node_ptr, bool notify = true) override; + + /// Add a callback group to an executor. + /** + * \sa rclcpp::Executor::add_callback_group + */ + RCLCPP_PUBLIC + void + add_callback_group( + rclcpp::CallbackGroup::SharedPtr group_ptr, + rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, + bool notify = true) override; + + /// Remove callback group from the executor + /** + * \sa rclcpp::Executor::remove_callback_group + */ + RCLCPP_PUBLIC + void + remove_callback_group( + rclcpp::CallbackGroup::SharedPtr group_ptr, + bool notify = true) override; + + /// Get callback groups that belong to executor. + /** + * \sa rclcpp::Executor::get_all_callback_groups() + */ + RCLCPP_PUBLIC + std::vector + get_all_callback_groups() override; + + /// Get callback groups that belong to executor. + /** + * \sa rclcpp::Executor::get_manually_added_callback_groups() + */ + RCLCPP_PUBLIC + std::vector + get_manually_added_callback_groups() override; + + /// Get callback groups that belong to executor. + /** + * \sa rclcpp::Executor::get_automatically_added_callback_groups_from_nodes() + */ + RCLCPP_PUBLIC + std::vector + get_automatically_added_callback_groups_from_nodes() override; + +protected: + /// Internal implementation of spin_once + RCLCPP_PUBLIC + void + spin_once_impl(std::chrono::nanoseconds timeout) override; + + /// Internal implementation of spin_some + RCLCPP_PUBLIC + void + spin_some_impl(std::chrono::nanoseconds max_duration, bool exhaustive); + +private: + RCLCPP_DISABLE_COPY(EventsExecutor) + + /// Execute a provided executor event if its associated entities are available + void + execute_event(const ExecutorEvent & event); + + /// Collect entities from callback groups and refresh the current collection with them + void + refresh_current_collection_from_callback_groups(); + + /// Refresh the current collection using the provided new_collection + void + refresh_current_collection(const rclcpp::executors::ExecutorEntitiesCollection & new_collection); + + /// Create a listener callback function for the provided entity + std::function + create_entity_callback(void * entity_key, ExecutorEventType type); + + /// Create a listener callback function for the provided waitable entity + std::function + create_waitable_callback(const rclcpp::Waitable * waitable_id); + + /// Utility to add the notify waitable to an entities collection + void + add_notify_waitable_to_collection( + rclcpp::executors::ExecutorEntitiesCollection::WaitableCollection & collection); + + /// Searches for the provided entity_id in the collection and returns the entity if valid + template + typename CollectionType::EntitySharedPtr + retrieve_entity(typename CollectionType::Key entity_id, CollectionType & collection) + { + // Check if the entity_id is in the collection + auto it = collection.find(entity_id); + if (it == collection.end()) { + return nullptr; + } + + // Check if the entity associated with the entity_id is valid + // and remove it from the collection if it isn't + auto entity = it->second.entity.lock(); + if (!entity) { + collection.erase(it); + } + + // Return the retrieved entity (this can be a nullptr if the entity was not valid) + return entity; + } + + /// Queue where entities can push events + rclcpp::experimental::executors::EventsQueue::UniquePtr events_queue_; + + std::shared_ptr entities_collector_; + std::shared_ptr notify_waitable_; + + /// Mutex to protect the current_entities_collection_ + std::recursive_mutex collection_mutex_; + std::shared_ptr current_entities_collection_; + + /// Flag used to reduce the number of unnecessary waitable events + std::atomic notify_waitable_event_pushed_ {false}; + + /// Timers manager used to track and/or execute associated timers + std::shared_ptr timers_manager_; +}; + +} // namespace executors +} // namespace experimental +} // namespace rclcpp + +#endif // RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_EXECUTOR_HPP_ diff --git a/rclcpp/include/rclcpp/experimental/executors/events_executor/events_executor_event_types.hpp b/rclcpp/include/rclcpp/experimental/executors/events_executor/events_executor_event_types.hpp new file mode 100644 index 0000000000..79c2c5f905 --- /dev/null +++ b/rclcpp/include/rclcpp/experimental/executors/events_executor/events_executor_event_types.hpp @@ -0,0 +1,46 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_EXECUTOR_EVENT_TYPES_HPP_ +#define RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_EXECUTOR_EVENT_TYPES_HPP_ + +namespace rclcpp +{ +namespace experimental +{ +namespace executors +{ + +enum ExecutorEventType +{ + CLIENT_EVENT, + SUBSCRIPTION_EVENT, + SERVICE_EVENT, + TIMER_EVENT, + WAITABLE_EVENT +}; + +struct ExecutorEvent +{ + const void * entity_key; + int waitable_data; + ExecutorEventType type; + size_t num_events; +}; + +} // namespace executors +} // namespace experimental +} // namespace rclcpp + +#endif // RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_EXECUTOR_EVENT_TYPES_HPP_ diff --git a/rclcpp/include/rclcpp/experimental/executors/events_executor/events_queue.hpp b/rclcpp/include/rclcpp/experimental/executors/events_executor/events_queue.hpp new file mode 100644 index 0000000000..24282d6027 --- /dev/null +++ b/rclcpp/include/rclcpp/experimental/executors/events_executor/events_queue.hpp @@ -0,0 +1,100 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_QUEUE_HPP_ +#define RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_QUEUE_HPP_ + +#include + +#include "rclcpp/macros.hpp" +#include "rclcpp/visibility_control.hpp" + +#include "rclcpp/experimental/executors/events_executor/events_executor_event_types.hpp" + +namespace rclcpp +{ +namespace experimental +{ +namespace executors +{ + +/** + * @brief This abstract class can be used to implement different types of queues + * where `ExecutorEvent` can be stored. + * The derived classes should choose which underlying container to use and + * the strategy for pushing and popping events. + * For example a queue implementation may be bounded or unbounded and have + * different pruning strategies. + * Implementations may or may not check the validity of events and decide how to handle + * the situation where an event is not valid anymore (e.g. a subscription history cache overruns) + */ +class EventsQueue +{ +public: + RCLCPP_SMART_PTR_ALIASES_ONLY(EventsQueue) + + RCLCPP_PUBLIC + EventsQueue() = default; + + /** + * @brief Destruct the object. + */ + RCLCPP_PUBLIC + virtual ~EventsQueue() = default; + + /** + * @brief push event into the queue + * @param event The event to push into the queue + */ + RCLCPP_PUBLIC + virtual + void + enqueue(const rclcpp::experimental::executors::ExecutorEvent & event) = 0; + + /** + * @brief Extracts an event from the queue, eventually waiting until timeout + * if none is available. + * @return true if event has been found, false if timeout + */ + RCLCPP_PUBLIC + virtual + bool + dequeue( + rclcpp::experimental::executors::ExecutorEvent & event, + std::chrono::nanoseconds timeout = std::chrono::nanoseconds::max()) = 0; + + /** + * @brief Test whether queue is empty + * @return true if the queue's size is 0, false otherwise. + */ + RCLCPP_PUBLIC + virtual + bool + empty() const = 0; + + /** + * @brief Returns the number of elements in the queue. + * @return the number of elements in the queue. + */ + RCLCPP_PUBLIC + virtual + size_t + size() const = 0; +}; + +} // namespace executors +} // namespace experimental +} // namespace rclcpp + +#endif // RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__EVENTS_QUEUE_HPP_ diff --git a/rclcpp/include/rclcpp/experimental/executors/events_executor/simple_events_queue.hpp b/rclcpp/include/rclcpp/experimental/executors/events_executor/simple_events_queue.hpp new file mode 100644 index 0000000000..7b18a95fcf --- /dev/null +++ b/rclcpp/include/rclcpp/experimental/executors/events_executor/simple_events_queue.hpp @@ -0,0 +1,134 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__SIMPLE_EVENTS_QUEUE_HPP_ +#define RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__SIMPLE_EVENTS_QUEUE_HPP_ + +#include +#include +#include +#include + +#include "rclcpp/experimental/executors/events_executor/events_queue.hpp" + +namespace rclcpp +{ +namespace experimental +{ +namespace executors +{ + +/** + * @brief This class implements an EventsQueue as a simple wrapper around a std::queue. + * It does not perform any checks about the size of queue, which can grow + * unbounded without being pruned. + * The simplicity of this implementation makes it suitable for optimizing CPU usage. + */ +class SimpleEventsQueue : public EventsQueue +{ +public: + RCLCPP_PUBLIC + ~SimpleEventsQueue() override = default; + + /** + * @brief enqueue event into the queue + * Thread safe + * @param event The event to enqueue into the queue + */ + RCLCPP_PUBLIC + void + enqueue(const rclcpp::experimental::executors::ExecutorEvent & event) override + { + rclcpp::experimental::executors::ExecutorEvent single_event = event; + single_event.num_events = 1; + { + std::unique_lock lock(mutex_); + for (size_t ev = 0; ev < event.num_events; ev++) { + event_queue_.push(single_event); + } + } + events_queue_cv_.notify_one(); + } + + /** + * @brief waits for an event until timeout, gets a single event + * Thread safe + * @return true if event, false if timeout + */ + RCLCPP_PUBLIC + bool + dequeue( + rclcpp::experimental::executors::ExecutorEvent & event, + std::chrono::nanoseconds timeout = std::chrono::nanoseconds::max()) override + { + std::unique_lock lock(mutex_); + + // Initialize to true because it's only needed if we have a valid timeout + bool has_data = true; + if (timeout != std::chrono::nanoseconds::max()) { + has_data = + events_queue_cv_.wait_for(lock, timeout, [this]() {return !event_queue_.empty();}); + } else { + events_queue_cv_.wait(lock, [this]() {return !event_queue_.empty();}); + } + + if (has_data) { + event = event_queue_.front(); + event_queue_.pop(); + return true; + } + + return false; + } + + /** + * @brief Test whether queue is empty + * Thread safe + * @return true if the queue's size is 0, false otherwise. + */ + RCLCPP_PUBLIC + bool + empty() const override + { + std::unique_lock lock(mutex_); + return event_queue_.empty(); + } + + /** + * @brief Returns the number of elements in the queue. + * Thread safe + * @return the number of elements in the queue. + */ + RCLCPP_PUBLIC + size_t + size() const override + { + std::unique_lock lock(mutex_); + return event_queue_.size(); + } + +private: + // The underlying queue implementation + std::queue event_queue_; + // Mutex to protect read/write access to the queue + mutable std::mutex mutex_; + // Variable used to notify when an event is added to the queue + std::condition_variable events_queue_cv_; +}; + +} // namespace executors +} // namespace experimental +} // namespace rclcpp + +#endif // RCLCPP__EXPERIMENTAL__EXECUTORS__EVENTS_EXECUTOR__SIMPLE_EVENTS_QUEUE_HPP_ diff --git a/rclcpp/include/rclcpp/experimental/timers_manager.hpp b/rclcpp/include/rclcpp/experimental/timers_manager.hpp new file mode 100644 index 0000000000..197397e8b8 --- /dev/null +++ b/rclcpp/include/rclcpp/experimental/timers_manager.hpp @@ -0,0 +1,555 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef RCLCPP__EXPERIMENTAL__TIMERS_MANAGER_HPP_ +#define RCLCPP__EXPERIMENTAL__TIMERS_MANAGER_HPP_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rclcpp/context.hpp" +#include "rclcpp/timer.hpp" + +namespace rclcpp +{ +namespace experimental +{ + +/** + * @brief This class provides a way for storing and executing timer objects. + * It provides APIs to suit the needs of different applications and execution models. + * All public APIs provided by this class are thread-safe. + * + * Timers management + * This class provides APIs to add/remove timers to/from an internal storage. + * It keeps a list of weak pointers from added timers, and locks them only when + * they need to be executed or modified. + * Timers are kept ordered in a binary-heap priority queue. + * Calls to add/remove APIs will temporarily block the execution of the timers and + * will require to reorder the internal priority queue. + * Because of this, they have a not-negligible impact on the performance. + * + * Timers execution + * The most efficient use of this class consists in letting a TimersManager object + * to spawn a thread where timers are monitored and optionally executed. + * This can be controlled via the `start` and `stop` methods. + * Ready timers can either be executed or an on_ready_callback can be used to notify + * other entities that they are ready and need to be executed. + * Other APIs allow to directly execute a given timer. + * + * This class assumes that the `execute_callback()` API of the stored timers is never + * called by other entities, but it can only be called from here. + * If this assumption is not respected, the heap property may be invalidated, + * so timers may be executed out of order, without this object noticing it. + * + */ +class TimersManager +{ +public: + RCLCPP_SMART_PTR_DEFINITIONS_NOT_COPYABLE(TimersManager) + + /** + * @brief Construct a new TimersManager object + * + * @param context custom context to be used. + * Shared ownership of the context is held until destruction. + * @param on_ready_callback The timers on ready callback. The presence of this function + * indicates what to do when the TimersManager is running and a timer becomes ready. + * The TimersManager is considered "running" when the `start` method has been called. + * If it's callable, it will be invoked instead of the timer callback. + * If it's not callable, then the TimersManager will + * directly execute timers when they are ready. + * All the methods that execute a given timer (e.g. `execute_head_timer` + * or `execute_ready_timer`) without the TimersManager being `running`, i.e. + * without actually explicitly waiting for the timer to become ready, will ignore this + * callback. + */ + RCLCPP_PUBLIC + TimersManager( + std::shared_ptr context, + std::function on_ready_callback = nullptr); + + /** + * @brief Destruct the TimersManager object making sure to stop thread and release memory. + */ + RCLCPP_PUBLIC + ~TimersManager(); + + /** + * @brief Adds a new timer to the storage, maintaining weak ownership of it. + * Function is thread safe and it can be called regardless of the state of the timers thread. + * + * @param timer the timer to add. + * @throws std::invalid_argument if timer is a nullptr. + */ + RCLCPP_PUBLIC + void add_timer(rclcpp::TimerBase::SharedPtr timer); + + /** + * @brief Remove a single timer from the object storage. + * Will do nothing if the timer was not being stored here. + * Function is thread safe and it can be called regardless of the state of the timers thread. + * + * @param timer the timer to remove. + */ + RCLCPP_PUBLIC + void remove_timer(rclcpp::TimerBase::SharedPtr timer); + + /** + * @brief Remove all the timers stored in the object. + * Function is thread safe and it can be called regardless of the state of the timers thread. + */ + RCLCPP_PUBLIC + void clear(); + + /** + * @brief Starts a thread that takes care of executing the timers stored in this object. + * Function will throw an error if the timers thread was already running. + */ + RCLCPP_PUBLIC + void start(); + + /** + * @brief Stops the timers thread. + * Will do nothing if the timer thread was not running. + */ + RCLCPP_PUBLIC + void stop(); + + /** + * @brief Get the number of timers that are currently ready. + * This function is thread safe. + * + * @return size_t number of ready timers. + * @throws std::runtime_error if the timers thread was already running. + */ + RCLCPP_PUBLIC + size_t get_number_ready_timers(); + + /** + * @brief Executes head timer if ready. + * This function is thread safe. + * This function will try to execute the timer callback regardless of whether + * the TimersManager on_ready_callback was passed during construction. + * + * @return true if head timer was ready. + * @throws std::runtime_error if the timers thread was already running. + */ + RCLCPP_PUBLIC + bool execute_head_timer(); + + /** + * @brief Executes timer identified by its ID. + * This function is thread safe. + * This function will try to execute the timer callback regardless of whether + * the TimersManager on_ready_callback was passed during construction. + * + * @param timer_id the timer ID of the timer to execute + */ + RCLCPP_PUBLIC + void execute_ready_timer(const rclcpp::TimerBase * timer_id); + + /** + * @brief Get the amount of time before the next timer triggers. + * This function is thread safe. + * + * @return std::optional to wait, + * the returned value could be negative if the timer is already expired + * or std::chrono::nanoseconds::max() if there are no timers stored in the object. + * If the head timer was cancelled, then this will return a nullopt. + * @throws std::runtime_error if the timers thread was already running. + */ + RCLCPP_PUBLIC + std::optional get_head_timeout(); + +private: + RCLCPP_DISABLE_COPY(TimersManager) + + using TimerPtr = rclcpp::TimerBase::SharedPtr; + using WeakTimerPtr = rclcpp::TimerBase::WeakPtr; + + // Forward declaration + class TimersHeap; + + /** + * @brief This class allows to store weak pointers to timers in a heap-like data structure. + * The root of the heap is the timer that triggers first. + * Since this class uses weak ownership, it is not guaranteed that it represents a valid heap + * at any point in time as timers could go out of scope, thus invalidating it. + * The "validate_and_lock" API allows to restore the heap property and also returns a locked version + * of the timers heap. + * This class is not thread safe and requires external mutexes to protect its usage. + */ + class WeakTimersHeap + { +public: + /** + * @brief Add a new timer to the heap. After the addition, the heap property is enforced. + * + * @param timer new timer to add. + * @return true if timer has been added, false if it was already there. + */ + bool add_timer(TimerPtr timer) + { + TimersHeap locked_heap = this->validate_and_lock(); + bool added = locked_heap.add_timer(std::move(timer)); + + if (added) { + // Re-create the weak heap with the new timer added + this->store(locked_heap); + } + + return added; + } + + /** + * @brief Remove a timer from the heap. After the removal, the heap property is enforced. + * + * @param timer timer to remove. + * @return true if timer has been removed, false if it was not there. + */ + bool remove_timer(TimerPtr timer) + { + TimersHeap locked_heap = this->validate_and_lock(); + bool removed = locked_heap.remove_timer(std::move(timer)); + + if (removed) { + // Re-create the weak heap with the timer removed + this->store(locked_heap); + } + + return removed; + } + + /** + * @brief Retrieve the timer identified by the key + * @param timer_id The ID of the timer to retrieve. + * @return TimerPtr if there's a timer associated with the ID, nullptr otherwise + */ + TimerPtr get_timer(const rclcpp::TimerBase * timer_id) + { + for (auto & weak_timer : weak_heap_) { + auto timer = weak_timer.lock(); + if (timer.get() == timer_id) { + return timer; + } + } + return nullptr; + } + + /** + * @brief Returns a const reference to the front element. + */ + const WeakTimerPtr & front() const + { + return weak_heap_.front(); + } + + /** + * @brief Returns whether the heap is empty or not. + */ + bool empty() const + { + return weak_heap_.empty(); + } + + /** + * @brief This function restores the current object as a valid heap + * and it returns a locked version of it. + * Timers that went out of scope are removed from the container. + * It is the only public API to access and manipulate the stored timers. + * + * @return TimersHeap owned timers corresponding to the current object + */ + TimersHeap validate_and_lock() + { + TimersHeap locked_heap; + bool any_timer_destroyed = false; + + for (auto weak_timer : weak_heap_) { + auto timer = weak_timer.lock(); + if (timer) { + // This timer is valid, so add it to the locked heap + // Note: we access friend private `owned_heap_` member field. + locked_heap.owned_heap_.push_back(std::move(timer)); + } else { + // This timer went out of scope, so we don't add it to locked heap + // and we mark the corresponding flag. + // It's not needed to erase it from weak heap, as we are going to re-heapify. + // Note: we can't exit from the loop here, as we need to find all valid timers. + any_timer_destroyed = true; + } + } + + // If a timer has gone out of scope, then the remaining elements do not represent + // a valid heap anymore. We need to re-heapify the timers heap. + if (any_timer_destroyed) { + locked_heap.heapify(); + // Re-create the weak heap now that elements have been heapified again + this->store(locked_heap); + } + + return locked_heap; + } + + /** + * @brief This function allows to recreate the heap of weak pointers + * from an heap of owned pointers. + * It is required to be called after a locked TimersHeap generated from this object + * has been modified in any way (e.g. timers triggered, added, removed). + * + * @param heap timers heap to store as weak pointers + */ + void store(const TimersHeap & heap) + { + weak_heap_.clear(); + // Note: we access friend private `owned_heap_` member field. + for (auto t : heap.owned_heap_) { + weak_heap_.push_back(t); + } + } + + /** + * @brief Remove all timers from the heap. + */ + void clear() + { + weak_heap_.clear(); + } + +private: + std::vector weak_heap_; + }; + + /** + * @brief This class is the equivalent of WeakTimersHeap but with ownership of the timers. + * It can be generated by locking the weak version. + * It provides operations to manipulate the heap. + * This class is not thread safe and requires external mutexes to protect its usage. + */ + class TimersHeap + { +public: + /** + * @brief Try to add a new timer to the heap. + * After the addition, the heap property is preserved. + * @param timer new timer to add. + * @return true if timer has been added, false if it was already there. + */ + bool add_timer(TimerPtr timer) + { + // Nothing to do if the timer is already stored here + auto it = std::find(owned_heap_.begin(), owned_heap_.end(), timer); + if (it != owned_heap_.end()) { + return false; + } + + owned_heap_.push_back(std::move(timer)); + std::push_heap(owned_heap_.begin(), owned_heap_.end(), timer_greater); + + return true; + } + + /** + * @brief Try to remove a timer from the heap. + * After the removal, the heap property is preserved. + * @param timer timer to remove. + * @return true if timer has been removed, false if it was not there. + */ + bool remove_timer(TimerPtr timer) + { + // Nothing to do if the timer is not stored here + auto it = std::find(owned_heap_.begin(), owned_heap_.end(), timer); + if (it == owned_heap_.end()) { + return false; + } + + owned_heap_.erase(it); + this->heapify(); + + return true; + } + + /** + * @brief Returns a reference to the front element. + * @return reference to front element. + */ + TimerPtr & front() + { + return owned_heap_.front(); + } + + /** + * @brief Returns a const reference to the front element. + * @return const reference to front element. + */ + const TimerPtr & front() const + { + return owned_heap_.front(); + } + + /** + * @brief Returns whether the heap is empty or not. + * @return true if the heap is empty. + */ + bool empty() const + { + return owned_heap_.empty(); + } + + /** + * @brief Returns the size of the heap. + * @return the number of valid timers in the heap. + */ + size_t size() const + { + return owned_heap_.size(); + } + + /** + * @brief Get the number of timers that are currently ready. + * @return size_t number of ready timers. + */ + size_t get_number_ready_timers() const + { + size_t ready_timers = 0; + + for (TimerPtr t : owned_heap_) { + if (t->is_ready()) { + ready_timers++; + } + } + + return ready_timers; + } + + /** + * @brief Restore a valid heap after the root value has been replaced (e.g. timer triggered). + */ + void heapify_root() + { + // The following code is a more efficient version than doing + // pop_heap, pop_back, push_back, push_heap + // as it removes the need for the last push_heap + + // Push the modified element (i.e. the current root) at the bottom of the heap + owned_heap_.push_back(owned_heap_[0]); + // Exchange first and last-1 elements and reheapify + std::pop_heap(owned_heap_.begin(), owned_heap_.end(), timer_greater); + // Remove last element + owned_heap_.pop_back(); + } + + /** + * @brief Completely restores the structure to a valid heap + */ + void heapify() + { + std::make_heap(owned_heap_.begin(), owned_heap_.end(), timer_greater); + } + + /** + * @brief Helper function to clear the "on_reset_callback" on all associated timers. + */ + void clear_timers_on_reset_callbacks() + { + for (TimerPtr & t : owned_heap_) { + t->clear_on_reset_callback(); + } + } + + /** + * @brief Friend declaration to allow the `validate_and_lock()` function to access the + * underlying heap container + */ + friend TimersHeap WeakTimersHeap::validate_and_lock(); + + /** + * @brief Friend declaration to allow the `store()` function to access the + * underlying heap container + */ + friend void WeakTimersHeap::store(const TimersHeap & heap); + +private: + /** + * @brief Comparison function between timers. + * @return true if `a` triggers after `b`. + */ + static bool timer_greater(TimerPtr a, TimerPtr b) + { + // TODO(alsora): this can cause an error if timers are using different clocks + return a->time_until_trigger() > b->time_until_trigger(); + } + + std::vector owned_heap_; + }; + + /** + * @brief Implements a loop that keeps executing ready timers. + * This function is executed in the timers thread. + */ + void run_timers(); + + /** + * @brief Get the amount of time before the next timer triggers. + * This function is not thread safe, acquire a mutex before calling it. + * + * @return std::optional to wait, + * the returned value could be negative if the timer is already expired + * or std::chrono::nanoseconds::max() if the heap is empty. + * If the head timer was cancelled, then this will return a nullopt. + * This function is not thread safe, acquire the timers_mutex_ before calling it. + */ + std::optional get_head_timeout_unsafe(); + + /** + * @brief Executes all the timers currently ready when the function is invoked + * while keeping the heap correctly sorted. + * This function is not thread safe, acquire the timers_mutex_ before calling it. + */ + void execute_ready_timers_unsafe(); + + // Callback to be called when timer is ready + std::function on_ready_callback_; + + // Thread used to run the timers execution task + std::thread timers_thread_; + // Protects access to timers + std::mutex timers_mutex_; + // Protects access to stop() + std::mutex stop_mutex_; + // Notifies the timers thread whenever timers are added/removed + std::condition_variable timers_cv_; + // Flag used as predicate by timers_cv_ that denotes one or more timers being added/removed + bool timers_updated_ {false}; + // Indicates whether the timers thread is currently running or not + std::atomic running_ {false}; + // Parent context used to understand if ROS is still active + std::shared_ptr context_; + // Timers heap storage with weak ownership + WeakTimersHeap weak_timers_heap_; +}; + +} // namespace experimental +} // namespace rclcpp + +#endif // RCLCPP__EXPERIMENTAL__TIMERS_MANAGER_HPP_ diff --git a/rclcpp/src/rclcpp/executors/executor_notify_waitable.cpp b/rclcpp/src/rclcpp/executors/executor_notify_waitable.cpp index c0ad8a25a4..15a31cd60d 100644 --- a/rclcpp/src/rclcpp/executors/executor_notify_waitable.cpp +++ b/rclcpp/src/rclcpp/executors/executor_notify_waitable.cpp @@ -99,6 +99,52 @@ ExecutorNotifyWaitable::take_data() return nullptr; } +std::shared_ptr +ExecutorNotifyWaitable::take_data_by_entity_id(size_t id) +{ + (void) id; + return nullptr; +} + +void +ExecutorNotifyWaitable::set_on_ready_callback(std::function callback) +{ + // The second argument of the callback could be used to identify which guard condition + // triggered the event. + // We could indicate which of the guard conditions was triggered, but the executor + // is already going to check that. + auto gc_callback = [callback](size_t count) { + callback(count, 0); + }; + + std::lock_guard lock(guard_condition_mutex_); + + on_ready_callback_ = gc_callback; + for (auto weak_gc : notify_guard_conditions_) { + auto gc = weak_gc.lock(); + if (!gc) { + continue; + } + gc->set_on_trigger_callback(on_ready_callback_); + } +} + +RCLCPP_PUBLIC +void +ExecutorNotifyWaitable::clear_on_ready_callback() +{ + std::lock_guard lock(guard_condition_mutex_); + + on_ready_callback_ = nullptr; + for (auto weak_gc : notify_guard_conditions_) { + auto gc = weak_gc.lock(); + if (!gc) { + continue; + } + gc->set_on_trigger_callback(nullptr); + } +} + void ExecutorNotifyWaitable::add_guard_condition(rclcpp::GuardCondition::WeakPtr weak_guard_condition) { @@ -106,15 +152,23 @@ ExecutorNotifyWaitable::add_guard_condition(rclcpp::GuardCondition::WeakPtr weak auto guard_condition = weak_guard_condition.lock(); if (guard_condition && notify_guard_conditions_.count(weak_guard_condition) == 0) { notify_guard_conditions_.insert(weak_guard_condition); + if (on_ready_callback_) { + guard_condition->set_on_trigger_callback(on_ready_callback_); + } } } void -ExecutorNotifyWaitable::remove_guard_condition(rclcpp::GuardCondition::WeakPtr guard_condition) +ExecutorNotifyWaitable::remove_guard_condition(rclcpp::GuardCondition::WeakPtr weak_guard_condition) { std::lock_guard lock(guard_condition_mutex_); - if (notify_guard_conditions_.count(guard_condition) != 0) { - notify_guard_conditions_.erase(guard_condition); + if (notify_guard_conditions_.count(weak_guard_condition) != 0) { + notify_guard_conditions_.erase(weak_guard_condition); + auto guard_condition = weak_guard_condition.lock(); + // If this notify waitable doesn't have an on_ready_callback, then there's nothing to unset + if (guard_condition && on_ready_callback_) { + guard_condition->set_on_trigger_callback(nullptr); + } } } diff --git a/rclcpp/src/rclcpp/experimental/executors/events_executor/events_executor.cpp b/rclcpp/src/rclcpp/experimental/executors/events_executor/events_executor.cpp new file mode 100644 index 0000000000..c977c8c904 --- /dev/null +++ b/rclcpp/src/rclcpp/experimental/executors/events_executor/events_executor.cpp @@ -0,0 +1,517 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "rclcpp/experimental/executors/events_executor/events_executor.hpp" + +#include +#include +#include + +#include "rcpputils/scope_exit.hpp" + +using namespace std::chrono_literals; + +using rclcpp::experimental::executors::EventsExecutor; + +EventsExecutor::EventsExecutor( + rclcpp::experimental::executors::EventsQueue::UniquePtr events_queue, + bool execute_timers_separate_thread, + const rclcpp::ExecutorOptions & options) +: rclcpp::Executor(options) +{ + // Get ownership of the queue used to store events. + if (!events_queue) { + throw std::invalid_argument("events_queue can't be a null pointer"); + } + events_queue_ = std::move(events_queue); + + // Create timers manager + // The timers manager can be used either to only track timers (in this case an expired + // timer will generate an executor event and then it will be executed by the executor thread) + // or it can also take care of executing expired timers in its dedicated thread. + std::function timer_on_ready_cb = nullptr; + if (!execute_timers_separate_thread) { + timer_on_ready_cb = [this](const rclcpp::TimerBase * timer_id) { + ExecutorEvent event = {timer_id, -1, ExecutorEventType::TIMER_EVENT, 1}; + this->events_queue_->enqueue(event); + }; + } + timers_manager_ = + std::make_shared(context_, timer_on_ready_cb); + + this->current_entities_collection_ = + std::make_shared(); + + notify_waitable_ = std::make_shared( + [this]() { + // This callback is invoked when: + // - the interrupt or shutdown guard condition is triggered: + // ---> we need to wake up the executor so that it can terminate + // - a node or callback group guard condition is triggered: + // ---> the entities collection is changed, we need to update callbacks + notify_waitable_event_pushed_ = false; + this->refresh_current_collection_from_callback_groups(); + }); + + // Make sure that the notify waitable is immediately added to the collection + // to avoid missing events + this->add_notify_waitable_to_collection(current_entities_collection_->waitables); + + notify_waitable_->add_guard_condition(interrupt_guard_condition_); + notify_waitable_->add_guard_condition(shutdown_guard_condition_); + + notify_waitable_->set_on_ready_callback( + this->create_waitable_callback(notify_waitable_.get())); + + auto notify_waitable_entity_id = notify_waitable_.get(); + notify_waitable_->set_on_ready_callback( + [this, notify_waitable_entity_id](size_t num_events, int waitable_data) { + // The notify waitable has a special callback. + // We don't care about how many events as when we wake up the executor we are going to + // process everything regardless. + // For the same reason, if an event of this type has already been pushed but it has not been + // processed yet, we avoid pushing additional events. + (void)num_events; + if (notify_waitable_event_pushed_.exchange(true)) { + return; + } + + ExecutorEvent event = + {notify_waitable_entity_id, waitable_data, ExecutorEventType::WAITABLE_EVENT, 1}; + this->events_queue_->enqueue(event); + }); + + this->entities_collector_ = + std::make_shared(notify_waitable_); +} + +EventsExecutor::~EventsExecutor() +{ + spinning.store(false); + notify_waitable_->clear_on_ready_callback(); + this->refresh_current_collection({}); +} + +void +EventsExecutor::spin() +{ + if (spinning.exchange(true)) { + throw std::runtime_error("spin() called while already spinning"); + } + RCPPUTILS_SCOPE_EXIT(this->spinning.store(false); ); + + timers_manager_->start(); + RCPPUTILS_SCOPE_EXIT(timers_manager_->stop(); ); + + while (rclcpp::ok(context_) && spinning.load()) { + // Wait until we get an event + ExecutorEvent event; + bool has_event = events_queue_->dequeue(event); + if (has_event) { + this->execute_event(event); + } + } +} + +void +EventsExecutor::spin_some(std::chrono::nanoseconds max_duration) +{ + return this->spin_some_impl(max_duration, false); +} + +void +EventsExecutor::spin_all(std::chrono::nanoseconds max_duration) +{ + if (max_duration <= 0ns) { + throw std::invalid_argument("max_duration must be positive"); + } + return this->spin_some_impl(max_duration, true); +} + +void +EventsExecutor::spin_some_impl(std::chrono::nanoseconds max_duration, bool exhaustive) +{ + if (spinning.exchange(true)) { + throw std::runtime_error("spin_some() called while already spinning"); + } + + RCPPUTILS_SCOPE_EXIT(this->spinning.store(false); ); + + auto start = std::chrono::steady_clock::now(); + + auto max_duration_not_elapsed = [max_duration, start]() { + if (std::chrono::nanoseconds(0) == max_duration) { + // told to spin forever if need be + return true; + } else if (std::chrono::steady_clock::now() - start < max_duration) { + // told to spin only for some maximum amount of time + return true; + } + // spun too long + return false; + }; + + // Get the number of events and timers ready at start + const size_t ready_events_at_start = events_queue_->size(); + size_t executed_events = 0; + const size_t ready_timers_at_start = timers_manager_->get_number_ready_timers(); + size_t executed_timers = 0; + + while (rclcpp::ok(context_) && spinning.load() && max_duration_not_elapsed()) { + // Execute first ready event from queue if exists + if (exhaustive || (executed_events < ready_events_at_start)) { + bool has_event = !events_queue_->empty(); + + if (has_event) { + ExecutorEvent event; + bool ret = events_queue_->dequeue(event, std::chrono::nanoseconds(0)); + if (ret) { + this->execute_event(event); + executed_events++; + continue; + } + } + } + + // Execute first timer if it is ready + if (exhaustive || (executed_timers < ready_timers_at_start)) { + bool timer_executed = timers_manager_->execute_head_timer(); + if (timer_executed) { + executed_timers++; + continue; + } + } + + // If there's no more work available, exit + break; + } +} + +void +EventsExecutor::spin_once_impl(std::chrono::nanoseconds timeout) +{ + // In this context a negative input timeout means no timeout + if (timeout < 0ns) { + timeout = std::chrono::nanoseconds::max(); + } + + // Select the smallest between input timeout and timer timeout. + // Cancelled timers are not considered. + bool is_timer_timeout = false; + auto next_timer_timeout = timers_manager_->get_head_timeout(); + if (next_timer_timeout.has_value() && next_timer_timeout.value() < timeout) { + timeout = next_timer_timeout.value(); + is_timer_timeout = true; + } + + ExecutorEvent event; + bool has_event = events_queue_->dequeue(event, timeout); + + // If we wake up from the wait with an event, it means that it + // arrived before any of the timers expired. + if (has_event) { + this->execute_event(event); + } else if (is_timer_timeout) { + timers_manager_->execute_head_timer(); + } +} + +void +EventsExecutor::add_node( + rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, bool notify) +{ + // This field is unused because we don't have to wake up the executor when a node is added. + (void) notify; + + // Add node to entities collector + this->entities_collector_->add_node(node_ptr); + + this->refresh_current_collection_from_callback_groups(); +} + +void +EventsExecutor::add_node(std::shared_ptr node_ptr, bool notify) +{ + this->add_node(node_ptr->get_node_base_interface(), notify); +} + +void +EventsExecutor::remove_node( + rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, bool notify) +{ + // This field is unused because we don't have to wake up the executor when a node is removed. + (void)notify; + + // Remove node from entities collector. + // This will result in un-setting all the event callbacks from its entities. + // After this function returns, this executor will not receive any more events associated + // to these entities. + this->entities_collector_->remove_node(node_ptr); + + this->refresh_current_collection_from_callback_groups(); +} + +void +EventsExecutor::remove_node(std::shared_ptr node_ptr, bool notify) +{ + this->remove_node(node_ptr->get_node_base_interface(), notify); +} + +void +EventsExecutor::execute_event(const ExecutorEvent & event) +{ + switch (event.type) { + case ExecutorEventType::CLIENT_EVENT: + { + rclcpp::ClientBase::SharedPtr client; + { + std::lock_guard lock(collection_mutex_); + client = this->retrieve_entity( + static_cast(event.entity_key), + current_entities_collection_->clients); + } + if (client) { + for (size_t i = 0; i < event.num_events; i++) { + execute_client(client); + } + } + + break; + } + case ExecutorEventType::SUBSCRIPTION_EVENT: + { + rclcpp::SubscriptionBase::SharedPtr subscription; + { + std::lock_guard lock(collection_mutex_); + subscription = this->retrieve_entity( + static_cast(event.entity_key), + current_entities_collection_->subscriptions); + } + if (subscription) { + for (size_t i = 0; i < event.num_events; i++) { + execute_subscription(subscription); + } + } + break; + } + case ExecutorEventType::SERVICE_EVENT: + { + rclcpp::ServiceBase::SharedPtr service; + { + std::lock_guard lock(collection_mutex_); + service = this->retrieve_entity( + static_cast(event.entity_key), + current_entities_collection_->services); + } + if (service) { + for (size_t i = 0; i < event.num_events; i++) { + execute_service(service); + } + } + + break; + } + case ExecutorEventType::TIMER_EVENT: + { + timers_manager_->execute_ready_timer( + static_cast(event.entity_key)); + break; + } + case ExecutorEventType::WAITABLE_EVENT: + { + rclcpp::Waitable::SharedPtr waitable; + { + std::lock_guard lock(collection_mutex_); + waitable = this->retrieve_entity( + static_cast(event.entity_key), + current_entities_collection_->waitables); + } + if (waitable) { + for (size_t i = 0; i < event.num_events; i++) { + auto data = waitable->take_data_by_entity_id(event.waitable_data); + waitable->execute(data); + } + } + break; + } + } +} + +void +EventsExecutor::add_callback_group( + rclcpp::CallbackGroup::SharedPtr group_ptr, + rclcpp::node_interfaces::NodeBaseInterface::SharedPtr node_ptr, + bool notify) +{ + // This field is unused because we don't have to wake up + // the executor when a callback group is added. + (void)notify; + (void)node_ptr; + + this->entities_collector_->add_callback_group(group_ptr); + + this->refresh_current_collection_from_callback_groups(); +} + +void +EventsExecutor::remove_callback_group( + rclcpp::CallbackGroup::SharedPtr group_ptr, bool notify) +{ + // This field is unused because we don't have to wake up + // the executor when a callback group is removed. + (void)notify; + + this->entities_collector_->remove_callback_group(group_ptr); + + this->refresh_current_collection_from_callback_groups(); +} + +std::vector +EventsExecutor::get_all_callback_groups() +{ + this->entities_collector_->update_collections(); + return this->entities_collector_->get_all_callback_groups(); +} + +std::vector +EventsExecutor::get_manually_added_callback_groups() +{ + this->entities_collector_->update_collections(); + return this->entities_collector_->get_manually_added_callback_groups(); +} + +std::vector +EventsExecutor::get_automatically_added_callback_groups_from_nodes() +{ + this->entities_collector_->update_collections(); + return this->entities_collector_->get_automatically_added_callback_groups(); +} + +void +EventsExecutor::refresh_current_collection_from_callback_groups() +{ + // Build the new collection + this->entities_collector_->update_collections(); + auto callback_groups = this->entities_collector_->get_all_callback_groups(); + rclcpp::executors::ExecutorEntitiesCollection new_collection; + rclcpp::executors::build_entities_collection(callback_groups, new_collection); + + // TODO(alsora): this may be implemented in a better way. + // We need the notify waitable to be included in the executor "current_collection" + // because we need to be able to retrieve events for it. + // We could explicitly check for the notify waitable ID when we receive a waitable event + // but I think that it's better if the waitable was in the collection and it could be + // retrieved in the "standard" way. + // To do it, we need to add the notify waitable as an entry in both the new and + // current collections such that it's neither added or removed. + this->add_notify_waitable_to_collection(new_collection.waitables); + + // Acquire lock before modifying the current collection + std::lock_guard lock(collection_mutex_); + this->add_notify_waitable_to_collection(current_entities_collection_->waitables); + + this->refresh_current_collection(new_collection); +} + +void +EventsExecutor::refresh_current_collection( + const rclcpp::executors::ExecutorEntitiesCollection & new_collection) +{ + // Acquire lock before modifying the current collection + std::lock_guard lock(collection_mutex_); + + current_entities_collection_->timers.update( + new_collection.timers, + [this](rclcpp::TimerBase::SharedPtr timer) {timers_manager_->add_timer(timer);}, + [this](rclcpp::TimerBase::SharedPtr timer) {timers_manager_->remove_timer(timer);}); + + current_entities_collection_->subscriptions.update( + new_collection.subscriptions, + [this](auto subscription) { + subscription->set_on_new_message_callback( + this->create_entity_callback( + subscription->get_subscription_handle().get(), ExecutorEventType::SUBSCRIPTION_EVENT)); + }, + [](auto subscription) {subscription->clear_on_new_message_callback();}); + + current_entities_collection_->clients.update( + new_collection.clients, + [this](auto client) { + client->set_on_new_response_callback( + this->create_entity_callback( + client->get_client_handle().get(), ExecutorEventType::CLIENT_EVENT)); + }, + [](auto client) {client->clear_on_new_response_callback();}); + + current_entities_collection_->services.update( + new_collection.services, + [this](auto service) { + service->set_on_new_request_callback( + this->create_entity_callback( + service->get_service_handle().get(), ExecutorEventType::SERVICE_EVENT)); + }, + [](auto service) {service->clear_on_new_request_callback();}); + + // DO WE NEED THIS? WE ARE NOT DOING ANYTHING WITH GUARD CONDITIONS + /* + current_entities_collection_->guard_conditions.update(new_collection.guard_conditions, + [](auto guard_condition) {(void)guard_condition;}, + [](auto guard_condition) {guard_condition->set_on_trigger_callback(nullptr);}); + */ + + current_entities_collection_->waitables.update( + new_collection.waitables, + [this](auto waitable) { + waitable->set_on_ready_callback( + this->create_waitable_callback(waitable.get())); + }, + [](auto waitable) {waitable->clear_on_ready_callback();}); +} + +std::function +EventsExecutor::create_entity_callback( + void * entity_key, ExecutorEventType event_type) +{ + std::function + callback = [this, entity_key, event_type](size_t num_events) { + ExecutorEvent event = {entity_key, -1, event_type, num_events}; + this->events_queue_->enqueue(event); + }; + return callback; +} + +std::function +EventsExecutor::create_waitable_callback(const rclcpp::Waitable * entity_key) +{ + std::function + callback = [this, entity_key](size_t num_events, int waitable_data) { + ExecutorEvent event = + {entity_key, waitable_data, ExecutorEventType::WAITABLE_EVENT, num_events}; + this->events_queue_->enqueue(event); + }; + return callback; +} + +void +EventsExecutor::add_notify_waitable_to_collection( + rclcpp::executors::ExecutorEntitiesCollection::WaitableCollection & collection) +{ + // The notify waitable is not associated to any group, so use an invalid one + rclcpp::CallbackGroup::WeakPtr weak_group_ptr; + collection.insert( + { + this->notify_waitable_.get(), + {this->notify_waitable_, weak_group_ptr} + }); +} diff --git a/rclcpp/src/rclcpp/experimental/timers_manager.cpp b/rclcpp/src/rclcpp/experimental/timers_manager.cpp new file mode 100644 index 0000000000..39924afa56 --- /dev/null +++ b/rclcpp/src/rclcpp/experimental/timers_manager.cpp @@ -0,0 +1,318 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "rclcpp/experimental/timers_manager.hpp" + +#include + +#include +#include +#include +#include + +#include "rcpputils/scope_exit.hpp" + +using rclcpp::experimental::TimersManager; + +TimersManager::TimersManager( + std::shared_ptr context, + std::function on_ready_callback) +: on_ready_callback_(on_ready_callback), + context_(context) +{ +} + +TimersManager::~TimersManager() +{ + // Remove all timers + this->clear(); + + // Make sure timers thread is stopped before destroying this object + this->stop(); +} + +void TimersManager::add_timer(rclcpp::TimerBase::SharedPtr timer) +{ + if (!timer) { + throw std::invalid_argument("TimersManager::add_timer() trying to add nullptr timer"); + } + + bool added = false; + { + std::unique_lock lock(timers_mutex_); + added = weak_timers_heap_.add_timer(timer); + timers_updated_ = timers_updated_ || added; + } + + timer->set_on_reset_callback( + [this](size_t arg) { + { + (void)arg; + std::unique_lock lock(timers_mutex_); + timers_updated_ = true; + } + timers_cv_.notify_one(); + }); + + if (added) { + // Notify that a timer has been added + timers_cv_.notify_one(); + } +} + +void TimersManager::start() +{ + // Make sure that the thread is not already running + if (running_.exchange(true)) { + throw std::runtime_error("TimersManager::start() can't start timers thread as already running"); + } + + timers_thread_ = std::thread(&TimersManager::run_timers, this); +} + +void TimersManager::stop() +{ + // Lock stop() function to prevent race condition in destructor + std::unique_lock lock(stop_mutex_); + running_ = false; + + // Notify the timers manager thread to wake up + { + std::unique_lock lock(timers_mutex_); + timers_updated_ = true; + } + timers_cv_.notify_one(); + + // Join timers thread if it's running + if (timers_thread_.joinable()) { + timers_thread_.join(); + } +} + +std::optional TimersManager::get_head_timeout() +{ + // Do not allow to interfere with the thread running + if (running_) { + throw std::runtime_error( + "get_head_timeout() can't be used while timers thread is running"); + } + + std::unique_lock lock(timers_mutex_); + return this->get_head_timeout_unsafe(); +} + +size_t TimersManager::get_number_ready_timers() +{ + // Do not allow to interfere with the thread running + if (running_) { + throw std::runtime_error( + "get_number_ready_timers() can't be used while timers thread is running"); + } + + std::unique_lock lock(timers_mutex_); + TimersHeap locked_heap = weak_timers_heap_.validate_and_lock(); + return locked_heap.get_number_ready_timers(); +} + +bool TimersManager::execute_head_timer() +{ + // Do not allow to interfere with the thread running + if (running_) { + throw std::runtime_error( + "execute_head_timer() can't be used while timers thread is running"); + } + + std::unique_lock lock(timers_mutex_); + + TimersHeap timers_heap = weak_timers_heap_.validate_and_lock(); + + // Nothing to do if we don't have any timer + if (timers_heap.empty()) { + return false; + } + + TimerPtr head_timer = timers_heap.front(); + + const bool timer_ready = head_timer->is_ready(); + if (timer_ready) { + // NOTE: here we always execute the timer, regardless of whether the + // on_ready_callback is set or not. + head_timer->call(); + head_timer->execute_callback(); + timers_heap.heapify_root(); + weak_timers_heap_.store(timers_heap); + } + + return timer_ready; +} + +void TimersManager::execute_ready_timer(const rclcpp::TimerBase * timer_id) +{ + TimerPtr ready_timer; + { + std::unique_lock lock(timers_mutex_); + ready_timer = weak_timers_heap_.get_timer(timer_id); + } + if (ready_timer) { + ready_timer->execute_callback(); + } +} + +std::optional TimersManager::get_head_timeout_unsafe() +{ + // If we don't have any weak pointer, then we just return maximum timeout + if (weak_timers_heap_.empty()) { + return std::chrono::nanoseconds::max(); + } + // Weak heap is not empty, so try to lock the first element. + // If it is still a valid pointer, it is guaranteed to be the correct head + TimerPtr head_timer = weak_timers_heap_.front().lock(); + + if (!head_timer) { + // The first element has expired, we can't make other assumptions on the heap + // and we need to entirely validate it. + TimersHeap locked_heap = weak_timers_heap_.validate_and_lock(); + // NOTE: the following operations will not modify any element in the heap, so we + // don't have to call `weak_timers_heap_.store(locked_heap)` at the end. + + if (locked_heap.empty()) { + return std::chrono::nanoseconds::max(); + } + head_timer = locked_heap.front(); + } + if (head_timer->is_canceled()) { + return std::nullopt; + } + return head_timer->time_until_trigger(); +} + +void TimersManager::execute_ready_timers_unsafe() +{ + // We start by locking the timers + TimersHeap locked_heap = weak_timers_heap_.validate_and_lock(); + + // Nothing to do if we don't have any timer + if (locked_heap.empty()) { + return; + } + + // Keep executing timers until they are ready and they were already ready when we started. + // The two checks prevent this function from blocking indefinitely if the + // time required for executing the timers is longer than their period. + + TimerPtr head_timer = locked_heap.front(); + const size_t number_ready_timers = locked_heap.get_number_ready_timers(); + size_t executed_timers = 0; + while (executed_timers < number_ready_timers && head_timer->is_ready()) { + head_timer->call(); + if (on_ready_callback_) { + on_ready_callback_(head_timer.get()); + } else { + head_timer->execute_callback(); + } + + executed_timers++; + // Executing a timer will result in updating its time_until_trigger, so re-heapify + locked_heap.heapify_root(); + // Get new head timer + head_timer = locked_heap.front(); + } + + // After having performed work on the locked heap we reflect the changes to weak one. + // Timers will be already sorted the next time we need them if none went out of scope. + weak_timers_heap_.store(locked_heap); +} + +void TimersManager::run_timers() +{ + // Make sure the running flag is set to false when we exit from this function + // to allow restarting the timers thread. + RCPPUTILS_SCOPE_EXIT(this->running_.store(false); ); + + while (rclcpp::ok(context_) && running_) { + // Lock mutex + std::unique_lock lock(timers_mutex_); + + std::optional time_to_sleep = get_head_timeout_unsafe(); + + // If head timer was cancelled, try to reheap and get a new head. + // This avoids an edge condition where head timer is cancelled, but other + // valid timers remain in the heap. + if (!time_to_sleep.has_value()) { + // Re-heap to (possibly) move cancelled timer from head of heap. If + // entire heap is cancelled, this will still result in a nullopt. + TimersHeap locked_heap = weak_timers_heap_.validate_and_lock(); + locked_heap.heapify(); + weak_timers_heap_.store(locked_heap); + time_to_sleep = get_head_timeout_unsafe(); + } + + // If no timers, or all timers cancelled, wait for an update. + if (!time_to_sleep.has_value() || (time_to_sleep.value() == std::chrono::nanoseconds::max()) ) { + // Wait until notification that timers have been updated + timers_cv_.wait(lock, [this]() {return timers_updated_;}); + + // Re-heap in case ordering changed due to a cancelled timer + // re-activating. + TimersHeap locked_heap = weak_timers_heap_.validate_and_lock(); + locked_heap.heapify(); + weak_timers_heap_.store(locked_heap); + } else if (time_to_sleep.value() != std::chrono::nanoseconds::zero()) { + // If time_to_sleep is zero, we immediately execute. Otherwise, wait + // until timeout or notification that timers have been updated + timers_cv_.wait_for(lock, time_to_sleep.value(), [this]() {return timers_updated_;}); + } + + // Reset timers updated flag + timers_updated_ = false; + + // Execute timers + this->execute_ready_timers_unsafe(); + } +} + +void TimersManager::clear() +{ + { + // Lock mutex and then clear all data structures + std::unique_lock lock(timers_mutex_); + + TimersHeap locked_heap = weak_timers_heap_.validate_and_lock(); + locked_heap.clear_timers_on_reset_callbacks(); + + weak_timers_heap_.clear(); + + timers_updated_ = true; + } + + // Notify timers thread such that it can re-compute its timeout + timers_cv_.notify_one(); +} + +void TimersManager::remove_timer(TimerPtr timer) +{ + bool removed = false; + { + std::unique_lock lock(timers_mutex_); + removed = weak_timers_heap_.remove_timer(timer); + + timers_updated_ = timers_updated_ || removed; + } + + if (removed) { + // Notify timers thread such that it can re-compute its timeout + timers_cv_.notify_one(); + timer->clear_on_reset_callback(); + } +} diff --git a/rclcpp/test/rclcpp/CMakeLists.txt b/rclcpp/test/rclcpp/CMakeLists.txt index c08ecfc826..efc30bff62 100644 --- a/rclcpp/test/rclcpp/CMakeLists.txt +++ b/rclcpp/test/rclcpp/CMakeLists.txt @@ -591,6 +591,12 @@ if(TARGET test_timer) target_link_libraries(test_timer ${PROJECT_NAME} mimick) endif() +ament_add_gtest(test_timers_manager test_timers_manager.cpp + APPEND_LIBRARY_DIRS "${append_library_dirs}") +if(TARGET test_timers_manager) + target_link_libraries(test_timers_manager ${PROJECT_NAME}) +endif() + ament_add_gtest(test_time_source test_time_source.cpp APPEND_LIBRARY_DIRS "${append_library_dirs}") if(TARGET test_time_source) @@ -622,18 +628,22 @@ if(TARGET test_interface_traits) target_link_libraries(test_interface_traits ${PROJECT_NAME}) endif() -# TODO(brawner) remove when destroying Node for Connext is resolved. See: -# https://github.com/ros2/rclcpp/issues/1250 ament_add_gtest( test_executors executors/test_executors.cpp APPEND_LIBRARY_DIRS "${append_library_dirs}" TIMEOUT 180) if(TARGET test_executors) - ament_target_dependencies(test_executors - "rcl" - "test_msgs") - target_link_libraries(test_executors ${PROJECT_NAME}) + target_link_libraries(test_executors ${PROJECT_NAME} rcl::rcl ${test_msgs_TARGETS}) +endif() + +ament_add_gtest( + test_executors_timer_cancel_behavior + executors/test_executors_timer_cancel_behavior.cpp + APPEND_LIBRARY_DIRS "${append_library_dirs}" + TIMEOUT 180) +if(TARGET test_executors) + target_link_libraries(test_executors_timer_cancel_behavior ${PROJECT_NAME} ${rosgraph_msgs_TARGETS}) endif() ament_add_gtest(test_static_single_threaded_executor executors/test_static_single_threaded_executor.cpp @@ -679,6 +689,17 @@ if(TARGET test_executor_notify_waitable) target_link_libraries(test_executor_notify_waitable ${PROJECT_NAME} mimick) endif() +ament_add_gtest(test_events_executor executors/test_events_executor.cpp TIMEOUT 5) +if(TARGET test_events_executor) + target_link_libraries(test_events_executor ${PROJECT_NAME} ${test_msgs_TARGETS}) +endif() + +ament_add_gtest(test_events_queue executors/test_events_queue.cpp + APPEND_LIBRARY_DIRS "${append_library_dirs}") +if(TARGET test_events_queue) + target_link_libraries(test_events_queue ${PROJECT_NAME}) +endif() + ament_add_gtest(test_guard_condition test_guard_condition.cpp APPEND_LIBRARY_DIRS "${append_library_dirs}") if(TARGET test_guard_condition) diff --git a/rclcpp/test/rclcpp/executors/executor_types.hpp b/rclcpp/test/rclcpp/executors/executor_types.hpp new file mode 100644 index 0000000000..0218a9b547 --- /dev/null +++ b/rclcpp/test/rclcpp/executors/executor_types.hpp @@ -0,0 +1,70 @@ +// Copyright 2017 Open Source Robotics Foundation, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef RCLCPP__EXECUTORS__EXECUTOR_TYPES_HPP_ +#define RCLCPP__EXECUTORS__EXECUTOR_TYPES_HPP_ + +#include + +#include +#include + +#include "rclcpp/experimental/executors/events_executor/events_executor.hpp" +#include "rclcpp/executors/single_threaded_executor.hpp" +#include "rclcpp/executors/static_single_threaded_executor.hpp" +#include "rclcpp/executors/multi_threaded_executor.hpp" + +using ExecutorTypes = + ::testing::Types< + rclcpp::executors::SingleThreadedExecutor, + rclcpp::executors::MultiThreadedExecutor, + rclcpp::executors::StaticSingleThreadedExecutor, + rclcpp::experimental::executors::EventsExecutor>; + +class ExecutorTypeNames +{ +public: + template + static std::string GetName(int idx) + { + (void)idx; + if (std::is_same()) { + return "SingleThreadedExecutor"; + } + + if (std::is_same()) { + return "MultiThreadedExecutor"; + } + + if (std::is_same()) { + return "StaticSingleThreadedExecutor"; + } + + if (std::is_same()) { + return "EventsExecutor"; + } + + return ""; + } +}; + +// StaticSingleThreadedExecutor is not included in these tests for now, due to: +// https://github.com/ros2/rclcpp/issues/1219 +using StandardExecutors = + ::testing::Types< + rclcpp::executors::SingleThreadedExecutor, + rclcpp::executors::MultiThreadedExecutor, + rclcpp::experimental::executors::EventsExecutor>; + +#endif // RCLCPP__EXECUTORS__EXECUTOR_TYPES_HPP_ diff --git a/rclcpp/test/rclcpp/executors/test_events_executor.cpp b/rclcpp/test/rclcpp/executors/test_events_executor.cpp new file mode 100644 index 0000000000..13092b7067 --- /dev/null +++ b/rclcpp/test/rclcpp/executors/test_events_executor.cpp @@ -0,0 +1,492 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include +#include +#include + +#include "rclcpp/experimental/executors/events_executor/events_executor.hpp" + +#include "test_msgs/srv/empty.hpp" +#include "test_msgs/msg/empty.hpp" + +using namespace std::chrono_literals; + +using rclcpp::experimental::executors::EventsExecutor; + +class TestEventsExecutor : public ::testing::Test +{ +public: + void SetUp() + { + rclcpp::init(0, nullptr); + } + + void TearDown() + { + rclcpp::shutdown(); + } +}; + +TEST_F(TestEventsExecutor, run_pub_sub) +{ + auto node = std::make_shared("node"); + + bool msg_received = false; + auto subscription = node->create_subscription( + "topic", rclcpp::SensorDataQoS(), + [&msg_received](test_msgs::msg::Empty::ConstSharedPtr msg) + { + (void)msg; + msg_received = true; + }); + + auto publisher = node->create_publisher("topic", rclcpp::SensorDataQoS()); + + EventsExecutor executor; + executor.add_node(node); + + bool spin_exited = false; + std::thread spinner([&spin_exited, &executor]() { + executor.spin(); + spin_exited = true; + }); + + auto msg = std::make_unique(); + publisher->publish(std::move(msg)); + + // Wait some time for the subscription to receive the message + auto start = std::chrono::high_resolution_clock::now(); + while ( + !msg_received && + !spin_exited && + (std::chrono::high_resolution_clock::now() - start < 1s)) + { + std::this_thread::sleep_for(25ms); + } + + executor.cancel(); + spinner.join(); + executor.remove_node(node); + + EXPECT_TRUE(msg_received); + EXPECT_TRUE(spin_exited); +} + +TEST_F(TestEventsExecutor, run_clients_servers) +{ + auto node = std::make_shared("node"); + + bool request_received = false; + bool response_received = false; + auto service = + node->create_service( + "service", + [&request_received]( + const test_msgs::srv::Empty::Request::SharedPtr, + test_msgs::srv::Empty::Response::SharedPtr) + { + request_received = true; + }); + auto client = node->create_client("service"); + + EventsExecutor executor; + executor.add_node(node); + + bool spin_exited = false; + std::thread spinner([&spin_exited, &executor]() { + executor.spin(); + spin_exited = true; + }); + + auto request = std::make_shared(); + client->async_send_request( + request, + [&response_received](rclcpp::Client::SharedFuture result_future) { + (void)result_future; + response_received = true; + }); + + // Wait some time for the client-server to be invoked + auto start = std::chrono::steady_clock::now(); + while ( + !response_received && + !spin_exited && + (std::chrono::steady_clock::now() - start < 1s)) + { + std::this_thread::sleep_for(5ms); + } + + executor.cancel(); + spinner.join(); + executor.remove_node(node); + + EXPECT_TRUE(request_received); + EXPECT_TRUE(response_received); + EXPECT_TRUE(spin_exited); +} + +TEST_F(TestEventsExecutor, spin_once_max_duration_timeout) +{ + auto node = std::make_shared("node"); + + EventsExecutor executor; + executor.add_node(node); + + // Consume previous events so we have a fresh start + executor.spin_all(1s); + + size_t t_runs = 0; + auto t = node->create_wall_timer( + 10s, + [&]() { + t_runs++; + }); + + // This first spin_once takes care of the waitable event + // generated by the addition of the timer to the node + executor.spin_once(1s); + EXPECT_EQ(0u, t_runs); + + auto start = std::chrono::steady_clock::now(); + + // This second spin_once should take care of the timer, + executor.spin_once(10ms); + + // but doesn't spin the time enough to call the timer callback. + EXPECT_EQ(0u, t_runs); + EXPECT_TRUE(std::chrono::steady_clock::now() - start < 200ms); +} + +TEST_F(TestEventsExecutor, spin_once_max_duration_timer) +{ + auto node = std::make_shared("node"); + + EventsExecutor executor; + executor.add_node(node); + + // Consume previous events so we have a fresh start + executor.spin_all(1s); + + size_t t_runs = 0; + auto t = node->create_wall_timer( + 10ms, + [&]() { + t_runs++; + }); + + // This first spin_once takes care of the waitable event + // generated by the addition of the timer to the node + executor.spin_once(1s); + EXPECT_EQ(0u, t_runs); + + auto start = std::chrono::steady_clock::now(); + + // This second spin_once should take care of the timer + executor.spin_once(11ms); + + EXPECT_EQ(1u, t_runs); + EXPECT_TRUE(std::chrono::steady_clock::now() - start < 200ms); +} + +TEST_F(TestEventsExecutor, spin_some_max_duration) +{ + { + auto node = std::make_shared("node"); + + size_t t_runs = 0; + auto t = node->create_wall_timer( + 10s, + [&]() { + t_runs++; + }); + + EventsExecutor executor; + executor.add_node(node); + + auto start = std::chrono::steady_clock::now(); + executor.spin_some(10ms); + + EXPECT_EQ(0u, t_runs); + EXPECT_TRUE(std::chrono::steady_clock::now() - start < 200ms); + } + + { + auto node = std::make_shared("node"); + + size_t t_runs = 0; + auto t = node->create_wall_timer( + 10ms, + [&]() { + t_runs++; + }); + + // Sleep some time for the timer to be ready when spin + std::this_thread::sleep_for(10ms); + + EventsExecutor executor; + executor.add_node(node); + + auto start = std::chrono::steady_clock::now(); + executor.spin_some(10s); + + EXPECT_EQ(1u, t_runs); + EXPECT_TRUE(std::chrono::steady_clock::now() - start < 200ms); + } +} + +TEST_F(TestEventsExecutor, spin_some_zero_duration) +{ + auto node = std::make_shared("node"); + + size_t t_runs = 0; + auto t = node->create_wall_timer( + 20ms, + [&]() { + t_runs++; + }); + + // Sleep some time for the timer to be ready when spin + std::this_thread::sleep_for(20ms); + + EventsExecutor executor; + executor.add_node(node); + executor.spin_some(0ms); + + EXPECT_EQ(1u, t_runs); +} + +TEST_F(TestEventsExecutor, spin_all_max_duration) +{ + { + auto node = std::make_shared("node"); + + size_t t_runs = 0; + auto t = node->create_wall_timer( + 10s, + [&]() { + t_runs++; + }); + + EventsExecutor executor; + executor.add_node(node); + + auto start = std::chrono::steady_clock::now(); + executor.spin_all(10ms); + + EXPECT_EQ(0u, t_runs); + EXPECT_TRUE(std::chrono::steady_clock::now() - start < 200ms); + } + + { + auto node = std::make_shared("node"); + + size_t t_runs = 0; + auto t = node->create_wall_timer( + 10ms, + [&]() { + t_runs++; + }); + + // Sleep some time for the timer to be ready when spin + std::this_thread::sleep_for(10ms); + + EventsExecutor executor; + executor.add_node(node); + + auto start = std::chrono::steady_clock::now(); + executor.spin_all(10s); + + EXPECT_EQ(1u, t_runs); + EXPECT_TRUE(std::chrono::steady_clock::now() - start < 200ms); + } + + EventsExecutor executor; + EXPECT_THROW(executor.spin_all(0ms), std::invalid_argument); + EXPECT_THROW(executor.spin_all(-5ms), std::invalid_argument); +} + +TEST_F(TestEventsExecutor, cancel_while_timers_running) +{ + auto node = std::make_shared("node"); + + EventsExecutor executor; + executor.add_node(node); + + // Take care of previous events for a fresh start + executor.spin_all(1s); + + size_t t1_runs = 0; + auto t1 = node->create_wall_timer( + 1ms, + [&]() { + t1_runs++; + std::this_thread::sleep_for(50ms); + }); + + size_t t2_runs = 0; + auto t2 = node->create_wall_timer( + 1ms, + [&]() { + t2_runs++; + std::this_thread::sleep_for(50ms); + }); + + + std::thread spinner([&executor]() {executor.spin();}); + + std::this_thread::sleep_for(10ms); + // Call cancel while t1 callback is still being executed + executor.cancel(); + spinner.join(); + + // Depending on the latency on the system, t2 may start to execute before cancel is signaled + EXPECT_GE(1u, t1_runs); + EXPECT_GE(1u, t2_runs); +} + +TEST_F(TestEventsExecutor, cancel_while_timers_waiting) +{ + auto node = std::make_shared("node"); + + size_t t1_runs = 0; + auto t1 = node->create_wall_timer( + 100s, + [&]() { + t1_runs++; + }); + + EventsExecutor executor; + executor.add_node(node); + + auto start = std::chrono::steady_clock::now(); + std::thread spinner([&executor]() {executor.spin();}); + + std::this_thread::sleep_for(10ms); + executor.cancel(); + spinner.join(); + + EXPECT_EQ(0u, t1_runs); + EXPECT_TRUE(std::chrono::steady_clock::now() - start < 1s); +} + +TEST_F(TestEventsExecutor, destroy_entities) +{ + // This test fails on Windows! We skip it for now + GTEST_SKIP(); + + // Create a publisher node and start publishing messages + auto node_pub = std::make_shared("node_pub"); + auto publisher = node_pub->create_publisher("topic", rclcpp::QoS(10)); + auto timer = node_pub->create_wall_timer( + 2ms, [&]() {publisher->publish(std::make_unique());}); + EventsExecutor executor_pub; + executor_pub.add_node(node_pub); + std::thread spinner([&executor_pub]() {executor_pub.spin();}); + + // Create a node with two different subscriptions to the topic + auto node_sub = std::make_shared("node_sub"); + size_t callback_count_1 = 0; + auto subscription_1 = + node_sub->create_subscription( + "topic", rclcpp::QoS(10), [&](test_msgs::msg::Empty::ConstSharedPtr) {callback_count_1++;}); + size_t callback_count_2 = 0; + auto subscription_2 = + node_sub->create_subscription( + "topic", rclcpp::QoS(10), [&](test_msgs::msg::Empty::ConstSharedPtr) {callback_count_2++;}); + EventsExecutor executor_sub; + executor_sub.add_node(node_sub); + + // Wait some time while messages are published + std::this_thread::sleep_for(10ms); + + // Destroy one of the two subscriptions + subscription_1.reset(); + + // Let subscriptions executor spin + executor_sub.spin_some(10ms); + + // The callback count of the destroyed subscription remained at 0 + EXPECT_EQ(0u, callback_count_1); + EXPECT_LT(0u, callback_count_2); + + executor_pub.cancel(); + spinner.join(); +} + +// Testing construction of a subscriptions with QoS event callback functions. +std::string * g_pub_log_msg; +std::string * g_sub_log_msg; +std::promise * g_log_msgs_promise; +TEST_F(TestEventsExecutor, test_default_incompatible_qos_callbacks) +{ + auto node = std::make_shared("node"); + rcutils_logging_output_handler_t original_output_handler = rcutils_logging_get_output_handler(); + + std::string pub_log_msg; + std::string sub_log_msg; + std::promise log_msgs_promise; + g_pub_log_msg = &pub_log_msg; + g_sub_log_msg = &sub_log_msg; + g_log_msgs_promise = &log_msgs_promise; + auto logger_callback = []( + const rcutils_log_location_t * /*location*/, + int /*level*/, const char * /*name*/, rcutils_time_point_value_t /*timestamp*/, + const char * format, va_list * args) -> void { + char buffer[1024]; + vsnprintf(buffer, sizeof(buffer), format, *args); + const std::string msg = buffer; + if (msg.rfind("New subscription discovered on topic '/test_topic'", 0) == 0) { + *g_pub_log_msg = buffer; + } else if (msg.rfind("New publisher discovered on topic '/test_topic'", 0) == 0) { + *g_sub_log_msg = buffer; + } + + if (!g_pub_log_msg->empty() && !g_sub_log_msg->empty()) { + g_log_msgs_promise->set_value(); + } + }; + rcutils_logging_set_output_handler(logger_callback); + + std::shared_future log_msgs_future = log_msgs_promise.get_future(); + + rclcpp::QoS qos_profile_publisher(10); + qos_profile_publisher.durability(RMW_QOS_POLICY_DURABILITY_VOLATILE); + auto publisher = node->create_publisher( + "test_topic", qos_profile_publisher); + + rclcpp::QoS qos_profile_subscription(10); + qos_profile_subscription.durability(RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL); + auto subscription = node->create_subscription( + "test_topic", qos_profile_subscription, [&](test_msgs::msg::Empty::ConstSharedPtr) {}); + + EventsExecutor ex; + ex.add_node(node->get_node_base_interface()); + + const auto timeout = std::chrono::seconds(10); + ex.spin_until_future_complete(log_msgs_future, timeout); + + EXPECT_EQ( + "New subscription discovered on topic '/test_topic', requesting incompatible QoS. " + "No messages will be sent to it. Last incompatible policy: DURABILITY_QOS_POLICY", + pub_log_msg); + EXPECT_EQ( + "New publisher discovered on topic '/test_topic', offering incompatible QoS. " + "No messages will be sent to it. Last incompatible policy: DURABILITY_QOS_POLICY", + sub_log_msg); + + rcutils_logging_set_output_handler(original_output_handler); +} diff --git a/rclcpp/test/rclcpp/executors/test_events_queue.cpp b/rclcpp/test/rclcpp/executors/test_events_queue.cpp new file mode 100644 index 0000000000..de8242b55b --- /dev/null +++ b/rclcpp/test/rclcpp/executors/test_events_queue.cpp @@ -0,0 +1,82 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include + +#include "rclcpp/experimental/executors/events_executor/events_executor_event_types.hpp" +#include "rclcpp/experimental/executors/events_executor/simple_events_queue.hpp" + +using namespace std::chrono_literals; + +TEST(TestEventsQueue, SimpleQueueTest) +{ + // Create a SimpleEventsQueue and a local queue + auto simple_queue = std::make_unique(); + rclcpp::experimental::executors::ExecutorEvent event {}; + bool ret = false; + + // Make sure the queue is empty at startup + EXPECT_TRUE(simple_queue->empty()); + EXPECT_EQ(simple_queue->size(), 0u); + + // Push 11 messages + for (uint32_t i = 1; i < 11; i++) { + rclcpp::experimental::executors::ExecutorEvent stub_event {}; + stub_event.num_events = 1; + simple_queue->enqueue(stub_event); + + EXPECT_FALSE(simple_queue->empty()); + EXPECT_EQ(simple_queue->size(), i); + } + + // Pop one message + ret = simple_queue->dequeue(event); + EXPECT_TRUE(ret); + EXPECT_FALSE(simple_queue->empty()); + EXPECT_EQ(simple_queue->size(), 9u); + + // Pop one message + ret = simple_queue->dequeue(event, std::chrono::nanoseconds(0)); + EXPECT_TRUE(ret); + EXPECT_FALSE(simple_queue->empty()); + EXPECT_EQ(simple_queue->size(), 8u); + + while (!simple_queue->empty()) { + ret = simple_queue->dequeue(event); + EXPECT_TRUE(ret); + } + + EXPECT_TRUE(simple_queue->empty()); + EXPECT_EQ(simple_queue->size(), 0u); + + ret = simple_queue->dequeue(event, std::chrono::nanoseconds(0)); + EXPECT_FALSE(ret); + + // Lets push an event into the queue and get it back + rclcpp::experimental::executors::ExecutorEvent push_event = { + simple_queue.get(), + 99, + rclcpp::experimental::executors::ExecutorEventType::SUBSCRIPTION_EVENT, + 1}; + + simple_queue->enqueue(push_event); + ret = simple_queue->dequeue(event); + EXPECT_TRUE(ret); + EXPECT_EQ(push_event.entity_key, event.entity_key); + EXPECT_EQ(push_event.waitable_data, event.waitable_data); + EXPECT_EQ(push_event.type, event.type); + EXPECT_EQ(push_event.num_events, event.num_events); +} diff --git a/rclcpp/test/rclcpp/executors/test_executors.cpp b/rclcpp/test/rclcpp/executors/test_executors.cpp index eb6652f19b..4086c14668 100644 --- a/rclcpp/test/rclcpp/executors/test_executors.cpp +++ b/rclcpp/test/rclcpp/executors/test_executors.cpp @@ -15,17 +15,19 @@ /** * This test checks all implementations of rclcpp::executor to check they pass they basic API * tests. Anything specific to any executor in particular should go in a separate test file. - * */ + #include #include +#include #include #include #include #include #include #include +#include #include "rcl/error_handling.h" #include "rcl/time.h" @@ -34,27 +36,22 @@ #include "rclcpp/duration.hpp" #include "rclcpp/guard_condition.hpp" #include "rclcpp/rclcpp.hpp" +#include "rclcpp/time_source.hpp" #include "test_msgs/msg/empty.hpp" +#include "./executor_types.hpp" + using namespace std::chrono_literals; template class TestExecutors : public ::testing::Test { public: - static void SetUpTestCase() + void SetUp() { rclcpp::init(0, nullptr); - } - static void TearDownTestCase() - { - rclcpp::shutdown(); - } - - void SetUp() - { const auto test_info = ::testing::UnitTest::GetInstance()->current_test_info(); std::stringstream test_name; test_name << test_info->test_case_name() << "_" << test_info->name(); @@ -75,6 +72,8 @@ class TestExecutors : public ::testing::Test publisher.reset(); subscription.reset(); node.reset(); + + rclcpp::shutdown(); } rclcpp::Node::SharedPtr node; @@ -88,49 +87,13 @@ class TestExecutors : public ::testing::Test template class TestExecutorsStable : public TestExecutors {}; -using ExecutorTypes = - ::testing::Types< - rclcpp::executors::SingleThreadedExecutor, - rclcpp::executors::MultiThreadedExecutor, - rclcpp::executors::StaticSingleThreadedExecutor>; - -class ExecutorTypeNames -{ -public: - template - static std::string GetName(int idx) - { - (void)idx; - if (std::is_same()) { - return "SingleThreadedExecutor"; - } - - if (std::is_same()) { - return "MultiThreadedExecutor"; - } - - if (std::is_same()) { - return "StaticSingleThreadedExecutor"; - } - - return ""; - } -}; - -// TYPED_TEST_SUITE is deprecated as of gtest 1.9, use TYPED_TEST_SUITE when gtest dependency -// is updated. TYPED_TEST_SUITE(TestExecutors, ExecutorTypes, ExecutorTypeNames); -// StaticSingleThreadedExecutor is not included in these tests for now, due to: -// https://github.com/ros2/rclcpp/issues/1219 -using StandardExecutors = - ::testing::Types< - rclcpp::executors::SingleThreadedExecutor, - rclcpp::executors::MultiThreadedExecutor>; TYPED_TEST_SUITE(TestExecutorsStable, StandardExecutors, ExecutorTypeNames); // Make sure that executors detach from nodes when destructing -TYPED_TEST(TestExecutors, detachOnDestruction) { +TYPED_TEST(TestExecutors, detachOnDestruction) +{ using ExecutorType = TypeParam; { ExecutorType executor; @@ -145,7 +108,8 @@ TYPED_TEST(TestExecutors, detachOnDestruction) { // Make sure that the executor can automatically remove expired nodes correctly // Currently fails for StaticSingleThreadedExecutor so it is being skipped, see: // https://github.com/ros2/rclcpp/issues/1231 -TYPED_TEST(TestExecutorsStable, addTemporaryNode) { +TYPED_TEST(TestExecutorsStable, addTemporaryNode) +{ using ExecutorType = TypeParam; ExecutorType executor; @@ -163,8 +127,20 @@ TYPED_TEST(TestExecutorsStable, addTemporaryNode) { spinner.join(); } +// Make sure that a spinning empty executor can be cancelled +TYPED_TEST(TestExecutors, emptyExecutor) +{ + using ExecutorType = TypeParam; + ExecutorType executor; + std::thread spinner([&]() {EXPECT_NO_THROW(executor.spin());}); + std::this_thread::sleep_for(50ms); + executor.cancel(); + spinner.join(); +} + // Check executor throws properly if the same node is added a second time -TYPED_TEST(TestExecutors, addNodeTwoExecutors) { +TYPED_TEST(TestExecutors, addNodeTwoExecutors) +{ using ExecutorType = TypeParam; ExecutorType executor1; ExecutorType executor2; @@ -174,7 +150,8 @@ TYPED_TEST(TestExecutors, addNodeTwoExecutors) { } // Check simple spin example -TYPED_TEST(TestExecutors, spinWithTimer) { +TYPED_TEST(TestExecutors, spinWithTimer) +{ using ExecutorType = TypeParam; ExecutorType executor; @@ -196,7 +173,8 @@ TYPED_TEST(TestExecutors, spinWithTimer) { executor.remove_node(this->node, true); } -TYPED_TEST(TestExecutors, spinWhileAlreadySpinning) { +TYPED_TEST(TestExecutors, spinWhileAlreadySpinning) +{ using ExecutorType = TypeParam; ExecutorType executor; executor.add_node(this->node); @@ -222,7 +200,8 @@ TYPED_TEST(TestExecutors, spinWhileAlreadySpinning) { } // Check executor exits immediately if future is complete. -TYPED_TEST(TestExecutors, testSpinUntilFutureComplete) { +TYPED_TEST(TestExecutors, testSpinUntilFutureComplete) +{ using ExecutorType = TypeParam; ExecutorType executor; executor.add_node(this->node); @@ -244,7 +223,8 @@ TYPED_TEST(TestExecutors, testSpinUntilFutureComplete) { } // Same test, but uses a shared future. -TYPED_TEST(TestExecutors, testSpinUntilSharedFutureComplete) { +TYPED_TEST(TestExecutors, testSpinUntilSharedFutureComplete) +{ using ExecutorType = TypeParam; ExecutorType executor; executor.add_node(this->node); @@ -267,7 +247,8 @@ TYPED_TEST(TestExecutors, testSpinUntilSharedFutureComplete) { } // For a longer running future that should require several iterations of spin_once -TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteNoTimeout) { +TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteNoTimeout) +{ using ExecutorType = TypeParam; ExecutorType executor; executor.add_node(this->node); @@ -313,7 +294,8 @@ TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteNoTimeout) { } // Check spin_until_future_complete timeout works as expected -TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteWithTimeout) { +TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteWithTimeout) +{ using ExecutorType = TypeParam; ExecutorType executor; executor.add_node(this->node); @@ -380,6 +362,13 @@ class TestWaitable : public rclcpp::Waitable return nullptr; } + std::shared_ptr + take_data_by_entity_id(size_t id) override + { + (void) id; + return nullptr; + } + void execute(std::shared_ptr & data) override { @@ -388,6 +377,21 @@ class TestWaitable : public rclcpp::Waitable std::this_thread::sleep_for(3ms); } + void + set_on_ready_callback(std::function callback) override + { + auto gc_callback = [callback](size_t count) { + callback(count, 0); + }; + gc_.set_on_trigger_callback(gc_callback); + } + + void + clear_on_ready_callback() override + { + gc_.set_on_trigger_callback(nullptr); + } + size_t get_number_of_ready_guard_conditions() override {return 1;} @@ -402,7 +406,8 @@ class TestWaitable : public rclcpp::Waitable rclcpp::GuardCondition gc_; }; -TYPED_TEST(TestExecutors, spinAll) { +TYPED_TEST(TestExecutors, spinAll) +{ using ExecutorType = TypeParam; ExecutorType executor; auto waitable_interfaces = this->node->get_node_waitables_interface(); @@ -443,7 +448,8 @@ TYPED_TEST(TestExecutors, spinAll) { spinner.join(); } -TYPED_TEST(TestExecutors, spinSome) { +TYPED_TEST(TestExecutors, spinSome) +{ using ExecutorType = TypeParam; ExecutorType executor; auto waitable_interfaces = this->node->get_node_waitables_interface(); @@ -472,8 +478,9 @@ TYPED_TEST(TestExecutors, spinSome) { this->publisher->publish(test_msgs::msg::Empty()); std::this_thread::sleep_for(1ms); } - - EXPECT_EQ(1u, my_waitable->get_count()); + // The count of "execute" depends on whether the executor starts spinning before (1) or after (0) + // the first iteration of the while loop + EXPECT_LE(1u, my_waitable->get_count()); waitable_interfaces->remove_waitable(my_waitable, nullptr); EXPECT_TRUE(spin_exited); // Cancel if it hasn't exited already. @@ -483,7 +490,8 @@ TYPED_TEST(TestExecutors, spinSome) { } // Check spin_node_until_future_complete with node base pointer -TYPED_TEST(TestExecutors, testSpinNodeUntilFutureCompleteNodeBasePtr) { +TYPED_TEST(TestExecutors, testSpinNodeUntilFutureCompleteNodeBasePtr) +{ using ExecutorType = TypeParam; ExecutorType executor; @@ -498,7 +506,8 @@ TYPED_TEST(TestExecutors, testSpinNodeUntilFutureCompleteNodeBasePtr) { } // Check spin_node_until_future_complete with node pointer -TYPED_TEST(TestExecutors, testSpinNodeUntilFutureCompleteNodePtr) { +TYPED_TEST(TestExecutors, testSpinNodeUntilFutureCompleteNodePtr) +{ using ExecutorType = TypeParam; ExecutorType executor; @@ -513,7 +522,8 @@ TYPED_TEST(TestExecutors, testSpinNodeUntilFutureCompleteNodePtr) { } // Check spin_until_future_complete can be properly interrupted. -TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteInterrupted) { +TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteInterrupted) +{ using ExecutorType = TypeParam; ExecutorType executor; executor.add_node(this->node); @@ -555,8 +565,81 @@ TYPED_TEST(TestExecutors, testSpinUntilFutureCompleteInterrupted) { spinner.join(); } +// This test verifies that the add_node operation is robust wrt race conditions. +// It's mostly meant to prevent regressions in the events-executor, but the operation should be +// thread-safe in all executor implementations. +// The initial implementation of the events-executor contained a bug where the executor +// would end up in an inconsistent state and stop processing interrupt/shutdown notifications. +// Manually adding a node to the executor results in a) producing a notify waitable event +// and b) refreshing the executor collections. +// The inconsistent state would happen if the event was processed before the collections were +// finished to be refreshed: the executor would pick up the event but be unable to process it. +// This would leave the `notify_waitable_event_pushed_` flag to true, preventing additional +// notify waitable events to be pushed. +// The behavior is observable only under heavy load, so this test spawns several worker +// threads. Due to the nature of the bug, this test may still succeed even if the +// bug is present. However repeated runs will show its flakiness nature and indicate +// an eventual regression. +TYPED_TEST(TestExecutors, testRaceConditionAddNode) +{ + using ExecutorType = TypeParam; + // rmw_connextdds doesn't support events-executor + if ( + std::is_same() && + std::string(rmw_get_implementation_identifier()).find("rmw_connextdds") == 0) + { + GTEST_SKIP(); + } + + // Spawn some threads to do some heavy work + std::atomic should_cancel = false; + std::vector stress_threads; + for (size_t i = 0; i < 5 * std::thread::hardware_concurrency(); i++) { + stress_threads.emplace_back( + [&should_cancel, i]() { + // This is just some arbitrary heavy work + volatile size_t total = 0; + for (size_t k = 0; k < 549528914167; k++) { + if (should_cancel) { + break; + } + total += k * (i + 42); + (void)total; + } + }); + } + + // Create an executor + auto executor = std::make_shared(); + // Start spinning + auto executor_thread = std::thread( + [executor]() { + executor->spin(); + }); + // Add a node to the executor + executor->add_node(this->node); + + // Cancel the executor (make sure that it's already spinning first) + while (!executor->is_spinning() && rclcpp::ok()) { + continue; + } + executor->cancel(); + + // Try to join the thread after cancelling the executor + // This is the "test". We want to make sure that we can still cancel the executor + // regardless of the presence of race conditions + executor_thread.join(); + + // The test is now completed: we can join the stress threads + should_cancel = true; + for (auto & t : stress_threads) { + t.join(); + } +} + // Check spin_until_future_complete with node base pointer (instantiates its own executor) -TEST(TestExecutors, testSpinUntilFutureCompleteNodeBasePtr) { +TEST(TestExecutors, testSpinUntilFutureCompleteNodeBasePtr) +{ rclcpp::init(0, nullptr); { @@ -576,7 +659,8 @@ TEST(TestExecutors, testSpinUntilFutureCompleteNodeBasePtr) { } // Check spin_until_future_complete with node pointer (instantiates its own executor) -TEST(TestExecutors, testSpinUntilFutureCompleteNodePtr) { +TEST(TestExecutors, testSpinUntilFutureCompleteNodePtr) +{ rclcpp::init(0, nullptr); { @@ -593,106 +677,3 @@ TEST(TestExecutors, testSpinUntilFutureCompleteNodePtr) { rclcpp::shutdown(); } - -template -class TestIntraprocessExecutors : public ::testing::Test -{ -public: - static void SetUpTestCase() - { - rclcpp::init(0, nullptr); - } - - static void TearDownTestCase() - { - rclcpp::shutdown(); - } - - void SetUp() - { - const auto test_info = ::testing::UnitTest::GetInstance()->current_test_info(); - std::stringstream test_name; - test_name << test_info->test_case_name() << "_" << test_info->name(); - node = std::make_shared("node", test_name.str()); - - callback_count = 0; - - const std::string topic_name = std::string("topic_") + test_name.str(); - - rclcpp::PublisherOptions po; - po.use_intra_process_comm = rclcpp::IntraProcessSetting::Enable; - publisher = node->create_publisher(topic_name, rclcpp::QoS(1), po); - - auto callback = [this](test_msgs::msg::Empty::ConstSharedPtr) { - this->callback_count.fetch_add(1); - }; - - rclcpp::SubscriptionOptions so; - so.use_intra_process_comm = rclcpp::IntraProcessSetting::Enable; - subscription = - node->create_subscription( - topic_name, rclcpp::QoS(kNumMessages), std::move(callback), so); - } - - void TearDown() - { - publisher.reset(); - subscription.reset(); - node.reset(); - } - - const size_t kNumMessages = 100; - - rclcpp::Node::SharedPtr node; - rclcpp::Publisher::SharedPtr publisher; - rclcpp::Subscription::SharedPtr subscription; - std::atomic_int callback_count; -}; - -TYPED_TEST_SUITE(TestIntraprocessExecutors, ExecutorTypes, ExecutorTypeNames); - -TYPED_TEST(TestIntraprocessExecutors, testIntraprocessRetrigger) { - // This tests that executors will continue to service intraprocess subscriptions in the case - // that publishers aren't continuing to publish. - // This was previously broken in that intraprocess guard conditions were only triggered on - // publish and the test was added to prevent future regressions. - const size_t kNumMessages = 100; - - using ExecutorType = TypeParam; - ExecutorType executor; - executor.add_node(this->node); - - EXPECT_EQ(0, this->callback_count.load()); - this->publisher->publish(test_msgs::msg::Empty()); - - // Wait for up to 5 seconds for the first message to come available. - const std::chrono::milliseconds sleep_per_loop(10); - int loops = 0; - while (1u != this->callback_count.load() && loops < 500) { - rclcpp::sleep_for(sleep_per_loop); - executor.spin_some(); - loops++; - } - EXPECT_EQ(1u, this->callback_count.load()); - - // reset counter - this->callback_count.store(0); - - for (size_t ii = 0; ii < kNumMessages; ++ii) { - this->publisher->publish(test_msgs::msg::Empty()); - } - - // Fire a timer every 10ms up to 5 seconds waiting for subscriptions to be read. - loops = 0; - auto timer = this->node->create_wall_timer( - std::chrono::milliseconds(10), [this, &executor, &loops, &kNumMessages]() { - loops++; - if (kNumMessages == this->callback_count.load() || - loops == 500) - { - executor.cancel(); - } - }); - executor.spin(); - EXPECT_EQ(kNumMessages, this->callback_count.load()); -} diff --git a/rclcpp/test/rclcpp/executors/test_executors_timer_cancel_behavior.cpp b/rclcpp/test/rclcpp/executors/test_executors_timer_cancel_behavior.cpp new file mode 100644 index 0000000000..ecee459a19 --- /dev/null +++ b/rclcpp/test/rclcpp/executors/test_executors_timer_cancel_behavior.cpp @@ -0,0 +1,408 @@ +// Copyright 2024 Open Source Robotics Foundation, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include +#include +#include +#include +#include + +#include "rclcpp/node.hpp" +#include "rclcpp/parameter_client.hpp" +#include "rclcpp/utilities.hpp" + +#include "rosgraph_msgs/msg/clock.hpp" + +#include "./executor_types.hpp" + +using namespace std::chrono_literals; + +class TimerNode : public rclcpp::Node +{ +public: + explicit TimerNode(std::string subname) + : Node("timer_node", subname) + { + timer1_ = rclcpp::create_timer( + this->get_node_base_interface(), get_node_timers_interface(), + get_clock(), 1ms, + std::bind(&TimerNode::Timer1Callback, this)); + + timer2_ = + rclcpp::create_timer( + this->get_node_base_interface(), get_node_timers_interface(), + get_clock(), 1ms, + std::bind(&TimerNode::Timer2Callback, this)); + } + + int GetTimer1Cnt() {return cnt1_;} + int GetTimer2Cnt() {return cnt2_;} + + void ResetTimer1() + { + timer1_->reset(); + } + + void ResetTimer2() + { + timer2_->reset(); + } + + void CancelTimer1() + { + RCLCPP_DEBUG(this->get_logger(), "Timer 1 cancelling!"); + timer1_->cancel(); + } + + void CancelTimer2() + { + RCLCPP_DEBUG(this->get_logger(), "Timer 2 cancelling!"); + timer2_->cancel(); + } + +private: + void Timer1Callback() + { + RCLCPP_DEBUG(this->get_logger(), "Timer 1!"); + cnt1_++; + } + + void Timer2Callback() + { + RCLCPP_DEBUG(this->get_logger(), "Timer 2!"); + cnt2_++; + } + + rclcpp::TimerBase::SharedPtr timer1_; + rclcpp::TimerBase::SharedPtr timer2_; + int cnt1_ = 0; + int cnt2_ = 0; +}; + +// Sets up a separate thread to publish /clock messages. +// Clock rate relative to real clock is controlled by realtime_update_rate. +// This is set conservatively slow to ensure unit tests are reliable on Windows +// environments, where timing performance is subpar. +// +// Use `sleep_for` in tests to advance the clock. Clock should run and be published +// in separate thread continuously to ensure correct behavior in node under test. +class ClockPublisher : public rclcpp::Node +{ +public: + explicit ClockPublisher(float simulated_clock_step = .001f, float realtime_update_rate = 0.25f) + : Node("clock_publisher"), + ros_update_duration_(0, 0), + realtime_clock_step_(0, 0), + rostime_(0, 0) + { + clock_publisher_ = this->create_publisher("clock", 10); + realtime_clock_step_ = + rclcpp::Duration::from_seconds(simulated_clock_step / realtime_update_rate); + ros_update_duration_ = rclcpp::Duration::from_seconds(simulated_clock_step); + + timer_thread_ = std::thread(&ClockPublisher::RunTimer, this); + } + + ~ClockPublisher() + { + running_ = false; + if (timer_thread_.joinable()) { + timer_thread_.join(); + } + } + + void sleep_for(rclcpp::Duration duration) + { + rclcpp::Time start_time(0, 0, RCL_ROS_TIME); + { + const std::lock_guard lock(mutex_); + start_time = rostime_; + } + rclcpp::Time current_time = start_time; + + while (true) { + { + const std::lock_guard lock(mutex_); + current_time = rostime_; + } + if ((current_time - start_time) >= duration) { + return; + } + std::this_thread::sleep_for(realtime_clock_step_.to_chrono()); + rostime_ += ros_update_duration_; + } + } + +private: + void RunTimer() + { + while (running_) { + PublishClock(); + std::this_thread::sleep_for(realtime_clock_step_.to_chrono()); + } + } + + void PublishClock() + { + const std::lock_guard lock(mutex_); + auto message = rosgraph_msgs::msg::Clock(); + message.clock = rostime_; + clock_publisher_->publish(message); + } + + rclcpp::Publisher::SharedPtr clock_publisher_; + + rclcpp::Duration ros_update_duration_; + rclcpp::Duration realtime_clock_step_; + // Rostime must be guarded by a mutex, since accessible in running thread + // as well as sleep_for + rclcpp::Time rostime_; + std::mutex mutex_; + std::thread timer_thread_; + std::atomic running_ = true; +}; + + +template +class TestTimerCancelBehavior : public ::testing::Test +{ +public: + static void SetUpTestCase() + { + rclcpp::init(0, nullptr); + } + + static void TearDownTestCase() + { + rclcpp::shutdown(); + } + + void SetUp() + { + const auto test_info = ::testing::UnitTest::GetInstance()->current_test_info(); + std::stringstream test_name; + test_name << test_info->test_case_name() << "_" << test_info->name(); + node = std::make_shared(test_name.str()); + param_client = std::make_shared(node); + ASSERT_TRUE(param_client->wait_for_service(5s)); + + auto set_parameters_results = param_client->set_parameters( + {rclcpp::Parameter("use_sim_time", false)}); + for (auto & result : set_parameters_results) { + ASSERT_TRUE(result.successful); + } + + // Run standalone thread to publish clock time + sim_clock_node = std::make_shared(); + + // Spin the executor in a standalone thread + executor.add_node(this->node); + standalone_thread = std::thread( + [this]() { + executor.spin(); + }); + } + + void TearDown() + { + node.reset(); + + // Clean up thread object + if (standalone_thread.joinable()) { + standalone_thread.join(); + } + } + + std::shared_ptr node; + std::shared_ptr sim_clock_node; + rclcpp::SyncParametersClient::SharedPtr param_client; + std::thread standalone_thread; + T executor; +}; + +TYPED_TEST_SUITE(TestTimerCancelBehavior, ExecutorTypes, ExecutorTypeNames); + +TYPED_TEST(TestTimerCancelBehavior, testTimer1CancelledWithExecutorSpin) { + // Validate that cancelling one timer yields no change in behavior for other + // timers. Specifically, this tests the behavior when using spin() to run the + // executor, which is the most common usecase. + + // Cancel to stop the spin after some time. + this->sim_clock_node->sleep_for(50ms); + this->node->CancelTimer1(); + this->sim_clock_node->sleep_for(150ms); + this->executor.cancel(); + + int t1_runs = this->node->GetTimer1Cnt(); + int t2_runs = this->node->GetTimer2Cnt(); + EXPECT_NE(t1_runs, t2_runs); + // Check that t2 has significantly more calls + EXPECT_LT(t1_runs + 50, t2_runs); +} + +TYPED_TEST(TestTimerCancelBehavior, testTimer2CancelledWithExecutorSpin) { + // Validate that cancelling one timer yields no change in behavior for other + // timers. Specifically, this tests the behavior when using spin() to run the + // executor, which is the most common usecase. + + // Cancel to stop the spin after some time. + this->sim_clock_node->sleep_for(50ms); + this->node->CancelTimer2(); + this->sim_clock_node->sleep_for(150ms); + this->executor.cancel(); + + int t1_runs = this->node->GetTimer1Cnt(); + int t2_runs = this->node->GetTimer2Cnt(); + EXPECT_NE(t1_runs, t2_runs); + // Check that t1 has significantly more calls + EXPECT_LT(t2_runs + 50, t1_runs); +} + +TYPED_TEST(TestTimerCancelBehavior, testHeadTimerCancelThenResetBehavior) { + // Validate that cancelling timer doesn't affect operation of other timers, + // and that the cancelled timer starts executing normally once reset manually. + + // Cancel to stop the spin after some time. + this->sim_clock_node->sleep_for(50ms); + this->node->CancelTimer1(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_initial = this->node->GetTimer1Cnt(); + int t2_runs_initial = this->node->GetTimer2Cnt(); + + // Manually reset timer 1, then sleep again + // Counts should update. + this->node->ResetTimer1(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_final = this->node->GetTimer1Cnt(); + int t2_runs_final = this->node->GetTimer2Cnt(); + + this->executor.cancel(); + + // T1 should have been restarted, and execute about 15 additional times. + // Check 10 greater than initial, to account for some timing jitter. + EXPECT_LT(t1_runs_initial + 50, t1_runs_final); + + EXPECT_LT(t1_runs_initial + 50, t2_runs_initial); + // Check that t2 has significantly more calls, and keeps getting called. + EXPECT_LT(t2_runs_initial + 50, t2_runs_final); +} + +TYPED_TEST(TestTimerCancelBehavior, testBackTimerCancelThenResetBehavior) { + // Validate that cancelling timer doesn't affect operation of other timers, + // and that the cancelled timer starts executing normally once reset manually. + + // Cancel to stop the spin after some time. + this->sim_clock_node->sleep_for(50ms); + this->node->CancelTimer2(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_initial = this->node->GetTimer1Cnt(); + int t2_runs_initial = this->node->GetTimer2Cnt(); + + // Manually reset timer 1, then sleep again + // Counts should update. + this->node->ResetTimer2(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_final = this->node->GetTimer1Cnt(); + int t2_runs_final = this->node->GetTimer2Cnt(); + + this->executor.cancel(); + + // T2 should have been restarted, and execute about 15 additional times. + // Check 10 greater than initial, to account for some timing jitter. + EXPECT_LT(t2_runs_initial + 50, t2_runs_final); + + EXPECT_LT(t2_runs_initial + 50, t1_runs_initial); + // Check that t1 has significantly more calls, and keeps getting called. + EXPECT_LT(t1_runs_initial + 50, t1_runs_final); +} + +TYPED_TEST(TestTimerCancelBehavior, testBothTimerCancelThenResetT1Behavior) { + // Validate behavior from cancelling 2 timers, then only re-enabling one of them. + // Ensure that only the reset timer is executed. + + // Cancel to stop the spin after some time. + this->sim_clock_node->sleep_for(50ms); + this->node->CancelTimer1(); + this->node->CancelTimer2(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_initial = this->node->GetTimer1Cnt(); + int t2_runs_initial = this->node->GetTimer2Cnt(); + + // Manually reset timer 1, then sleep again + // Counts should update. + this->node->ResetTimer1(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_intermediate = this->node->GetTimer1Cnt(); + int t2_runs_intermediate = this->node->GetTimer2Cnt(); + + this->node->ResetTimer2(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_final = this->node->GetTimer1Cnt(); + int t2_runs_final = this->node->GetTimer2Cnt(); + + this->executor.cancel(); + + // T1 and T2 should have the same initial count. + EXPECT_LE(std::abs(t1_runs_initial - t2_runs_initial), 1); + + // Expect that T1 has up to 15 more calls than t2. Add some buffer + // to account for jitter. + EXPECT_EQ(t2_runs_initial, t2_runs_intermediate); + EXPECT_LT(t1_runs_initial + 50, t1_runs_intermediate); + + // Expect that by end of test, both are running properly again. + EXPECT_LT(t1_runs_intermediate + 50, t1_runs_final); + EXPECT_LT(t2_runs_intermediate + 50, t2_runs_final); +} + +TYPED_TEST(TestTimerCancelBehavior, testBothTimerCancelThenResetT2Behavior) { + // Validate behavior from cancelling 2 timers, then only re-enabling one of them. + // Ensure that only the reset timer is executed. + + // Cancel to stop the spin after some time. + this->sim_clock_node->sleep_for(50ms); + this->node->CancelTimer1(); + this->node->CancelTimer2(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_initial = this->node->GetTimer1Cnt(); + int t2_runs_initial = this->node->GetTimer2Cnt(); + + // Manually reset timer 1, then sleep again + // Counts should update. + this->node->ResetTimer2(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_intermediate = this->node->GetTimer1Cnt(); + int t2_runs_intermediate = this->node->GetTimer2Cnt(); + + this->node->ResetTimer1(); + this->sim_clock_node->sleep_for(150ms); + int t1_runs_final = this->node->GetTimer1Cnt(); + int t2_runs_final = this->node->GetTimer2Cnt(); + + this->executor.cancel(); + + // T1 and T2 should have the same initial count. + EXPECT_LE(std::abs(t1_runs_initial - t2_runs_initial), 1); + + // Expect that T1 has up to 15 more calls than t2. Add some buffer + // to account for jitter. + EXPECT_EQ(t1_runs_initial, t1_runs_intermediate); + EXPECT_LT(t2_runs_initial + 50, t2_runs_intermediate); + + // Expect that by end of test, both are running properly again. + EXPECT_LT(t1_runs_intermediate + 50, t1_runs_final); + EXPECT_LT(t2_runs_intermediate + 50, t2_runs_final); +} diff --git a/rclcpp/test/rclcpp/test_intra_process_manager.cpp b/rclcpp/test/rclcpp/test_intra_process_manager.cpp index 9d8df74352..eeb4715973 100644 --- a/rclcpp/test/rclcpp/test_intra_process_manager.cpp +++ b/rclcpp/test/rclcpp/test_intra_process_manager.cpp @@ -247,60 +247,6 @@ class Publisher : public PublisherBase } // namespace mock } // namespace rclcpp -namespace rclcpp -{ -namespace experimental -{ -namespace buffers -{ -namespace mock -{ -template< - typename MessageT, - typename Alloc = std::allocator, - typename MessageDeleter = std::default_delete> -class IntraProcessBuffer -{ -public: - using ConstMessageSharedPtr = std::shared_ptr; - using MessageUniquePtr = std::unique_ptr; - - RCLCPP_SMART_PTR_DEFINITIONS(IntraProcessBuffer) - - IntraProcessBuffer() - {} - - void add(ConstMessageSharedPtr msg) - { - message_ptr = reinterpret_cast(msg.get()); - shared_msg = msg; - } - - void add(MessageUniquePtr msg) - { - message_ptr = reinterpret_cast(msg.get()); - unique_msg = std::move(msg); - } - - void pop(std::uintptr_t & msg_ptr) - { - msg_ptr = message_ptr; - message_ptr = 0; - } - - // need to store the messages somewhere otherwise the memory address will be reused - ConstMessageSharedPtr shared_msg; - MessageUniquePtr unique_msg; - - std::uintptr_t message_ptr; -}; - -} // namespace mock -} // namespace buffers -} // namespace experimental -} // namespace rclcpp - - namespace rclcpp { namespace experimental diff --git a/rclcpp/test/rclcpp/test_publisher.cpp b/rclcpp/test/rclcpp/test_publisher.cpp index 558bc39912..62ac3d832e 100644 --- a/rclcpp/test/rclcpp/test_publisher.cpp +++ b/rclcpp/test/rclcpp/test_publisher.cpp @@ -698,6 +698,8 @@ TEST_F(TestPublisher, intra_process_transient_local) { EXPECT_EQ(1, pub_ipm_enabled_transient_local_disabled->get_intra_process_subscription_count()); EXPECT_EQ(0, pub_ipm_disabled_transient_local_disabled->get_intra_process_subscription_count()); + /* + // JF: lowest_available_ipm_capacity API not available in our fork of rclcpp @ irobot/humble EXPECT_EQ( history_depth - 1u, pub_ipm_enabled_transient_local_enabled->lowest_available_ipm_capacity()); @@ -706,6 +708,7 @@ TEST_F(TestPublisher, intra_process_transient_local) { history_depth, pub_ipm_enabled_transient_local_disabled->lowest_available_ipm_capacity()); EXPECT_EQ(0, pub_ipm_disabled_transient_local_disabled->lowest_available_ipm_capacity()); + */ EXPECT_TRUE(callback1.called); EXPECT_FALSE(callback2.called); diff --git a/rclcpp/test/rclcpp/test_service.cpp b/rclcpp/test/rclcpp/test_service.cpp index 90e535cb7a..85f89ecf61 100644 --- a/rclcpp/test/rclcpp/test_service.cpp +++ b/rclcpp/test/rclcpp/test_service.cpp @@ -177,7 +177,14 @@ TEST_F(TestService, basic_public_getters) { } rclcpp::AnyServiceCallback cb; + const rclcpp::Service base( + node_handle_int->get_node_base_interface(), + &service_handle, cb); + // Use get_service_handle specific to const service + std::shared_ptr const_service_handle = base.get_service_handle(); + EXPECT_NE(nullptr, const_service_handle); + /* rclcpp::IntraProcessSetting ipc_setting; if (node_base_interface->get_use_intra_process_default()) { ipc_setting = rclcpp::IntraProcessSetting::Enable; @@ -185,12 +192,14 @@ TEST_F(TestService, basic_public_getters) { ipc_setting = rclcpp::IntraProcessSetting::Disable; } + // FIXME: where is our Service constructor that supports ipc_setting? const rclcpp::Service base( node_handle_int->get_node_base_interface(), &service_handle, cb, ipc_setting); // Use get_service_handle specific to const service std::shared_ptr const_service_handle = base.get_service_handle(); EXPECT_NE(nullptr, const_service_handle); + */ EXPECT_EQ( RCL_RET_OK, rcl_service_fini( diff --git a/rclcpp/test/rclcpp/test_timers_manager.cpp b/rclcpp/test/rclcpp/test_timers_manager.cpp new file mode 100644 index 0000000000..0e49da08e1 --- /dev/null +++ b/rclcpp/test/rclcpp/test_timers_manager.cpp @@ -0,0 +1,407 @@ +// Copyright 2023 iRobot Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include +#include +#include +#include + +#include "rclcpp/contexts/default_context.hpp" +#include "rclcpp/experimental/timers_manager.hpp" + +using namespace std::chrono_literals; + +using rclcpp::experimental::TimersManager; + +using CallbackT = std::function; +using TimerT = rclcpp::WallTimer; + +class TestTimersManager : public ::testing::Test +{ +public: + void SetUp() + { + rclcpp::init(0, nullptr); + } + + void TearDown() + { + rclcpp::shutdown(); + } +}; + +static void execute_all_ready_timers(std::shared_ptr timers_manager) +{ + bool head_was_ready = false; + do { + head_was_ready = timers_manager->execute_head_timer(); + } while (head_was_ready); +} + +TEST_F(TestTimersManager, empty_manager) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + EXPECT_EQ(std::chrono::nanoseconds::max(), timers_manager->get_head_timeout()); + EXPECT_FALSE(timers_manager->execute_head_timer()); + EXPECT_NO_THROW(timers_manager->clear()); + EXPECT_NO_THROW(timers_manager->start()); + EXPECT_NO_THROW(timers_manager->stop()); +} + +TEST_F(TestTimersManager, add_run_remove_timer) +{ + size_t t_runs = 0; + std::chrono::milliseconds timer_period(10); + + auto t = TimerT::make_shared( + timer_period, + [&t_runs]() { + t_runs++; + }, + rclcpp::contexts::get_global_default_context()); + std::weak_ptr t_weak = t; + + // Add the timer to the timers manager + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + timers_manager->add_timer(t); + + // Sleep for more 3 times the timer period + std::this_thread::sleep_for(3 * timer_period); + + // The timer is executed only once, even if we slept 3 times the period + execute_all_ready_timers(timers_manager); + EXPECT_EQ(1u, t_runs); + + // Remove the timer from the manager + timers_manager->remove_timer(t); + + t.reset(); + // The timer is now not valid anymore + EXPECT_FALSE(t_weak.lock() != nullptr); +} + +TEST_F(TestTimersManager, clear) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + auto t1 = TimerT::make_shared(1ms, CallbackT(), rclcpp::contexts::get_global_default_context()); + std::weak_ptr t1_weak = t1; + auto t2 = TimerT::make_shared(1ms, CallbackT(), rclcpp::contexts::get_global_default_context()); + std::weak_ptr t2_weak = t2; + + timers_manager->add_timer(t1); + timers_manager->add_timer(t2); + + EXPECT_TRUE(t1_weak.lock() != nullptr); + EXPECT_TRUE(t2_weak.lock() != nullptr); + + timers_manager->clear(); + + t1.reset(); + t2.reset(); + + EXPECT_FALSE(t1_weak.lock() != nullptr); + EXPECT_FALSE(t2_weak.lock() != nullptr); +} + +TEST_F(TestTimersManager, remove_not_existing_timer) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + // Try to remove a nullptr timer + EXPECT_NO_THROW(timers_manager->remove_timer(nullptr)); + + auto t = TimerT::make_shared(1ms, CallbackT(), rclcpp::contexts::get_global_default_context()); + timers_manager->add_timer(t); + + // Remove twice the same timer + timers_manager->remove_timer(t); + EXPECT_NO_THROW(timers_manager->remove_timer(t)); +} + +TEST_F(TestTimersManager, timers_thread_exclusive_usage) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + timers_manager->start(); + + EXPECT_THROW(timers_manager->start(), std::exception); + EXPECT_THROW(timers_manager->get_head_timeout(), std::exception); + EXPECT_THROW(timers_manager->execute_head_timer(), std::exception); + + timers_manager->stop(); + + EXPECT_NO_THROW(timers_manager->get_head_timeout()); + EXPECT_NO_THROW(timers_manager->execute_head_timer()); +} + +TEST_F(TestTimersManager, add_timer_twice) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + auto t = TimerT::make_shared(1ms, CallbackT(), rclcpp::contexts::get_global_default_context()); + + timers_manager->add_timer(t); + EXPECT_NO_THROW(timers_manager->add_timer(t)); +} + +TEST_F(TestTimersManager, add_nullptr) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + EXPECT_THROW(timers_manager->add_timer(nullptr), std::exception); +} + +TEST_F(TestTimersManager, head_not_ready) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + size_t t_runs = 0; + auto t = TimerT::make_shared( + 10s, + [&t_runs]() { + t_runs++; + }, + rclcpp::contexts::get_global_default_context()); + + timers_manager->add_timer(t); + + // Timer will take 10s to get ready, so nothing to execute here + bool ret = timers_manager->execute_head_timer(); + EXPECT_FALSE(ret); + EXPECT_EQ(0u, t_runs); +} + +TEST_F(TestTimersManager, start_stop_timers_thread) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + auto t = TimerT::make_shared(1ms, []() {}, rclcpp::contexts::get_global_default_context()); + timers_manager->add_timer(t); + + // Calling start multiple times will throw an error + EXPECT_NO_THROW(timers_manager->start()); + EXPECT_THROW(timers_manager->start(), std::exception); + + // Calling stop multiple times does not throw an error + EXPECT_NO_THROW(timers_manager->stop()); + EXPECT_NO_THROW(timers_manager->stop()); +} + +TEST_F(TestTimersManager, timers_thread) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + int t1_runs = 0; + auto t1 = TimerT::make_shared( + 1ms, + [&t1_runs]() { + t1_runs++; + }, + rclcpp::contexts::get_global_default_context()); + + int t2_runs = 0; + auto t2 = TimerT::make_shared( + 1ms, + [&t2_runs]() { + t2_runs++; + }, + rclcpp::contexts::get_global_default_context()); + + // Add timers + timers_manager->add_timer(t1); + timers_manager->add_timer(t2); + + // Run timers thread for a while + timers_manager->start(); + std::this_thread::sleep_for(50ms); + timers_manager->stop(); + + EXPECT_LT(1u, t1_runs); + EXPECT_LT(1u, t2_runs); + EXPECT_LE(std::abs(t1_runs - t2_runs), 1); +} + +TEST_F(TestTimersManager, destructor) +{ + size_t t_runs = 0; + auto t = TimerT::make_shared( + 1ms, + [&t_runs]() { + t_runs++; + }, + rclcpp::contexts::get_global_default_context()); + std::weak_ptr t_weak = t; + + // When the timers manager is destroyed, it will stop the thread + // and clear the timers + { + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + timers_manager->add_timer(t); + + timers_manager->start(); + std::this_thread::sleep_for(100ms); + + EXPECT_LT(1u, t_runs); + } + + // The thread is not running anymore, so this value does not increase + size_t runs = t_runs; + std::this_thread::sleep_for(100ms); + EXPECT_EQ(runs, t_runs); + t.reset(); + EXPECT_FALSE(t_weak.lock() != nullptr); +} + +TEST_F(TestTimersManager, add_remove_while_thread_running) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + size_t t1_runs = 0; + auto t1 = TimerT::make_shared( + 1ms, + [&t1_runs]() { + t1_runs++; + }, + rclcpp::contexts::get_global_default_context()); + + size_t t2_runs = 0; + auto t2 = TimerT::make_shared( + 1ms, + [&t2_runs]() { + t2_runs++; + }, + rclcpp::contexts::get_global_default_context()); + + // Add timers + timers_manager->add_timer(t1); + + // Start timers thread + timers_manager->start(); + + // After a while remove t1 and add t2 + std::this_thread::sleep_for(50ms); + timers_manager->remove_timer(t1); + size_t tmp_t1 = t1_runs; + timers_manager->add_timer(t2); + + // Wait some more time and then stop + std::this_thread::sleep_for(50ms); + timers_manager->stop(); + + // t1 has stopped running + EXPECT_EQ(tmp_t1, t1_runs); + // t2 is correctly running + EXPECT_LT(1u, t2_runs); +} + +TEST_F(TestTimersManager, infinite_loop) +{ + // This test makes sure that even if timers have a period shorter than the duration + // of their callback the functions never block indefinitely. + + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + size_t t1_runs = 0; + auto t1 = TimerT::make_shared( + 1ms, + [&t1_runs]() { + t1_runs++; + std::this_thread::sleep_for(5ms); + }, + rclcpp::contexts::get_global_default_context()); + + size_t t2_runs = 0; + auto t2 = TimerT::make_shared( + 1ms, + [&t2_runs]() { + t2_runs++; + std::this_thread::sleep_for(5ms); + }, + rclcpp::contexts::get_global_default_context()); + + timers_manager->add_timer(t1); + timers_manager->add_timer(t2); + + // Start a timers thread and make sure that we can stop it later + timers_manager->start(); + std::this_thread::sleep_for(50ms); + timers_manager->stop(); + + EXPECT_LT(0u, t1_runs); + EXPECT_LT(0u, t2_runs); +} + +// Validate that cancelling one timer yields no change in behavior for other +// timers. +TEST_F(TestTimersManager, check_one_timer_cancel_doesnt_affect_other_timers) +{ + auto timers_manager = std::make_shared( + rclcpp::contexts::get_global_default_context()); + + size_t t1_runs = 0; + std::shared_ptr t1; + // After a while cancel t1. Don't remove it though. + // Simulates typical usage in a Node where a timer is cancelled but not removed, + // since typical users aren't going to mess around with the timer manager. + t1 = TimerT::make_shared( + 1ms, + [&t1_runs, &t1]() { + t1_runs++; + if (t1_runs == 5) { + t1->cancel(); + } + }, + rclcpp::contexts::get_global_default_context()); + + size_t t2_runs = 0; + auto t2 = TimerT::make_shared( + 1ms, + [&t2_runs]() { + t2_runs++; + }, + rclcpp::contexts::get_global_default_context()); + + // Add timers + timers_manager->add_timer(t1); + timers_manager->add_timer(t2); + + // Start timers thread + timers_manager->start(); + + std::this_thread::sleep_for(15ms); + + // t1 has stopped running + EXPECT_NE(t1_runs, t2_runs); + // Check that t2 has significantly more calls + EXPECT_LT(t1_runs + 5, t2_runs); + timers_manager->stop(); +}