diff --git a/CODEOWNERS b/CODEOWNERS index a8644f985f259..b473dc6d395a2 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -119,7 +119,7 @@ /doc/scripts/ @carlescufi /doc/guides/bluetooth/ @joerchan @jhedberg @Vudentz /doc/reference/bluetooth/ @joerchan @jhedberg @Vudentz -/doc/reference/kernel/other/resource_mgmt.rst @pabigot +/doc/reference/resource_management/ @pabigot /doc/reference/networking/can* @alexanderwachter /drivers/debug/ @nashif /drivers/*/*cc13xx_cc26xx* @bwitherspoon diff --git a/doc/reference/index.rst b/doc/reference/index.rst index 37625ae3b0ee5..edfa76d5732d7 100644 --- a/doc/reference/index.rst +++ b/doc/reference/index.rst @@ -9,6 +9,7 @@ API Reference stability.rst terminology.rst audio/index.rst + misc/notify.rst bluetooth/index.rst kconfig/index.rst crypto/index.rst @@ -22,6 +23,7 @@ API Reference peripherals/index.rst power_management/index.rst random/index.rst + resource_management/index.rst shell/index.rst storage/index.rst usb/index.rst diff --git a/doc/reference/kernel/index.rst b/doc/reference/kernel/index.rst index 9025b7e0b29a2..d77ec137c575e 100644 --- a/doc/reference/kernel/index.rst +++ b/doc/reference/kernel/index.rst @@ -116,7 +116,6 @@ These pages cover other kernel services. other/atomic.rst other/float.rst other/ring_buffers.rst - other/resource_mgmt.rst other/cxx_support.rst other/version.rst other/fatal.rst diff --git a/doc/reference/kernel/other/resource_mgmt.rst b/doc/reference/kernel/other/resource_mgmt.rst deleted file mode 100644 index 3cf284e0be5fa..0000000000000 --- a/doc/reference/kernel/other/resource_mgmt.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. _resource_mgmt: - -Resource Management -################### - -There are various situations where it's necessary to coordinate resource -use at runtime among multiple clients. These include power rails, -clocks, other peripherals, and binary device power management. 
The -complexity of properly managing multiple consumers of a device in a -multithreaded system, especially when transitions may be asynchronous, -suggests that a shared implementation is desirable. - -.. contents:: - :local: - :depth: 2 - - -On-Off Services -*************** - -An on-off service supports an arbitrary number of clients of a service -which has a binary state. Example applications are power rails, clocks, -and binary device power management. - -The service has the following properties: - -* The stable states are off, on, and error. The service always begins - in the off state. The service may also be in a transition to a given - state. -* The core operations are request (add a dependency) and release (remove - a dependency). The service manages the state based on calls to - functions that initiate these operations. -* The service transitions from off to on when first client request is - received. -* The service transitions from on to off when last client release is - received. -* Each service configuration provides functions that implement the - transition from off to on, from on to off, and optionally from an - error state to off. Transitions that may put a calling thread to - sleep must be flagged in the configuration to support safe invocation - from non-thread context. -* All operations are asynchronous, and are initiated by a function call - that references a specific service and is given client notification - data. The function call will succeed or fail. On success, the - operation is guaranteed to be initiated, but whether the operation - itself succeeds or fails is indicated through client notification. - The initiation functions can be invoked from pre-kernel, thread, or - ISR context. In contexts and states where the operation cannot - be started the function will result in an error. 
-* Requests to turn on may be queued while a transition to off is in - progress: when the service has turned off successfully it will be - immediately turned on again (where context allows) and waiting clients - notified when the start completes. - -Requests are reference counted, but not tracked. That means clients are -responsible for recording whether their requests were accepted, and for -initiating a release only if they have previously successfully completed -a request. Improper use of the API can cause an active client to be -shut out, and the service does not maintain a record of specific clients -that have been granted a request. - -Failures in executing a transition are recorded and inhibit further -requests or releases until the service is reset. Pending requests are -notified (and cancelled) when errors are discovered. - -Transition operation completion notifications are provided through any -of the following mechanisms: - -* Signal: A pointer to a :c:type:`struct k_poll_signal` is provided, and - the signal is raised when the transition completes. The operation - completion code is stored as the signal value. -* Callback: a function pointer is provided by the client along with an - opaque pointer, and on completion of the operation the function is - invoked with the pointer and the operation completion code. -* Spin-wait: the client is required to check for operation completion - using the :cpp:func:`onoff_client_fetch_result()` function. - -Synchronous transition may be implemented by a caller based on its -context, for example by using :cpp:func:`k_poll()` to wait until the -completion is signalled. - -.. doxygengroup:: resource_mgmt_apis - :project: Zephyr diff --git a/doc/reference/misc/notify.rst b/doc/reference/misc/notify.rst new file mode 100644 index 0000000000000..278fe302bcd8c --- /dev/null +++ b/doc/reference/misc/notify.rst @@ -0,0 +1,25 @@ +.. 
_async_notification: + +Asynchronous Notification APIs +############################## + +Zephyr APIs often include :ref:`api_term_async` functions where an +operation is initiated and the application needs to be informed when it +completes, and whether it succeeded. Using :cpp:func:`k_poll()` is +often a good method, but some application architectures may be more +suited to a callback notification, and operations like enabling clocks +and power rails may need to be invoked before kernel functions are +available so a busy-wait for completion may be needed. + +This API is intended to be embedded within specific subsystems such as +:ref:`resource_mgmt_onoff` and other APIs that support async +transactions. The subsystem wrappers are responsible for extracting +operation-specific data from requests that include a notification +element, and for invoking callbacks with the parameters required by the +API. + +API Reference +************* + +.. doxygengroup:: sys_notify_apis + :project: Zephyr diff --git a/doc/reference/resource_management/index.rst b/doc/reference/resource_management/index.rst new file mode 100644 index 0000000000000..aaa44dc00b449 --- /dev/null +++ b/doc/reference/resource_management/index.rst @@ -0,0 +1,204 @@ +.. _resource_mgmt: + +Resource Management +################### + +There are various situations where it's necessary to coordinate resource +use at runtime among multiple clients. These include power rails, +clocks, other peripherals, and binary device power management. The +complexity of properly managing multiple consumers of a device in a +multithreaded system, especially when transitions may be asynchronous, +suggests that a shared implementation is desirable. + +Zephyr provides managers for several coordination policies. These +managers are embedded into services that use them for specific +functions. + +.. contents:: + :local: + :depth: 2 + +.. 
_resource_mgmt_onoff: + +On-Off Manager +************** + +An on-off manager supports an arbitrary number of clients of a service +which has a binary state. Example applications are power rails, clocks, +and binary device power management. + +The manager has the following properties: + +* The stable states are off, on, and error. The service always begins + in the off state. The service may also be in a transition to a given + state. +* The core operations are request (add a dependency) and release (remove + a dependency). The service manages the state based on calls to + functions that initiate these operations. +* The service transitions from off to on when first client request is + received. +* The service transitions from on to off when last client release is + received. +* Each service configuration provides functions that implement the + transition from off to on, from on to off, and optionally from an + error state to off. Transitions that may put a calling thread to + sleep must be flagged in the configuration to support detecting unsafe + invocation from non-thread context. +* All operations are asynchronous, and are initiated by a function call + that references a specific service and is given client notification + data. The function call will succeed or fail. On success, the + operation is guaranteed to be initiated, but whether the operation + itself succeeds or fails is indicated through client notification. + The initiation functions can be invoked from pre-kernel, thread, or + ISR context. In contexts and states where the operation cannot + be started the function will result in an error. +* Requests to turn on may be queued while a transition to off is in + progress: when the service has turned off successfully it will be + immediately turned on again (where context allows) and waiting clients + notified when the start completes. + +Requests are reference counted, but not tracked. 
That means clients are +responsible for recording whether their requests were accepted, and for +initiating a release only if they have previously successfully completed +a request. Improper use of the API can cause an active client to be +shut out, and the manager does not maintain a record of specific clients +that have been granted a request. + +Failures in executing a transition are recorded and inhibit further +requests or releases until the manager is reset. Pending requests are +notified (and cancelled) when errors are discovered. + +Transition operation completion notifications are provided through the +standard :ref:`async_notification`, supporting these methods: + +* Signal: A pointer to a :c:type:`struct k_poll_signal` is provided, and + the signal is raised when the transition completes. The operation + completion code is stored as the signal value. +* Callback: a function pointer is provided by the client along with an + opaque pointer, and on completion of the operation the function is + invoked with the manager, the client data, the manager state, the + operation completion code, and the user-provided pointer. +* Spin-wait: the client is required to check for operation completion + using the :cpp:func:`onoff_client_fetch_result()` function. + +Synchronous transition may be implemented by a caller based on its +context, for example by using :cpp:func:`k_poll()` to wait until the +completion is signalled. + +.. _resource_mgmt_onoff_monitor: + +On-Off Monitor +============== + +In the simplest use cases a client of an on-off service will issue +:cpp:func:`onoff_request()` and :cpp:func:`onoff_release()` commands and +rely on the notifications to detect completion of these operations. In +cases where client needs may change, it may be necessary to cancel a +request or release and return to the previous state after a transition +has started. 
This can be done using :cpp:func:`onoff_cancel()`, which +can be simpler than designing the client to wait until the transition +completes so it can issue a new release or request. + +However the asynchronous nature of on-off service transitions makes +reliable use of onoff_cancel() difficult. It will return an error +identifying when a transition has already completed, and will +synchronously disable a request that has not been satisfied, but when +cancelling a release it must convert the release operation into a +request operation. The release may complete before the caller has been +informed of the conversion. Similarly it is possible that, due to +cancellation of a request, a service error that occurs during transition +will not be reported. + +Clients and other components interested in tracking service state can be +informed of state transitions by registering for state changes using +onoff_monitor_request(). These changes are provided before issuing +completion notifications associated with the new state. + +.. _resource_mgmt_onoff_notification: + +On-Off Notification +=================== + +The standard client model for an on-off service is to issue a request +and hold it while the service is in use, then release it on completion. +Service transitions are asynchronous, and there is currently no +mechanism to support cancelling a transition and returning to the +original state. For some use cases where the need for a service is not +under application control the standard sequence of service request, use, +and release may not be easily satisfiable. + +An example is functionality that requires both an onoff service (such as +a clock) and a secondary gating signal (such as a connected USB cable). +If the cable is removed before the clock is started then the clock is +not needed anymore, but the client functionality should not be required +to implement the logic to wait for the request to complete and to then +submit a release.
+ +The :cpp:type:`onoff_notifier` infrastructure provides an internal state +machine that reacts immediately to synchronous requests and releases, +coordinating with the underlying onoff service to ensure the client's +latest desired state will be reached as soon as possible. The client +provides a callback that is invoked on relevant state changes, and +synchronously indicates on both request and release whether the desired +state has already been reached. + +.. doxygengroup:: resource_mgmt_onoff_apis + :project: Zephyr + +.. _resource_mgmt_queued_operation: + +Queued Operation Manager +************************ + +While :ref:`resource_mgmt_onoff` supports a shared resource that must be +available as long as any user still depends on it, the queued operation +manager provides serialized exclusive access to a resource that executes +operations asynchronously. This can be used to support (for example) +ADC sampling for different sensors, or groups of bus transactions. +Clients submit an operation request that is processed when the device +becomes available, with clients being notified of the completion of the +operation through the standard :ref:`async_notification`. + +As with the on-off manager, the queued resource manager is a generic +infrastructure tool that should be used by an extending service, such as +an I2C bus controller or an ADC. The manager has the following +characteristics: + +* The stable states are idle and processing. The manager always begins + in the idle state. +* The core client operations are submit (add an operation) and cancel + (remove an operation before it starts). +* Ownership of the operation object transitions from the client to the + manager when a queue request is accepted, and is returned to the + client when the manager notifies the client of operation completion. +* The core client event is completion. Manager state changes only as a + side effect from submitting or completing an operation.
+* The service transitions from idle to processing when an operation is + submitted. +* The service transitions from processing to idle when notification of + the last operation has completed and there are no queued operations. +* The manager selects the next operation to process when notification of + completion has itself completed. In particular, changes to the set of + pending operations that are made during a completion callback affect + the next operation to execute. +* Each submitted operation includes a priority that orders execution by + first-come-first-served within priority. +* Operations are asynchronous, with completion notification through the + :ref:`async_notification`. The operations and notifications are run + in a context that is service-specific. This may be one or more + dedicated threads, or work queues. Notifications may come from + interrupt handlers. Note that for some services certain operations + may complete before the submit request has returned to its caller. + +The generic infrastructure holds the active operation and a queue of +pending operations. A service extension shall provide functions that: + +* check that a request is well-formed, i.e. can be added to the queue; +* receive notification that a new operation is to be processed, or that + no operations are available (allowing the service to enter a + power-down mode); +* translate a generic completion callback into a service-specific + callback. + +.. 
doxygengroup:: resource_mgmt_queued_operation_apis + :project: Zephyr diff --git a/include/sys/notify.h b/include/sys/notify.h new file mode 100644 index 0000000000000..2e5d4f035c933 --- /dev/null +++ b/include/sys/notify.h @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2019 Peter Bigot Consulting, LLC + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_SYS_NOTIFY_H_ +#define ZEPHYR_INCLUDE_SYS_NOTIFY_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @defgroup sys_notify_apis Asynchronous Notification APIs + * @ingroup kernel_apis + * @{ + */ + +/* Forward declaration */ +struct sys_notify; + +/* + * Flag value that overwrites the method field when the operation has + * completed. + */ +#define SYS_NOTIFY_METHOD_COMPLETED 0 + +/* + * Indicates that no notification will be provided. + * + * Callers must check for completions using + * sys_notify_fetch_result(). + * + * See sys_notify_init_spinwait(). + */ +#define SYS_NOTIFY_METHOD_SPINWAIT 1 + +/* + * Select notification through @ref k_poll signal + * + * See sys_notify_init_signal(). + */ +#define SYS_NOTIFY_METHOD_SIGNAL 2 + +/* + * Select notification through a user-provided callback. + * + * See sys_notify_init_callback(). + */ +#define SYS_NOTIFY_METHOD_CALLBACK 3 + +#define SYS_NOTIFY_METHOD_MASK 0x03 +#define SYS_NOTIFY_METHOD_POS 0 + +/** + * @brief Identify the region of sys_notify flags available for + * containing services. + * + * Bits of the flags field of the sys_notify structure at and above + * this position may be used by extensions to the sys_notify + * structure. + * + * These bits are intended for use by containing service + * implementations to record client-specific information. The bits + * are cleared by sys_notify_validate(). Use of these does not + * imply that the flags field becomes public API. 
+ */ +#define SYS_NOTIFY_EXTENSION_POS 2 + +/* + * Mask isolating the bits of sys_notify::flags that are available + * for extension. + */ +#define SYS_NOTIFY_EXTENSION_MASK (~BIT_MASK(SYS_NOTIFY_EXTENSION_POS)) + +/** + * @brief Generic signature used to notify of result completion by + * callback. + * + * Functions with this role may be invoked from any context including + * pre-kernel, ISR, or cooperative or pre-emptible threads. + * Compatible functions must be isr-ok and not sleep. + * + * Parameters that should generally be passed to such functions include: + * + * * a pointer to a specific client request structure, i.e. the one + * that contains the sys_notify structure. + * * the result of the operation, either as passed to + * sys_notify_finalize() or extracted afterwards using + * sys_notify_fetch_result(). Expected values are + * service-specific, but the value shall be non-negative if the + * operation succeeded, and negative if the operation failed. + */ +typedef void (*sys_notify_generic_callback)(); + +/** + * @brief State associated with notification for an asynchronous + * operation. + * + * Objects of this type are allocated by a client, which must use an + * initialization function (e.g. sys_notify_init_signal()) to + * configure them. Generally the structure is a member of a + * service-specific client structure, such as onoff_client. + * + * Control of the containing object transfers to the service provider + * when a pointer to the object is passed to a service function that + * is documented to take control of the object, such as + * onoff_service_request(). While the service provider controls the + * object the client must not change any object fields. Control + * reverts to the client: + * * if the call to the service API returns an error; + * * when operation completion is posted. This may occur before the + * call to the service API returns. 
+ * + * Operation completion is technically posted when the flags field is + * updated so that sys_notify_fetch_result() returns success. This + * will happen before the signal is posted or callback is invoked. + * Note that although the manager will no longer reference the + * sys_notify object past this point, the containing object may have + * state that will be referenced within the callback. Where callbacks + * are used control of the containing object does not revert to the + * client until the callback has been invoked. (Re-use within the + * callback is explicitly permitted.) + * + * After control has reverted to the client the notify object must be + * reinitialized for the next operation. + * + * The content of this structure is not public API to clients: all + * configuration and inspection should be done with functions like + * sys_notify_init_callback() and sys_notify_fetch_result(). + * However, services that use this structure may access certain + * fields directly. + */ +struct sys_notify { + union method { + /* Pointer to signal used to notify client. + * + * The signal value corresponds to the res parameter + * of sys_notify_callback. + */ + struct k_poll_signal *signal; + + /* Generic callback function for callback notification. */ + sys_notify_generic_callback callback; + } method; + + /* + * Flags recording information about the operation. + * + * Bits below SYS_NOTIFY_EXTENSION_POS are initialized by + * async notify API init functions like + * sys_notify_init_callback(), and must not be modified by + * extensions or client code. + * + * Bits at and above SYS_NOTIFY_EXTENSION_POS are available + * for use by service extensions while the containing object + * is managed by the service. They are not for client use, + * are zeroed by the async notify API init functions, and will + * be zeroed by sys_notify_finalize(). + */ + u32_t volatile flags; + + /* + * The result of the operation.
+ * + * This is the value that was (or would be) passed to the + * async infrastructure. This field is the sole record of + * success or failure for spin-wait synchronous operations. + */ + int volatile result; +}; + +/** @internal */ +static inline u32_t sys_notify_get_method(const struct sys_notify *notify) +{ + u32_t method = notify->flags >> SYS_NOTIFY_METHOD_POS; + + return method & SYS_NOTIFY_METHOD_MASK; +} + +/** + * @brief Validate and initialize the notify structure. + * + * This should be invoked at the start of any service-specific + * configuration validation. It ensures that the basic asynchronous + * notification configuration is consistent, and clears the result. + * + * Note that this function does not validate extension bits (zeroed by + * async notify API init functions like sys_notify_init_callback()). + * It may fail to recognize that an uninitialized structure has been + * passed because only method bits of flags are tested against method + * settings. To reduce the chance of accepting an uninitialized + * operation, service validation of structures that contain a + * sys_notify instance should confirm that the extension bits are + * set or cleared as expected. + * + * @retval 0 on successful validation and reinitialization + * @retval -EINVAL if the configuration is not valid. + */ +int sys_notify_validate(struct sys_notify *notify); + +/** + * @brief Record and signal the operation completion. + * + * @param notify pointer to the notification state structure. + * + * @param res the result of the operation. Expected values are + * service-specific, but the value shall be non-negative if the + * operation succeeded, and negative if the operation failed. + * + * @return If the notification is to be done by callback this returns + * the generic version of the function to be invoked. The caller must + * immediately invoke that function with whatever arguments are + * expected by the callback.
If notification is by spin-wait or + * signal, the notification has been completed by the point this + * function returns, and a null pointer is returned. + */ +sys_notify_generic_callback sys_notify_finalize(struct sys_notify *notify, + int res); + +/** + * @brief Check for and read the result of an asynchronous operation. + * + * @param notify pointer to the object used to specify asynchronous + * function behavior and store completion information. + * + * @param result pointer to storage for the result of the operation. + * The result is stored only if the operation has completed. + * + * @retval 0 if the operation has completed. + * @retval -EAGAIN if the operation has not completed. + */ +static inline int sys_notify_fetch_result(const struct sys_notify *notify, + int *result) +{ + __ASSERT_NO_MSG(notify != NULL); + __ASSERT_NO_MSG(result != NULL); + int rv = -EAGAIN; + + if (sys_notify_get_method(notify) == SYS_NOTIFY_METHOD_COMPLETED) { + rv = 0; + *result = notify->result; + } + return rv; +} + +/** + * @brief Initialize a notify object for spin-wait notification. + * + * Clients that use this initialization receive no asynchronous + * notification, and instead must periodically check for completion + * using sys_notify_fetch_result(). + * + * On completion of the operation the client object must be + * reinitialized before it can be re-used. + * + * @param notify pointer to the notification configuration object. + */ +static inline void sys_notify_init_spinwait(struct sys_notify *notify) +{ + __ASSERT_NO_MSG(notify != NULL); + + *notify = (struct sys_notify){ + .flags = SYS_NOTIFY_METHOD_SPINWAIT, + }; +} + +/** + * @brief Initialize a notify object for (k_poll) signal notification. + * + * Clients that use this initialization will be notified of the + * completion of operations through the provided signal. + * + * On completion of the operation the client object must be + * reinitialized before it can be re-used. 
+ * + * @note + * @rst + * This capability is available only when :option:`CONFIG_POLL` is + * selected. + * @endrst + * + * @param notify pointer to the notification configuration object. + * + * @param sigp pointer to the signal to use for notification. The + * value must not be null. The signal must be reset before the client + * object is passed to the on-off service API. + */ +static inline void sys_notify_init_signal(struct sys_notify *notify, + struct k_poll_signal *sigp) +{ + __ASSERT_NO_MSG(notify != NULL); + __ASSERT_NO_MSG(sigp != NULL); + + *notify = (struct sys_notify){ + .method = { + .signal = sigp, + }, + .flags = SYS_NOTIFY_METHOD_SIGNAL, + }; +} + +/** + * @brief Initialize a notify object for callback notification. + * + * Clients that use this initialization will be notified of the + * completion of operations through the provided callback. Note that + * callbacks may be invoked from various contexts depending on the + * specific service; see @ref sys_notify_generic_callback. + * + * On completion of the operation the client object must be + * reinitialized before it can be re-used. + * + * @param notify pointer to the notification configuration object. + * + * @param handler a function pointer to use for notification. + */ +static inline void sys_notify_init_callback(struct sys_notify *notify, + sys_notify_generic_callback handler) +{ + __ASSERT_NO_MSG(notify != NULL); + __ASSERT_NO_MSG(handler != NULL); + + *notify = (struct sys_notify){ + .method = { + .callback = handler, + }, + .flags = SYS_NOTIFY_METHOD_CALLBACK, + }; +} + +/** + * @brief Detect whether a particular notification uses a callback. + * + * The generic handler does not capture the signature expected by the + * callback, and the translation to a service-specific callback must + * be provided by the service. This check allows abstracted services + * to reject callback notification requests when the service doesn't + * provide a translation function. 
+ * + * @return true if and only if a callback is to be used for notification. + */ +static inline bool sys_notify_uses_callback(const struct sys_notify *notify) +{ + __ASSERT_NO_MSG(notify != NULL); + + return sys_notify_get_method(notify) == SYS_NOTIFY_METHOD_CALLBACK; +} + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_INCLUDE_SYS_NOTIFY_H_ */ diff --git a/include/sys/onoff.h b/include/sys/onoff.h index d8629d6a549aa..1715212da4798 100644 --- a/include/sys/onoff.h +++ b/include/sys/onoff.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2019 Peter Bigot Consulting, LLC + * Copyright (c) 2020 Nordic Semiconductor ASA * * SPDX-License-Identifier: Apache-2.0 */ @@ -9,72 +10,115 @@ #include #include +#include #ifdef __cplusplus extern "C" { #endif /** - * @defgroup resource_mgmt_apis Resource Management APIs + * @defgroup resource_mgmt_onoff_apis On-Off Service APIs * @ingroup kernel_apis * @{ */ /** - * @brief Flag fields used to specify on-off service behavior. + * @brief Flag used in struct onoff_manager_transitions. + * + * When provided this indicates the start transition function + * may cause the calling thread to wait. This blocks attempts + * to initiate a transition from a non-thread context. */ -enum onoff_service_flags { - /** - * @brief Flag passed to onoff_service_init(). - * - * When provided this indicates the start transition function - * may cause the calling thread to wait. This blocks attempts - * to initiate a transition from a non-thread context. - */ - ONOFF_SERVICE_START_SLEEPS = BIT(0), +#define ONOFF_START_SLEEPS BIT(0) - /** - * @brief Flag passed to onoff_service_init(). - * - * As with @ref ONOFF_SERVICE_START_SLEEPS but describing the - * stop transition function. - */ - ONOFF_SERVICE_STOP_SLEEPS = BIT(1), +/** + * @brief Flag used in struct onoff_manager_transitions. + * + * As with @ref ONOFF_START_SLEEPS but describing the stop + * transition function. 
+ */ +#define ONOFF_STOP_SLEEPS BIT(1) - /** - * @brief Flag passed to onoff_service_init(). - * - * As with @ref ONOFF_SERVICE_START_SLEEPS but describing the - * reset transition function. - */ - ONOFF_SERVICE_RESET_SLEEPS = BIT(2), +/** + * @brief Flag used in struct onoff_manager_transitions. + * + * As with @ref ONOFF_START_SLEEPS but describing the reset + * transition function. + */ +#define ONOFF_RESET_SLEEPS BIT(2) - /* Internal use. */ - ONOFF_SERVICE_HAS_ERROR = BIT(3), +/** + * @brief Flag indicating an error state. + * + * Error states are cleared using onoff_reset(). + */ +#define ONOFF_HAS_ERROR BIT(3) - /* This and higher bits reserved for internal use. */ - ONOFF_SERVICE_INTERNAL_BASE = BIT(4), -}; +/** @internal */ +#define ONOFF_FLAG_ONOFF BIT(4) +/** @internal */ +#define ONOFF_FLAG_TRANSITION BIT(5) -/* Forward declaration */ -struct onoff_service; +/** + * @brief Mask used to isolate bits defining the service state. + * + * Mask a value with this then test for ONOFF_HAS_ERROR to determine + * whether the machine has an unfixed error, or compare against + * ONOFF_STATE_ON, ONOFF_STATE_OFF, ONOFF_STATE_TO_ON, or + * ONOFF_STATE_TO_OFF. + */ +#define ONOFF_STATE_MASK (ONOFF_HAS_ERROR \ + | ONOFF_FLAG_TRANSITION | ONOFF_FLAG_ONOFF) /** - * @brief Signature used to notify an on-off service that a transition + * @brief Value exposed by ONOFF_STATE_MASK when service is off. + */ +#define ONOFF_STATE_OFF 0 + +/** + * @brief Value exposed by ONOFF_STATE_MASK when service is on. + */ +#define ONOFF_STATE_ON ONOFF_FLAG_ONOFF + +/** + * @brief Value exposed by ONOFF_STATE_MASK when service is + * transitioning to on. + */ +#define ONOFF_STATE_TO_ON (ONOFF_FLAG_TRANSITION | ONOFF_STATE_ON) + +/** + * @brief Value exposed by ONOFF_STATE_MASK when service is + * transitioning to off. 
+ */ +#define ONOFF_STATE_TO_OFF (ONOFF_FLAG_TRANSITION | ONOFF_STATE_OFF) + +#define ONOFF_SERVICE_START_SLEEPS __DEPRECATED_MACRO ONOFF_START_SLEEPS +#define ONOFF_SERVICE_STOP_SLEEPS __DEPRECATED_MACRO ONOFF_STOP_SLEEPS +#define ONOFF_SERVICE_RESET_SLEEPS __DEPRECATED_MACRO ONOFF_RESET_SLEEPS +#define ONOFF_SERVICE_HAS_ERROR __DEPRECATED_MACRO ONOFF_HAS_ERROR +#define ONOFF_SERVICE_INTERNAL_BASE __DEPRECATED_MACRO ONOFF_INTERNAL_BASE + +/* Forward declarations */ +struct onoff_manager; +struct onoff_monitor; +struct onoff_notifier; + +/** + * @brief Signature used to notify an on-off manager that a transition * has completed. * * Functions of this type are passed to service-specific transition * functions to be used to report the completion of the operation. * The functions may be invoked from any context. * - * @param srv the service for which transition was requested. + * @param mgr the manager for which transition was requested. * * @param res the result of the transition. This shall be * non-negative on success, or a negative error code. If an error is * indicated the service shall enter an error state. */ -typedef void (*onoff_service_notify_fn)(struct onoff_service *srv, - int res); +typedef void (*onoff_notify_fn)(struct onoff_manager *mgr, + int res); /** * @brief Signature used by service implementations to effect a @@ -88,10 +132,10 @@ typedef void (*onoff_service_notify_fn)(struct onoff_service *srv, * * The stop function will be called only from the on state. * - * The reset function may be called only when - * onoff_service_has_error() returns true. + * The reset function may be called only when onoff_has_error() + * returns true. * - * @param srv the service for which transition was requested. + * @param mgr the manager for which transition was requested. * * @param notify the function to be invoked when the transition has * completed. 
The callee shall capture this parameter to notify on @@ -99,40 +143,48 @@ typedef void (*onoff_service_notify_fn)(struct onoff_service *srv, * asynchronous, notify shall be invoked before the transition * function returns. */ -typedef void (*onoff_service_transition_fn)(struct onoff_service *srv, - onoff_service_notify_fn notify); +typedef void (*onoff_transition_fn)(struct onoff_manager *mgr, + onoff_notify_fn notify); + +/** @brief On-off service transition functions. */ +struct onoff_transitions { + /* Function to invoke to transition the service to on. */ + onoff_transition_fn start; + + /* Function to invoke to transition the service to off. */ + onoff_transition_fn stop; + + /* Function to force the service state to reset, where supported. */ + onoff_transition_fn reset; + + /* Flags identifying transition function capabilities. */ + u8_t flags; +}; /** - * @brief State associated with an on-off service. + * @brief State associated with an on-off manager. * * No fields in this structure are intended for use by service * providers or clients. The state is to be initialized once, using - * onoff_service_init(), when the service provider is initialized. - * In case of error it may be reset through the - * onoff_service_reset() API. + * onoff_manager_init(), when the service provider is initialized. In + * case of error it may be reset through the onoff_reset() API. */ -struct onoff_service { - /* List of clients waiting for completion of reset or - * transition to on. +struct onoff_manager { + /* List of clients waiting for transition or reset completion + * notifications. */ sys_slist_t clients; - /* Function to invoke to transition the service to on. */ - onoff_service_transition_fn start; - - /* Function to invoke to transition the service to off. */ - onoff_service_transition_fn stop; - - /* Function to force the service state to reset, where - * supported. + /* List of monitors to be notified of state changes including + * errors and transition completion. 
 */ -	onoff_service_transition_fn reset; +	sys_slist_t monitors; -	/* Mutex protection for flags, clients, releaser, and refs. */ -	struct k_spinlock lock; +	/* Transition functions. */ +	const struct onoff_transitions *transitions; -	/* Client to be informed when transition to off completes. */ -	struct onoff_client *releaser; +	/* Mutex protection for flags, clients, monitors, and refs. */ +	struct k_spinlock lock; /* Flags identifying the service state. */ u16_t flags; @@ -141,14 +193,39 @@ struct onoff_service { u16_t refs; }; +/** @brief Initializer for an onoff_transitions object. + * + * @param _start a function used to transition from off to on state. + * + * @param _stop a function used to transition from on to off state. + * + * @param _reset a function used to clear errors and force the service to an off + * state. Can be null. + * + * @param _flags any or all of the flags from enum onoff_manager_flags. + */ +#define ONOFF_TRANSITIONS_INITIALIZER(_start, _stop, _reset, _flags) { \ + .start = _start, \ + .stop = _stop, \ + .reset = _reset, \ + .flags = _flags, \ +} + +#define ONOFF_SERVICE_TRANSITIONS_INITIALIZER(_start, _stop, _reset, _flags) \ + __DEPRECATED_MACRO \ + ONOFF_TRANSITIONS_INITIALIZER(_start, _stop, _reset, _flags) + + /** @internal */ -#define ONOFF_SERVICE_INITIALIZER(_start, _stop, _reset, _flags) { \ - .start = _start, \ - .stop = _stop, \ - .reset = _reset, \ - .flags = _flags, \ +#define ONOFF_MANAGER_INITIALIZER(_transitions) { \ + .transitions = _transitions, \ + .flags = (_transitions)->flags, \ } +#define ONOFF_SERVICE_INITIALIZER(_transitions) \ + __DEPRECATED_MACRO \ + ONOFF_MANAGER_INITIALIZER(_transitions) + /** * @brief Initialize an on-off service to off state. * @@ -158,72 +235,16 @@ struct onoff_service { * * This function should never be invoked by clients of an on-off service. * - * @param srv the service definition object to be initialized.
- * - * @param start the function used to (initiate a) transition from off - * to on. This must not be null. Include @ref ONOFF_SERVICE_START_SLEEPS as - * appropriate in flags. + * @param mgr the manager definition object to be initialized. * - * @param stop the function used to (initiate a) transition from on to - * off. This must not be null. Include @ref ONOFF_SERVICE_STOP_SLEEPS - * as appropriate in flags. - * - * @param reset the function used to clear errors and force the - * service to an off state. Pass null if the service cannot or need - * not be reset. (Services where a transition operation can complete - * with an error notification should support the reset operation.) - * Include @ref ONOFF_SERVICE_RESET_SLEEPS as appropriate in flags. - * - * @param flags any or all of the flags mentioned above, - * e.g. @ref ONOFF_SERVICE_START_SLEEPS. Use of other flags produces an - * error. + * @param transitions A structure with transition functions. Structure must be + * persistent as it is used by the service. * * @retval 0 on success * @retval -EINVAL if start, stop, or flags are invalid */ -int onoff_service_init(struct onoff_service *srv, - onoff_service_transition_fn start, - onoff_service_transition_fn stop, - onoff_service_transition_fn reset, - u32_t flags); - -/** @internal - * - * Flag fields used to specify on-off client behavior. - * - * These flags control whether calls to onoff_service_request() and - * onoff_service_release() are synchronous or asynchronous, and for - * asynchronous operations how the operation result is communicated to - * the client. - */ -enum onoff_client_flags { - /* Known-invalid field, used in validation */ - ONOFF_CLIENT_NOTIFY_INVALID = 0, - - /* - * Indicates that no notification will be provided. - * - * Callers must check for completions using - * onoff_client_fetch_result(). - * - * See onoff_client_init_spinwait(). 
- */ - ONOFF_CLIENT_NOTIFY_SPINWAIT = 1, - - /* - * Select notification through @ref k_poll signal - * - * See onoff_client_init_signal(). - */ - ONOFF_CLIENT_NOTIFY_SIGNAL = 2, - - /** - * Select notification through a user-provided callback. - * - * See onoff_client_init_callback(). - */ - ONOFF_CLIENT_NOTIFY_CALLBACK = 3, -}; +int onoff_manager_init(struct onoff_manager *mgr, + const struct onoff_transitions *transitions); /* Forward declaration */ struct onoff_client; @@ -236,22 +257,30 @@ struct onoff_client; * pre-kernel, ISR, or cooperative or pre-emptible threads. * Compatible functions must be isr-callable and non-suspendable. * - * @param srv the service for which the operation was initiated. + * @param mgr the manager for which the operation was initiated. * * @param cli the client structure passed to the function that * initiated the operation. * - * @param user_data user data provided when the client structure was - * initialized with onoff_client_init_callback(). + * @param state the state of the machine at the time of completion, + * restricted by ONOFF_STATE_MASK. ONOFF_HAS_ERROR must be checked + * independently of whether res is negative as a machine error may + * indicate that all future operations except onoff_reset() will fail. * * @param res the result of the operation. Expected values are * service-specific, but the value shall be non-negative if the - * operation succeeded, and negative if the operation failed. + * operation succeeded, and negative if the operation failed. If res + * is negative ONOFF_HAS_ERROR will be set in state, but if res is + * non-negative ONOFF_HAS_ERROR may still be set in state. + * + * @param user_data user data provided when the client structure was + * initialized with onoff_client_init_callback(). 
*/ -typedef void (*onoff_client_callback)(struct onoff_service *srv, +typedef void (*onoff_client_callback)(struct onoff_manager *mgr, struct onoff_client *cli, - void *user_data, - int res); + u32_t state, + int res, + void *user_data); /** * @brief State associated with a client of an on-off service. @@ -264,11 +293,8 @@ typedef void (*onoff_client_callback)(struct onoff_service *srv, * when a pointer to the object is passed to any on-off service * function. While the service provider controls the object the * client must not change any object fields. Control reverts to the - * client: - * * if the call to the service API returns an error; - * * if the call to the service API succeeds for a no-wait operation; - * * when operation completion is posted (signalled or callback - * invoked). + * client concurrent with release of the owned sys_notify structure, + * or when indicated by an onoff_cancel() return value. * * After control has reverted to the client the state object must be * reinitialized for the next operation. @@ -281,34 +307,32 @@ struct onoff_client { /* Links the client into the set of waiting service users. */ sys_snode_t node; - union async { - /* Pointer to signal used to notify client. - * - * The signal value corresponds to the res parameter - * of onoff_client_callback. - */ - struct k_poll_signal *signal; - - /* Handler and argument for callback notification. */ - struct callback { - onoff_client_callback handler; - void *user_data; - } callback; - } async; - - /* - * The result of the operation. - * - * This is the value that was (or would be) passed to the - * async infrastructure. This field is the sole record of - * success or failure for no-wait synchronous operations. - */ - int volatile result; + /* Notification configuration. */ + struct sys_notify notify; - /* Flags recording client state. */ - u32_t volatile flags; + /* User data for callback-based notification. 
*/ + void *user_data; }; +/** @internal */ +#define ONOFF_CLIENT_TYPE_POS SYS_NOTIFY_EXTENSION_POS +/** @internal */ +#define ONOFF_CLIENT_TYPE_BITS 2U +/** + * @brief Identify region of sys_notify flags available for + * containing services. + * + * Bits of the flags field of the sys_notify structure contained + * within the queued_operation structure at and above this position + * may be used by extensions to the onoff_client structure. + * + * These bits are intended for use by containing service + * implementations to record client-specific information. Use of + * these does not imply that the flags field becomes public API. + */ +#define ONOFF_CLIENT_EXTENSION_POS (SYS_NOTIFY_EXTENSION_POS \ + + ONOFF_CLIENT_TYPE_BITS) + /** * @brief Check for and read the result of an asynchronous operation. * @@ -325,15 +349,8 @@ static inline int onoff_client_fetch_result(const struct onoff_client *op, int *result) { __ASSERT_NO_MSG(op != NULL); - __ASSERT_NO_MSG(result != NULL); - - int rv = -EAGAIN; - if (op->flags == 0U) { - rv = 0; - *result = op->result; - } - return rv; + return sys_notify_fetch_result(&op->notify, result); } /** @@ -353,9 +370,8 @@ static inline void onoff_client_init_spinwait(struct onoff_client *cli) { __ASSERT_NO_MSG(cli != NULL); - *cli = (struct onoff_client){ - .flags = ONOFF_CLIENT_NOTIFY_SPINWAIT, - }; + *cli = (struct onoff_client){}; + sys_notify_init_spinwait(&cli->notify); } /** @@ -385,16 +401,9 @@ static inline void onoff_client_init_signal(struct onoff_client *cli, struct k_poll_signal *sigp) { __ASSERT_NO_MSG(cli != NULL); - __ASSERT_NO_MSG(sigp != NULL); - *cli = (struct onoff_client){ -#ifdef CONFIG_POLL - .async = { - .signal = sigp, - }, -#endif /* CONFIG_POLL */ - .flags = ONOFF_CLIENT_NOTIFY_SIGNAL, - }; + *cli = (struct onoff_client){}; + sys_notify_init_signal(&cli->notify, sigp); } /** @@ -425,14 +434,9 @@ static inline void onoff_client_init_callback(struct onoff_client *cli, __ASSERT_NO_MSG(handler != NULL); *cli = (struct 
onoff_client){ - .async = { - .callback = { - .handler = handler, - .user_data = user_data, - }, - }, - .flags = ONOFF_CLIENT_NOTIFY_CALLBACK, + .user_data = user_data, }; + sys_notify_init_callback(&cli->notify, handler); } /** @@ -456,7 +460,7 @@ static inline void onoff_client_init_callback(struct onoff_client *cli, * transition to on can sleep, the transition cannot be started and * the request will fail with `-EWOULDBLOCK`. * - * @param srv the service that will be used. + * @param mgr the manager that will be used. * * @param cli a non-null pointer to client state providing * instructions on synchronous expectations and how to notify the @@ -472,7 +476,7 @@ static inline void onoff_client_init_callback(struct onoff_client *cli, * context and successful initiation could result in an attempt to * make the calling thread sleep. */ -int onoff_request(struct onoff_service *srv, +int onoff_request(struct onoff_manager *mgr, struct onoff_client *cli); /** @@ -488,7 +492,7 @@ int onoff_request(struct onoff_service *srv, * actual release fails. Always check the operation completion * result. * - * @param srv the service that will be used. + * @param mgr the manager that will be used. * * @param cli a non-null pointer to client state providing * instructions on how to notify the client when release completes. @@ -504,7 +508,7 @@ int onoff_request(struct onoff_service *srv, * to off * @retval -EBUSY if the service is transitioning to on */ -int onoff_release(struct onoff_service *srv, +int onoff_release(struct onoff_manager *mgr, struct onoff_client *cli); /** @@ -512,13 +516,17 @@ int onoff_release(struct onoff_service *srv, * * This function can be used to determine whether the service has * recorded an error. Errors may be cleared by invoking - * onoff_service_reset(). + * onoff_reset(). 
+ * + * This is an unlocked convenience function suitable for use only when + * it is known that no other process might invoke an operation that + * transitions the service between an error and non-error state. * * @return true if and only if the service has an uncleared error. */ -static inline bool onoff_service_has_error(const struct onoff_service *srv) +static inline bool onoff_has_error(const struct onoff_manager *mgr) { - return (srv->flags & ONOFF_SERVICE_HAS_ERROR) != 0; + return (mgr->flags & ONOFF_HAS_ERROR) != 0; } /** @@ -526,7 +534,7 @@ static inline bool onoff_service_has_error(const struct onoff_service *srv) * state. * * A service can only be reset when it is in an error state as - * indicated by onoff_service_has_error(). + * indicated by onoff_has_error(). * * The return value indicates the success or failure of an attempt to * initiate an operation to reset the resource. If initiation of the @@ -538,15 +546,12 @@ static inline bool onoff_service_has_error(const struct onoff_service *srv) * Note that the call to this function may succeed in a case where the * actual reset fails. Always check the operation completion result. * - * This function is blocking if the reset transition is blocking, - * unless client notification specifies no-wait. - * * @note Due to the conditions on state transition all incomplete * asynchronous operations will have been informed of the error when * it occurred. There need be no concern about dangling requests left * after a reset completes. * - * @param srv the service to be reset. + * @param mgr the manager to be reset. * * @param cli pointer to client state, including instructions on how * to notify the client when reset completes. Behavior is undefined @@ -554,12 +559,12 @@ static inline bool onoff_service_has_error(const struct onoff_service *srv) * operation. 
* * @retval 0 on success - * @retval -ENOTSUP if reset is not supported - * @retval -EINVAL if the parameters are invalid, or if the service - * @retval -EALREADY if the service does not have a recorded error + * @retval -ENOTSUP if reset is not supported by the service. + * @retval -EINVAL if the parameters are invalid. + * @retval -EALREADY if the service does not have a recorded error. */ -int onoff_service_reset(struct onoff_service *srv, - struct onoff_client *cli); +int onoff_reset(struct onoff_manager *mgr, + struct onoff_client *cli); /** * @brief Attempt to cancel an in-progress client operation. @@ -568,44 +573,277 @@ int onoff_service_reset(struct onoff_service *srv, * shut down before the operation has completed. For example, when a * request was made and the need is no longer present. * - * There is limited support for cancelling an in-progress operation: - * * If a start or reset is in progress, all but one clients - * requesting the start can cancel their request. - * * If a stop is in progress, all clients requesting a restart can - * cancel their request; - * * A client requesting a release cannot cancel the release. + * In-progress transitions on behalf of a specific client can be + * cancelled, with the following behavior: + * + * * Clients with an incomplete request or reset can immediately + * cancel it regardless of whether a start, stop, or reset is in + * progress. + * * Clients with an incomplete release can cancel it, and the client + * structure will be converted to a request operation that will + * force a transition to on as soon as the transition to off + * completes. + * + * @warning It is possible for the synthesized request to complete + * notification before execution returns to the caller informing it + * that the release was converted to a request. 
Consequently + * clients that choose to cancel release operations must take care + * to inspect either the state parameter to a callback notifier, or + * to use the onoff_monitor infrastructure to be informed + * synchronously with each state change, to correctly associate the + * completion result with a request or release operation. * * Be aware that any transition that was initiated on behalf of the - * client will continue to progress to completion. The restricted - * support for cancellation ensures that for any in-progress - * transition there will always be at least one client that will be - * notified when the operation completes. + * client will continue to progress to completion: it is only + * notification of transition completion that may be eliminated. If + * there are no active requests when a transition to on completes the + * manager will initiate a transition to off. If there are pending + * requests when a transition to off completes the manager will + * initiate a transition to on. * - * If the cancellation fails the service retains control of the client - * object, and the client must wait for operation completion. + * The onoff_notifier infrastructure provides a wrapper API for + * anonymous clients to register requests and releases without + * directly maintaining state related to in-progress transitions. + * This can be used to work around the complexities that result from + * trying to cancel an in-progress transition. * - * @param srv the service for which an operation is to be cancelled. + * @param mgr the manager for which an operation is to be cancelled. * * @param cli a pointer to the same client state that was provided - * when the operation to be cancelled was issued. If the cancellation - * is successful the client will be notified of operation completion - * with a result of `-ECANCELED`. + * when the operation to be cancelled was issued. * * @retval 0 if the cancellation was completed before the client could - * be notified. 
The client will be notified through cli with an - * operation completion of `-ECANCELED`. + * be notified. The cancellation succeeds and control of the cli + * structure returns to the client without any completion notification. + * @retval 1 in the case where a pending release was cancelled before + * the service transitioned to off. In this case the manager retains + * control of the client structure, which is converted to a request, + * and completion will be notified when the manager completes a + * transition back to on. * @retval -EINVAL if the parameters are invalid. - * @retval -EWOULDBLOCK if cancellation was rejected because the - * client is the only waiter for an in-progress transition. The - * service retains control of the client structure. * @retval -EALREADY if cli was not a record of an uncompleted * notification at the time the cancellation was processed. This * likely indicates that the operation and client notification had * already completed. */ -int onoff_cancel(struct onoff_service *srv, +int onoff_cancel(struct onoff_manager *mgr, struct onoff_client *cli); +/** + * @brief Signature used to notify a monitor of an onoff service of + * errors or completion of a state transition. + * + * This is similar to onoff_client_callback but provides information + * about all transitions, not just ones associated with a specific + * client. Monitor callbacks are invoked before any completion + * notifications associated with the state change are made. + * + * These functions may be invoked from any context including + * pre-kernel, ISR, or cooperative or pre-emptible threads. + * Compatible functions must be isr-callable and non-suspendable. + * + * The callback is permitted to unregister itself from the manager, + * but must not register or unregister any other monitors. + * + * @param mgr the manager for which a transition has completed. + * + * @param mon the monitor instance through which this notification + * arrived. 
+ * + * @param state the state of the machine at the time of completion, + * restricted by ONOFF_STATE_MASK. This includes the ONOFF_HAS_ERROR + * flag as well as all four non-error states: ONOFF_STATE_OFF, + * ONOFF_STATE_TO_ON, ONOFF_STATE_ON, ONOFF_STATE_TO_OFF. + * + * @param res the result of the operation. Expected values are + * service- and state-specific, but the value shall be non-negative if + * the operation succeeded, and negative if the operation failed. + */ +typedef void (*onoff_monitor_callback)(struct onoff_manager *mgr, + struct onoff_monitor *mon, + u32_t state, + int res); + +/** + * @brief Registration state for notifications of onoff service + * transitions. + * + * Any given onoff_monitor structure can be associated with at most + * one onoff_manager instance. + */ +struct onoff_monitor { + /* Links the client into the set of waiting service users. */ + sys_snode_t node; + + /* Callback to be invoked on state change. */ + onoff_monitor_callback callback; +}; + +/** + * @brief Add a monitor of state changes for a manager. + * + * @param mgr the manager for which state changes are to be monitored. + * + * @param mon a linkable node providing the callback to be invoked on + * state changes. + * + * @return non-negative on successful addition, or a negative error + * code. + */ +int onoff_monitor_register(struct onoff_manager *mgr, + struct onoff_monitor *mon); + +/** + * @brief Remove a monitor of state changes from a manager. + * + * @param mgr the manager for which state changes are to be monitored. + * + * @param mon a linkable node providing the callback to be invoked on + * state changes. + * + * @return non-negative on successful removal, or a negative error + * code. + */ +int onoff_monitor_unregister(struct onoff_manager *mgr, + struct onoff_monitor *mon); + +/** + * @brief Callback for an event-based API for onoff request/release + * completions. + * + * Transition to off (for the client) is indicated by a zero value.
+ * Transition to on (for the client) is indicated by a positive value. + * A negative value indicates an error in the onoff service state. + * + * If the client issues requests or releases before transitions are + * incomplete the notifier will be invoked only when the final target + * state is reached with no changes pending. In this case a series of + * notifications may repeat the previous notification value, rather + * than alternating between on and off. + * + * @param np pointer to the state object that manages on/off + * transitions for a client that prefers event-based notifications + * rather than separate request/release operations. + * + * @param status the status of the service at the time of the + * callback: 1 if it's on, 0 if it's not on (including transitioning + * to or from on). A negative status value is passed if the + * underlying service has indicated an error; in this case + * onoff_notifier_reset() may need to be invoked to restore + * functionality. + */ +typedef void (*onoff_notifier_callback)(struct onoff_notifier *np, + int status); + +/** + * @brief Convert an onoff manager async API to an event-based API. + * + * In some use cases it's inconvenient for a client of an onoff + * service to manage request and release states itself. An example is + * a case where the client no longer needs the service and wants to + * shut down without waiting for the stop to complete. + * + * An onoff notifier wraps an onoff service, providing state-free + * request and release methods and holding a callback that is invoked + * whenever the state change for the individual client has completed. + * + * Note that this does not track the state of the service as a whole: + * only the state of the client-specific requests and releases. 
+ * Specifically, while notification of a transition to on confirms + * that the underlying service is on, notification of a transition to + * off does not indicate the state of the underlying service, only + * that this client no longer has a demand for the service. + */ +struct onoff_notifier { + /* Pointer to the underlying onoff service. */ + struct onoff_manager *onoff; + + /* Callback used to notify of transition to stable state. */ + onoff_notifier_callback callback; + + /* Protects changes to state. */ + struct k_spinlock lock; + + /* Client structure used to communicate with onoff service. */ + struct onoff_client cli; + + /* Internal state of the notifier. */ + u32_t volatile state; + + /* The completion value for the last onoff operation. */ + int onoff_result; +}; + +/** @brief Initializer for an onoff_notifier object. + * + * @param _onoff a pointer to the onoff_manager to use for requests + * and releases + * + * @param _callback the onoff_notifier_callback function pointer to be + * invoked to inform the client of state changes. + */ +#define ONOFF_NOTIFIER_INITIALIZER(_onoff, _callback) { \ + .onoff = _onoff, \ + .callback = _callback, \ +} + +/** + * @brief Inform an onoff service that this notifier client needs the + * service. + * + * If the underlying service is already on a positive value is + * returned, and the notifier callback is not invoked. If zero is + * returned the notifier's callback will be invoked as soon as the + * service completes a transition to on, which may be before the call + * to this function returns. + * + * @param np pointer to the notifier state. + * + * @retval 0 if the request was successful but the service is not yet on. + * @retval positive if the request was successful and the service is stable on. + * @retval -EIO if the notifier is in an error state. + * @retval -EALREADY if the client is already in a state heading toward on.
+ * @retval -EWOULDBLOCK if the client is in the process of resetting + * from an error state. + * @retval other negative values indicate an error from the onoff manager. + */ +int onoff_notifier_request(struct onoff_notifier *np); + +/** + * @brief Inform the onoff service that this notifier client no longer + * needs the service. + * + * If the underlying service is already off a positive value is + * returned. If zero is returned the notifier's callback will be + * invoked as soon as this client's request for the service to turn + * off completes, which may be before the call to this function + * returns. + * + * @param np pointer to the notifier state. + * + * @retval 0 if the request was successful. + * @retval -EIO if the notifier is in an error state + * @retval -EALREADY if the client is already in a state heading toward off. + * @retval -EWOULDBLOCK if the client is in the process of resetting + * from an error state. + * @retval other negative values indicate an error from the onoff manager. + */ +int onoff_notifier_release(struct onoff_notifier *np); + +/** + * @brief Clear an error state from an onoff notifier. + * + * This function should be invoked if the notifier has been passed a + * negative error code. On successful invocation the notifier + * callback will be invoked to indicate reaching the stable off state, + * or with an error indicating why the reset could not be performed. + * + * @retval 0 if the request was successful. + * @retval -EALREADY if the notifier does not have an error state + * recorded. The notifier callback will not be invoked. 
+ */ +int onoff_notifier_reset(struct onoff_notifier *np); + /** @} */ #ifdef __cplusplus diff --git a/include/sys/queued_operation.h b/include/sys/queued_operation.h new file mode 100644 index 0000000000000..559ae47ff19d8 --- /dev/null +++ b/include/sys/queued_operation.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2019 Peter Bigot Consulting, LLC + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_SYS_QUEUED_OPERATION_H_ +#define ZEPHYR_INCLUDE_SYS_QUEUED_OPERATION_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Forward declaration */ +struct queued_operation; +struct queued_operation_manager; + +/** + * @defgroup resource_mgmt_queued_operation_apis Queued Operation APIs + * @ingroup kernel_apis + * @{ + */ + +/** @internal */ +#define QUEUED_OPERATION_PRIORITY_POS SYS_NOTIFY_EXTENSION_POS +/** @internal */ +#define QUEUED_OPERATION_PRIORITY_BITS 8U +/** @internal */ +#define QUEUED_OPERATION_PRIORITY_MASK BIT_MASK(QUEUED_OPERATION_PRIORITY_BITS) + +/** + * @brief Special priority value to indicate operation should be + * placed last in current queue. + * + * This is like providing the lowest priority but uses a constant-time + * insertion and is FIFO. + */ +#define QUEUED_OPERATION_PRIORITY_APPEND \ + ((int)QUEUED_OPERATION_PRIORITY_MASK + 1) + +/** + * @brief Special priority value to indicate operation should be + * placed first in the current queue. + * + * This is like providing the highest priority but uses a + * constant-time insertion and is LIFO. + */ +#define QUEUED_OPERATION_PRIORITY_PREPEND \ + ((int)QUEUED_OPERATION_PRIORITY_MASK + 2) + +/** + * @brief Identify the region of sys_notify flags available for + * containing services. + * + * Bits of the flags field of the sys_notify structure contained + * within the queued_operation structure at and above this position + * may be used by extensions to the sys_notify structure. 
+ * + * These bits are intended for use by containing service + * implementations to record client-specific information. The bits + * are cleared by sys_notify_validate(). Use of these does not + * imply that the flags field becomes public API. + */ +#define QUEUED_OPERATION_EXTENSION_POS \ + (QUEUED_OPERATION_PRIORITY_POS + QUEUED_OPERATION_PRIORITY_BITS) + +/** + * @brief Base object providing state for an operation. + * + * Instances of this should be members of a service-specific structure + * that provides the operation parameters. + */ +struct queued_operation { + /** @internal + * + * Links the operation into the operation queue. + */ + sys_snode_t node; + + /** + * @brief Notification configuration. + * + * This must be initialized using sys_notify_init_callback() + * or its sibling functions before an operation can be passed + * to queued_operation_submit(). + * + * The queued operation manager provides specific error codes + * for failures identified at the manager level: + * * -ENODEV indicates a failure in an onoff service. + */ + struct sys_notify notify; +}; + +/** + * @ brief Table of functions used by a queued operation manager. + */ +struct queued_operation_functions { + /** + * @brief Function used to verify an operation is well-defined. + * + * When provided this function is invoked by + * queued_operation_submit() to verify that the operation + * definition meets the expectations of the service. The + * operation is acceptable only if a non-negative value is + * returned. + * + * If not provided queued_operation_submit() will assume + * service-specific expectations are trivially satisfied, and + * will reject the operation only if sys_notify_validate() + * fails. Because that validation is limited services should + * at a minimum verify that the extension bits have the + * expected value (zero, when none are being used). + * + * @note The validate function must be isr-ok and not sleep. 
+ * + * @param mgr the service that supports queued operations. + * + * @param op the operation being considered for suitability. + * + * @return the value to be returned from queued_operation_submit(). + */ + int (*validate)(struct queued_operation_manager *mgr, + struct queued_operation *op); + + /** + * @brief Function to transform a generic notification + * callback to its service-specific form. + * + * The implementation should cast cb to the proper signature + * for the service, and invoke the cast pointer with the + * appropriate arguments. + * + * @note The callback function must be isr-ok and not sleep. + * + * @param mgr the service that supports queued operations. + * + * @param op the operation that has been completed. + * + * @param cb the generic callback to invoke. + */ + void (*callback)(struct queued_operation_manager *mgr, + struct queued_operation *op, + sys_notify_generic_callback cb); + + /** + * @brief Function used to inform the manager of a new operation. + * + * This function can be called as a side effect of + * queued_operation_submit() or queued_operation_finalize() to + * tell the service that a new operation needs to be + * processed. + * + * Be aware that if processing is entirely + * synchronous--meaning queued_operation_finalize() can be + * invoked during process()--then the process() function will + * be invoked recursively, possibly with another operation. + * This can cause unbounded stack growth, and requires that + * process() be re-entrant. Generally the process() function + * should itself be async, with finalization done after + * process() returns. + * + * @note The process function must be isr-ok and not sleep. + * + * @param mgr the service that supports queued operations. + * + * @param op the operation that should be initiated. A null + * pointer is passed if there are no pending operations. 
+ */ + void (*process)(struct queued_operation_manager *mgr, + struct queued_operation *op); +}; + +/** + * @brief State associated with a manager instance. + */ +struct queued_operation_manager { + /* Links the operation into the operation queue. */ + sys_slist_t operations; + + /* Pointer to the functions that support the manager. */ + const struct queued_operation_functions *vtable; + + /* Pointer to an on-off service supporting this service. NULL + * if service is always-on. + */ + struct onoff_manager *onoff; + + /* The state of on-off service requests. */ + struct onoff_client onoff_client; + + /* Lock controlling access to other fields. */ + struct k_spinlock lock; + + /* The operation that is being processed. */ + struct queued_operation *current; + + /* Information about the internal state of the manager. */ + u32_t volatile state; +}; + +#define QUEUED_OPERATION_MANAGER_INITIALIZER(_vtable, _onoff) { \ + .vtable = _vtable, \ + .onoff = _onoff, \ +} + +/** + * @brief Submit an operation to be processed when the service is + * available. + * + * The service process function will be invoked during this call if + * the service is available. + * + * @param mgr a generic pointer to the service instance + * + * @param op a generic pointer to an operation to be performed. The + * notify field in the provided operation must have been initialized + * before being submitted, even if the operation description is being + * re-used. This may be done directly with sys_notify API or by + * wrapping it in a service-specific operation init function. + * + * @param priority the priority of the operation relative to other + * operations. Numerically lower values are higher priority. Values + * outside the range of a signed 8-bit integer will be rejected, + * except for named priorities like QUEUED_OPERATION_PRIORITY_APPEND. + * + * @retval -ENOTSUP if callback notification is requested and the + * service does not provide a callback translation. 
This may also be + * returned due to service-specific validation. + * + * @retval -EINVAL if the passed priority is out of the range of + * supported priorities. This may also be returned due to + * service-specific validation. + * + * @return A negative value if the operation was rejected by service + * validation or due to other configuration errors. A non-negative + * value indicates the operation has been accepted for processing and + * completion notification will be provided. + */ +int queued_operation_submit(struct queued_operation_manager *mgr, + struct queued_operation *op, + int priority); + +/** + * @brief Helper to extract the result from a queued operation. + * + * This forwards to sys_notify_fetch_result(). + */ +static inline int queued_operation_fetch_result(const struct queued_operation *op, + int *result) +{ + return sys_notify_fetch_result(&op->notify, result); +} + +/** + * @brief Attempt to cancel a queued operation. + * + * Successful cancellation issues a completion notification with + * result -ECANCELED for the submitted operation before this function + * returns. + * + * @retval 0 if successfully cancelled. + * @retval -EINPROGRESS if op is currently being executed, so cannot + * be cancelled. + * @retval -EINVAL if op is neither being executed nor in the queue of + * pending operations + */ +int queued_operation_cancel(struct queued_operation_manager *mgr, + struct queued_operation *op); + +/** + * @brief Send the completion notification for a queued operation. + * + * This function must be invoked by services that support queued + * operations when the operation provided to them through the process + * function have been completed. It is not intended to be invoked by + * users of a service. + * + * @param mgr a generic pointer to the service instance + * @param res the result of the operation, as with + * sys_notify_finalize(). 
+ */ +void queued_operation_finalize(struct queued_operation_manager *mgr, + int res); + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_INCLUDE_SYS_ASYNCNOTIFY_H_ */ diff --git a/lib/os/CMakeLists.txt b/lib/os/CMakeLists.txt index 7918f78dc6d72..47670c3d34278 100644 --- a/lib/os/CMakeLists.txt +++ b/lib/os/CMakeLists.txt @@ -11,8 +11,11 @@ zephyr_sources( fdtable.c hex.c mempool.c + notify.c printk.c onoff.c + onoff_notifier.c + queued_operation.c rb.c sem.c thread_entry.c diff --git a/lib/os/notify.c b/lib/os/notify.c new file mode 100644 index 0000000000000..ba5d2a3125f09 --- /dev/null +++ b/lib/os/notify.c @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2019 Peter Bigot Consulting, LLC + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +int sys_notify_validate(struct sys_notify *notify) +{ + int rv = 0; + + if (notify == NULL) { + return -EINVAL; + } + + /* Validate configuration based on mode */ + switch (sys_notify_get_method(notify)) { + case SYS_NOTIFY_METHOD_SPINWAIT: + break; + case SYS_NOTIFY_METHOD_CALLBACK: + if (notify->method.callback == NULL) { + rv = -EINVAL; + } + break; +#ifdef CONFIG_POLL + case SYS_NOTIFY_METHOD_SIGNAL: + if (notify->method.signal == NULL) { + rv = -EINVAL; + } + break; +#endif /* CONFIG_POLL */ + default: + rv = -EINVAL; + break; + } + + /* Clear the result here instead of in all callers. */ + if (rv == 0) { + notify->result = 0; + } + + return rv; +} + +sys_notify_generic_callback sys_notify_finalize(struct sys_notify *notify, + int res) +{ + struct k_poll_signal *sig = NULL; + sys_notify_generic_callback rv = 0; + u32_t method = sys_notify_get_method(notify); + + /* Store the result and capture secondary notification + * information. 
+ */ + notify->result = res; + switch (method) { + case SYS_NOTIFY_METHOD_SPINWAIT: + break; + case SYS_NOTIFY_METHOD_CALLBACK: + rv = notify->method.callback; + break; + case SYS_NOTIFY_METHOD_SIGNAL: + sig = notify->method.signal; + break; + default: + __ASSERT_NO_MSG(false); + } + + /* Mark completion by clearing the flags field to the + * completed state, releasing any spin-waiters, then complete + * secondary notification. + */ + compiler_barrier(); + notify->flags = SYS_NOTIFY_METHOD_COMPLETED; + + if (IS_ENABLED(CONFIG_POLL) && (sig != NULL)) { + k_poll_signal_raise(sig, res); + } + + return rv; +} diff --git a/lib/os/onoff.c b/lib/os/onoff.c index 2a999b6ef8b90..3c9767694b56f 100644 --- a/lib/os/onoff.c +++ b/lib/os/onoff.c @@ -1,130 +1,121 @@ /* * Copyright (c) 2019 Peter Bigot Consulting, LLC + * Copyright (c) 2020 Nordic Semiconductor ASA * * SPDX-License-Identifier: Apache-2.0 */ #include #include -#include -#define CLIENT_NOTIFY_METHOD_MASK 0x03 -#define CLIENT_VALID_FLAGS_MASK 0x07 - -#define SERVICE_CONFIG_FLAGS \ - (ONOFF_SERVICE_START_SLEEPS \ - | ONOFF_SERVICE_STOP_SLEEPS \ - | ONOFF_SERVICE_RESET_SLEEPS) +#define SERVICE_CONFIG_FLAGS \ + (ONOFF_START_SLEEPS \ + | ONOFF_STOP_SLEEPS \ + | ONOFF_RESET_SLEEPS) #define SERVICE_REFS_MAX UINT16_MAX -#define SERVICE_STATE_OFF 0 -#define SERVICE_STATE_ON ONOFF_SERVICE_INTERNAL_BASE -#define SERVICE_STATE_TRANSITION (ONOFF_SERVICE_INTERNAL_BASE << 1) -#define SERVICE_STATE_TO_ON (SERVICE_STATE_TRANSITION | SERVICE_STATE_ON) -#define SERVICE_STATE_TO_OFF (SERVICE_STATE_TRANSITION | SERVICE_STATE_OFF) +#define ONOFF_CLIENT_INACTIVE 0 +#define ONOFF_CLIENT_REQUEST 1 +#define ONOFF_CLIENT_RELEASE 2 +#define ONOFF_CLIENT_RESET 3 + +#define ONOFF_CLIENT_TYPE_MASK (BIT_MASK(ONOFF_CLIENT_TYPE_BITS) \ + << ONOFF_CLIENT_TYPE_POS) -#define SERVICE_STATE_MASK (SERVICE_STATE_ON | SERVICE_STATE_TRANSITION) +static u32_t get_client_type(const struct onoff_client *cli) +{ + return (cli->notify.flags >> 
ONOFF_CLIENT_TYPE_POS) + & BIT_MASK(ONOFF_CLIENT_TYPE_BITS); +} -static void set_service_state(struct onoff_service *srv, +static void set_client_type(struct onoff_client *cli, + u32_t type) +{ + u32_t flags = cli->notify.flags; + + flags &= ~ONOFF_CLIENT_TYPE_MASK; + flags |= (type << ONOFF_CLIENT_TYPE_POS) & ONOFF_CLIENT_TYPE_MASK; + cli->notify.flags = flags; +} + +static void set_service_state(struct onoff_manager *mgr, u32_t state) { - srv->flags &= ~SERVICE_STATE_MASK; - srv->flags |= (state & SERVICE_STATE_MASK); + mgr->flags &= ~ONOFF_STATE_MASK; + mgr->flags |= (state & ONOFF_STATE_MASK); } -static int validate_args(const struct onoff_service *srv, +static int validate_args(const struct onoff_manager *mgr, struct onoff_client *cli) { - if ((srv == NULL) || (cli == NULL)) { + if ((mgr == NULL) || (cli == NULL)) { return -EINVAL; } - int rv = 0; - u32_t mode = cli->flags; - - /* Reject unexpected flags. */ - if (mode != (cli->flags & CLIENT_VALID_FLAGS_MASK)) { - return -EINVAL; - } + int rv = sys_notify_validate(&cli->notify); - /* Validate configuration based on mode */ - switch (mode & CLIENT_NOTIFY_METHOD_MASK) { - case ONOFF_CLIENT_NOTIFY_SPINWAIT: - break; - case ONOFF_CLIENT_NOTIFY_CALLBACK: - if (cli->async.callback.handler == NULL) { - rv = -EINVAL; - } - break; - case ONOFF_CLIENT_NOTIFY_SIGNAL: - if (cli->async.signal == NULL) { - rv = -EINVAL; - } - break; - default: + if ((rv == 0) + && ((cli->notify.flags + & ~BIT_MASK(ONOFF_CLIENT_EXTENSION_POS)) != 0)) { rv = -EINVAL; - break; - } - - /* Clear the result here instead of in all callers. 
*/ - if (rv == 0) { - cli->result = 0; } return rv; } -int onoff_service_init(struct onoff_service *srv, - onoff_service_transition_fn start, - onoff_service_transition_fn stop, - onoff_service_transition_fn reset, - u32_t flags) +int onoff_manager_init(struct onoff_manager *mgr, + const struct onoff_transitions *transitions) { - if ((flags & SERVICE_CONFIG_FLAGS) != flags) { + if (transitions->flags & ~SERVICE_CONFIG_FLAGS) { return -EINVAL; } - if ((start == NULL) || (stop == NULL)) { + if ((transitions->start == NULL) || (transitions->stop == NULL)) { return -EINVAL; } - *srv = (struct onoff_service)ONOFF_SERVICE_INITIALIZER(start, stop, - reset, flags); + *mgr = (struct onoff_manager)ONOFF_MANAGER_INITIALIZER(transitions); return 0; } -static void notify_one(struct onoff_service *srv, +static void notify_monitors(struct onoff_manager *mgr, + u32_t state, + int res) +{ + sys_slist_t *mlist = &mgr->monitors; + struct onoff_monitor *mon; + struct onoff_monitor *tmp; + + SYS_SLIST_FOR_EACH_CONTAINER_SAFE(mlist, mon, tmp, node) { + mon->callback(mgr, mon, state, res); + } +} + +static void notify_one(struct onoff_manager *mgr, struct onoff_client *cli, + u32_t state, int res) { - unsigned int flags = cli->flags; + set_client_type(cli, ONOFF_CLIENT_INACTIVE); - /* Store the result, and notify if requested. 
*/ - cli->result = res; - cli->flags = 0; - switch (flags & CLIENT_NOTIFY_METHOD_MASK) { - case ONOFF_CLIENT_NOTIFY_SPINWAIT: - break; - case ONOFF_CLIENT_NOTIFY_CALLBACK: - cli->async.callback.handler(srv, cli, - cli->async.callback.user_data, res); - break; -#ifdef CONFIG_POLL - case ONOFF_CLIENT_NOTIFY_SIGNAL: - k_poll_signal_raise(cli->async.signal, res); - break; -#endif /* CONFIG_POLL */ - default: - __ASSERT_NO_MSG(false); + void *ud = cli->user_data; + onoff_client_callback cb = + (onoff_client_callback)sys_notify_finalize(&cli->notify, res); + + if (cb) { + cb(mgr, cli, state, res, ud); } } -static void notify_all(struct onoff_service *srv, +static void notify_all(struct onoff_manager *mgr, sys_slist_t *list, + u32_t state, int res) { + notify_monitors(mgr, state, res); + while (!sys_slist_is_empty(list)) { sys_snode_t *node = sys_slist_get_not_empty(list); struct onoff_client *cli = @@ -132,18 +123,35 @@ static void notify_all(struct onoff_service *srv, struct onoff_client, node); - notify_one(srv, cli, res); + notify_one(mgr, cli, state, res); } } -static void onoff_start_notify(struct onoff_service *srv, +static void onoff_stop_notify(struct onoff_manager *mgr, + int res); + +static void onoff_start_notify(struct onoff_manager *mgr, int res) { - k_spinlock_key_t key = k_spin_lock(&srv->lock); - sys_slist_t clients = srv->clients; + bool stop = false; + unsigned int refs = 0U; + sys_snode_t *node; + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + sys_slist_t clients = mgr->clients; + + /* All clients registered at the time a start completes must + * be request clients. Accrue a reference count for each of + * them. 
+ */ + SYS_SLIST_FOR_EACH_NODE(&clients, node) { + struct onoff_client *cli = CONTAINER_OF(node, + struct onoff_client, + node); - /* Can't have a queued releaser during start */ - __ASSERT_NO_MSG(srv->releaser == NULL); + (void)cli; + __ASSERT_NO_MSG(get_client_type(cli) == ONOFF_CLIENT_REQUEST); + refs += 1U; + } /* If the start failed log an error and leave the rest of the * state in place for diagnostics. @@ -156,93 +164,103 @@ static void onoff_start_notify(struct onoff_service *srv, * clients of operation completion. */ if (res < 0) { - srv->flags &= ~SERVICE_STATE_TRANSITION; - srv->flags |= ONOFF_SERVICE_HAS_ERROR; + mgr->flags &= ~ONOFF_FLAG_TRANSITION; + mgr->flags |= ONOFF_HAS_ERROR; } else { - sys_snode_t *node; - unsigned int refs = 0U; - - set_service_state(srv, SERVICE_STATE_ON); - - SYS_SLIST_FOR_EACH_NODE(&clients, node) { - refs += 1U; - } + set_service_state(mgr, ONOFF_STATE_ON); /* Update the reference count, or fail if the count * would overflow. */ - if (srv->refs > (SERVICE_REFS_MAX - refs)) { - srv->flags |= ONOFF_SERVICE_HAS_ERROR; + if (mgr->refs > (SERVICE_REFS_MAX - refs)) { + mgr->flags |= ONOFF_HAS_ERROR; } else { - srv->refs += refs; + mgr->refs += refs; + } + + stop = (mgr->refs == 0); + if (stop + && (k_is_in_isr() || k_is_pre_kernel()) + && ((mgr->flags & ONOFF_STOP_SLEEPS) != 0U)) { + mgr->flags |= ONOFF_HAS_ERROR; + stop = false; } - __ASSERT_NO_MSG(srv->refs > 0U); } - sys_slist_init(&srv->clients); + sys_slist_init(&mgr->clients); + + u32_t state = mgr->flags & ONOFF_STATE_MASK; - k_spin_unlock(&srv->lock, key); + k_spin_unlock(&mgr->lock, key); - notify_all(srv, &clients, res); + notify_all(mgr, &clients, state, res); + if (stop) { + __ASSERT_NO_MSG(mgr->transitions->stop != NULL); + mgr->transitions->stop(mgr, onoff_stop_notify); + } } -int onoff_request(struct onoff_service *srv, +int onoff_request(struct onoff_manager *mgr, struct onoff_client *cli) { bool add_client = false; /* add client to pending list */ bool start = 
false; /* invoke start transition */ bool notify = false; /* do client notification */ - int rv = validate_args(srv, cli); + int rv = validate_args(mgr, cli); if (rv < 0) { return rv; } - k_spinlock_key_t key = k_spin_lock(&srv->lock); + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + if (get_client_type(cli) != ONOFF_CLIENT_INACTIVE) { + rv = -EINVAL; + goto out; + } - if ((srv->flags & ONOFF_SERVICE_HAS_ERROR) != 0) { + if ((mgr->flags & ONOFF_HAS_ERROR) != 0) { rv = -EIO; goto out; } /* Reject if this would overflow the reference count. */ - if (srv->refs == SERVICE_REFS_MAX) { + if (mgr->refs == SERVICE_REFS_MAX) { rv = -EAGAIN; goto out; } - u32_t state = srv->flags & SERVICE_STATE_MASK; + u32_t state = mgr->flags & ONOFF_STATE_MASK; switch (state) { - case SERVICE_STATE_TO_OFF: + case ONOFF_STATE_TO_OFF: /* Queue to start after release */ - __ASSERT_NO_MSG(srv->releaser != NULL); add_client = true; rv = 3; break; - case SERVICE_STATE_OFF: + case ONOFF_STATE_OFF: /* Reject if in a non-thread context and start could * wait. 
*/ if ((k_is_in_isr() || k_is_pre_kernel()) - && ((srv->flags & ONOFF_SERVICE_START_SLEEPS) != 0U)) { + && ((mgr->flags & ONOFF_START_SLEEPS) != 0U)) { rv = -EWOULDBLOCK; break; } /* Start with first request while off */ - __ASSERT_NO_MSG(srv->refs == 0); - set_service_state(srv, SERVICE_STATE_TO_ON); + __ASSERT_NO_MSG(mgr->refs == 0); + set_service_state(mgr, ONOFF_STATE_TO_ON); start = true; add_client = true; rv = 2; break; - case SERVICE_STATE_TO_ON: + case ONOFF_STATE_TO_ON: /* Already starting, just queue it */ add_client = true; rv = 1; break; - case SERVICE_STATE_ON: + case ONOFF_STATE_ON: /* Just increment the reference count */ notify = true; break; @@ -253,109 +271,133 @@ int onoff_request(struct onoff_service *srv, out: if (add_client) { - sys_slist_append(&srv->clients, &cli->node); + set_client_type(cli, ONOFF_CLIENT_REQUEST); + sys_slist_append(&mgr->clients, &cli->node); } else if (notify) { - srv->refs += 1; + mgr->refs += 1; } - k_spin_unlock(&srv->lock, key); + state = mgr->flags & ONOFF_STATE_MASK; + k_spin_unlock(&mgr->lock, key); if (start) { - __ASSERT_NO_MSG(srv->start != NULL); - srv->start(srv, onoff_start_notify); + __ASSERT_NO_MSG(mgr->transitions->start != NULL); + notify_monitors(mgr, state, 0); + mgr->transitions->start(mgr, onoff_start_notify); } else if (notify) { - notify_one(srv, cli, 0); + notify_one(mgr, cli, state, 0); } return rv; } -static void onoff_stop_notify(struct onoff_service *srv, +static void onoff_stop_notify(struct onoff_manager *mgr, int res) { - bool notify_clients = false; - int client_res = res; + bool fail_restart = false; bool start = false; - k_spinlock_key_t key = k_spin_lock(&srv->lock); - sys_slist_t clients = srv->clients; - struct onoff_client *releaser = srv->releaser; + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + sys_slist_t req_clients; + sys_slist_t rel_clients; - /* If the stop operation failed log an error and leave the - * rest of the state in place. 
- * - * If it succeeded remove the last reference and transition to - * off. + /* Separate any remaining clients into request clients and + * release clients, decrementing the reference count for the + * release clients. + */ + sys_slist_init(&req_clients); + sys_slist_init(&rel_clients); + while (!sys_slist_is_empty(&mgr->clients)) { + sys_snode_t *node = sys_slist_get_not_empty(&mgr->clients); + struct onoff_client *cli = CONTAINER_OF(node, + struct onoff_client, + node); + + if (get_client_type(cli) == ONOFF_CLIENT_RELEASE) { + sys_slist_append(&rel_clients, node); + mgr->refs -= 1U; + } else { + __ASSERT_NO_MSG(get_client_type(cli) + == ONOFF_CLIENT_REQUEST); + sys_slist_append(&req_clients, node); + } + } + + __ASSERT_NO_MSG(mgr->refs == 0); + + /* If the stop operation failed log an error, leave the + * rest of the state in place, and notify all clients. * - * In either case remove the last reference, and notify all - * waiting clients of operation completion. + * If the stop succeeded mark the service as off. If there + * are request clients then synthesize a transition to on + * (which fails if the start cannot be initiated). */ if (res < 0) { - srv->flags &= ~SERVICE_STATE_TRANSITION; - srv->flags |= ONOFF_SERVICE_HAS_ERROR; - notify_clients = true; - } else if (sys_slist_is_empty(&clients)) { - set_service_state(srv, SERVICE_STATE_OFF); - } else if ((k_is_in_isr() || k_is_pre_kernel()) - && ((srv->flags & ONOFF_SERVICE_START_SLEEPS) != 0U)) { - set_service_state(srv, SERVICE_STATE_OFF); - notify_clients = true; - client_res = -EWOULDBLOCK; + mgr->flags &= ~ONOFF_FLAG_TRANSITION; + mgr->flags |= ONOFF_HAS_ERROR; + sys_slist_merge_slist(&rel_clients, &req_clients); } else { - set_service_state(srv, SERVICE_STATE_TO_ON); - start = true; + set_service_state(mgr, ONOFF_STATE_OFF); + + if (!sys_slist_is_empty(&req_clients)) { + + /* We need to restart, which requires that we + * be able to start. 
+ */ + fail_restart = ((k_is_in_isr() || k_is_pre_kernel()) + && ((mgr->flags & ONOFF_START_SLEEPS) + != 0U)); + if (!fail_restart) { + mgr->clients = req_clients; + set_service_state(mgr, ONOFF_STATE_TO_ON); + start = true; + } + } } - __ASSERT_NO_MSG(releaser); - srv->refs -= 1U; - srv->releaser = NULL; - __ASSERT_NO_MSG(srv->refs == 0); + u32_t state = mgr->flags & ONOFF_STATE_MASK; - /* Remove the clients if there was an error or a delayed start - * couldn't be initiated, because we're resolving their - * operation with an error. - */ - if (notify_clients) { - sys_slist_init(&srv->clients); - } + k_spin_unlock(&mgr->lock, key); - k_spin_unlock(&srv->lock, key); + /* Notify all the release clients of the result */ + notify_all(mgr, &rel_clients, state, res); - /* Notify the releaser. If there was an error, notify any - * pending requests; otherwise if there are pending requests - * start the transition to ON. - */ - notify_one(srv, releaser, res); - if (notify_clients) { - notify_all(srv, &clients, client_res); + /* Handle synthesized start (failure or initiate) */ + if (fail_restart) { + notify_all(mgr, &req_clients, state, -EWOULDBLOCK); } else if (start) { - srv->start(srv, onoff_start_notify); + mgr->transitions->start(mgr, onoff_start_notify); } } -int onoff_release(struct onoff_service *srv, +int onoff_release(struct onoff_manager *mgr, struct onoff_client *cli) { bool stop = false; /* invoke stop transition */ bool notify = false; /* do client notification */ - int rv = validate_args(srv, cli); + int rv = validate_args(mgr, cli); if (rv < 0) { return rv; } - k_spinlock_key_t key = k_spin_lock(&srv->lock); + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + if (get_client_type(cli) != ONOFF_CLIENT_INACTIVE) { + rv = -EINVAL; + goto out; + } - if ((srv->flags & ONOFF_SERVICE_HAS_ERROR) != 0) { + if ((mgr->flags & ONOFF_HAS_ERROR) != 0) { rv = -EIO; goto out; } - u32_t state = srv->flags & SERVICE_STATE_MASK; + u32_t state = mgr->flags & ONOFF_STATE_MASK; 
switch (state) { - case SERVICE_STATE_ON: + case ONOFF_STATE_ON: /* Stay on if release leaves a client. */ - if (srv->refs > 1U) { + if (mgr->refs > 1U) { notify = true; rv = 1; break; @@ -365,23 +407,24 @@ int onoff_release(struct onoff_service *srv, * wait */ if ((k_is_in_isr() || k_is_pre_kernel()) - && ((srv->flags & ONOFF_SERVICE_STOP_SLEEPS) != 0)) { + && ((mgr->flags & ONOFF_STOP_SLEEPS) != 0)) { rv = -EWOULDBLOCK; break; } stop = true; - set_service_state(srv, SERVICE_STATE_TO_OFF); - srv->releaser = cli; + set_service_state(mgr, ONOFF_STATE_TO_OFF); + set_client_type(cli, ONOFF_CLIENT_RELEASE); + sys_slist_append(&mgr->clients, &cli->node); rv = 2; break; - case SERVICE_STATE_TO_ON: + case ONOFF_STATE_TO_ON: rv = -EBUSY; break; - case SERVICE_STATE_OFF: - case SERVICE_STATE_TO_OFF: + case ONOFF_STATE_OFF: + case ONOFF_STATE_TO_OFF: rv = -EALREADY; break; default: @@ -390,57 +433,61 @@ int onoff_release(struct onoff_service *srv, out: if (notify) { - srv->refs -= 1U; + mgr->refs -= 1U; } - k_spin_unlock(&srv->lock, key); + state = mgr->flags & ONOFF_STATE_MASK; + k_spin_unlock(&mgr->lock, key); if (stop) { - __ASSERT_NO_MSG(srv->stop != NULL); - srv->stop(srv, onoff_stop_notify); + __ASSERT_NO_MSG(mgr->transitions->stop != NULL); + notify_monitors(mgr, state, 0); + mgr->transitions->stop(mgr, onoff_stop_notify); } else if (notify) { - notify_one(srv, cli, 0); + notify_one(mgr, cli, state, 0); } return rv; } -static void onoff_reset_notify(struct onoff_service *srv, +static void onoff_reset_notify(struct onoff_manager *mgr, int res) { - k_spinlock_key_t key = k_spin_lock(&srv->lock); - sys_slist_t clients = srv->clients; + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + sys_slist_t clients = mgr->clients; /* If the reset failed clear the transition flag but otherwise * leave the state unchanged. * * If it was successful clear the reference count and all - * flags except capability flags (sets to SERVICE_STATE_OFF). 
+ * flags except capability flags (sets to ONOFF_STATE_OFF). */ if (res < 0) { - srv->flags &= ~SERVICE_STATE_TRANSITION; + mgr->flags &= ~ONOFF_FLAG_TRANSITION; } else { - __ASSERT_NO_MSG(srv->refs == 0U); - srv->refs = 0U; - srv->flags &= SERVICE_CONFIG_FLAGS; + __ASSERT_NO_MSG(mgr->refs == 0U); + mgr->refs = 0U; + mgr->flags &= SERVICE_CONFIG_FLAGS; } - sys_slist_init(&srv->clients); + sys_slist_init(&mgr->clients); - k_spin_unlock(&srv->lock, key); + u32_t state = mgr->flags & ONOFF_STATE_MASK; - notify_all(srv, &clients, res); + k_spin_unlock(&mgr->lock, key); + + notify_all(mgr, &clients, state, res); } -int onoff_service_reset(struct onoff_service *srv, - struct onoff_client *cli) +int onoff_reset(struct onoff_manager *mgr, + struct onoff_client *cli) { - if (srv->reset == NULL) { + if (mgr->transitions->reset == NULL) { return -ENOTSUP; } bool reset = false; - int rv = validate_args(srv, cli); + int rv = validate_args(mgr, cli); if (rv < 0) { return rv; @@ -448,71 +495,130 @@ int onoff_service_reset(struct onoff_service *srv, /* Reject if in a non-thread context and reset could wait. 
*/ if ((k_is_in_isr() || k_is_pre_kernel()) - && ((srv->flags & ONOFF_SERVICE_RESET_SLEEPS) != 0U)) { + && ((mgr->flags & ONOFF_RESET_SLEEPS) != 0U)) { return -EWOULDBLOCK; } - k_spinlock_key_t key = k_spin_lock(&srv->lock); + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + if (get_client_type(cli) != ONOFF_CLIENT_INACTIVE) { + rv = -EINVAL; + goto out; + } - if ((srv->flags & ONOFF_SERVICE_HAS_ERROR) == 0) { + if ((mgr->flags & ONOFF_HAS_ERROR) == 0) { rv = -EALREADY; goto out; } - if ((srv->flags & SERVICE_STATE_TRANSITION) == 0) { + if ((mgr->flags & ONOFF_FLAG_TRANSITION) == 0) { reset = true; - srv->flags |= SERVICE_STATE_TRANSITION; + mgr->flags |= ONOFF_FLAG_TRANSITION; } out: if (rv >= 0) { - sys_slist_append(&srv->clients, &cli->node); + set_client_type(cli, ONOFF_CLIENT_RESET); + sys_slist_append(&mgr->clients, &cli->node); } - k_spin_unlock(&srv->lock, key); + k_spin_unlock(&mgr->lock, key); if (reset) { - srv->reset(srv, onoff_reset_notify); + mgr->transitions->reset(mgr, onoff_reset_notify); } return rv; } -int onoff_cancel(struct onoff_service *srv, +int onoff_cancel(struct onoff_manager *mgr, struct onoff_client *cli) { - int rv = validate_args(srv, cli); + int rv = validate_args(mgr, cli); if (rv < 0) { return rv; } rv = -EALREADY; - k_spinlock_key_t key = k_spin_lock(&srv->lock); - u32_t state = srv->flags & SERVICE_STATE_MASK; - - /* Can't remove the last client waiting for the in-progress - * transition, as there would be nobody to receive the - * completion notification, which might indicate a service - * error. 
- */ - if (sys_slist_find_and_remove(&srv->clients, &cli->node)) { - rv = 0; - if (sys_slist_is_empty(&srv->clients) - && (state != SERVICE_STATE_TO_OFF)) { - rv = -EWOULDBLOCK; - sys_slist_append(&srv->clients, &cli->node); + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + u32_t state = mgr->flags & ONOFF_STATE_MASK; + + (void)state; + if (sys_slist_find_and_remove(&mgr->clients, &cli->node)) { + switch (get_client_type(cli)) { + case ONOFF_CLIENT_REQUEST: + /* Requests can be cancelled whether + * transitioning to on or off. They're + * cancelled immediately. + */ + __ASSERT_NO_MSG((state == ONOFF_STATE_TO_ON) + || (state == ONOFF_STATE_TO_OFF)); + rv = 0; + break; + case ONOFF_CLIENT_RELEASE: + __ASSERT_NO_MSG(state == ONOFF_STATE_TO_OFF); + + /* A release can only be present when + * transitioning to off, and when cancelled it + * converts to a request that goes back on the + * client list to be completed later. + */ + set_client_type(cli, ONOFF_CLIENT_REQUEST); + mgr->refs -= 1; + rv = 1; + sys_slist_append(&mgr->clients, &cli->node); + break; + case ONOFF_CLIENT_RESET: + __ASSERT_NO_MSG((state & ONOFF_HAS_ERROR) != 0); + rv = 0; + break; + default: + break; } - } else if (srv->releaser == cli) { - /* must be waiting for TO_OFF to complete */ - rv = -EWOULDBLOCK; } - k_spin_unlock(&srv->lock, key); + k_spin_unlock(&mgr->lock, key); if (rv == 0) { - notify_one(srv, cli, -ECANCELED); + set_client_type(cli, ONOFF_CLIENT_INACTIVE); } return rv; } + +int onoff_monitor_register(struct onoff_manager *mgr, + struct onoff_monitor *mon) +{ + if ((mgr == NULL) || (mon == NULL)) { + return -EINVAL; + } + + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + sys_slist_append(&mgr->monitors, &mon->node); + + k_spin_unlock(&mgr->lock, key); + + return 0; +} + +int onoff_monitor_unregister(struct onoff_manager *mgr, + struct onoff_monitor *mon) +{ + int rv = -EINVAL; + + if ((mgr == NULL) || (mon == NULL)) { + return rv; + } + + k_spinlock_key_t key = 
k_spin_lock(&mgr->lock); + + if (sys_slist_find_and_remove(&mgr->monitors, &mon->node)) { + rv = 0; + } + + k_spin_unlock(&mgr->lock, key); + + return rv; +} diff --git a/lib/os/onoff_notifier.c b/lib/os/onoff_notifier.c new file mode 100644 index 0000000000000..7ab53fa196a83 --- /dev/null +++ b/lib/os/onoff_notifier.c @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +#define WITH_PRINTK 0 + +/* Identify the four external events that can trigger state changes, + * as well as an internal state used when processing deferred actions. + */ +enum event_type { + /* No-op event: used to process deferred changes */ + EVT_NOP, + /* Client request for the service */ + EVT_REQUEST, + /* Client release of the service */ + EVT_RELEASE, + /* Client reset of the service */ + EVT_RESET, + /* Completion of a service transition. */ + EVT_COMPLETE, +}; + +/* There are eight bits that indicate the state of the notifier. + * These bits are manipulated by process_events() under lock, and + * actions cued by bit values are executed outside of lock within + * process_events(). + * + * * ON specifies the target state for the client: clear for OFF, set + * for ON. + * * RESET specifies that the notifier is being reset. + * * CHANGING specifies that the device is in transition to the target + * state. This bit is set when a request or release event occurs + * while the notifier is in a stable state, and is cleared when the + * required onoff transition has been initiated. + * * CANCEL specifies that the target state changed while a transition + * had not been completed. Set or cleared based on request and + * release events that occur while CHANGING is set, cleared when the + * completion event is received. + * * UNSUBMITTED indicates that a required onoff transition has not + * yet been initiated. 
This is set when CHANGING is set, and is + * cleared when mutex is restored after initiating the onoff + * transition. + * * UNNOTIFIED indicates that an error or transition completion has + * been recorded but the notifier callback has not yet been invoked. + * Set when an error is detected or on completion of an uncancelled + * transition, cleared when the notification is about to be invoked. + * * PROCESSING is used to mark that there is an active invocation of + * the state machine. It defers actions related to state changes + * that occur during the unlocked periods where onoff transition or + * notification occurs, which simplifies the state machine. The + * actions will be invoked when the top-level process_events() + * invocation regains control. + * * ERROR indicates receipt of an error from the underlying onoff + * service, either a rejected transition or a failed transition. + */ +#define ST_BIT_ON BIT(0) +#define ST_BIT_RESET BIT(1) +#define ST_BIT_CHANGING BIT(2) +#define ST_BIT_CANCEL BIT(3) +#define ST_BIT_UNSUBMITTED BIT(4) +#define ST_BIT_UNNOTIFIED BIT(5) +#define ST_BIT_PROCESSING BIT(6) +#define ST_BIT_ERROR BIT(7) + +#define ST_OFF 0 +#define ST_ON ST_BIT_ON +#define ST_CHANGING_FROM_OFF (ST_OFF | ST_BIT_CHANGING) +#define ST_CHANGING_FROM_ON (ST_ON | ST_BIT_CHANGING) +#define ST_RESETTING (ST_BIT_RESET | ST_BIT_CHANGING) + +/* Mask used to isolate the bits required to the core state: stable ON + * and OFF, and unstable changing from ON or OFF. + */ +#define ST_CHANGING_MASK (ST_BIT_ON | ST_BIT_RESET | ST_BIT_CHANGING) + +static inline bool state_has_error(u32_t state) +{ + return state & ST_BIT_ERROR; +} + +static inline u32_t state_set_error(struct onoff_notifier *np, + int res) +{ + u32_t state = np->state; + + np->onoff_result = res; + state |= ST_BIT_ERROR | ST_BIT_UNNOTIFIED; + np->state = state; + + return state; +} + +#if (WITH_PRINTK - 0) +#define PRINTK(...) 
printk(__VA_ARGS__) + +static const char *const evt_s[] = { + [EVT_NOP] = "nop", + [EVT_REQUEST] = "request", + [EVT_RELEASE] = "release", + [EVT_RESET] = "reset", + [EVT_COMPLETE] = "complete", +}; + +static const char *state_s(u32_t state) +{ + static char buf[128]; + + snprintf(buf, sizeof(buf), "%x: %s%s%s%s%s%s%s", + state, + (state & ST_BIT_ON) ? "ON" : "off", + (state & ST_BIT_CHANGING) ? " CHANGING" : "", + (state & ST_BIT_CANCEL) ? " CANCEL" : "", + (state & ST_BIT_UNSUBMITTED) ? " UNSUBMITTED" : "", + (state & ST_BIT_UNNOTIFIED) ? " UNNOTIFIED" : "", + (state & ST_BIT_PROCESSING) ? " PROCESSING" : "", + (state & ST_BIT_ERROR) ? " ERROR" : ""); + return buf; +} + +#else /* WITH_PRINTK */ +#define PRINTK(...) (void)0 +#endif /* WITH_PRINTK */ + +static int process_event(struct onoff_notifier *np, + int evt, + int res); + +static void onoff_callback(struct onoff_manager *mp, + struct onoff_client *cli, + u32_t state, + int res, + void *user_data) +{ + struct onoff_notifier *np = user_data; + + process_event(np, EVT_COMPLETE, res); +} + +int onoff_notifier_request(struct onoff_notifier *np) +{ + return process_event(np, EVT_REQUEST, 0); +} + +int onoff_notifier_release(struct onoff_notifier *np) +{ + return process_event(np, EVT_RELEASE, 0); +} + +int onoff_notifier_reset(struct onoff_notifier *np) +{ + return process_event(np, EVT_RESET, 0); +} + +static void issue_change(struct onoff_notifier *np, + k_spinlock_key_t *key) +{ + int rc; + u32_t mode = np->state & ST_CHANGING_MASK; + + PRINTK("submit %s\n", state_s(mode)); + k_spin_unlock(&np->lock, *key); + + onoff_client_init_callback(&np->cli, onoff_callback, np); + if (mode == ST_CHANGING_FROM_OFF) { + rc = onoff_request(np->onoff, &np->cli); + } else if (mode == ST_CHANGING_FROM_ON) { + rc = onoff_release(np->onoff, &np->cli); + } else { + __ASSERT_NO_MSG(mode == ST_RESETTING); + rc = onoff_reset(np->onoff, &np->cli); + if (rc == -EALREADY) { + /* Somebody already cleared the onoff service + * error; 
synthesize a successful completion + * so the notifier state gets reset. + */ + rc = 0; + process_event(np, EVT_COMPLETE, rc); + } + } + + *key = k_spin_lock(&np->lock); + PRINTK("submitted %s %d\n", state_s(np->state), rc); + if (rc < 0) { + state_set_error(np, rc); + } +} + +static void issue_notification(struct onoff_notifier *np, + k_spinlock_key_t *key) +{ + u32_t state = np->state & ST_CHANGING_MASK; + int res = np->onoff_result; + + PRINTK("notify %x\n", np->state); + k_spin_unlock(&np->lock, *key); + + if (res >= 0) { + res = (state == ST_BIT_ON); + } + np->callback(np, res); + + *key = k_spin_lock(&np->lock); + PRINTK("notified %s\n", state_s(np->state)); +} + +/* There are two points in the state machine where the machine is + * unlocked to perform some external action: + * * Initiation of an onoff transition due to some event; + * * Invocation of the user-specified callback when a stable state is + * reached or an error detected. + * + * Events received during these unlocked periods are recorded in the + * state, but processing is deferred to the top-level invocation which + * will loop to handle any events that occurred during the unlocked + * regions. 
+ */ +static int process_event(struct onoff_notifier *np, + int evt, + int res) +{ + static unsigned int depth; + int rc = 0; + bool loop = false; + k_spinlock_key_t key = k_spin_lock(&np->lock); + u32_t state = np->state; + + ++depth; + PRINTK("pev[%d] entry %s %s\n", depth, state_s(state), evt_s[evt]); + __ASSERT_NO_MSG(evt != EVT_NOP); + do { + rc = 0; + loop = false; + + if (state_has_error(state) + && (evt != EVT_RESET)) { + rc = -EIO; + } else if (((state & ST_CHANGING_MASK) == ST_RESETTING) + && (evt != EVT_COMPLETE)) { + rc = -EWOULDBLOCK; + } else if (evt == EVT_REQUEST) { + if ((state & ST_CHANGING_MASK) == ST_OFF) { + state = ST_CHANGING_FROM_OFF + | ST_BIT_UNSUBMITTED; + } else if ((state & ST_CHANGING_MASK) + == ST_CHANGING_FROM_OFF) { + state &= ~ST_BIT_CANCEL; + } else if ((state & ST_CHANGING_MASK) + == ST_CHANGING_FROM_ON) { + state |= ST_BIT_CANCEL; + } else { + rc = -EALREADY; + } + } else if (evt == EVT_RELEASE) { + if ((state & ST_CHANGING_MASK) == ST_ON) { + state = ST_CHANGING_FROM_ON + | ST_BIT_UNSUBMITTED; + } else if ((state & ST_CHANGING_MASK) + == ST_CHANGING_FROM_ON) { + state &= ~ST_BIT_CANCEL; + } else if ((state & ST_CHANGING_MASK) + == ST_CHANGING_FROM_OFF) { + state |= ST_BIT_CANCEL; + } else { + rc = -EALREADY; + } + } else if (evt == EVT_RESET) { + if (state_has_error(state)) { + state &= ~(ST_BIT_ON | ST_BIT_ERROR); + state |= ST_BIT_RESET | ST_BIT_CHANGING + | ST_BIT_UNSUBMITTED; + } else { + rc = -EALREADY; + } + } else if (evt == EVT_COMPLETE) { + u32_t mode = state & ST_CHANGING_MASK; + + __ASSERT_NO_MSG((state & ST_BIT_CHANGING) != 0U); + state &= ~ST_BIT_CHANGING; + PRINTK("pev[%d] complete %s %d\n", depth, + state_s(state), res); + np->onoff_result = res; + if (res < 0) { + np->state = state; + state = state_set_error(np, res); + } else if (mode == ST_RESETTING) { + /* Reset completed: return to OFF + * state and notify. 
+ */ + state = ST_OFF | ST_BIT_UNNOTIFIED; + } else { + bool cancel = ((state & ST_BIT_CANCEL) != 0U); + + /* The completed operation inverts the + * on bit and clears any pending + * cancel. Loop back to issue the + * reverse transition if the operation + * is to be cancelled, otherwise + * notify the client. + */ + state ^= ST_BIT_ON; + state &= ~ST_BIT_CANCEL; + + if (cancel) { + loop = true; + evt = (state == ST_ON) + ? EVT_RELEASE + : EVT_REQUEST; + } else { + state |= ST_BIT_UNNOTIFIED; + } + PRINTK("pev[%d] settle %s : %s\n", depth, + state_s(state), + cancel ? " CANCEL" : "settle"); + + } + } else { + __ASSERT_NO_MSG(evt == EVT_NOP); + } + + if (!loop) { + evt = EVT_NOP; + } + + /* If we're in a nested call defer any additional processing. */ + if ((state & ST_BIT_PROCESSING) != 0U) { + break; + } + + state |= ST_BIT_PROCESSING; + + /* Initiate any unsubmitted onoff transition. */ + if ((state & ST_BIT_UNSUBMITTED) != 0U) { + state &= ~ST_BIT_UNSUBMITTED; + np->state = state; + + issue_change(np, &key); + + loop |= (np->state != state); + state = np->state; + } + + /* Initiate any unnotified notifications. */ + if ((state & ST_BIT_UNNOTIFIED) != 0U) { + state &= ~ST_BIT_UNNOTIFIED; + np->state = state; + + issue_notification(np, &key); + + loop |= (np->state != state); + state = np->state; + } + + state &= ~ST_BIT_PROCESSING; + } while (loop); + + np->state = state; + if (rc >= 0) { + rc = ((state & ST_CHANGING_MASK) == ST_BIT_ON) != 0U; + } + + + PRINTK("pev[%d] exit %s %d\n", depth, state_s(state), rc); + --depth; + k_spin_unlock(&np->lock, key); + + return rc; +} diff --git a/lib/os/queued_operation.c b/lib/os/queued_operation.c new file mode 100644 index 0000000000000..63c26fab02438 --- /dev/null +++ b/lib/os/queued_operation.c @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +/* States used in the manager state field. 
*/ +enum state { + /* Service is not active. + * + * Transitions to STARTING on queued_operation_submit(). + */ + ST_OFF = 0, + + /* Service is being started. + * + * This is a transient state while an associated on-off + * service request is incomplete. Transitions to IDLE and + * reschedules on successful start, and ERROR on failure to + * start. + */ + ST_STARTING, + + /* Service is active with no known operations. + * + * This is a transient substate of an implicit ON state. The + * machine will transition to NOTIFYING or STOPPING within + * the current mutex region + */ + ST_IDLE, + + /* The manager is invoking process() to notify the service of + * a new operation or a transition to idle. + * + * Transitions to PROCESSING if an operation was passed and + * queued_operation_finalize() has not been invoked before + * process() returns to the manager. + * + * Transitions to IDLE if an operation was not passed and the + * manager queue remains empty when process() returns to the + * manager. + * + * Re-runs select in all other cases. + */ + ST_NOTIFYING, + + /* A new operation has been identified and the service + * process() function will be/is/has-been invoked on it. + * + * Transitions to FINALIZING when queued_operation_finalize() + * is invoked. + */ + ST_PROCESSING, + + /* An operation that was processing is being finalized. + * + * Re-selects after finalization and any containing notifying + * completes. + */ + ST_FINALIZING, + + /* Service is being started. + * + * This is a transient state while an associated on-off + * service request is pending. + */ + ST_STOPPING, + + /* Service is in an error state. 
*/ + ST_ERROR, +}; + +/* Forward declaration */ +static void select_next_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key); +static void start_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key); + +static inline void trivial_start_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key) +{ + mgr->state = ST_IDLE; + select_next_and_unlock(mgr, key); +} + +static inline int op_get_priority(const struct queued_operation *op) +{ + return (s8_t)(op->notify.flags >> QUEUED_OPERATION_PRIORITY_POS); +} + +static inline int op_set_priority(struct queued_operation *op, + int priority) +{ + s8_t prio = (s8_t)priority; + u32_t mask = (QUEUED_OPERATION_PRIORITY_MASK + << QUEUED_OPERATION_PRIORITY_POS); + + if (priority == QUEUED_OPERATION_PRIORITY_PREPEND) { + prio = INT8_MIN; + } else if (priority == QUEUED_OPERATION_PRIORITY_APPEND) { + prio = INT8_MAX; + } else if (prio != priority) { + return -EINVAL; + } + + op->notify.flags = (op->notify.flags & ~mask) + | (mask & (prio << QUEUED_OPERATION_PRIORITY_POS)); + + return 0; +} + +static inline void finalize_and_notify(struct queued_operation_manager *mgr, + struct queued_operation *op, + int res) +{ + sys_notify_generic_callback cb + = sys_notify_finalize(&op->notify, res); + + if (cb != NULL) { + mgr->vtable->callback(mgr, op, cb); + } +} + +/* React to the completion of an onoff transition, either from a + * manager or directly. + * + * @param mgr the operation manager + * + * @param from either ST_STARTING or ST_STOPPING depending on + * transition direction + * + * @param res the transition completion value, negative for error. 
+ */ +static void settle_onoff(struct queued_operation_manager *mgr, + enum state from, + int res) +{ + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + __ASSERT_NO_MSG(mgr->state == from); + + if (res >= 0) { + if (from == ST_STARTING) { + trivial_start_and_unlock(mgr, key); + return; + } + + /* Came from STOPPING so set to OFF, but automatically + * initiate a new start if operations have been added. + */ + mgr->state = ST_OFF; + if (!sys_slist_is_empty(&mgr->operations)) { + start_and_unlock(mgr, key); + return; + } + + k_spin_unlock(&mgr->lock, key); + return; + } + + /* On transition failure mark service failed. All unstarted + * operations are unlinked and completed as a service failure. + */ + sys_slist_t ops = mgr->operations; + + sys_slist_init(&mgr->operations); + mgr->state = ST_ERROR; + + k_spin_unlock(&mgr->lock, key); + + struct queued_operation *op; + + SYS_SLIST_FOR_EACH_CONTAINER(&ops, op, node) { + finalize_and_notify(mgr, op, -ENODEV); + } +} + +static void start_callback(struct onoff_manager *mp, + struct onoff_client *cli, + u32_t state, + int res, + void *user_data) +{ + settle_onoff(user_data, ST_STARTING, res); +} + +static void stop_callback(struct onoff_manager *mp, + struct onoff_client *cli, + u32_t state, + int res, + void *user_data) +{ + settle_onoff(user_data, ST_STOPPING, res); +} + +static void stop_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key) +{ + __ASSERT_NO_MSG(mgr->state == ST_IDLE); + + if (mgr->onoff == NULL) { + mgr->state = ST_OFF; + k_spin_unlock(&mgr->lock, key); + return; + } + + mgr->state = ST_STOPPING; + + struct onoff_manager *onoff = mgr->onoff; + + k_spin_unlock(&mgr->lock, key); + + struct onoff_client *cli = &mgr->onoff_client; + int rc = 0; + + onoff_client_init_callback(cli, stop_callback, mgr); + rc = onoff_release(onoff, cli); + + if (rc < 0) { + settle_onoff(mgr, ST_STOPPING, rc); + } +} + +static void select_next_and_unlock(struct queued_operation_manager *mgr, + 
k_spinlock_key_t key) +{ + bool loop = false; + + do { + struct queued_operation *op = NULL; + sys_snode_t *node = sys_slist_get(&mgr->operations); + u32_t state = mgr->state; + + __ASSERT_NO_MSG((state == ST_IDLE) + || (state == ST_FINALIZING)); + loop = false; + if (node) { + op = CONTAINER_OF(node, struct queued_operation, node); + mgr->state = ST_NOTIFYING; + mgr->current = op; + + k_spin_unlock(&mgr->lock, key); + + /* Notify the service, then check everything again + * because the operation might have completed or the + * queue might have changed while we were unlocked. + */ + mgr->vtable->process(mgr, op); + + /* Update the state to one of IDLE, PROCESSING, or + * leave it FINALIZING; loop if something needs to be + * done. + */ + key = k_spin_lock(&mgr->lock); + + state = mgr->state; + + /* If an operation finalized during notification we + * need to reselect because finalization couldn't do + * that, otherwise it's still running. + */ + loop = (state == ST_FINALIZING); + if (!loop) { + __ASSERT_NO_MSG(state == ST_NOTIFYING); + mgr->state = ST_PROCESSING; + } + } else { + __ASSERT_NO_MSG(state == ST_FINALIZING); + mgr->state = ST_IDLE; + mgr->current = op; + } + + if (!loop) { + /* All done, release lock and exit */ + if (mgr->state == ST_IDLE) { + stop_and_unlock(mgr, key); + } else { + k_spin_unlock(&mgr->lock, key); + } + } + } while (loop); +} + +static void start_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key) +{ + struct onoff_manager *onoff = mgr->onoff; + struct onoff_client *cli = &mgr->onoff_client; + int rv = 0; + + if (onoff == NULL) { + trivial_start_and_unlock(mgr, key); + return; + } + + mgr->state = ST_STARTING; + k_spin_unlock(&mgr->lock, key); + + onoff_client_init_callback(cli, start_callback, mgr); + rv = onoff_request(onoff, cli); + + if (rv >= 0) { + /* Success. Replace the request result value with a + * fixed success value, and lock so we can keep + * going at the call site. 
+ */ + rv = 0; + } else { + /* Failure, record the error state */ + settle_onoff(mgr, ST_STARTING, rv); + } +} + +int queued_operation_submit(struct queued_operation_manager *mgr, + struct queued_operation *op, + int priority) +{ + int validate_rv = -ENOTSUP; + int rv = 0; + + __ASSERT_NO_MSG(mgr != NULL); + __ASSERT_NO_MSG(mgr->vtable != NULL); + __ASSERT_NO_MSG(mgr->vtable->process != NULL); + __ASSERT_NO_MSG(op != NULL); + + /* Validation is optional; if present, use it. */ + if (mgr->vtable->validate) { + validate_rv = mgr->vtable->validate(mgr, op); + rv = validate_rv; + } + + /* Set the priority, checking whether it's in range. */ + if (rv >= 0) { + rv = op_set_priority(op, priority); + } + + /* Reject callback notifications without translation + * function. + */ + if ((rv >= 0) + && sys_notify_uses_callback(&op->notify) + && (mgr->vtable->callback == NULL)) { + rv = -ENOTSUP; + } + + if (rv < 0) { + goto out; + } + + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + sys_slist_t *list = &mgr->operations; + u32_t state = mgr->state; + + /* Preserve error state, or insert the item into the list. */ + if (state == ST_ERROR) { + rv = -ENODEV; + } else if (priority == QUEUED_OPERATION_PRIORITY_PREPEND) { + sys_slist_prepend(list, &op->node); + } else if (priority == QUEUED_OPERATION_PRIORITY_APPEND) { + sys_slist_append(list, &op->node); + } else { + struct queued_operation *prev = NULL; + struct queued_operation *tmp; + + SYS_SLIST_FOR_EACH_CONTAINER(list, tmp, node) { + if (priority < op_get_priority(tmp)) { + break; + } + prev = tmp; + } + + if (prev == NULL) { + sys_slist_prepend(list, &op->node); + } else { + sys_slist_insert(list, &prev->node, &op->node); + } + } + + /* Initiate an operation only if we're off. 
*/ + if (state == ST_OFF) { + start_and_unlock(mgr, key); + } else { + __ASSERT_NO_MSG(state != ST_IDLE); + k_spin_unlock(&mgr->lock, key); + } + +out: + /* Preserve a service-specific success code on success */ + if ((rv >= 0) && (validate_rv >= 0)) { + rv = validate_rv; + } + + return rv; +} + +void queued_operation_finalize(struct queued_operation_manager *mgr, + int res) +{ + __ASSERT_NO_MSG(mgr != NULL); + + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + struct queued_operation *op = mgr->current; + u32_t state = mgr->state; + bool processing = (state == ST_PROCESSING); + + __ASSERT_NO_MSG(op != NULL); + __ASSERT_NO_MSG((state == ST_NOTIFYING) + || (state == ST_PROCESSING)); + + mgr->state = ST_FINALIZING; + + k_spin_unlock(&mgr->lock, key); + + finalize_and_notify(mgr, op, res); + + /* If we were processing we need to reselect; if we were + * notifying we'll reselect when the notification completes. + */ + if (processing) { + select_next_and_unlock(mgr, k_spin_lock(&mgr->lock)); + } +} + +int queued_operation_cancel(struct queued_operation_manager *mgr, + struct queued_operation *op) +{ + __ASSERT_NO_MSG(mgr != NULL); + __ASSERT_NO_MSG(op != NULL); + + int rv = 0; + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + if (op == mgr->current) { + rv = -EINPROGRESS; + } else if (!sys_slist_find_and_remove(&mgr->operations, &op->node)) { + rv = -EINVAL; + } + + k_spin_unlock(&mgr->lock, key); + + if (rv == 0) { + finalize_and_notify(mgr, op, -ECANCELED); + } + + return rv; +} diff --git a/tests/lib/notify/CMakeLists.txt b/tests/lib/notify/CMakeLists.txt new file mode 100644 index 0000000000000..abd9e6e13baee --- /dev/null +++ b/tests/lib/notify/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.13.1) +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(sys_notify) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/lib/notify/prj.conf 
b/tests/lib/notify/prj.conf new file mode 100644 index 0000000000000..1948aaa649a4e --- /dev/null +++ b/tests/lib/notify/prj.conf @@ -0,0 +1,2 @@ +CONFIG_POLL=y +CONFIG_ZTEST=y diff --git a/tests/lib/notify/src/main.c b/tests/lib/notify/src/main.c new file mode 100644 index 0000000000000..4f858cac39250 --- /dev/null +++ b/tests/lib/notify/src/main.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2019 Peter Bigot Consulting, LLC + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +static u32_t get_extflags(const struct sys_notify *anp) +{ + u32_t flags = anp->flags & SYS_NOTIFY_EXTENSION_MASK; + + return flags >> SYS_NOTIFY_EXTENSION_POS; +} + +static void set_extflags(struct sys_notify *anp, + u32_t flags) +{ + anp->flags = (anp->flags & ~SYS_NOTIFY_EXTENSION_MASK) + | (flags << SYS_NOTIFY_EXTENSION_POS); +} + +static void callback(struct sys_notify *anp, + int *resp) +{ + zassert_equal(sys_notify_fetch_result(anp, resp), 0, + "failed callback fetch"); +} + +static void test_validate(void) +{ + struct sys_notify notify = { + .flags = 0, + }; + + zassert_equal(sys_notify_validate(NULL), -EINVAL, + "accepted null pointer"); + zassert_equal(sys_notify_validate(¬ify), -EINVAL, + "accepted bad method"); +} + + +static void test_spinwait(void) +{ + int rc; + int set_res = 423; + int res; + sys_notify_generic_callback cb; + struct sys_notify notify; + u32_t xflags = 0x1234; + + memset(¬ify, 0xac, sizeof(notify)); + rc = sys_notify_validate(¬ify); + zassert_equal(rc, -EINVAL, + "invalid not diagnosed"); + + sys_notify_init_spinwait(¬ify); + rc = sys_notify_validate(¬ify); + zassert_equal(rc, 0, + "init_spinwait invalid"); + + zassert_false(sys_notify_uses_callback(¬ify), + "uses callback"); + + zassert_equal(notify.flags, SYS_NOTIFY_METHOD_SPINWAIT, + "flags mismatch"); + + set_extflags(¬ify, xflags); + zassert_equal(sys_notify_get_method(¬ify), + SYS_NOTIFY_METHOD_SPINWAIT, + "method corrupted"); + zassert_equal(get_extflags(¬ify), xflags, + "xflags extract 
failed"); + + rc = sys_notify_fetch_result(¬ify, &res); + zassert_equal(rc, -EAGAIN, + "spinwait ready too soon"); + + zassert_not_equal(notify.flags, 0, + "flags cleared"); + + cb = sys_notify_finalize(¬ify, set_res); + zassert_equal(cb, (sys_notify_generic_callback)NULL, + "callback not null"); + zassert_equal(notify.flags, 0, + "flags not cleared"); + + rc = sys_notify_fetch_result(¬ify, &res); + zassert_equal(rc, 0, + "spinwait not ready"); + zassert_equal(res, set_res, + "result not set"); +} + +static void test_signal(void) +{ +#ifdef CONFIG_POLL + int rc; + int set_res = 423; + int res; + struct k_poll_signal sig; + sys_notify_generic_callback cb; + struct sys_notify notify; + u32_t xflags = 0x1234; + + memset(¬ify, 0xac, sizeof(notify)); + rc = sys_notify_validate(¬ify); + zassert_equal(rc, -EINVAL, + "invalid not diagnosed"); + + k_poll_signal_init(&sig); + k_poll_signal_check(&sig, &rc, &res); + zassert_equal(rc, 0, + "signal set"); + + sys_notify_init_signal(¬ify, &sig); + notify.method.signal = NULL; + rc = sys_notify_validate(¬ify); + zassert_equal(rc, -EINVAL, + "null signal not invalid"); + + memset(¬ify, 0xac, sizeof(notify)); + sys_notify_init_signal(¬ify, &sig); + rc = sys_notify_validate(¬ify); + zassert_equal(rc, 0, + "init_spinwait invalid"); + + zassert_false(sys_notify_uses_callback(¬ify), + "uses callback"); + + zassert_equal(notify.flags, SYS_NOTIFY_METHOD_SIGNAL, + "flags mismatch"); + zassert_equal(notify.method.signal, &sig, + "signal pointer mismatch"); + + set_extflags(¬ify, xflags); + zassert_equal(sys_notify_get_method(¬ify), + SYS_NOTIFY_METHOD_SIGNAL, + "method corrupted"); + zassert_equal(get_extflags(¬ify), xflags, + "xflags extract failed"); + + rc = sys_notify_fetch_result(¬ify, &res); + zassert_equal(rc, -EAGAIN, + "spinwait ready too soon"); + + zassert_not_equal(notify.flags, 0, + "flags cleared"); + + cb = sys_notify_finalize(¬ify, set_res); + zassert_equal(cb, (sys_notify_generic_callback)NULL, + "callback not null"); + 
zassert_equal(notify.flags, 0, + "flags not cleared"); + k_poll_signal_check(&sig, &rc, &res); + zassert_equal(rc, 1, + "signal not set"); + zassert_equal(res, set_res, + "signal result wrong"); + + rc = sys_notify_fetch_result(¬ify, &res); + zassert_equal(rc, 0, + "signal not ready"); + zassert_equal(res, set_res, + "result not set"); +#endif /* CONFIG_POLL */ +} + +static void test_callback(void) +{ + int rc; + int set_res = 423; + int res; + sys_notify_generic_callback cb; + struct sys_notify notify; + u32_t xflags = 0x8765432; + + memset(¬ify, 0xac, sizeof(notify)); + rc = sys_notify_validate(¬ify); + zassert_equal(rc, -EINVAL, + "invalid not diagnosed"); + + sys_notify_init_callback(¬ify, callback); + notify.method.callback = NULL; + rc = sys_notify_validate(¬ify); + zassert_equal(rc, -EINVAL, + "null callback not invalid"); + + memset(¬ify, 0xac, sizeof(notify)); + sys_notify_init_callback(¬ify, callback); + rc = sys_notify_validate(¬ify); + zassert_equal(rc, 0, + "init_spinwait invalid"); + + zassert_true(sys_notify_uses_callback(¬ify), + "not using callback"); + + zassert_equal(notify.flags, SYS_NOTIFY_METHOD_CALLBACK, + "flags mismatch"); + zassert_equal(notify.method.callback, + (sys_notify_generic_callback)callback, + "callback mismatch"); + + set_extflags(¬ify, xflags); + zassert_equal(sys_notify_get_method(¬ify), + SYS_NOTIFY_METHOD_CALLBACK, + "method corrupted"); + zassert_equal(get_extflags(¬ify), xflags, + "xflags extract failed"); + + rc = sys_notify_fetch_result(¬ify, &res); + zassert_equal(rc, -EAGAIN, + "callback ready too soon"); + + zassert_not_equal(notify.flags, 0, + "flags cleared"); + + cb = sys_notify_finalize(¬ify, set_res); + zassert_equal(cb, (sys_notify_generic_callback)callback, + "callback wrong"); + zassert_equal(notify.flags, 0, + "flags not cleared"); + + res = ~set_res; + ((sys_notify_generic_callback)cb)(¬ify, &res); + zassert_equal(res, set_res, + "result not set"); +} + +void test_main(void) +{ + 
ztest_test_suite(sys_notify_api, + ztest_unit_test(test_validate), + ztest_unit_test(test_spinwait), + ztest_unit_test(test_signal), + ztest_unit_test(test_callback)); + ztest_run_test_suite(sys_notify_api); +} diff --git a/tests/lib/notify/testcase.yaml b/tests/lib/notify/testcase.yaml new file mode 100644 index 0000000000000..b3753af80ede7 --- /dev/null +++ b/tests/lib/notify/testcase.yaml @@ -0,0 +1,3 @@ +tests: + libraries.sys_notify: + tags: sys_notify diff --git a/tests/lib/onoff/src/main.c b/tests/lib/onoff/src/main.c index 0cf2de278dfb9..1b06f6dcad276 100644 --- a/tests/lib/onoff/src/main.c +++ b/tests/lib/onoff/src/main.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2019 Peter Bigot Consulting, LLC + * Copyright (c) 2020 Nordic Semiconductor ASA * * SPDX-License-Identifier: Apache-2.0 */ @@ -7,17 +8,28 @@ #include #include +struct transition_record { + u32_t state; + int res; +}; + +static struct transition_record xions[32]; +static struct transition_record *xions_end; + static struct onoff_client spinwait_cli; +static u32_t callback_state; static int callback_res; static void *callback_ud; -static void callback(struct onoff_service *srv, +static void callback(struct onoff_manager *srv, struct onoff_client *cli, - void *ud, - int res) + u32_t state, + int res, + void *ud) { - callback_ud = ud; + callback_state = state; callback_res = res; + callback_ud = ud; } static inline void init_notify_sig(struct onoff_client *cli, @@ -52,8 +64,8 @@ struct transit_state { const char *tag; bool async; int retval; - onoff_service_notify_fn notify; - struct onoff_service *srv; + onoff_notify_fn notify; + struct onoff_manager *srv; }; static void reset_transit_state(struct transit_state *tsp) @@ -64,8 +76,8 @@ static void reset_transit_state(struct transit_state *tsp) tsp->srv = NULL; } -static void run_transit(struct onoff_service *srv, - onoff_service_notify_fn notify, +static void run_transit(struct onoff_manager *srv, + onoff_notify_fn notify, struct transit_state *tsp) { if 
(tsp->async) { @@ -80,7 +92,7 @@ static void run_transit(struct onoff_service *srv, static void notify(struct transit_state *tsp) { - TC_PRINT("%s settle %d\n", tsp->tag, tsp->retval); + TC_PRINT("%s settle %d %p\n", tsp->tag, tsp->retval, tsp->notify); tsp->notify(tsp->srv, tsp->retval); tsp->notify = NULL; tsp->srv = NULL; @@ -99,7 +111,7 @@ static void isr_notify(struct k_timer *timer) } struct isr_call_state { - struct onoff_service *srv; + struct onoff_manager *srv; struct onoff_client *cli; int result; }; @@ -124,15 +136,15 @@ static void isr_reset(struct k_timer *timer) { struct isr_call_state *rsp = k_timer_user_data_get(timer); - rsp->result = onoff_service_reset(rsp->srv, rsp->cli); + rsp->result = onoff_reset(rsp->srv, rsp->cli); k_sem_give(&isr_sync); } static struct transit_state start_state = { .tag = "start", }; -static void start(struct onoff_service *srv, - onoff_service_notify_fn notify) +static void start(struct onoff_manager *srv, + onoff_notify_fn notify) { run_transit(srv, notify, &start_state); } @@ -140,8 +152,8 @@ static void start(struct onoff_service *srv, static struct transit_state stop_state = { .tag = "stop", }; -static void stop(struct onoff_service *srv, - onoff_service_notify_fn notify) +static void stop(struct onoff_manager *srv, + onoff_notify_fn notify) { run_transit(srv, notify, &stop_state); } @@ -149,64 +161,88 @@ static void stop(struct onoff_service *srv, static struct transit_state reset_state = { .tag = "reset", }; -static void reset(struct onoff_service *srv, - onoff_service_notify_fn notify) +static void reset(struct onoff_manager *srv, + onoff_notify_fn notify) { run_transit(srv, notify, &reset_state); } +static void xion_callback(struct onoff_manager *mgr, + struct onoff_monitor *mon, + u32_t state, + int res) +{ + if (xions_end < (xions + ARRAY_SIZE(xions))) { + *xions_end = (struct transition_record){ + .state = state, + .res = res, + }; + ++xions_end; + } +} + static void clear_transit(void) { callback_res = 0; 
reset_transit_state(&start_state); reset_transit_state(&stop_state); reset_transit_state(&reset_state); + xions_end = xions; } static void test_service_init_validation(void) { int rc; - struct onoff_service srv; + struct onoff_manager srv; + const struct onoff_transitions null_transitions = + ONOFF_TRANSITIONS_INITIALIZER(NULL, NULL, NULL, 0); + const struct onoff_transitions start_transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, NULL, NULL, 0); + const struct onoff_transitions stop_transitions = + ONOFF_TRANSITIONS_INITIALIZER(NULL, stop, NULL, 0); + struct onoff_transitions start_stop_transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, 0); + const struct onoff_transitions all_transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, + ONOFF_START_SLEEPS); clear_transit(); - rc = onoff_service_init(NULL, NULL, NULL, NULL, 0); + rc = onoff_manager_init(NULL, &null_transitions); zassert_equal(rc, -EINVAL, "init null srv %d", rc); - rc = onoff_service_init(&srv, NULL, NULL, NULL, 0); + rc = onoff_manager_init(&srv, &null_transitions); zassert_equal(rc, -EINVAL, "init null transit %d", rc); - rc = onoff_service_init(&srv, start, NULL, NULL, 0); + rc = onoff_manager_init(&srv, &start_transitions); zassert_equal(rc, -EINVAL, "init null stop %d", rc); - rc = onoff_service_init(&srv, NULL, stop, NULL, 0); + rc = onoff_manager_init(&srv, &stop_transitions); zassert_equal(rc, -EINVAL, "init null start %d", rc); - rc = onoff_service_init(&srv, start, stop, NULL, - ONOFF_SERVICE_INTERNAL_BASE); + start_stop_transitions.flags |= ONOFF_FLAG_ONOFF; + rc = onoff_manager_init(&srv, &start_stop_transitions); zassert_equal(rc, -EINVAL, "init bad flags %d", rc); - u32_t flags = ONOFF_SERVICE_START_SLEEPS; - memset(&srv, 0xA5, sizeof(srv)); zassert_false(sys_slist_is_empty(&srv.clients), "slist empty"); - rc = onoff_service_init(&srv, start, stop, reset, flags); + rc = onoff_manager_init(&srv, &all_transitions); zassert_equal(rc, 0, "init good %d", rc); - 
zassert_equal(srv.start, start, + zassert_equal(srv.transitions->start, start, "init start mismatch"); - zassert_equal(srv.stop, stop, + zassert_equal(srv.transitions->stop, stop, "init stop mismatch"); - zassert_equal(srv.reset, reset, + zassert_equal(srv.transitions->reset, reset, "init reset mismatch"); - zassert_equal(srv.flags, ONOFF_SERVICE_START_SLEEPS, + zassert_equal(srv.flags, ONOFF_START_SLEEPS, "init flags mismatch"); zassert_equal(srv.refs, 0, "init refs mismatch"); @@ -224,7 +260,7 @@ static void test_client_init_validation(void) onoff_client_init_spinwait(&cli); zassert_equal(z_snode_next_peek(&cli.node), NULL, "cli node mismatch"); - zassert_equal(cli.flags, ONOFF_CLIENT_NOTIFY_SPINWAIT, + zassert_equal(cli.notify.flags, SYS_NOTIFY_METHOD_SPINWAIT, "cli spinwait flags"); struct k_poll_signal sig; @@ -233,29 +269,31 @@ static void test_client_init_validation(void) onoff_client_init_signal(&cli, &sig); zassert_equal(z_snode_next_peek(&cli.node), NULL, "cli signal node"); - zassert_equal(cli.flags, ONOFF_CLIENT_NOTIFY_SIGNAL, + zassert_equal(cli.notify.flags, SYS_NOTIFY_METHOD_SIGNAL, "cli signal flags"); - zassert_equal(cli.async.signal, &sig, + zassert_equal(cli.notify.method.signal, &sig, "cli signal async"); memset(&cli, 0xA5, sizeof(cli)); onoff_client_init_callback(&cli, callback, &sig); zassert_equal(z_snode_next_peek(&cli.node), NULL, "cli callback node"); - zassert_equal(cli.flags, ONOFF_CLIENT_NOTIFY_CALLBACK, + zassert_equal(cli.notify.flags, SYS_NOTIFY_METHOD_CALLBACK, "cli callback flags"); - zassert_equal(cli.async.callback.handler, callback, + zassert_equal(cli.notify.method.callback, callback, "cli callback handler"); - zassert_equal(cli.async.callback.user_data, &sig, + zassert_equal(cli.user_data, &sig, "cli callback user_data"); } static void test_validate_args(void) { int rc; - struct onoff_service srv; + struct onoff_manager srv; struct k_poll_signal sig; struct onoff_client cli; + const struct onoff_transitions transitions = + 
ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, 0); clear_transit(); @@ -263,7 +301,7 @@ static void test_validate_args(void) * release, and reset; test it through the request API. */ - rc = onoff_service_init(&srv, start, stop, NULL, 0); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); @@ -294,7 +332,7 @@ static void test_validate_args(void) "validate req cli flags"); init_spinwait(&cli); - cli.flags = ONOFF_CLIENT_NOTIFY_INVALID; + cli.notify.flags = SYS_NOTIFY_METHOD_COMPLETED; rc = onoff_request(&srv, &cli); zassert_equal(rc, -EINVAL, "validate req cli mode"); @@ -304,7 +342,7 @@ static void test_validate_args(void) zassert_equal(rc, 0, "validate req cli signal: %d", rc); init_notify_sig(&cli, &sig); - cli.async.signal = NULL; + cli.notify.method.signal = NULL; rc = onoff_request(&srv, &cli); zassert_equal(rc, -EINVAL, "validate req cli signal null"); @@ -315,7 +353,7 @@ static void test_validate_args(void) "validate req cli callback"); init_notify_cb(&cli); - cli.async.callback.handler = NULL; + cli.notify.method.callback = NULL; rc = onoff_request(&srv, &cli); zassert_equal(rc, -EINVAL, "validate req cli callback null"); @@ -329,28 +367,44 @@ static void test_validate_args(void) static void test_reset(void) { int rc; - struct onoff_service srv; + struct onoff_manager srv; struct k_poll_signal sig; struct onoff_client cli; unsigned int signalled = 0; int result = 0; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, 0); + struct onoff_transitions transitions_with_reset = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, 0); clear_transit(); - rc = onoff_service_init(&srv, start, stop, NULL, 0); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); - rc = onoff_service_reset(&srv, &cli); + + struct onoff_monitor mon = { + .callback = xion_callback, + }; + const struct transition_record *xp = xions; + + rc = onoff_reset(&srv, &cli); 
zassert_equal(rc, -ENOTSUP, "reset: %d", rc); - rc = onoff_service_init(&srv, start, stop, reset, 0); + rc = onoff_manager_init(&srv, &transitions_with_reset); zassert_equal(rc, 0, "service init"); - rc = onoff_service_reset(&srv, NULL); + rc = onoff_monitor_register(&srv, &mon); + zassert_equal(rc, 0, + "mon reg"); + + rc = onoff_reset(&srv, NULL); zassert_equal(rc, -EINVAL, "rst no cli"); + zassert_equal(xp, xions, + "xion count"); init_spinwait(&spinwait_cli); rc = onoff_request(&srv, &spinwait_cli); @@ -358,16 +412,22 @@ static void test_reset(void) "req ok"); zassert_equal(srv.refs, 1U, "reset req refs: %u", srv.refs); - - - zassert_false(onoff_service_has_error(&srv), + zassert_equal(xions_end, xions + 2, + "xion count"); + zassert_equal(xp->state, ONOFF_STATE_TO_ON, + "xion0"); + ++xp; + zassert_equal(xp->state, ONOFF_STATE_ON, + "xion1"); + ++xp; + + zassert_false(onoff_has_error(&srv), "has error"); reset_state.retval = 57; init_notify_sig(&cli, &sig); - rc = onoff_service_reset(&srv, &cli); + rc = onoff_reset(&srv, &cli); zassert_equal(rc, -EALREADY, "reset: %d", rc); - stop_state.retval = -23; init_notify_sig(&cli, &sig); rc = onoff_release(&srv, &cli); @@ -375,7 +435,7 @@ static void test_reset(void) "rel trigger: %d", rc); zassert_equal(srv.refs, 0U, "reset req refs: %u", srv.refs); - zassert_true(onoff_service_has_error(&srv), + zassert_true(onoff_has_error(&srv), "has error"); zassert_equal(cli_result(&cli), stop_state.retval, "cli result"); @@ -387,28 +447,52 @@ static void test_reset(void) zassert_equal(result, stop_state.retval, "result"); k_poll_signal_reset(&sig); + zassert_equal(xions_end, xions + 4, + "xion count"); + zassert_equal(xp->state, ONOFF_STATE_TO_OFF, + "xion2"); + ++xp; + zassert_equal(xp->state, ONOFF_HAS_ERROR | ONOFF_STATE_OFF, + "xion3"); + zassert_equal(xp->res, stop_state.retval, + "xion3"); + ++xp; reset_state.retval = -59; init_notify_sig(&cli, &sig); - rc = onoff_service_reset(&srv, &cli); + rc = onoff_reset(&srv, &cli); 
zassert_equal(rc, 0U, "reset: %d", rc); zassert_equal(cli_result(&cli), reset_state.retval, "reset result"); zassert_equal(srv.refs, 0U, "reset req refs: %u", srv.refs); - zassert_true(onoff_service_has_error(&srv), + zassert_true(onoff_has_error(&srv), "has error"); + zassert_equal(xions_end, xions + 5, + "xion count"); + zassert_equal(xp->state, ONOFF_HAS_ERROR | ONOFF_STATE_OFF, + "xion4"); + zassert_equal(xp->res, reset_state.retval, + "xion4"); + ++xp; reset_state.retval = 62; init_notify_sig(&cli, &sig); - rc = onoff_service_reset(&srv, &cli); + rc = onoff_reset(&srv, &cli); zassert_equal(rc, 0U, "reset: %d", rc); zassert_equal(cli_result(&cli), reset_state.retval, "reset result"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); + zassert_equal(xions_end, xions + 6, + "xion count"); + zassert_equal(xp->state, ONOFF_STATE_OFF, + "xion5"); + zassert_equal(xp->res, reset_state.retval, + "xion5"); + ++xp; signalled = 0; result = -1; @@ -420,19 +504,19 @@ static void test_reset(void) zassert_equal(srv.refs, 0U, "reset req refs: %u", srv.refs); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); - rc = onoff_service_init(&srv, start, stop, reset, - ONOFF_SERVICE_RESET_SLEEPS); + transitions_with_reset.flags |= ONOFF_RESET_SLEEPS; + rc = onoff_manager_init(&srv, &transitions_with_reset); zassert_equal(rc, 0, "service init"); start_state.retval = -23; - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); init_spinwait(&spinwait_cli); rc = onoff_request(&srv, &spinwait_cli); - zassert_true(onoff_service_has_error(&srv), + zassert_true(onoff_has_error(&srv), "has error"); struct isr_call_state isr_state = { @@ -456,29 +540,91 @@ static void test_reset(void) "is reset result"); } -static void test_request(void) +static void test_orphan_stop(void) { int rc; - struct onoff_service srv; + struct onoff_manager srv; + const struct 
onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, + ONOFF_START_SLEEPS + | ONOFF_STOP_SLEEPS); clear_transit(); - rc = onoff_service_init(&srv, start, stop, reset, 0); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); + start_state.async = true; + start_state.retval = 23; + stop_state.async = true; + + /* Initiate a request, then cancel it, then complete the + * transition to on in a context where we can't initiate a + * transition to off. Result: error, without notification. + */ + + zassert_true(start_state.notify == NULL, + "start not invoked"); init_spinwait(&spinwait_cli); rc = onoff_request(&srv, &spinwait_cli); + zassert_equal(rc, 2, + "start not pending %d", rc); + zassert_false(start_state.notify == NULL, + "start invoked"); + + rc = onoff_cancel(&srv, &spinwait_cli); + zassert_equal(rc, 0, + "start not pending"); + zassert_equal(cli_result(&spinwait_cli), -EAGAIN, + "start completed"); + + zassert_false(onoff_has_error(&srv), + "has error"); + + k_timer_user_data_set(&isr_timer, &start_state); + k_timer_start(&isr_timer, K_MSEC(1), K_NO_WAIT); + rc = k_sem_take(&isr_sync, K_MSEC(10)); + zassert_equal(rc, 0, + "isr sync"); + + zassert_true(onoff_has_error(&srv), + "no error"); +} + +static void test_request(void) +{ + int rc; + struct onoff_manager srv; + struct onoff_client cli; + struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, 0); + + clear_transit(); + start_state.retval = 16; + + rc = onoff_manager_init(&srv, &transitions); + zassert_equal(rc, 0, + "service init"); + + onoff_client_init_callback(&cli, callback, &srv); + rc = onoff_request(&srv, &cli); zassert_true(rc >= 0, "reset req: %d", rc); zassert_equal(srv.refs, 1U, "reset req refs: %u", srv.refs); - zassert_equal(cli_result(&spinwait_cli), 0, - "reset req result: %d", cli_result(&spinwait_cli)); + zassert_equal(cli_result(&cli), start_state.retval, + "reset req result: %d", 
cli_result(&cli)); + zassert_equal(callback_state, ONOFF_STATE_ON, + "callback state"); + zassert_equal(callback_res, start_state.retval, + "callback res"); + zassert_equal(callback_ud, (void *)&srv, + "callback userdata"); /* Can't reset when no error present. */ init_spinwait(&spinwait_cli); - rc = onoff_service_reset(&srv, &spinwait_cli); + rc = onoff_reset(&srv, &spinwait_cli); zassert_equal(rc, -EALREADY, "reset spin client"); @@ -500,7 +646,7 @@ static void test_request(void) "error release"); zassert_equal(cli_result(&spinwait_cli), stop_state.retval, "error retval"); - zassert_true(onoff_service_has_error(&srv), + zassert_true(onoff_has_error(&srv), "has error"); /* Can't request when error present. */ @@ -516,14 +662,13 @@ static void test_request(void) "rel with error"); struct k_poll_signal sig; - struct onoff_client cli; /* Clear the error */ init_notify_sig(&cli, &sig); - rc = onoff_service_reset(&srv, &cli); + rc = onoff_reset(&srv, &cli); zassert_equal(rc, 0, "reset"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); /* Error on start */ @@ -534,20 +679,20 @@ static void test_request(void) "req with error"); zassert_equal(cli_result(&spinwait_cli), start_state.retval, "req with error"); - zassert_true(onoff_service_has_error(&srv), + zassert_true(onoff_has_error(&srv), "has error"); /* Clear the error */ init_spinwait(&spinwait_cli); - rc = onoff_service_reset(&srv, &spinwait_cli); + rc = onoff_reset(&srv, &spinwait_cli); zassert_equal(rc, 0, "reset"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); /* Diagnose a no-wait delayed start */ - rc = onoff_service_init(&srv, start, stop, reset, - ONOFF_SERVICE_START_SLEEPS); + transitions.flags |= ONOFF_START_SLEEPS; + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); start_state.async = true; @@ -577,11 +722,13 @@ static void test_request(void) static void test_sync(void) 
{ int rc; - struct onoff_service srv; + struct onoff_manager srv; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, 0); clear_transit(); - rc = onoff_service_init(&srv, start, stop, reset, 0); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); @@ -623,11 +770,15 @@ static void test_sync(void) static void test_async(void) { int rc; - struct onoff_service srv; - struct k_poll_signal sig[2]; - struct onoff_client cli[2]; + struct onoff_manager srv; + struct k_poll_signal sig[3]; + struct onoff_client cli[3]; unsigned int signalled = 0; int result = 0; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, + ONOFF_START_SLEEPS + | ONOFF_STOP_SLEEPS); clear_transit(); start_state.async = true; @@ -635,12 +786,19 @@ static void test_async(void) stop_state.async = true; stop_state.retval = 17; - rc = onoff_service_init(&srv, start, stop, reset, - ONOFF_SERVICE_START_SLEEPS - | ONOFF_SERVICE_STOP_SLEEPS); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); + struct onoff_monitor mon = { + .callback = xion_callback, + }; + const struct transition_record *xp = xions; + + rc = onoff_monitor_register(&srv, &mon); + zassert_equal(rc, 0, + "mon reg"); + /* WHITEBOX: request that triggers on returns positive */ init_notify_sig(&cli[0], &sig[0]); rc = onoff_request(&srv, &cli[0]); @@ -651,7 +809,16 @@ static void test_async(void) "cli signalled"); zassert_equal(srv.refs, 0U, "reset req refs: %u", srv.refs); + zassert_equal(xions_end, xions + 1, + "xion count"); + zassert_equal(xp->state, ONOFF_STATE_TO_ON, + "xion0"); + ++xp; + /* Attempts to request when client is active are failures. 
*/ + rc = onoff_request(&srv, &cli[0]); + zassert_equal(rc, -EINVAL, + "re-req accepted"); /* Non-initial request from ISR is OK */ struct onoff_client isrcli; @@ -710,6 +877,13 @@ static void test_async(void) "cli2 result"); zassert_equal(srv.refs, 3U, "reset req refs: %u", srv.refs); + zassert_equal(xions_end, xions + 2, + "xion count %u", (xions_end - xions)); + zassert_equal(xp->state, ONOFF_STATE_ON, + "xion1"); + zassert_equal(xp->res, start_state.retval, + "xion1"); + ++xp; /* Non-final release decrements refs and completes. */ init_notify_sig(&cli[0], &sig[0]); @@ -724,6 +898,8 @@ static void test_async(void) "cli signalled"); zassert_equal(result, 0, "cli result"); + zassert_equal(xions_end, xions + 2, + "xion count %u", (xions_end - xions)); /* Non-final release from ISR is OK */ init_spinwait(&isrcli); @@ -795,14 +971,25 @@ static void test_async(void) zassert_equal(result, stop_state.retval, "cli result"); + zassert_equal(xions_end, xions + 4, + "xion count %u", (xions_end - xions)); + zassert_equal(xp->state, ONOFF_STATE_TO_OFF, + "xion2"); + ++xp; + zassert_equal(xp->state, ONOFF_STATE_TO_ON, + "xion3"); + zassert_equal(xp->res, stop_state.retval, + "xion3"); + ++xp; + /* Release when starting is an error */ - init_notify_sig(&cli[0], &sig[0]); - rc = onoff_release(&srv, &cli[0]); + init_notify_sig(&cli[2], &sig[2]); + rc = onoff_release(&srv, &cli[2]); zassert_equal(rc, -EBUSY, "rel to-off: %d", rc); /* Finalize queued start, gets us to on */ - cli[0].result = 1 + start_state.retval; + cli[0].notify.result = 1 + start_state.retval; zassert_equal(cli_result(&cli[0]), -EAGAIN, "fetch failed"); zassert_false(start_state.notify == NULL, @@ -812,22 +999,31 @@ static void test_async(void) "start notified"); zassert_equal(srv.refs, 1U, "reset rel refs: %u", srv.refs); + zassert_equal(xions_end, xions + 5, + "xion count %u", (xions_end - xions)); + zassert_equal(xp->state, ONOFF_STATE_ON, + "xion4"); + zassert_equal(xp->res, start_state.retval, + "xion4"); + 
++xp; } static void test_half_sync(void) { int rc; - struct onoff_service srv; + struct onoff_manager srv; struct k_poll_signal sig; struct onoff_client cli; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, + ONOFF_STOP_SLEEPS); clear_transit(); start_state.retval = 23; stop_state.async = true; stop_state.retval = 17; - rc = onoff_service_init(&srv, start, stop, NULL, - ONOFF_SERVICE_STOP_SLEEPS); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); @@ -870,12 +1066,62 @@ static void test_half_sync(void) "restart complete"); } +static void test_active_client(void) +{ + int rc; + struct onoff_manager srv; + struct onoff_client cli[2]; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, + ONOFF_START_SLEEPS + | ONOFF_STOP_SLEEPS); + + clear_transit(); + start_state.async = true; + start_state.retval = -23; + + rc = onoff_manager_init(&srv, &transitions); + zassert_equal(rc, 0, + "service init"); + + init_spinwait(&cli[0]); + rc = onoff_request(&srv, &cli[0]); + zassert_equal(rc, 2, + "req pending"); + + /* Attempts to request when client is active are failures. */ + rc = onoff_request(&srv, &cli[0]); + zassert_equal(rc, -EINVAL, + "re-req accepted"); + + /* Attempts to release when client is active are failures. 
*/ + rc = onoff_release(&srv, &cli[0]); + zassert_equal(rc, -EINVAL, + "re-rel accepted"); + + /* Cache an in-use request to use for validation */ + cli[1] = cli[0]; + + zassert_false(onoff_has_error(&srv), + "has error"); + notify(&start_state); + zassert_true(onoff_has_error(&srv), + "no error"); + rc = onoff_reset(&srv, &cli[1]); + zassert_equal(rc, -EINVAL, + "re-reset accepted"); +} + static void test_cancel_request_waits(void) { int rc; - struct onoff_service srv; + struct onoff_manager srv; struct k_poll_signal sig; struct onoff_client cli; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, + ONOFF_START_SLEEPS + | ONOFF_STOP_SLEEPS); clear_transit(); start_state.async = true; @@ -883,12 +1129,18 @@ static void test_cancel_request_waits(void) stop_state.async = true; stop_state.retval = 31; - rc = onoff_service_init(&srv, start, stop, NULL, - ONOFF_SERVICE_START_SLEEPS - | ONOFF_SERVICE_STOP_SLEEPS); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); + struct onoff_monitor mon = { + .callback = xion_callback, + }; + + rc = onoff_monitor_register(&srv, &mon); + zassert_equal(rc, 0, + "mon reg"); + init_notify_sig(&cli, &sig); rc = onoff_request(&srv, &cli); zassert_true(rc > 0, @@ -911,75 +1163,61 @@ static void test_cancel_request_waits(void) rc = onoff_cancel(&srv, &cli); zassert_equal(rc, 0, "cancel failed: %d", rc); - zassert_equal(cli_result(&cli), -ECANCELED, + zassert_equal(cli_result(&cli), -EAGAIN, "cancel notified"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); - /* Not allowed to cancel the last pending start. - */ + /* Allowed to cancel the last pending start. 
*/ rc = onoff_cancel(&srv, &spinwait_cli); - zassert_equal(rc, -EWOULDBLOCK, + zassert_equal(rc, 0, "last cancel", rc); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); zassert_equal(cli_result(&spinwait_cli), -EAGAIN, "last request"); + /* When start completes a stop will be synthesized. */ + zassert_true(stop_state.notify == NULL, + "stop pending"); notify(&start_state); - zassert_equal(cli_result(&spinwait_cli), start_state.retval, - "last request"); - zassert_false(onoff_service_has_error(&srv), - "has error"); - - - /* Issue a stop, then confirm that you can request and cancel - * a restart. - */ - init_spinwait(&cli); - rc = onoff_release(&srv, &cli); - zassert_equal(rc, 2, /* WHITEBOX stop pending */ - "stop pending, %d", rc); - zassert_equal(cli_result(&cli), -EAGAIN, - "stop pending"); - - init_spinwait(&spinwait_cli); - rc = onoff_request(&srv, &spinwait_cli); - zassert_equal(rc, 3, /* WHITEBOX restart pending */ - "restart pending"); - - rc = onoff_cancel(&srv, &spinwait_cli); - zassert_equal(rc, 0, - "restart cancel"); - zassert_equal(cli_result(&spinwait_cli), -ECANCELED, - "restart cancel"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); + zassert_false(stop_state.notify == NULL, + "stop not pending"); - zassert_equal(cli_result(&cli), -EAGAIN, - "stop pending"); - + /* Nobody's around to hear the stop completion... 
*/ notify(&stop_state); - zassert_equal(cli_result(&cli), stop_state.retval, - "released"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); + + /* ...except the monitor */ + zassert_equal(xions_end - xions, 3, + "xion count"); + zassert_equal(xions[0].state, ONOFF_STATE_TO_ON, + "xion0"); + zassert_equal(xions[1].state, ONOFF_STATE_ON, + "xion1"); + zassert_equal(xions[2].state, ONOFF_STATE_OFF, + "xion2"); } static void test_cancel_request_ok(void) { int rc; - struct onoff_service srv; + struct onoff_manager srv; struct k_poll_signal sig; struct onoff_client cli; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, + ONOFF_START_SLEEPS); clear_transit(); start_state.async = true; start_state.retval = 14; stop_state.retval = 31; - rc = onoff_service_init(&srv, start, stop, NULL, - ONOFF_SERVICE_START_SLEEPS); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); @@ -990,21 +1228,22 @@ static void test_cancel_request_ok(void) zassert_false(start_state.notify == NULL, "start pending"); - /* You can't cancel the last start request */ + /* You can cancel the last start request */ rc = onoff_cancel(&srv, &cli); - zassert_equal(rc, -EWOULDBLOCK, + zassert_equal(rc, 0, "cancel"); + zassert_equal(cli_result(&cli), -EAGAIN, + "cancel notified"); + zassert_equal(srv.refs, 0, "refs empty"); notify(&start_state); - zassert_equal(srv.refs, 1, + zassert_equal(srv.refs, 0, "refs"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); - zassert_equal(cli_result(&cli), start_state.retval, - "cancel notified"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); /* You can "cancel" an request that isn't active */ @@ -1022,11 +1261,15 @@ static void test_cancel_request_ok(void) static void test_blocked_restart(void) { int rc; - struct onoff_service srv; + struct 
onoff_manager srv; unsigned int signalled = 0; int result; struct k_poll_signal sig[2]; struct onoff_client cli[2]; + const struct onoff_transitions transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, + ONOFF_START_SLEEPS + | ONOFF_STOP_SLEEPS); clear_transit(); start_state.async = true; @@ -1034,9 +1277,7 @@ static void test_blocked_restart(void) stop_state.async = true; stop_state.retval = 31; - rc = onoff_service_init(&srv, start, stop, NULL, - ONOFF_SERVICE_START_SLEEPS - | ONOFF_SERVICE_STOP_SLEEPS); + rc = onoff_manager_init(&srv, &transitions); zassert_equal(rc, 0, "service init"); @@ -1083,7 +1324,7 @@ static void test_blocked_restart(void) "isr sync"); /* Fail-to-restart is not an error */ - zassert_false(onoff_service_has_error(&srv), + zassert_false(onoff_has_error(&srv), "has error"); k_poll_signal_check(&sig[0], &signalled, &result); @@ -1101,19 +1342,17 @@ static void test_blocked_restart(void) static void test_cancel_release(void) { + static const struct onoff_transitions srv_transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL, + ONOFF_STOP_SLEEPS); + struct onoff_manager srv = ONOFF_MANAGER_INITIALIZER(&srv_transitions); int rc; - struct onoff_service srv; clear_transit(); start_state.retval = 16; stop_state.async = true; stop_state.retval = 94; - rc = onoff_service_init(&srv, start, stop, NULL, - ONOFF_SERVICE_STOP_SLEEPS); - zassert_equal(rc, 0, - "service init"); - init_spinwait(&spinwait_cli); rc = onoff_request(&srv, &spinwait_cli); zassert_true(rc > 0, @@ -1130,18 +1369,133 @@ static void test_cancel_release(void) zassert_equal(cli_result(&spinwait_cli), -EAGAIN, "release pending"); - /* You can't cancel a stop request. */ + /* You can cancel a stop request, but it becomes a start + * request. 
+ */ rc = onoff_cancel(&srv, &spinwait_cli); - zassert_equal(rc, -EWOULDBLOCK, - "cancel succeeded"); - zassert_false(onoff_service_has_error(&srv), + zassert_equal(rc, 1, + "cancel succeeded, start pending"); + zassert_false(onoff_has_error(&srv), "has error"); + /* The stop initiates a restart, does not notify client (which + * is now a request) + */ + start_state.async = true; + zassert_true(start_state.notify == NULL, + "start pending"); notify(&stop_state); - zassert_equal(cli_result(&spinwait_cli), stop_state.retval, - "release pending"); - zassert_false(onoff_service_has_error(&srv), + zassert_false(start_state.notify == NULL, + "start not pending"); + zassert_equal(cli_result(&spinwait_cli), -EAGAIN, + "restart pending"); + + notify(&start_state); + + zassert_equal(cli_result(&spinwait_cli), start_state.retval, + "restart finished"); + zassert_false(onoff_has_error(&srv), + "has error"); +} + +static void test_cancel_reset(void) +{ + static const struct onoff_transitions srv_transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, + ONOFF_RESET_SLEEPS); + struct onoff_manager srv = ONOFF_MANAGER_INITIALIZER(&srv_transitions); + int rc; + + clear_transit(); + start_state.retval = -16; + reset_state.async = true; + + zassert_false(onoff_has_error(&srv), "has error"); + init_spinwait(&spinwait_cli); + rc = onoff_request(&srv, &spinwait_cli); + zassert_true(onoff_has_error(&srv), + "no error"); + zassert_equal(cli_result(&spinwait_cli), start_state.retval, + "start error"); + + init_spinwait(&spinwait_cli); + rc = onoff_reset(&srv, &spinwait_cli); + zassert_equal(rc, 0, + "reset %d", rc); + zassert_equal(cli_result(&spinwait_cli), -EAGAIN, + "reset done"); + + rc = onoff_cancel(&srv, &spinwait_cli); + zassert_equal(rc, 0, + "reset not cancelled"); + zassert_equal(cli_result(&spinwait_cli), -EAGAIN, + "reset done"); + + zassert_true(onoff_has_error(&srv), + "no error"); + notify(&reset_state); + zassert_false(onoff_has_error(&srv), + "error"); +} + 
+static void test_monitor(void) +{ + static const struct onoff_transitions srv_transitions = + ONOFF_TRANSITIONS_INITIALIZER(start, stop, reset, + ONOFF_RESET_SLEEPS); + struct onoff_manager srv = ONOFF_MANAGER_INITIALIZER(&srv_transitions); + int rc; + + clear_transit(); + + struct onoff_monitor mon = { + .callback = xion_callback, + }; + const struct transition_record *xp = xions; + + rc = onoff_monitor_register(NULL, &mon); + zassert_equal(rc, -EINVAL, + "mgr validation"); + rc = onoff_monitor_register(&srv, NULL); + zassert_equal(rc, -EINVAL, + "mon validation"); + rc = onoff_monitor_unregister(NULL, &mon); + zassert_equal(rc, -EINVAL, + "mgr validation"); + rc = onoff_monitor_unregister(&srv, NULL); + zassert_equal(rc, -EINVAL, + "mon validation"); + rc = onoff_monitor_register(&srv, &mon); + zassert_equal(rc, 0, + "mgr register"); + + init_spinwait(&spinwait_cli); + rc = onoff_request(&srv, &spinwait_cli); + zassert_true(rc, 0, + "request"); + zassert_equal(xions_end, xions + 2, + "xion count"); + zassert_equal(xp->state, ONOFF_STATE_TO_ON, + "xion0.state"); + ++xp; + zassert_equal(xp->state, ONOFF_STATE_ON, + "xion1.state"); + + rc = onoff_monitor_unregister(&srv, &mon); + zassert_equal(rc, 0, + "mgr unregister"); + rc = onoff_monitor_unregister(&srv, &mon); + zassert_equal(rc, -EINVAL, + "mgr dup unregister"); + + init_spinwait(&spinwait_cli); + rc = onoff_release(&srv, &spinwait_cli); + zassert_true(rc, 0, + "release"); + + zassert_equal(xions_end, xions + 2, + "xion count"); } void test_main(void) @@ -1158,9 +1512,13 @@ void test_main(void) ztest_unit_test(test_sync), ztest_unit_test(test_async), ztest_unit_test(test_half_sync), + ztest_unit_test(test_active_client), ztest_unit_test(test_cancel_request_waits), ztest_unit_test(test_cancel_request_ok), ztest_unit_test(test_blocked_restart), - ztest_unit_test(test_cancel_release)); + ztest_unit_test(test_cancel_release), + ztest_unit_test(test_orphan_stop), + ztest_unit_test(test_cancel_reset), + 
ztest_unit_test(test_monitor)); ztest_run_test_suite(onoff_api); } diff --git a/tests/lib/onoff_notifier/CMakeLists.txt b/tests/lib/onoff_notifier/CMakeLists.txt new file mode 100644 index 0000000000000..21fc14029caf6 --- /dev/null +++ b/tests/lib/onoff_notifier/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.13.1) +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(onoff_notifier) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/lib/onoff_notifier/prj.conf b/tests/lib/onoff_notifier/prj.conf new file mode 100644 index 0000000000000..1948aaa649a4e --- /dev/null +++ b/tests/lib/onoff_notifier/prj.conf @@ -0,0 +1,2 @@ +CONFIG_POLL=y +CONFIG_ZTEST=y diff --git a/tests/lib/onoff_notifier/src/main.c b/tests/lib/onoff_notifier/src/main.c new file mode 100644 index 0000000000000..14cfecb79c9ce --- /dev/null +++ b/tests/lib/onoff_notifier/src/main.c @@ -0,0 +1,645 @@ +/* + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include + +static int onoff_status[10]; +static unsigned int num_onoff; + +static int notify_status[10]; +static unsigned int num_notify; + +struct service { + struct onoff_manager onoff; + onoff_notify_fn notify; + int request_rv; + int release_rv; + int reset_rv; + bool async; + bool active; +}; + +static struct service service; +static struct onoff_notifier notifier; + +static void notify_onoff(onoff_notify_fn notify, + int status) +{ + __ASSERT_NO_MSG(num_onoff < ARRAY_SIZE(onoff_status)); + printk("onoff notify %d\n", status); + onoff_status[num_onoff++] = status; + notify(&service.onoff, status); +} + +static void settle_onoff(int res, + bool request) +{ + onoff_notify_fn notify = service.notify; + + __ASSERT_NO_MSG(notify != NULL); + service.notify = NULL; + service.active = (request && (res >= 0)); + notify_onoff(notify, res); +} + +static void 
basic_start(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + + if (sp->async) { + __ASSERT_NO_MSG(sp->notify == NULL); + sp->notify = notify; + } else { + sp->active = (sp->request_rv >= 0); + notify_onoff(notify, sp->request_rv); + } +} + +static void basic_stop(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + + if (sp->async) { + __ASSERT_NO_MSG(sp->notify == NULL); + sp->notify = notify; + } else { + sp->active = false; + notify_onoff(notify, sp->release_rv); + } +} + +static void basic_reset(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + + if (sp->async) { + __ASSERT_NO_MSG(sp->notify == NULL); + sp->notify = notify; + } else { + sp->active = false; + notify_onoff(notify, sp->reset_rv); + } +} + +static struct onoff_transitions const transitions = { + .start = basic_start, + .stop = basic_stop, + .reset = basic_reset, +}; + +static void notify_callback(struct onoff_notifier *np, + int status) +{ + __ASSERT_NO_MSG(num_notify < ARRAY_SIZE(notify_status)); + notify_status[num_notify++] = status; +} + +static void reset_service(void) +{ + memset(onoff_status, 0, sizeof(onoff_status)); + num_onoff = 0; + memset(notify_status, 0, sizeof(notify_status)); + num_notify = 0; + + service = (struct service){ + .onoff = ONOFF_MANAGER_INITIALIZER(&transitions), + }; + + notifier = (struct onoff_notifier) + ONOFF_NOTIFIER_INITIALIZER(&service.onoff, notify_callback); +} + + +static void replace_service_onoff(struct onoff_transitions *transitions) +{ + service.onoff.transitions = transitions; +} + +static void test_basic(void) +{ + int rc; + + reset_service(); + + zassert_false(service.active, + "unexp active"); + + /* Immediate success expected */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 1, + "request failed"); + zassert_equal(num_onoff, 1, + 
"onoff not invoked"); + zassert_true(service.active, + "not active"); + zassert_equal(num_notify, 1, + "req not notified"); + zassert_equal(notify_status[0], 1, + "notification not on"); + + /* No-effect error to re-request */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EALREADY, + "re-request failure"); + + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, 0, + "release failed"); + zassert_false(service.active, + "still active"); + zassert_equal(num_notify, 2, + "rel not notified"); + zassert_equal(notify_status[1], 0, + "notification on"); + + /* No-effect error to re-release */ + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, -EALREADY, + "re-release failure"); +} + +static void test_failed_request(void) +{ + int rc; + + reset_service(); + service.request_rv = -23; + + zassert_false(service.active, + "unexp active"); + + /* Immediate failure expected */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request failed: %d", rc); + zassert_equal(num_onoff, 1, + "onoff not invoked"); + zassert_false(service.active, + "active"); + + /* Failures are persistent until service reset. 
*/ + service.request_rv = 0; + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request failed: %d", rc); +} + +static void test_async(void) +{ + int rc; + + reset_service(); + service.async = true; + + zassert_false(service.active, + "unexp active"); + + /* No immediate success */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 0, + "request not incomplete"); + zassert_equal(num_onoff, 0, + "onoff premature"); + zassert_false(service.active, + "unexp active"); + zassert_equal(num_notify, 0, + "notify premature"); + + /* Re-invocation at this point has no effect */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 0, + "request not incomplete"); + + settle_onoff(service.request_rv, true); + zassert_equal(num_onoff, 1, + "onoff premature"); + zassert_true(service.active, + "unexp inactive"); + zassert_equal(num_notify, 1, + "notify premature"); + + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, 0, + "release failed: %d", rc); + zassert_equal(num_onoff, 1, + "onoff premature"); + + settle_onoff(service.request_rv, false); + + zassert_false(service.active, + "still active"); + zassert_equal(num_notify, 2, + "rel not notified"); + zassert_equal(notify_status[1], 0, + "notification on"); +} + +static void test_cancelled_request(void) +{ + int rc; + + reset_service(); + service.async = true; + + zassert_false(service.active, + "unexp active"); + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 0, + "request not incomplete"); + zassert_equal(num_onoff, 0, + "onoff premature"); + zassert_false(service.active, + "unexp active"); + zassert_equal(num_notify, 0, + "notify premature"); + + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, 0, + "request not incomplete"); + zassert_equal(num_onoff, 0, + "onoff premature"); + zassert_false(service.active, + "unexp active"); + zassert_equal(num_notify, 0, + "notify premature"); + + /* Complete the initial request */ + settle_onoff(0, true); + zassert_equal(num_onoff, 
1, + "on not complete"); + zassert_equal(num_notify, 0, + "notify premature"); + zassert_not_equal(service.notify, NULL, + "stop transition not invoked"); + + /* Complete the synthesized cancellation. We should get one + * notification that the service is off. + */ + settle_onoff(0, false); + zassert_equal(num_onoff, 2, + "off not complete"); + zassert_equal(num_notify, 1, + "notify not received"); + zassert_equal(notify_status[0], 0, + "notification on"); +} + +static void test_bicancelled_request(void) +{ + int rc; + + reset_service(); + service.async = true; + + zassert_false(service.active, + "unexp active"); + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 0, + "request not incomplete"); + zassert_equal(num_onoff, 0, + "onoff premature"); + zassert_false(service.active, + "unexp active"); + zassert_equal(num_notify, 0, + "notify premature"); + + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, 0, + "request not incomplete"); + zassert_equal(num_onoff, 0, + "onoff premature"); + zassert_false(service.active, + "unexp active"); + zassert_equal(num_notify, 0, + "notify premature"); + + /* Issue a request which cancels the pending release */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 0, + "request not incomplete"); + zassert_equal(num_onoff, 0, + "onoff premature"); + zassert_false(service.active, + "unexp active"); + zassert_equal(num_notify, 0, + "notify premature"); + + /* Complete the initial request. The intermediary release was + * cancelled before it could be initiated. 
+ */ + settle_onoff(0, true); + zassert_equal(num_onoff, 1, + "on not complete"); + zassert_equal(num_notify, 1, + "notify premature"); + zassert_equal(notify_status[0], 1, + "notification on"); + zassert_equal(service.notify, NULL, + "stop transition queued"); +} + +static void test_cancelled_release(void) +{ + int rc; + + reset_service(); + + zassert_false(service.active, + "unexp active"); + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 1, + "request not complete"); + zassert_equal(num_onoff, 1, + "onoff failed"); + zassert_equal(num_notify, 1, + "notify failed"); + zassert_equal(notify_status[0], 1, + "notify failed"); + zassert_true(service.active, + "exp active"); + + service.async = true; + + /* Issue a release, which will block. */ + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, 0, + "request complete"); + zassert_equal(num_onoff, 1, + "onoff premature"); + zassert_equal(num_notify, 1, + "notify premature"); + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 0, + "request not complete"); + + /* Complete the initial release */ + settle_onoff(0, false); + zassert_equal(num_onoff, 2, + "on not complete"); + zassert_equal(num_notify, 1, + "notify premature"); + zassert_not_equal(service.notify, NULL, + "stop transition not invoked"); + + /* Complete the synthesized request */ + settle_onoff(0, true); + zassert_equal(num_onoff, 3, + "off not complete"); + zassert_equal(num_notify, 2, + "notify not received"); + zassert_equal(notify_status[0], 1, + "notification"); + zassert_equal(notify_status[1], 1, + "renotification"); +} + +static void test_bicancelled_release(void) +{ + int rc; + + reset_service(); + + zassert_false(service.active, + "unexp active"); + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 1, + "request not complete"); + zassert_equal(num_onoff, 1, + "onoff failed"); + zassert_equal(num_notify, 1, + "notify failed"); + zassert_equal(notify_status[0], 1, + "notify failed"); + zassert_true(service.active, 
+ "exp active"); + + service.async = true; + + /* Issue a release, which will block. */ + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, 0, + "request complete"); + zassert_equal(num_onoff, 1, + "onoff premature"); + zassert_equal(num_notify, 1, + "notify premature"); + + /* Issue a request to cancel the release */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, 0, + "request not complete"); + + /* Issue a second release to cancel the pending request */ + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, 0, + "request not complete"); + zassert_equal(num_onoff, 1, + "onoff premature"); + zassert_equal(num_notify, 1, + "notify premature"); + + /* Complete the initial release */ + settle_onoff(0, false); + zassert_equal(num_onoff, 2, + "on not complete"); + zassert_equal(num_notify, 2, + "notify ok"); + zassert_equal(service.notify, NULL, + "start transition pending"); + zassert_equal(notify_status[1], 0, + "notify failed"); +} + +static void test_basic_reset(void) +{ + int rc; + + reset_service(); + service.request_rv = -23; + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request error"); + zassert_equal(num_notify, 1, + "notify wrong"); + zassert_equal(notify_status[0], service.request_rv, + "notify status wrong"); + + /* Non-reset operations in an error state produce an error. 
*/ + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, -EIO, + "release check"); + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request check"); + + rc = onoff_notifier_reset(¬ifier); + zassert_equal(rc, 0, + "reset unsupported failed: %d", rc); + zassert_equal(num_notify, 2, + "notify wrong: %d", num_notify); + zassert_equal(notify_status[1], 0, + "reset failed"); + + /* Re-reset is rejected */ + rc = onoff_notifier_reset(¬ifier); + zassert_equal(rc, -EALREADY, + "re-reset failed"); + +} + +static void test_unsupported_reset(void) +{ + int rc; + + reset_service(); + + struct onoff_transitions reset_transitions = *service.onoff.transitions; + + reset_transitions.reset = NULL; + replace_service_onoff(&reset_transitions); + service.request_rv = -23; + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request error"); + zassert_equal(num_notify, 1, + "notify wrong"); + zassert_equal(notify_status[0], service.request_rv, + "notify status wrong"); + + /* Non-reset operations in an error state produce an error. 
*/ + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, -EIO, + "release check"); + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request check"); + + /* Reset fails if service can't be reset */ + rc = onoff_notifier_reset(¬ifier); + zassert_equal(rc, -EIO, + "reset unsupported failed"); + zassert_equal(num_notify, 2, + "notify wrong"); + zassert_equal(notify_status[1], -ENOTSUP, + "reset status wrong: %d", notify_status[1]); +} + +static void test_already_reset(void) +{ + int rc; + + reset_service(); + service.request_rv = -23; + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request error"); + zassert_equal(num_notify, 1, + "notify wrong"); + zassert_equal(notify_status[0], service.request_rv, + "notify status wrong"); + + zassert_true(onoff_has_error(&service.onoff), + "no error?"); + + /* Clear the underlying error as if from another process */ + struct onoff_client cli; + + onoff_client_init_spinwait(&cli); + rc = onoff_reset(&service.onoff, &cli); + zassert_equal(rc, 0, + "reset failed"); + zassert_false(onoff_has_error(&service.onoff), + "no error?"); + + onoff_client_init_spinwait(&cli); + rc = onoff_reset(&service.onoff, &cli); + zassert_equal(rc, -EALREADY, + "re-reset succeeded"); + + /* Notifier reset should still succeed. */ + rc = onoff_notifier_reset(¬ifier); + zassert_equal(rc, 0, + "request error"); + zassert_equal(num_notify, 2, + "notify wrong"); + zassert_equal(notify_status[1], 0, + "notify status wrong"); +} + +static void test_async_reset(void) +{ + int rc; + + reset_service(); + service.request_rv = -23; + + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EIO, + "request error"); + zassert_equal(num_notify, 1, + "notify wrong"); + zassert_equal(notify_status[0], service.request_rv, + "notify status wrong"); + + zassert_true(onoff_has_error(&service.onoff), + "no error?"); + + service.async = true; + + /* Notifier reset should be acceptable. 
*/ + rc = onoff_notifier_reset(¬ifier); + zassert_equal(rc, 0, + "request error"); + zassert_equal(num_notify, 1, + "notify wrong"); + + /* Other operations should be rejected while reset is + * unresolved. + */ + rc = onoff_notifier_request(¬ifier); + zassert_equal(rc, -EWOULDBLOCK, + "request failed"); + rc = onoff_notifier_release(¬ifier); + zassert_equal(rc, -EWOULDBLOCK, + "release failed"); + + settle_onoff(0, false); + zassert_equal(num_notify, 2, + "notify wrong"); + zassert_equal(notify_status[1], 0, + "notify status wrong"); +} + +void test_main(void) +{ + ztest_test_suite(onoff_notifier_api, + ztest_unit_test(test_basic), + ztest_unit_test(test_async), + ztest_unit_test(test_failed_request), + ztest_unit_test(test_cancelled_request), + ztest_unit_test(test_bicancelled_request), + ztest_unit_test(test_cancelled_release), + ztest_unit_test(test_bicancelled_release), + ztest_unit_test(test_basic_reset), + ztest_unit_test(test_unsupported_reset), + ztest_unit_test(test_already_reset), + ztest_unit_test(test_async_reset) + ); + ztest_run_test_suite(onoff_notifier_api); +} diff --git a/tests/lib/onoff_notifier/testcase.yaml b/tests/lib/onoff_notifier/testcase.yaml new file mode 100644 index 0000000000000..cc2cee71a2a9a --- /dev/null +++ b/tests/lib/onoff_notifier/testcase.yaml @@ -0,0 +1,3 @@ +tests: + libraries.onoff_notifier: + tags: onoff diff --git a/tests/lib/queued_operation/CMakeLists.txt b/tests/lib/queued_operation/CMakeLists.txt new file mode 100644 index 0000000000000..90b0fb17ea108 --- /dev/null +++ b/tests/lib/queued_operation/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.13.1) +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(queued_operation) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/lib/queued_operation/prj.conf b/tests/lib/queued_operation/prj.conf new file mode 100644 index 
0000000000000..b008d47339881 --- /dev/null +++ b/tests/lib/queued_operation/prj.conf @@ -0,0 +1,3 @@ +CONFIG_POLL=y +CONFIG_ZTEST=y +CONFIG_ZTEST_STACKSIZE=2048 diff --git a/tests/lib/queued_operation/src/main.c b/tests/lib/queued_operation/src/main.c new file mode 100644 index 0000000000000..c81d3faf3933d --- /dev/null +++ b/tests/lib/queued_operation/src/main.c @@ -0,0 +1,982 @@ +/* + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +struct service; + +struct operation { + struct queued_operation operation; + void (*callback)(struct service *sp, + struct operation *op, + void *ud); + void *user_data; +}; + +struct service { + /* State of the manager */ + struct queued_operation_manager manager; + + /* State for an on-off service optionally used by the manager. */ + struct onoff_manager onoff; + + /* Value to return from basic_request handler. */ + int onoff_request_rv; + + /* Value to return from basic_release handler. */ + int onoff_release_rv; + + /* Notifier to use when async_onoff is set. */ + onoff_notify_fn onoff_notify; + /* The current operation cast for this service type. Null if + * service is idle. + */ + struct operation *current; + + /* Value to return from service_impl_validate() */ + int validate_rv; + + /* Value to return from service_impl_validate() + * + * This is incremented before each synchronous finalization by + * service_impl_callback. + */ + int process_rv; + + /* Parameters passed to test_callback */ + struct operation *callback_op; + int callback_res; + + /* Count of process submissions since reset. */ + size_t process_cnt; + + /* Test-specific data associated with the service. */ + void *data; + + /* If set defer notification of onoff operation. + * + * The callback to invoke will be stored in onoff_notify. + */ + bool async_onoff; + + /* If set inhibit synchronous completion. 
*/ + bool async; + + /* Set to indicate that the lass process() call provided an + * operation. + */ + bool active; +}; + +static void basic_start(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + + if (sp->async_onoff) { + __ASSERT_NO_MSG(sp->onoff_notify == NULL); + sp->onoff_notify = notify; + } else { + sp->active = sp->onoff_request_rv >= 0; + notify(mp, sp->onoff_request_rv); + } +} + +static void basic_stop(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + + if (sp->async_onoff) { + __ASSERT_NO_MSG(sp->onoff_notify == NULL); + sp->onoff_notify = notify; + } else { + sp->active = false; + notify(mp, sp->onoff_release_rv); + } +} + +static struct onoff_transitions const basic_onoff_transitions = { + .start = basic_start, + .stop = basic_stop, + .flags = 0, +}; + +typedef void (*service_callback)(struct service *sp, + struct operation *op, + int res); + +static void test_callback(struct service *sp, + struct operation *op, + int res) +{ + sp->callback_op = op; + sp->callback_res = res; + if (op->callback) { + op->callback(sp, op, op->user_data); + } +} + +static inline void operation_init_spinwait(struct operation *op) +{ + *op = (struct operation){}; + sys_notify_init_spinwait(&op->operation.notify); +} + +static inline void operation_init_signal(struct operation *op, + struct k_poll_signal *sigp) +{ + *op = (struct operation){}; + sys_notify_init_signal(&op->operation.notify, sigp); +} + +static inline void operation_init_callback(struct operation *op, + service_callback handler) +{ + *op = (struct operation){}; + sys_notify_init_callback(&op->operation.notify, + (sys_notify_generic_callback)handler); +} + +static int service_submit(struct service *sp, + struct operation *op, + int priority) +{ + return queued_operation_submit(&sp->manager, &op->operation, priority); +} + +static int service_cancel(struct service *sp, + 
struct operation *op) +{ + return queued_operation_cancel(&sp->manager, &op->operation); +} + +static int service_impl_validate(struct queued_operation_manager *mgr, + struct queued_operation *op) +{ + struct service *sp = CONTAINER_OF(mgr, struct service, manager); + + return sp->validate_rv; +} + +static void service_impl_callback(struct queued_operation_manager *mgr, + struct queued_operation *op, + sys_notify_generic_callback cb) +{ + service_callback handler = (service_callback)cb; + struct service *sp = CONTAINER_OF(mgr, struct service, manager); + struct operation *sop = CONTAINER_OF(op, struct operation, operation); + int res = -EINPROGRESS; + + zassert_equal(queued_operation_fetch_result(op, &res), 0, + "callback before finalized"); + handler(sp, sop, res); +} + +/* Split out finalization to support async testing. */ +static void service_finalize(struct service *sp, + int res) +{ + struct queued_operation *op = &sp->current->operation; + + sp->current = NULL; + (void)op; + queued_operation_finalize(&sp->manager, res); +} + +static void service_impl_process(struct queued_operation_manager *mgr, + struct queued_operation *op) +{ + struct service *sp = CONTAINER_OF(mgr, struct service, manager); + + zassert_equal(sp->current, NULL, + "process collision"); + + sp->process_cnt++; + sp->active = (op != NULL); + if (sp->active) { + sp->current = CONTAINER_OF(op, struct operation, operation); + if (!sp->async) { + service_finalize(sp, ++sp->process_rv); + } + } +} + +static struct queued_operation_functions const service_vtable = { + .validate = service_impl_validate, + .callback = service_impl_callback, + .process = service_impl_process, +}; +/* Live copy, mutated for testing. 
*/ +static struct queued_operation_functions vtable; + +static struct service service = { + .manager = QUEUED_OPERATION_MANAGER_INITIALIZER(&vtable, + &service.onoff), + .onoff = { + .transitions = &basic_onoff_transitions, + }, +}; + +static void service_onoff_notify(int res) +{ + onoff_notify_fn notify = service.onoff_notify; + + __ASSERT_NO_MSG(notify != NULL); + service.onoff_notify = NULL; + + notify(&service.onoff, res); +} + +static void reset_service(bool onoff) +{ + vtable = service_vtable; + service = (struct service){ + .manager = QUEUED_OPERATION_MANAGER_INITIALIZER(&vtable, + &service.onoff), + .onoff = { + .transitions = &basic_onoff_transitions, + }, + }; + + if (!onoff) { + service.manager.onoff = NULL; + } +} + +static void replace_service_onoff(struct onoff_transitions *transitions) +{ + service.onoff.transitions = transitions; +} + +static void test_notification_spinwait(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(true); + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait failed: %d != %d", rc, + service.validate_rv); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait result"); + + zassert_false(service.active, "service not idled"); +} + +static void test_notification_signal(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + struct k_poll_signal sig; + unsigned int signaled; + int res = 0; + int rc = 0; + + reset_service(false); + + k_poll_signal_init(&sig); + operation_init_signal(op, &sig); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed signal 
unfinalized"); + k_poll_signal_check(&sig, &signaled, &res); + zassert_equal(signaled, 0, + "failed signal unsignaled"); + + service.process_rv = 23; + rc = service_submit(&service, op, 0); + zassert_equal(rc, 0, + "submit signal failed: %d", rc); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed signal fetch"); + zassert_equal(res, service.process_rv, + "failed signal result"); + k_poll_signal_check(&sig, &signaled, &res); + zassert_equal(signaled, 1, + "failed signal signaled"); + zassert_equal(res, service.process_rv, + "failed signal signal result"); +} + +static void test_notification_callback(void) +{ + struct operation operation; + struct operation *op = &operation; + struct service *sp = &service; + struct sys_notify *np = &op->operation.notify; + struct k_poll_signal sig; + int res = 0; + int rc = 0; + + reset_service(false); + + k_poll_signal_init(&sig); + operation_init_callback(op, test_callback); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed callback unfinalized"); + zassert_equal(sp->callback_op, NULL, + "failed callback pre-check"); + + service.process_rv = 142; + rc = service_submit(&service, op, 0); + zassert_equal(rc, 0, + "submit callback failed: %d", rc); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed callback fetch"); + zassert_equal(res, service.process_rv, + "failed callback result"); + zassert_equal(sp->callback_op, op, + "failed callback captured op"); + zassert_equal(sp->callback_res, service.process_rv, + "failed callback captured res"); +} + +struct pri_order { + int priority; + size_t ordinal; +}; + +static void test_sync_priority(void) +{ + struct pri_order const pri_order[] = { + { 0, 0 }, /* first because it gets grabbed when submitted */ + /* rest in FIFO within priority */ + { -1, 2 }, + { 1, 4 }, + { -2, 1 }, + { 2, 6 }, + { 1, 5 }, + { 0, 3 }, + }; + struct operation operation[ARRAY_SIZE(pri_order)]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = -EINPROGRESS; + 
int rc; + + /* Reset the service, and tell it to not finalize operations + * synchronously (so we can build up a queue). + */ + reset_service(false); + service.async = true; + + for (u32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + operation_init_spinwait(&operation[i]); + np[i] = &operation[i].operation.notify; + rc = service_submit(&service, &operation[i], + pri_order[i].priority); + zassert_equal(rc, 0, + "submit op%u failed: %d", i, rc); + zassert_equal(sys_notify_fetch_result(np[i], &res), -EAGAIN, + "op%u finalized!", i); + } + + zassert_equal(service.current, &operation[0], + "submit op0 didn't process"); + + /* Enable synchronous finalization and kick off the first + * entry. All the others will execute immediately. + */ + service.async = false; + service_finalize(&service, service.process_rv); + + for (u32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + size_t ordinal = pri_order[i].ordinal; + + zassert_equal(sys_notify_fetch_result(np[i], &res), 0, + "op%u unfinalized", i); + zassert_equal(res, ordinal, + "op%u wrong order: %d != %u", i, res, ordinal); + } +} + +static void test_special_priority(void) +{ + struct pri_order const pri_order[] = { + { 0, 0 }, /* first because it gets grabbed when submitted */ + /* rest gets tricky */ + { QUEUED_OPERATION_PRIORITY_APPEND, 3 }, + { INT8_MAX, 4 }, + { INT8_MIN, 2 }, + { QUEUED_OPERATION_PRIORITY_PREPEND, 1 }, + { QUEUED_OPERATION_PRIORITY_APPEND, 5 }, + }; + struct operation operation[ARRAY_SIZE(pri_order)]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = -EINPROGRESS; + int rc; + + /* Reset the service, and tell it to not finalize operations + * synchronously (so we can build up a queue). 
+ */ + reset_service(false); + service.async = true; + + for (u32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + operation_init_spinwait(&operation[i]); + np[i] = &operation[i].operation.notify; + rc = service_submit(&service, &operation[i], + pri_order[i].priority); + zassert_equal(rc, 0, + "submit op%u failed: %d", i, rc); + zassert_equal(sys_notify_fetch_result(np[i], &res), -EAGAIN, + "op%u finalized!", i); + } + + zassert_equal(service.current, &operation[0], + "submit op0 didn't process"); + + /* Enable synchronous finalization and kick off the first + * entry. All the others will execute immediately. + */ + service.async = false; + service_finalize(&service, service.process_rv); + + for (u32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + size_t ordinal = pri_order[i].ordinal; + + zassert_equal(sys_notify_fetch_result(np[i], &res), 0, + "op%u unfinalized", i); + zassert_equal(res, ordinal, + "op%u wrong order: %d != %u", i, res, ordinal); + } +} + +struct delayed_submit { + struct operation *op; + int priority; +}; + +static void test_delayed_submit(struct service *sp, + struct operation *op, + void *ud) +{ + struct delayed_submit *dsp = ud; + int rc = service_submit(sp, dsp->op, dsp->priority); + + zassert_equal(rc, 0, + "delayed submit failed: %d", rc); +} + +static void test_resubmit_priority(void) +{ + struct pri_order const pri_order[] = { + /* first because it gets grabbed when submitted */ + { 0, 0 }, + /* delayed by submit of higher priority during callback */ + { 0, 2 }, + /* submitted during completion of op0 */ + { -1, 1 }, + }; + size_t di = ARRAY_SIZE(pri_order) - 1; + struct operation operation[ARRAY_SIZE(pri_order)]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = -EINPROGRESS; + int rc; + + /* Queue two operations, but in the callback for the first + * schedule a third operation that has higher priority. 
+ */ + reset_service(false); + service.async = true; + + for (u32_t i = 0; i <= di; ++i) { + operation_init_callback(&operation[i], test_callback); + np[i] = &operation[i].operation.notify; + if (i < di) { + rc = service_submit(&service, &operation[i], 0); + zassert_equal(rc, 0, + "submit op%u failed: %d", i, rc); + zassert_equal(sys_notify_fetch_result(np[i], &res), + -EAGAIN, + "op%u finalized!", i); + } + } + + struct delayed_submit ds = { + .op = &operation[di], + .priority = pri_order[di].priority, + }; + operation[0].callback = test_delayed_submit; + operation[0].user_data = &ds; + + /* Enable synchronous finalization and kick off the first + * entry. All the others will execute immediately. + */ + service.async = false; + service_finalize(&service, service.process_rv); + + zassert_equal(service.process_cnt, ARRAY_SIZE(operation), + "not all processed once: %d != %d", + ARRAY_SIZE(operation), service.process_cnt); + + for (u32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + size_t ordinal = pri_order[i].ordinal; + + zassert_equal(sys_notify_fetch_result(np[i], &res), 0, + "op%u unfinalized", i); + zassert_equal(res, ordinal, + "op%u wrong order: %d != %u", i, res, ordinal); + } +} + +static void test_missing_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(false); + vtable.validate = NULL; + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, 0, + "submit spinwait failed: %d", rc); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait result"); +} + +static void test_success_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = 
&op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(false); + service.validate_rv = 57; + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit validation did not succeed as expected: %d", rc); +} + +static void test_failed_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(false); + service.validate_rv = -EINVAL; + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit validation did not fail as expected: %d", rc); +} + +static void test_callback_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + int expect = -ENOTSUP; + int rc = 0; + + reset_service(false); + vtable.callback = NULL; + + operation_init_callback(&operation, test_callback); + rc = service_submit(&service, op, 0); + zassert_equal(rc, expect, + "unsupported callback check failed: %d != %d", + rc, expect); +} + +static void test_priority_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + int expect = -EINVAL; + int rc = 0; + + reset_service(false); + + operation_init_callback(&operation, test_callback); + rc = service_submit(&service, op, 128); + zassert_equal(rc, expect, + "unsupported priority check failed: %d != %d", + rc, expect); +} + +static void test_cancel_active(void) +{ + struct operation operation; + struct operation *op = &operation; + int expect = -EINPROGRESS; + int rc = 0; + + reset_service(false); + service.async = true; + service.validate_rv = 152; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); 
+ zassert_equal(rc, service.validate_rv, + "submit failed: %d != %d", rc, service.validate_rv); + + rc = service_cancel(&service, op); + zassert_equal(rc, expect, + "cancel failed: %d != %d", rc, expect); +} + +static void test_cancel_inactive(void) +{ + struct operation operation[2]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + struct operation *op1 = &operation[1]; + int res; + int rc = 0; + + reset_service(false); + service.async = true; + + /* Set up two operations, but only submit the first. */ + for (u32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + operation_init_spinwait(&operation[i]); + np[i] = &operation[i].operation.notify; + if (i == 0) { + rc = service_submit(&service, &operation[i], 0); + zassert_equal(rc, service.validate_rv, + "submit failed: %d != %d", + rc, service.validate_rv); + } + } + + zassert_equal(service.current, &operation[0], + "current not op0"); + + zassert_equal(sys_notify_fetch_result(np[1], &res), -EAGAIN, + "op1 finalized!"); + + /* Verify attempt to cancel unsubmitted operation. */ + rc = service_cancel(&service, op1); + zassert_equal(rc, -EINVAL, + "cancel failed: %d != %d", rc, -EINVAL); + + /* Submit, then verify cancel succeeds. 
*/ + rc = service_submit(&service, op1, 0); + zassert_equal(rc, service.validate_rv, + "submit failed: %d != %d", rc, service.validate_rv); + + zassert_equal(sys_notify_fetch_result(np[1], &res), -EAGAIN, + "op1 finalized!"); + + rc = service_cancel(&service, op1); + zassert_equal(rc, 0, + "cancel failed: %d", rc); + + zassert_equal(sys_notify_fetch_result(np[1], &res), 0, + "op1 NOT finalized"); + zassert_equal(res, -ECANCELED, + "op1 cancel result unexpected: %d", res); + + service.async = false; + service_finalize(&service, service.process_rv); + zassert_equal(service.process_cnt, 1, + "too many processed"); +} + +static void test_async_idle(void) +{ + struct operation operation; + + reset_service(true); + service.async = true; + service.process_rv = 142; + + operation_init_spinwait(&operation); + service_submit(&service, &operation, 0); + service_finalize(&service, service.process_rv); + zassert_false(service.active, "service not idled"); +} + +static void test_onoff_success(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(true); + service.process_rv = 23; + service.async_onoff = true; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait failed: %d != %d", rc, + service.validate_rv); + zassert_equal(service.process_cnt, 0, + "unexpected process"); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "unexpected fetch succeeded"); + zassert_not_equal(service.onoff_notify, NULL, + "unexpected notifier"); + + service.active = true; + service.async_onoff = false; + service_onoff_notify(0); + + zassert_equal(service.process_cnt, 1, + "unexpected process"); + + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait result"); + + zassert_false(service.active, "service 
not idled"); +} + +static void test_onoff_start_failure(void) +{ + struct operation operation[2]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int onoff_res = -13; + int res = 0; + int rc = 0; + + reset_service(true); + service.async_onoff = true; + + /* Queue two operations that will block on onoff start */ + for (u32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + np[idx] = &operation[idx].operation.notify; + operation_init_spinwait(&operation[idx]); + + rc = service_submit(&service, &operation[idx], 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait %u failed: %d != %d", idx, + rc, service.validate_rv); + } + + zassert_equal(service.process_cnt, 0, + "unexpected process"); + for (u32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + zassert_equal(sys_notify_fetch_result(np[idx], &res), -EAGAIN, + "unexpected fetch %u succeeded", idx); + } + zassert_not_equal(service.onoff_notify, NULL, + "unexpected notifier"); + + /* Fail the start */ + service.async_onoff = false; + service_onoff_notify(onoff_res); + + zassert_equal(service.process_cnt, 0, + "unexpected process"); + + for (u32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + zassert_equal(sys_notify_fetch_result(np[idx], &res), 0, + "fetch %u failed", idx); + /* TBD: provide access to onoff result code? */ + zassert_equal(res, -ENODEV, + "fetch %u value failed", idx); + } +} + +/* Data used to submit an operation during an onoff transition. */ +struct onoff_restart_data { + struct operation *op; + int res; + bool invoked; +}; + +/* Mutate the operation list during a stop to force a restart. 
*/ +static void onoff_restart_stop(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + struct onoff_restart_data *dp = sp->data; + + if (dp) { + int rc = service_submit(sp, dp->op, 0); + + zassert_equal(rc, sp->validate_rv, + "submit spinwait failed: %d != %d", + rc, sp->validate_rv); + sp->data = NULL; + dp->invoked = true; + } + + basic_stop(mp, notify); +} + +static void test_onoff_restart(void) +{ + struct operation operation[2]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = 0; + int rc = 0; + + reset_service(true); + + struct onoff_transitions onoff_transitions = *service.onoff.transitions; + struct onoff_restart_data stop_data = { + .op = &operation[1], + }; + + service.data = &stop_data; + onoff_transitions.stop = onoff_restart_stop; + replace_service_onoff(&onoff_transitions); + + /* Initialize two operations. The first is submitted, onoff + * starts, invokes the first, then stops. During the stop the + * second is queued, which causes a restart when the stop + * completes. + */ + for (u32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + np[idx] = &operation[idx].operation.notify; + operation_init_spinwait(&operation[idx]); + + } + + rc = service_submit(&service, &operation[0], 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait 0 failed: %d != %d", + rc, service.validate_rv); + + zassert_equal(service.process_cnt, 2, + "unexpected process"); + + zassert_equal(stop_data.invoked, true, + "stop mock not invoked"); + + for (u32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + zassert_equal(sys_notify_fetch_result(np[idx], &res), 0, + "failed spinwait fetch"); + zassert_equal(res, 1 + idx, + "failed spinwait result"); + } +} + +/* Mutate the operation list during a stop to force a restart. 
*/ +static void onoff_stop_failure_stop(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + struct onoff_restart_data *dp = sp->data; + int rc = service_submit(sp, dp->op, 0); + + zassert_equal(rc, sp->validate_rv, + "submit spinwait failed: %d != %d", + rc, sp->validate_rv); + dp->invoked = true; + sp->onoff_release_rv = dp->res; + + basic_stop(mp, notify); +} + +static void test_onoff_stop_failure(void) +{ + struct operation operation[2]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = 0; + int rc = 0; + + reset_service(true); + + struct onoff_transitions onoff_transitions = *service.onoff.transitions; + struct onoff_restart_data stop_data = { + .op = &operation[1], + .res = -14, + }; + + service.data = &stop_data; + onoff_transitions.stop = onoff_stop_failure_stop; + replace_service_onoff(&onoff_transitions); + + /* Initialize two operations. The first is submitted, onoff + * starts, invokes the first, then stops. During the stop the + * second is queued, but the stop operation forces an error. 
+ */ + for (u32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + np[idx] = &operation[idx].operation.notify; + operation_init_spinwait(&operation[idx]); + } + + rc = service_submit(&service, &operation[0], 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait 0 failed: %d != %d", + rc, service.validate_rv); + + zassert_equal(service.process_cnt, 1, + "unexpected process"); + zassert_equal(stop_data.invoked, true, + "stop mock not invoked"); + + zassert_equal(sys_notify_fetch_result(np[0], &res), 0, + "failed spinwait 0 fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait 0 result"); + zassert_equal(sys_notify_fetch_result(np[1], &res), 0, + "failed spinwait 1 fetch"); + zassert_equal(res, -ENODEV, + "failed spinwait 1 result"); + + /* Verify that resubmits also return failure */ + + operation_init_spinwait(&operation[0]); + rc = service_submit(&service, &operation[0], 0); + zassert_equal(rc, -ENODEV, + "failed error submit"); +} + +void test_main(void) +{ + ztest_test_suite(queued_operation_api, + ztest_unit_test(test_notification_spinwait), + ztest_unit_test(test_notification_signal), + ztest_unit_test(test_notification_callback), + ztest_unit_test(test_sync_priority), + ztest_unit_test(test_special_priority), + ztest_unit_test(test_resubmit_priority), + ztest_unit_test(test_missing_validation), + ztest_unit_test(test_success_validation), + ztest_unit_test(test_failed_validation), + ztest_unit_test(test_callback_validation), + ztest_unit_test(test_priority_validation), + ztest_unit_test(test_async_idle), + ztest_unit_test(test_cancel_active), + ztest_unit_test(test_cancel_inactive), + ztest_unit_test(test_onoff_success), + ztest_unit_test(test_onoff_start_failure), + ztest_unit_test(test_onoff_restart), + ztest_unit_test(test_onoff_stop_failure)); + ztest_run_test_suite(queued_operation_api); +} diff --git a/tests/lib/queued_operation/testcase.yaml b/tests/lib/queued_operation/testcase.yaml new file mode 100644 index 
0000000000000..8558dedb9892b --- /dev/null +++ b/tests/lib/queued_operation/testcase.yaml @@ -0,0 +1,3 @@ +tests: + libraries.queued_operation: + tags: queued_operation timer