From 714f4eac8a4f337f9dacd3183beb402c1d13594f Mon Sep 17 00:00:00 2001 From: Peter Bigot Date: Thu, 30 Apr 2020 11:02:32 -0500 Subject: [PATCH 1/2] sys: onoff: clarify return value interpretation Text failed to distinguish between success of initiating a reset and success of the reset. Signed-off-by: Peter Bigot --- include/sys/onoff.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/sys/onoff.h b/include/sys/onoff.h index 456076aa01459..143fc6bd936be 100644 --- a/include/sys/onoff.h +++ b/include/sys/onoff.h @@ -471,7 +471,7 @@ static inline int onoff_cancel_or_release(struct onoff_manager *mgr, * operation. * * @retval non-negative the observed state of the machine at the time - * of the reset, if the reset succeeds. + * a reset was successfully initiated. * @retval -ENOTSUP if reset is not supported by the service. * @retval -EINVAL if the parameters are invalid. * @retval -EALREADY if the service does not have a recorded error. From 8bfd256afedb9ca508a7e15340132ccf7603747b Mon Sep 17 00:00:00 2001 From: Peter Bigot Date: Thu, 13 Feb 2020 12:19:53 -0600 Subject: [PATCH 2/2] lib: queued_operation: add API for managing a queue of operations Full asynchronous support for APIs such as bus transactions generally requires managing operations from unrelated clients. This API provides a data structure and functions to manage those operations generically, leaving the service to provide only the service-specific operation description and implementation. 
Signed-off-by: Peter Bigot --- doc/reference/resource_management/index.rst | 58 + include/sys/queued_operation.h | 343 ++++++ lib/os/CMakeLists.txt | 1 + lib/os/queued_operation.c | 526 ++++++++ tests/lib/queued_operation/CMakeLists.txt | 8 + tests/lib/queued_operation/prj.conf | 3 + tests/lib/queued_operation/src/main.c | 1207 +++++++++++++++++++ tests/lib/queued_operation/testcase.yaml | 3 + 8 files changed, 2149 insertions(+) create mode 100644 include/sys/queued_operation.h create mode 100644 lib/os/queued_operation.c create mode 100644 tests/lib/queued_operation/CMakeLists.txt create mode 100644 tests/lib/queued_operation/prj.conf create mode 100644 tests/lib/queued_operation/src/main.c create mode 100644 tests/lib/queued_operation/testcase.yaml diff --git a/doc/reference/resource_management/index.rst b/doc/reference/resource_management/index.rst index 533ee0117971e..0fd1943cd4f4c 100644 --- a/doc/reference/resource_management/index.rst +++ b/doc/reference/resource_management/index.rst @@ -86,3 +86,61 @@ state. .. doxygengroup:: resource_mgmt_onoff_apis :project: Zephyr + +.. _resource_mgmt_queued_operation: + +Queued Operation Manager +************************ + +While :ref:`resource_mgmt_onoff` supports a shared resource that must be +available as long as any user still depends on it, the queued operation +manager provides serialized exclusive access to a resource that executes +operations asynchronously. This can be used to support (for example) +ADC sampling for different sensors, or groups of bus transactions. +Clients submit a operation request that is processed when the device +becomes available, with clients being notified of the completion of the +operation though the standard :ref:`async_notification`. + +As with the on-off manager, the queued resource manager is a generic +infrastructure tool that should be used by a extending service, such as +an I2C bus controller or an ADC. 
The manager has the following +characteristics: + +* The stable states are idle and processing. The manager always begins + in the idle state. +* The core client operations are submit (add an operation) and cancel + (remove an operation before it starts). +* Ownership of the operation object transitions from the client to the + manager when a queue request is accepted, and is returned to the + client when the manager notifies the client of operation completion. +* The core client event is completion. Manager state changes only as a + side effect from submitting or completing an operation. +* The service transitions from idle to processing when an operation is + submitted. +* The service transitions from processing to idle when notification of + the last operation has completed and there are no queued operations. +* The manager selects the next operation to process when notification of + completion has itself completed. In particular, changes to the set of + pending operations that are made during a completion callback affect + the next operation to execute. +* Each submitted operation includes a priority that orders execution by + first-come-first-served within priority. +* Operations are asynchronous, with completion notification through the + :ref:`async_notification`. The operations and notifications are run + in a context that is service-specific. This may be one or more + dedicated threads, or work queues. Notifications may come from + interrupt handlers. Note that operations may complete before the + submit request has returned to its caller. + +The generic infrastructure holds the active operation and a queue of +pending operations. A service extension shall provide functions that: + +* check that a request is well-formed, i.e. 
can be added to the queue; +* receive notification that a new operation is to be processed, or that + no operations are available (allowing the service to enter a + power-down mode); +* translate a generic completion callback into a service-specific + callback. + +.. doxygengroup:: resource_mgmt_queued_operation_apis + :project: Zephyr diff --git a/include/sys/queued_operation.h b/include/sys/queued_operation.h new file mode 100644 index 0000000000000..2b4e47514e321 --- /dev/null +++ b/include/sys/queued_operation.h @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2019 Peter Bigot Consulting, LLC + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef ZEPHYR_INCLUDE_SYS_QUEUED_OPERATION_H_ +#define ZEPHYR_INCLUDE_SYS_QUEUED_OPERATION_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Forward declaration */ +struct queued_operation; +struct queued_operation_manager; + +/** + * @defgroup resource_mgmt_queued_operation_apis Queued Operation APIs + * @ingroup kernel_apis + * @{ + */ + +/** @internal */ +#define QUEUED_OPERATION_PRIORITY_POS SYS_NOTIFY_EXTENSION_POS +/** @internal */ +#define QUEUED_OPERATION_PRIORITY_BITS 8U +/** @internal */ +#define QUEUED_OPERATION_PRIORITY_MASK BIT_MASK(QUEUED_OPERATION_PRIORITY_BITS) + +/** + * @brief Special priority value to indicate operation should be + * placed last in current queue. + * + * This is like providing the lowest priority but uses a constant-time + * insertion and is FIFO. + */ +#define QUEUED_OPERATION_PRIORITY_APPEND \ + ((int)QUEUED_OPERATION_PRIORITY_MASK + 1) + +/** + * @brief Special priority value to indicate operation should be + * placed first in the current queue. + * + * This is like providing the highest priority but uses a + * constant-time insertion and is LIFO. 
+ */ +#define QUEUED_OPERATION_PRIORITY_PREPEND \ + ((int)QUEUED_OPERATION_PRIORITY_MASK + 2) + +/** + * @brief Identify the region of sys_notify flags available for + * containing services. + * + * Bits of the flags field of the sys_notify structure contained + * within the queued_operation structure at and above this position + * may be used by extensions to the sys_notify structure. + * + * These bits are intended for use by containing service + * implementations to record client-specific information. The bits + * are cleared by sys_notify_validate(). Use of these does not + * imply that the flags field becomes public API. + */ +#define QUEUED_OPERATION_EXTENSION_POS \ + (QUEUED_OPERATION_PRIORITY_POS + QUEUED_OPERATION_PRIORITY_BITS) + +/** + * @brief Base object providing state for an operation. + * + * Instances of this should be members of a service-specific structure + * that provides the operation parameters. + */ +struct queued_operation { + /** @internal + * + * Links the operation into the operation queue. + */ + sys_snode_t node; + + /** + * @brief Notification configuration. + * + * This must be initialized using sys_notify_init_callback() + * or its sibling functions before an operation can be passed + * to queued_operation_submit(). + * + * The queued operation manager provides specific error codes + * for failures identified at the manager level: + * * -ENODEV indicates a failure in an onoff service. + */ + struct sys_notify notify; +}; + +/** + * @ brief Table of functions used by a queued operation manager. + */ +struct queued_operation_functions { + /** + * @brief Function used to verify an operation is well-defined. + * + * When provided this function is invoked by + * queued_operation_submit() to verify that the operation + * definition meets the expectations of the service. The + * operation is acceptable only if a non-negative value is + * returned. 
+ * + * If not provided queued_operation_submit() will assume + * service-specific expectations are trivially satisfied, and + * will reject the operation only if sys_notify_validate() + * fails. Because that validation is limited services should + * at a minimum verify that the extension bits have the + * expected value (zero, when none are being used). + * + * @note The validate function must be isr-ok. + * + * @param mgr the service that supports queued operations. + * + * @param op the operation being considered for suitability. + * + * @return the value to be returned from queued_operation_submit(). + */ + int (*validate)(struct queued_operation_manager *mgr, + struct queued_operation *op); + + /** + * @brief Function to transform a generic notification + * callback to its service-specific form. + * + * The implementation should cast cb to the proper signature + * for the service, and invoke the cast pointer with the + * appropriate arguments. + * + * @note The callback function must be isr-ok. + * + * @param mgr the service that supports queued operations. + * + * @param op the operation that has been completed. + * + * @param cb the generic callback to invoke. + */ + void (*callback)(struct queued_operation_manager *mgr, + struct queued_operation *op, + sys_notify_generic_callback cb); + + /** + * @brief Function used to inform the manager of a new operation. + * + * This function can be called as a side effect of + * queued_operation_submit() or queued_operation_finalize() to + * tell the service that a new operation needs to be + * processed. + * + * Be aware that if processing is entirely + * synchronous--meaning queued_operation_finalize() can be + * invoked during process()--then the process() function will + * be invoked recursively, possibly with another operation. + * This can cause unbounded stack growth, and requires that + * process() be re-entrant. 
Generally the process() function + * should itself be async, with finalization done after + * process() returns. + * + * @note The process function must be isr-ok. + * + * @param mgr the service that supports queued operations. + * + * @param op the operation that should be initiated. A null + * pointer is passed if there are no pending operations. + */ + void (*process)(struct queued_operation_manager *mgr, + struct queued_operation *op); +}; + +/** + * @brief State associated with a manager instance. + */ +struct queued_operation_manager { + /* Links the operation into the operation queue. */ + sys_slist_t operations; + + /* Pointer to the functions that support the manager. */ + const struct queued_operation_functions *vtable; + + /* Pointer to an on-off service supporting this service. NULL + * if service is always available. + */ + struct onoff_manager *onoff; + + /* The state of on-off service requests. */ + struct onoff_client onoff_client; + + /* Lock controlling access to other fields. */ + struct k_spinlock lock; + + /* The operation that is being processed. */ + struct queued_operation *current; + + /* Information about the internal state of the manager. */ + uint32_t volatile state; +}; + +#define QUEUED_OPERATION_MANAGER_INITIALIZER(_vtable, _onoff) { \ + .vtable = _vtable, \ + .onoff = _onoff, \ +} + +/** + * @brief Submit an operation to be processed when the service is + * available. + * + * The service process function will be invoked during this call if + * the service is available. + * + * @param mgr a generic pointer to the service instance + * + * @param op a generic pointer to an operation to be performed. The + * notify field in the provided operation must have been initialized + * before being submitted, even if the operation description is being + * re-used. This may be done directly with sys_notify API or by + * wrapping it in a service-specific operation init function. 
+ * + * @param priority the priority of the operation relative to other + * operations. Numerically lower values are higher priority. Values + * outside the range of a signed 8-bit integer will be rejected, + * except for named priorities like QUEUED_OPERATION_PRIORITY_APPEND. + * + * @retval -ENOTSUP if callback notification is requested and the + * service does not provide a callback translation. This may also be + * returned due to service-specific validation. + * + * @retval -EINVAL if the passed priority is out of the range of + * supported priorities. This may also be returned due to + * service-specific validation. + * + * @return A negative value if the operation was rejected by service + * validation or due to other configuration errors. A non-negative + * value indicates the operation has been accepted for processing and + * completion notification will be provided. + */ +int queued_operation_submit(struct queued_operation_manager *mgr, + struct queued_operation *op, + int priority); + +/** + * @brief Helper to extract the result from a queued operation. + * + * This forwards to sys_notify_fetch_result(). + */ +static inline int queued_operation_fetch_result(const struct queued_operation *op, + int *result) +{ + return sys_notify_fetch_result(&op->notify, result); +} + +/** + * @brief Attempt to cancel a queued operation. + * + * Successful cancellation issues a completion notification with + * result -ECANCELED for the submitted operation before this function + * returns. + * + * @retval 0 if successfully cancelled. + * @retval -EINPROGRESS if op is currently being executed, so cannot + * be cancelled. + * @retval -EINVAL if op is neither being executed nor in the queue of + * pending operations + */ +int queued_operation_cancel(struct queued_operation_manager *mgr, + struct queued_operation *op); + +/** + * @brief Send the completion notification for a queued operation. 
+ * + * This function must be invoked by services that support queued + * operations when the operation provided to them through the process + * function have been completed. It is not intended to be invoked by + * users of a service. + * + * @param mgr a generic pointer to the service instance + * @param res the result of the operation, as with + * sys_notify_finalize(). + */ +void queued_operation_finalize(struct queued_operation_manager *mgr, + int res); + +/** + * @brief Test whether the queued operation service is in an error + * state. + * + * This function can be used to determine whether the service has + * recorded an error. These errors occur only when the manager's + * attempt to turn an underlying service on or off has itself produced + * an error, leaving the service in an undefined state. + * + * queued_operation_reset() may be invoked to attempt to clear an + * error. + * + * This is an convenience function that due to race conditions may + * incorrectly represent the state of the service at the moment + * control returns to the caller. + * + * @return true if and only if the service has an error. + */ +bool queued_operation_has_error(struct queued_operation_manager *mgr); + +/** + * @brief Attempt to reset the manager when it is in an error state. + * + * A queued operation service can only be reset when it is in an error + * state as indicated by queued_operation_has_error(). + * + * @note If a non-null @p cli is provided in a path where the manager + * successfully initiates a reset the return value from this function + * will reflect the success or failure of registering the independent + * notification through @p cli. + * + * @param mgr a generic pointer to the service instance. + * + * @param cli an optional client structure that will be passed to + * onoff_reset() on behalf of the service client to provide + * independent notification when a succesfully initiated reset has + * completed. 
+ * + * @retval nonnegative on successful initiation of a reset (when + * @p cli is null) + * @retval -EALREADY if the service has not recorded an error + * @retval other success and failure values from onoff_reset() + */ +int queued_operation_reset(struct queued_operation_manager *mgr, + struct onoff_client *cli); + +/** @} */ + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_INCLUDE_SYS_ASYNCNOTIFY_H_ */ diff --git a/lib/os/CMakeLists.txt b/lib/os/CMakeLists.txt index c19cc83cf2b4d..10289ddfab854 100644 --- a/lib/os/CMakeLists.txt +++ b/lib/os/CMakeLists.txt @@ -14,6 +14,7 @@ zephyr_sources( notify.c printk.c onoff.c + queued_operation.c rb.c sem.c thread_entry.c diff --git a/lib/os/queued_operation.c b/lib/os/queued_operation.c new file mode 100644 index 0000000000000..e189a1e939010 --- /dev/null +++ b/lib/os/queued_operation.c @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include + +/* States used in the manager state field. */ +enum state { + /* Service is not active. + * + * Transitions to STARTING on queued_operation_submit(). + */ + ST_OFF = 0, + + /* Service is being started. + * + * This is a transient state while an associated on-off + * service request is incomplete. Transitions to IDLE and + * reschedules on successful start, and ERROR on failure to + * start. + */ + ST_STARTING, + + /* Service is active with no known operations. + * + * This is a transient substate of an implicit ON state that + * encompasses IDLE, NOTIFYING, PROCESSING, and + * FINALIZING. The state can be observed only when the manager + * lock is held. The machine will transition to NOTIFYING or + * STOPPING within the current mutex region. + */ + ST_IDLE, + + /* The manager is invoking process() to notify the service of + * a new operation or a transition to idle. 
+ * + * Transitions to PROCESSING if an operation was passed and + * queued_operation_finalize() has not been invoked before + * process() returns to the manager. + * + * Transitions to IDLE if an operation was not passed and the + * manager queue remains empty when process() returns to the + * manager. + * + * Re-runs select in all other cases. + */ + ST_NOTIFYING, + + /* A new operation has been identified and the service + * process() function will be/is/has-been invoked on it. + * + * Transitions to FINALIZING when queued_operation_finalize() + * is invoked. + */ + ST_PROCESSING, + + /* An operation that was processing is being finalized. + * + * Re-selects after finalization and any containing notifying + * completes. + */ + ST_FINALIZING, + + /* Service is being started. + * + * This is a transient state while an associated on-off + * service request is pending. Transitions to OFF or ERROR + * based on the results of a call to onoff_release(). + */ + ST_STOPPING, + + /* Service is in an error state. + * + * This can be reached only as a consequence of a failure in + * the on-off service used to transition between OFF and IDLE. + * Transitions to RESETTING when queued_operation_reset() is + * invoked. + */ + ST_ERROR, + + /* Service is attempting to reset the underlying on-off service. + * + * Transitions to OFF when the reset attempt completes + * successfully, and to ERROR if the attempt fails. 
+ */ + ST_RESETTING, +}; + +static inline bool state_is_error(uint32_t state) +{ + return (state == ST_ERROR) || (state == ST_RESETTING); +} + +static void select_next_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key); +static void start_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key); + +static inline void trivial_start_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key) +{ + mgr->state = ST_IDLE; + select_next_and_unlock(mgr, key); +} + +static inline int op_get_priority(const struct queued_operation *op) +{ + return (int8_t)(op->notify.flags >> QUEUED_OPERATION_PRIORITY_POS); +} + +static inline int op_set_priority(struct queued_operation *op, + int priority) +{ + int8_t prio = (int8_t)priority; + uint32_t mask = (QUEUED_OPERATION_PRIORITY_MASK + << QUEUED_OPERATION_PRIORITY_POS); + + if (priority == QUEUED_OPERATION_PRIORITY_PREPEND) { + prio = INT8_MIN; + } else if (priority == QUEUED_OPERATION_PRIORITY_APPEND) { + prio = INT8_MAX; + } else if (prio != priority) { + return -EINVAL; + } + + op->notify.flags = (op->notify.flags & ~mask) + | (mask & (prio << QUEUED_OPERATION_PRIORITY_POS)); + + return 0; +} + +static inline void finalize_and_notify(struct queued_operation_manager *mgr, + struct queued_operation *op, + int res) +{ + sys_notify_generic_callback cb = sys_notify_finalize(&op->notify, res); + + if (cb != NULL) { + mgr->vtable->callback(mgr, op, cb); + } +} + +/* React to the completion of an onoff transition, either from a + * manager or directly. + * + * @param mgr the operation manager + * + * @param from either ST_STARTING or ST_STOPPING depending on + * transition direction + * + * @param res the transition completion value, negative for error. 
+ */ +static void settle_onoff(struct queued_operation_manager *mgr, + enum state from, + int res) +{ + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + __ASSERT_NO_MSG((mgr->state == from) + || (state_is_error(mgr->state) + && (ST_ERROR == from))); + + if (res >= 0) { + if (from == ST_STARTING) { + trivial_start_and_unlock(mgr, key); + return; + } + + /* Came from STOPPING or RESETTING so set to OFF. If + * the underlying onoff service is in an error state + * that will be discovered when it is next used. + */ + mgr->state = ST_OFF; + + k_spin_unlock(&mgr->lock, key); + return; + } + + /* On transition failure mark service failed. All unstarted + * operations are unlinked and completed as a service failure. + */ + sys_slist_t ops = mgr->operations; + + sys_slist_init(&mgr->operations); + mgr->state = ST_ERROR; + + k_spin_unlock(&mgr->lock, key); + + struct queued_operation *op; + + SYS_SLIST_FOR_EACH_CONTAINER(&ops, op, node) { + finalize_and_notify(mgr, op, -ENODEV); + } +} + +static void start_callback(struct onoff_manager *oomgr, + struct onoff_client *cli, + uint32_t state, + int res) +{ + struct queued_operation_manager *mgr + = CONTAINER_OF(cli, struct queued_operation_manager, onoff_client); + + settle_onoff(mgr, ST_STARTING, res); +} + +static void stop_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key) +{ + __ASSERT_NO_MSG(mgr->state == ST_IDLE); + + if (mgr->onoff == NULL) { + mgr->state = ST_OFF; + k_spin_unlock(&mgr->lock, key); + return; + } + + mgr->state = ST_STOPPING; + + k_spin_unlock(&mgr->lock, key); + + settle_onoff(mgr, ST_STOPPING, onoff_release(mgr->onoff)); +} + +/* Enters while locked, and looks for a new operation to initiate. + * The lock is released to process new operations, then retaken after + * process() returns. 
+ */ +static void select_next_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key) +{ + bool loop = false; + + do { + sys_snode_t *node = sys_slist_get(&mgr->operations); + uint32_t state = mgr->state; + + __ASSERT_NO_MSG((state == ST_IDLE) + || (state == ST_FINALIZING)); + loop = false; + if (node) { + struct queued_operation *op + = CONTAINER_OF(node, struct queued_operation, node); + mgr->state = ST_NOTIFYING; + mgr->current = op; + + k_spin_unlock(&mgr->lock, key); + + /* Notify the service, then check everything again + * because the operation might have completed or the + * queue might have changed while we were unlocked. + */ + mgr->vtable->process(mgr, op); + + /* Update the state to one of IDLE, PROCESSING, or + * leave it FINALIZING; loop if something needs to be + * done. + */ + key = k_spin_lock(&mgr->lock); + + state = mgr->state; + + /* If an operation finalized during notification we + * need to reselect because finalization couldn't do + * that, otherwise it's still running. + */ + loop = (state == ST_FINALIZING); + if (!loop) { + __ASSERT_NO_MSG(state == ST_NOTIFYING); + mgr->state = ST_PROCESSING; + } + } else { + __ASSERT_NO_MSG(state == ST_FINALIZING); + mgr->state = ST_IDLE; + mgr->current = NULL; + } + } while (loop); + + /* All done, transition to STOPPING if IDLE, and release the + * lock. 
+ */ + if (mgr->state == ST_IDLE) { + stop_and_unlock(mgr, key); + } else { + k_spin_unlock(&mgr->lock, key); + } +} + +static void start_and_unlock(struct queued_operation_manager *mgr, + k_spinlock_key_t key) +{ + struct onoff_manager *onoff = mgr->onoff; + struct onoff_client *cli = &mgr->onoff_client; + int rv = 0; + + if (onoff == NULL) { + trivial_start_and_unlock(mgr, key); + return; + } + + mgr->state = ST_STARTING; + k_spin_unlock(&mgr->lock, key); + + memset(cli, 0, sizeof(*cli)); + sys_notify_init_callback(&cli->notify, start_callback); + + rv = onoff_request(onoff, cli); + + if (rv < 0) { + /* Synchronous failure, record the error state */ + settle_onoff(mgr, ST_STARTING, rv); + } +} + +int queued_operation_submit(struct queued_operation_manager *mgr, + struct queued_operation *op, + int priority) +{ + int validate_rv = -ENOTSUP; + int rv = 0; + + __ASSERT_NO_MSG(mgr != NULL); + __ASSERT_NO_MSG(mgr->vtable != NULL); + __ASSERT_NO_MSG(mgr->vtable->process != NULL); + __ASSERT_NO_MSG(op != NULL); + + /* Validation is optional; if present, use it. */ + if (mgr->vtable->validate) { + validate_rv = mgr->vtable->validate(mgr, op); + rv = validate_rv; + } + + /* Set the priority, checking whether it's in range. */ + if (rv >= 0) { + rv = op_set_priority(op, priority); + } + + /* Reject callback notifications without translation + * function. + */ + if ((rv >= 0) + && sys_notify_uses_callback(&op->notify) + && (mgr->vtable->callback == NULL)) { + rv = -ENOTSUP; + } + + if (rv < 0) { + goto out; + } + + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + sys_slist_t *list = &mgr->operations; + uint32_t state = mgr->state; + + /* Preserve error state, or insert the item into the list. 
*/ + if (state_is_error(state)) { + rv = -ENODEV; + } else if (priority == QUEUED_OPERATION_PRIORITY_PREPEND) { + sys_slist_prepend(list, &op->node); + } else if (priority == QUEUED_OPERATION_PRIORITY_APPEND) { + sys_slist_append(list, &op->node); + } else { + struct queued_operation *prev = NULL; + struct queued_operation *tmp; + + SYS_SLIST_FOR_EACH_CONTAINER(list, tmp, node) { + if (priority < op_get_priority(tmp)) { + break; + } + prev = tmp; + } + + if (prev == NULL) { + sys_slist_prepend(list, &op->node); + } else { + sys_slist_insert(list, &prev->node, &op->node); + } + } + + /* Initiate an operation only if we're off. */ + if (state == ST_OFF) { + start_and_unlock(mgr, key); + } else { + __ASSERT_NO_MSG(state != ST_IDLE); + k_spin_unlock(&mgr->lock, key); + } + +out: + /* Preserve a service-specific success code on success */ + if ((rv >= 0) && (validate_rv >= 0)) { + rv = validate_rv; + } + + return rv; +} + +void queued_operation_finalize(struct queued_operation_manager *mgr, + int res) +{ + __ASSERT_NO_MSG(mgr != NULL); + + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + struct queued_operation *op = mgr->current; + uint32_t state = mgr->state; + bool processing = (state == ST_PROCESSING); + + __ASSERT_NO_MSG(op != NULL); + __ASSERT_NO_MSG((state == ST_NOTIFYING) + || (state == ST_PROCESSING)); + + mgr->state = ST_FINALIZING; + + k_spin_unlock(&mgr->lock, key); + + finalize_and_notify(mgr, op, res); + + /* If we were processing we need to reselect; if we were + * notifying we'll reselect when the notification completes. 
+ */ + if (processing) { + select_next_and_unlock(mgr, k_spin_lock(&mgr->lock)); + } +} + +int queued_operation_cancel(struct queued_operation_manager *mgr, + struct queued_operation *op) +{ + __ASSERT_NO_MSG(mgr != NULL); + __ASSERT_NO_MSG(op != NULL); + + int rv = 0; + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + if (op == mgr->current) { + rv = -EINPROGRESS; + } else if (!sys_slist_find_and_remove(&mgr->operations, &op->node)) { + rv = -EINVAL; + } + + k_spin_unlock(&mgr->lock, key); + + if (rv == 0) { + finalize_and_notify(mgr, op, -ECANCELED); + } + + return rv; +} + +bool queued_operation_has_error(struct queued_operation_manager *mgr) +{ + __ASSERT_NO_MSG(mgr != NULL); + + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + + bool rv = state_is_error(mgr->state); + + k_spin_unlock(&mgr->lock, key); + + return rv; +} + +static void reset_callback(struct onoff_manager *oomgr, + struct onoff_client *cli, + uint32_t state, + int res) +{ + struct queued_operation_manager *mgr + = CONTAINER_OF(cli, struct queued_operation_manager, onoff_client); + + settle_onoff(mgr, ST_ERROR, res); +} + +int queued_operation_reset(struct queued_operation_manager *mgr, + struct onoff_client *oocli) +{ + __ASSERT_NO_MSG(mgr != NULL); + + int rv = -ENOTSUP; + + if (mgr->onoff == NULL) { + goto out; + } + + k_spinlock_key_t key = k_spin_lock(&mgr->lock); + uint32_t state = mgr->state; + + if (!state_is_error(state)) { + rv = -EALREADY; + goto out_locked; + } + + if (state == ST_ERROR) { + struct onoff_client *cli = &mgr->onoff_client; + + memset(cli, 0, sizeof(*cli)); + sys_notify_init_callback(&cli->notify, reset_callback); + + mgr->state = ST_RESETTING; + + k_spin_unlock(&mgr->lock, key); + + rv = onoff_reset(mgr->onoff, cli); + + key = k_spin_lock(&mgr->lock); + + if (rv < 0) { + mgr->state = ST_ERROR; + goto out_locked; + } + } + +out_locked: + k_spin_unlock(&mgr->lock, key); + +out: + if ((rv >= 0) && (oocli != NULL)) { + rv = onoff_reset(mgr->onoff, oocli); + } + + return 
rv; +} diff --git a/tests/lib/queued_operation/CMakeLists.txt b/tests/lib/queued_operation/CMakeLists.txt new file mode 100644 index 0000000000000..90b0fb17ea108 --- /dev/null +++ b/tests/lib/queued_operation/CMakeLists.txt @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: Apache-2.0 + +cmake_minimum_required(VERSION 3.13.1) +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(queued_operation) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/lib/queued_operation/prj.conf b/tests/lib/queued_operation/prj.conf new file mode 100644 index 0000000000000..b008d47339881 --- /dev/null +++ b/tests/lib/queued_operation/prj.conf @@ -0,0 +1,3 @@ +CONFIG_POLL=y +CONFIG_ZTEST=y +CONFIG_ZTEST_STACKSIZE=2048 diff --git a/tests/lib/queued_operation/src/main.c b/tests/lib/queued_operation/src/main.c new file mode 100644 index 0000000000000..715b1d3b92200 --- /dev/null +++ b/tests/lib/queued_operation/src/main.c @@ -0,0 +1,1207 @@ +/* + * Copyright (c) 2020 Nordic Semiconductor ASA + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include + +struct service; + +struct operation { + struct queued_operation operation; + void (*callback)(struct service *sp, + struct operation *op, + void *ud); + void *user_data; +}; + +struct service { + /* State of the manager */ + struct queued_operation_manager manager; + + /* State for an on-off service optionally used by the manager. */ + struct onoff_manager onoff; + + /* Value to return from basic_request handler. */ + int onoff_request_rv; + + /* Value to return from basic_release handler. */ + int onoff_release_rv; + + /* Value to return from basic_reset handler. */ + int onoff_reset_rv; + + /* Notifier to use when async_onoff is set. */ + onoff_notify_fn onoff_notify; + + /* The current operation cast for this service type. Null if + * service is idle. 
+	 */
+	struct operation *current;
+
+	/* Value to return from service_impl_validate() */
+	int validate_rv;
+
+	/* Value to use as the operation result when finalizing.
+	 *
+	 * This is incremented before each synchronous finalization by
+	 * service_impl_process.
+	 */
+	int process_rv;
+
+	/* Parameters passed to test_callback */
+	struct operation *callback_op;
+	int callback_res;
+
+	/* Count of process submissions since reset. */
+	size_t process_cnt;
+
+	/* Test-specific data associated with the service. */
+	void *data;
+
+	/* If set defer notification of onoff operation.
+	 *
+	 * The callback to invoke will be stored in onoff_notify.
+	 */
+	bool async_onoff;
+
+	/* If set inhibit synchronous completion. */
+	bool async;
+
+	/* Set to indicate that the last process() call provided an
+	 * operation.
+	 */
+	bool active;
+};
+
+static void basic_start(struct onoff_manager *mp,
+			onoff_notify_fn notify)
+{
+	struct service *sp = CONTAINER_OF(mp, struct service, onoff);
+
+	if (sp->async_onoff) {
+		__ASSERT_NO_MSG(sp->onoff_notify == NULL);
+		sp->onoff_notify = notify;
+	} else {
+		sp->active = sp->onoff_request_rv >= 0;
+		notify(mp, sp->onoff_request_rv);
+	}
+}
+
+static void basic_stop(struct onoff_manager *mp,
+		       onoff_notify_fn notify)
+{
+	struct service *sp = CONTAINER_OF(mp, struct service, onoff);
+
+	if (sp->async_onoff) {
+		__ASSERT_NO_MSG(sp->onoff_notify == NULL);
+		sp->onoff_notify = notify;
+	} else {
+		sp->active = false;
+		notify(mp, sp->onoff_release_rv);
+	}
+}
+
+static void basic_reset(struct onoff_manager *mp,
+			onoff_notify_fn notify)
+{
+	struct service *sp = CONTAINER_OF(mp, struct service, onoff);
+
+	if (sp->async_onoff) {
+		__ASSERT_NO_MSG(sp->onoff_notify == NULL);
+		sp->onoff_notify = notify;
+	} else {
+		sp->active = false;
+		notify(mp, sp->onoff_reset_rv);
+	}
+}
+
+static struct onoff_transitions const basic_onoff_transitions = {
+	.start = basic_start,
+	.stop = basic_stop,
+	.reset = basic_reset,
+};
+
+typedef void 
(*service_callback)(struct service *sp, + struct operation *op, + int res); + +static void test_callback(struct service *sp, + struct operation *op, + int res) +{ + sp->callback_op = op; + sp->callback_res = res; + if (op->callback) { + op->callback(sp, op, op->user_data); + } +} + +static inline void operation_init_spinwait(struct operation *op) +{ + *op = (struct operation){}; + sys_notify_init_spinwait(&op->operation.notify); +} + +static inline void operation_init_signal(struct operation *op, + struct k_poll_signal *sigp) +{ + *op = (struct operation){}; + sys_notify_init_signal(&op->operation.notify, sigp); +} + +static inline void operation_init_callback(struct operation *op, + service_callback handler) +{ + *op = (struct operation){}; + sys_notify_init_callback(&op->operation.notify, + (sys_notify_generic_callback)handler); +} + +static int service_submit(struct service *sp, + struct operation *op, + int priority) +{ + return queued_operation_submit(&sp->manager, &op->operation, priority); +} + +static int service_cancel(struct service *sp, + struct operation *op) +{ + return queued_operation_cancel(&sp->manager, &op->operation); +} + +static int service_impl_validate(struct queued_operation_manager *mgr, + struct queued_operation *op) +{ + struct service *sp = CONTAINER_OF(mgr, struct service, manager); + + return sp->validate_rv; +} + +static bool service_has_error(struct service *sp) +{ + return queued_operation_has_error(&sp->manager); +} + +static int service_reset(struct service *sp, + struct onoff_client *oocli) +{ + return queued_operation_reset(&sp->manager, oocli); +} + +static void service_impl_callback(struct queued_operation_manager *mgr, + struct queued_operation *op, + sys_notify_generic_callback cb) +{ + service_callback handler = (service_callback)cb; + struct service *sp = CONTAINER_OF(mgr, struct service, manager); + struct operation *sop = CONTAINER_OF(op, struct operation, operation); + int res = -EINPROGRESS; + + 
zassert_equal(queued_operation_fetch_result(op, &res), 0, + "callback before finalized"); + handler(sp, sop, res); +} + +/* Split out finalization to support async testing. */ +static void service_finalize(struct service *sp, + int res) +{ + struct queued_operation *op = &sp->current->operation; + + sp->current = NULL; + (void)op; + queued_operation_finalize(&sp->manager, res); +} + +static void service_impl_process(struct queued_operation_manager *mgr, + struct queued_operation *op) +{ + struct service *sp = CONTAINER_OF(mgr, struct service, manager); + + zassert_equal(sp->current, NULL, + "process collision"); + + sp->process_cnt++; + sp->active = (op != NULL); + if (sp->active) { + sp->current = CONTAINER_OF(op, struct operation, operation); + if (!sp->async) { + service_finalize(sp, ++sp->process_rv); + } + } +} + +static struct queued_operation_functions const service_vtable = { + .validate = service_impl_validate, + .callback = service_impl_callback, + .process = service_impl_process, +}; +/* Live copy, mutated for testing. 
*/ +static struct queued_operation_functions vtable; + +static struct service service = { + .manager = QUEUED_OPERATION_MANAGER_INITIALIZER(&vtable, + &service.onoff), + .onoff = { + .transitions = &basic_onoff_transitions, + }, +}; + +static void service_onoff_notify(int res) +{ + onoff_notify_fn notify = service.onoff_notify; + + __ASSERT_NO_MSG(notify != NULL); + service.onoff_notify = NULL; + + notify(&service.onoff, res); +} + +static void reset_service(bool onoff) +{ + vtable = service_vtable; + service = (struct service){ + .manager = QUEUED_OPERATION_MANAGER_INITIALIZER(&vtable, + &service.onoff), + .onoff = { + .transitions = &basic_onoff_transitions, + }, + }; + + if (!onoff) { + service.manager.onoff = NULL; + } +} + +static void replace_service_onoff(struct onoff_transitions *transitions) +{ + service.onoff.transitions = transitions; +} + +static void test_notification_spinwait(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(true); + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait failed: %d != %d", rc, + service.validate_rv); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait result"); + + zassert_false(service.active, "service not idled"); +} + +static void test_notification_signal(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + struct k_poll_signal sig; + unsigned int signaled; + int res = 0; + int rc = 0; + + reset_service(false); + + k_poll_signal_init(&sig); + operation_init_signal(op, &sig); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed signal 
unfinalized"); + k_poll_signal_check(&sig, &signaled, &res); + zassert_equal(signaled, 0, + "failed signal unsignaled"); + + service.process_rv = 23; + rc = service_submit(&service, op, 0); + zassert_equal(rc, 0, + "submit signal failed: %d", rc); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed signal fetch"); + zassert_equal(res, service.process_rv, + "failed signal result"); + k_poll_signal_check(&sig, &signaled, &res); + zassert_equal(signaled, 1, + "failed signal signaled"); + zassert_equal(res, service.process_rv, + "failed signal signal result"); +} + +static void test_notification_callback(void) +{ + struct operation operation; + struct operation *op = &operation; + struct service *sp = &service; + struct sys_notify *np = &op->operation.notify; + struct k_poll_signal sig; + int res = 0; + int rc = 0; + + reset_service(false); + + k_poll_signal_init(&sig); + operation_init_callback(op, test_callback); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed callback unfinalized"); + zassert_equal(sp->callback_op, NULL, + "failed callback pre-check"); + + service.process_rv = 142; + rc = service_submit(&service, op, 0); + zassert_equal(rc, 0, + "submit callback failed: %d", rc); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed callback fetch"); + zassert_equal(res, service.process_rv, + "failed callback result"); + zassert_equal(sp->callback_op, op, + "failed callback captured op"); + zassert_equal(sp->callback_res, service.process_rv, + "failed callback captured res"); +} + +struct pri_order { + int priority; + size_t ordinal; +}; + +static void test_sync_priority(void) +{ + struct pri_order const pri_order[] = { + { 0, 0 }, /* first because it gets grabbed when submitted */ + /* rest in FIFO within priority */ + { -1, 2 }, + { 1, 4 }, + { -2, 1 }, + { 2, 6 }, + { 1, 5 }, + { 0, 3 }, + }; + struct operation operation[ARRAY_SIZE(pri_order)]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = -EINPROGRESS; + 
int rc; + + /* Reset the service, and tell it to not finalize operations + * synchronously (so we can build up a queue). + */ + reset_service(false); + service.async = true; + + for (uint32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + operation_init_spinwait(&operation[i]); + np[i] = &operation[i].operation.notify; + rc = service_submit(&service, &operation[i], + pri_order[i].priority); + zassert_equal(rc, 0, + "submit op%u failed: %d", i, rc); + zassert_equal(sys_notify_fetch_result(np[i], &res), -EAGAIN, + "op%u finalized!", i); + } + + zassert_equal(service.current, &operation[0], + "submit op0 didn't process"); + + /* Enable synchronous finalization and kick off the first + * entry. All the others will execute immediately. + */ + service.async = false; + service_finalize(&service, service.process_rv); + + for (uint32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + size_t ordinal = pri_order[i].ordinal; + + zassert_equal(sys_notify_fetch_result(np[i], &res), 0, + "op%u unfinalized", i); + zassert_equal(res, ordinal, + "op%u wrong order: %d != %u", i, res, ordinal); + } +} + +static void test_special_priority(void) +{ + struct pri_order const pri_order[] = { + { 0, 0 }, /* first because it gets grabbed when submitted */ + /* rest gets tricky */ + { QUEUED_OPERATION_PRIORITY_APPEND, 3 }, + { INT8_MAX, 4 }, + { INT8_MIN, 2 }, + { QUEUED_OPERATION_PRIORITY_PREPEND, 1 }, + { QUEUED_OPERATION_PRIORITY_APPEND, 5 }, + }; + struct operation operation[ARRAY_SIZE(pri_order)]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = -EINPROGRESS; + int rc; + + /* Reset the service, and tell it to not finalize operations + * synchronously (so we can build up a queue). 
+ */ + reset_service(false); + service.async = true; + + for (uint32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + operation_init_spinwait(&operation[i]); + np[i] = &operation[i].operation.notify; + rc = service_submit(&service, &operation[i], + pri_order[i].priority); + zassert_equal(rc, 0, + "submit op%u failed: %d", i, rc); + zassert_equal(sys_notify_fetch_result(np[i], &res), -EAGAIN, + "op%u finalized!", i); + } + + zassert_equal(service.current, &operation[0], + "submit op0 didn't process"); + + /* Enable synchronous finalization and kick off the first + * entry. All the others will execute immediately. + */ + service.async = false; + service_finalize(&service, service.process_rv); + + for (uint32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + size_t ordinal = pri_order[i].ordinal; + + zassert_equal(sys_notify_fetch_result(np[i], &res), 0, + "op%u unfinalized", i); + zassert_equal(res, ordinal, + "op%u wrong order: %d != %u", i, res, ordinal); + } +} + +struct delayed_submit { + struct operation *op; + int priority; +}; + +static void test_delayed_submit(struct service *sp, + struct operation *op, + void *ud) +{ + struct delayed_submit *dsp = ud; + int rc = service_submit(sp, dsp->op, dsp->priority); + + zassert_equal(rc, 0, + "delayed submit failed: %d", rc); +} + +static void test_resubmit_priority(void) +{ + struct pri_order const pri_order[] = { + /* first because it gets grabbed when submitted */ + { 0, 0 }, + /* delayed by submit of higher priority during callback */ + { 0, 2 }, + /* submitted during completion of op0 */ + { -1, 1 }, + }; + size_t di = ARRAY_SIZE(pri_order) - 1; + struct operation operation[ARRAY_SIZE(pri_order)]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = -EINPROGRESS; + int rc; + + /* Queue two operations, but in the callback for the first + * schedule a third operation that has higher priority. 
+ */ + reset_service(false); + service.async = true; + + for (uint32_t i = 0; i <= di; ++i) { + operation_init_callback(&operation[i], test_callback); + np[i] = &operation[i].operation.notify; + if (i < di) { + rc = service_submit(&service, &operation[i], 0); + zassert_equal(rc, 0, + "submit op%u failed: %d", i, rc); + zassert_equal(sys_notify_fetch_result(np[i], &res), + -EAGAIN, + "op%u finalized!", i); + } + } + + struct delayed_submit ds = { + .op = &operation[di], + .priority = pri_order[di].priority, + }; + operation[0].callback = test_delayed_submit; + operation[0].user_data = &ds; + + /* Enable synchronous finalization and kick off the first + * entry. All the others will execute immediately. + */ + service.async = false; + service_finalize(&service, service.process_rv); + + zassert_equal(service.process_cnt, ARRAY_SIZE(operation), + "not all processed once: %d != %d", + ARRAY_SIZE(operation), service.process_cnt); + + for (uint32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + size_t ordinal = pri_order[i].ordinal; + + zassert_equal(sys_notify_fetch_result(np[i], &res), 0, + "op%u unfinalized", i); + zassert_equal(res, ordinal, + "op%u wrong order: %d != %u", i, res, ordinal); + } +} + +static void test_missing_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(false); + vtable.validate = NULL; + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, 0, + "submit spinwait failed: %d", rc); + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait result"); +} + +static void test_success_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = 
&op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(false); + service.validate_rv = 57; + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit validation did not succeed as expected: %d", rc); +} + +static void test_failed_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(false); + service.validate_rv = -EINVAL; + + operation_init_spinwait(&operation); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "failed spinwait unfinalized"); + + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit validation did not fail as expected: %d", rc); +} + +static void test_callback_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + int expect = -ENOTSUP; + int rc = 0; + + reset_service(false); + vtable.callback = NULL; + + operation_init_callback(&operation, test_callback); + rc = service_submit(&service, op, 0); + zassert_equal(rc, expect, + "unsupported callback check failed: %d != %d", + rc, expect); +} + +static void test_priority_validation(void) +{ + struct operation operation; + struct operation *op = &operation; + int expect = -EINVAL; + int rc = 0; + + reset_service(false); + + operation_init_callback(&operation, test_callback); + rc = service_submit(&service, op, 128); + zassert_equal(rc, expect, + "unsupported priority check failed: %d != %d", + rc, expect); +} + +static void test_cancel_active(void) +{ + struct operation operation; + struct operation *op = &operation; + int expect = -EINPROGRESS; + int rc = 0; + + reset_service(false); + service.async = true; + service.validate_rv = 152; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); 
+ zassert_equal(rc, service.validate_rv, + "submit failed: %d != %d", rc, service.validate_rv); + + rc = service_cancel(&service, op); + zassert_equal(rc, expect, + "cancel failed: %d != %d", rc, expect); +} + +static void test_cancel_inactive(void) +{ + struct operation operation[2]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + struct operation *op1 = &operation[1]; + int res; + int rc = 0; + + reset_service(false); + service.async = true; + + /* Set up two operations, but only submit the first. */ + for (uint32_t i = 0; i < ARRAY_SIZE(operation); ++i) { + operation_init_spinwait(&operation[i]); + np[i] = &operation[i].operation.notify; + if (i == 0) { + rc = service_submit(&service, &operation[i], 0); + zassert_equal(rc, service.validate_rv, + "submit failed: %d != %d", + rc, service.validate_rv); + } + } + + zassert_equal(service.current, &operation[0], + "current not op0"); + + zassert_equal(sys_notify_fetch_result(np[1], &res), -EAGAIN, + "op1 finalized!"); + + /* Verify attempt to cancel unsubmitted operation. */ + rc = service_cancel(&service, op1); + zassert_equal(rc, -EINVAL, + "cancel failed: %d != %d", rc, -EINVAL); + + /* Submit, then verify cancel succeeds. 
*/ + rc = service_submit(&service, op1, 0); + zassert_equal(rc, service.validate_rv, + "submit failed: %d != %d", rc, service.validate_rv); + + zassert_equal(sys_notify_fetch_result(np[1], &res), -EAGAIN, + "op1 finalized!"); + + rc = service_cancel(&service, op1); + zassert_equal(rc, 0, + "cancel failed: %d", rc); + + zassert_equal(sys_notify_fetch_result(np[1], &res), 0, + "op1 NOT finalized"); + zassert_equal(res, -ECANCELED, + "op1 cancel result unexpected: %d", res); + + service.async = false; + service_finalize(&service, service.process_rv); + zassert_equal(service.process_cnt, 1, + "too many processed"); +} + +static void test_async_idle(void) +{ + struct operation operation; + + reset_service(true); + service.async = true; + service.process_rv = 142; + + operation_init_spinwait(&operation); + service_submit(&service, &operation, 0); + service_finalize(&service, service.process_rv); + zassert_false(service.active, "service not idled"); +} + +static void test_onoff_success(void) +{ + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(true); + service.process_rv = 23; + service.async_onoff = true; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait failed: %d != %d", rc, + service.validate_rv); + zassert_equal(service.process_cnt, 0, + "unexpected process"); + zassert_equal(sys_notify_fetch_result(np, &res), -EAGAIN, + "unexpected fetch succeeded"); + zassert_not_equal(service.onoff_notify, NULL, + "unexpected notifier"); + + service.active = true; + service.async_onoff = false; + service_onoff_notify(0); + + zassert_equal(service.process_cnt, 1, + "unexpected process"); + + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait result"); + + zassert_false(service.active, "service 
not idled"); +} + +static void test_onoff_start_sync_failure(void) +{ + struct onoff_manager *oosrv; + struct onoff_client oocli; + struct operation operation; + struct operation *op = &operation; + struct sys_notify *np = &op->operation.notify; + int res = 0; + int rc = 0; + + reset_service(true); + + /* Force onoff service into error state. */ + service.onoff_request_rv = -14; + + oosrv = service.manager.onoff; + oocli = (struct onoff_client){}; + sys_notify_init_spinwait(&oocli.notify); + + /* Request will succeed, transition will fail putting service + * into error state, which will cause a failure when the + * queued operation manager attempts to start the service. + */ + rc = onoff_request(oosrv, &oocli); + zassert_equal(rc, 0, + "oo req: %d", rc); + zassert_equal(sys_notify_fetch_result(&oocli.notify, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, service.onoff_request_rv, + "res: %d", rc); + zassert_true(onoff_has_error(oosrv), + "onoff error"); + + service.onoff_request_rv = 0; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait failed: %d != %d", rc, + service.validate_rv); + + zassert_equal(sys_notify_fetch_result(np, &res), 0, + "failed spinwait fetch"); + zassert_equal(res, -ENODEV, + "failed spinwait result: %d", res); + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + +} + +static void test_onoff_start_failure(void) +{ + struct operation operation[2]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int onoff_res = -13; + int res = 0; + int rc = 0; + + reset_service(true); + service.async_onoff = true; + + /* Queue two operations that will block on onoff start */ + for (uint32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + np[idx] = &operation[idx].operation.notify; + operation_init_spinwait(&operation[idx]); + + rc = service_submit(&service, &operation[idx], 0); + zassert_equal(rc, service.validate_rv, + "submit 
spinwait %u failed: %d != %d", idx, + rc, service.validate_rv); + } + + zassert_equal(service.process_cnt, 0, + "unexpected process"); + for (uint32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + zassert_equal(sys_notify_fetch_result(np[idx], &res), -EAGAIN, + "unexpected fetch %u succeeded", idx); + } + zassert_not_equal(service.onoff_notify, NULL, + "unexpected notifier"); + + /* Fail the start */ + service.async_onoff = false; + service_onoff_notify(onoff_res); + + zassert_equal(service.process_cnt, 0, + "unexpected process"); + + for (uint32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + zassert_equal(sys_notify_fetch_result(np[idx], &res), 0, + "fetch %u failed", idx); + /* TBD: provide access to onoff result code? */ + zassert_equal(res, -ENODEV, + "fetch %u value failed", idx); + } +} + +/* Data used to submit an operation during an onoff transition. */ +struct onoff_restart_data { + struct operation *op; + int res; + bool invoked; +}; + +/* Mutate the operation list during a stop to force a restart. */ +static void onoff_restart_stop(struct onoff_manager *mp, + onoff_notify_fn notify) +{ + struct service *sp = CONTAINER_OF(mp, struct service, onoff); + struct onoff_restart_data *dp = sp->data; + + if (dp) { + int rc = service_submit(sp, dp->op, 0); + + zassert_equal(rc, sp->validate_rv, + "submit spinwait failed: %d != %d", + rc, sp->validate_rv); + sp->data = NULL; + dp->invoked = true; + } + + basic_stop(mp, notify); +} + +static void test_onoff_restart(void) +{ + struct operation operation[2]; + struct sys_notify *np[ARRAY_SIZE(operation)]; + int res = 0; + int rc = 0; + + reset_service(true); + + struct onoff_transitions onoff_transitions = *service.onoff.transitions; + struct onoff_restart_data stop_data = { + .op = &operation[1], + }; + + service.data = &stop_data; + onoff_transitions.stop = onoff_restart_stop; + replace_service_onoff(&onoff_transitions); + + /* Initialize two operations. 
The first is submitted, onoff
+	 * starts, invokes the first, then stops. During the stop the
+	 * second is queued, which causes a restart when the stop
+	 * completes.
+	 */
+	for (uint32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) {
+		np[idx] = &operation[idx].operation.notify;
+		operation_init_spinwait(&operation[idx]);
+
+	}
+
+	rc = service_submit(&service, &operation[0], 0);
+	zassert_equal(rc, service.validate_rv,
+		      "submit spinwait 0 failed: %d != %d",
+		      rc, service.validate_rv);
+
+	zassert_equal(service.process_cnt, 2,
+		      "unexpected process");
+
+	zassert_equal(stop_data.invoked, true,
+		      "stop mock not invoked");
+
+	for (uint32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) {
+		zassert_equal(sys_notify_fetch_result(np[idx], &res), 0,
+			      "failed spinwait fetch");
+		zassert_equal(res, 1 + idx,
+			      "failed spinwait result");
+	}
+}
+
+/* Mutate the operation list during a stop that is forced to fail. */
+static void onoff_stop_failure_stop(struct onoff_manager *mp,
+				    onoff_notify_fn notify)
+{
+	struct service *sp = CONTAINER_OF(mp, struct service, onoff);
+	struct onoff_restart_data *dp = sp->data;
+	int rc = service_submit(sp, dp->op, 0);
+
+	zassert_equal(rc, sp->validate_rv,
+		      "submit spinwait failed: %d != %d",
+		      rc, sp->validate_rv);
+	dp->invoked = true;
+	sp->onoff_release_rv = dp->res;
+
+	basic_stop(mp, notify);
+}
+
+static void test_onoff_stop_failure(void)
+{
+	struct operation operation[2];
+	struct sys_notify *np[ARRAY_SIZE(operation)];
+	int res = 0;
+	int rc = 0;
+
+	reset_service(true);
+
+	struct onoff_transitions onoff_transitions = *service.onoff.transitions;
+	struct onoff_restart_data stop_data = {
+		.op = &operation[1],
+		.res = -14,
+	};
+
+	service.data = &stop_data;
+	onoff_transitions.stop = onoff_stop_failure_stop;
+	replace_service_onoff(&onoff_transitions);
+
+	/* Initialize two operations. The first is submitted, onoff
+	 * starts, invokes the first, then stops. 
During the stop the + * second is queued, but the stop operation forces an error. + */ + for (uint32_t idx = 0; idx < ARRAY_SIZE(operation); ++idx) { + np[idx] = &operation[idx].operation.notify; + operation_init_spinwait(&operation[idx]); + } + + rc = service_submit(&service, &operation[0], 0); + zassert_equal(rc, service.validate_rv, + "submit spinwait 0 failed: %d != %d", + rc, service.validate_rv); + + zassert_equal(service.process_cnt, 1, + "unexpected process"); + zassert_equal(stop_data.invoked, true, + "stop mock not invoked"); + + zassert_equal(sys_notify_fetch_result(np[0], &res), 0, + "failed spinwait 0 fetch"); + zassert_equal(res, service.process_rv, + "failed spinwait 0 result"); + zassert_equal(sys_notify_fetch_result(np[1], &res), 0, + "failed spinwait 1 fetch"); + zassert_equal(res, -ENODEV, + "failed spinwait 1 result"); + + /* Verify that resubmits also return failure */ + + operation_init_spinwait(&operation[0]); + rc = service_submit(&service, &operation[0], 0); + zassert_equal(rc, -ENODEV, + "failed error submit"); +} + +static void test_has_error(void) +{ + struct operation operation; + struct operation *op = &operation; + int rc = 0; + + reset_service(true); + service.onoff_request_rv = -3; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + + zassert_true(service_has_error(&service), + "missing error"); +} + +static void test_reset_notsup(void) +{ + struct operation operation; + struct operation *op = &operation; + int rc = 0; + + reset_service(false); + + zassert_false(service_has_error(&service), + "missing error"); + rc = service_reset(&service, NULL); + zassert_equal(rc, -ENOTSUP, + "unexpected reset: %d\n", rc); + + reset_service(true); + + struct onoff_transitions onoff_transitions = *service.onoff.transitions; + onoff_transitions.reset = NULL; + replace_service_onoff(&onoff_transitions); + + zassert_false(service_has_error(&service), + "missing error"); + rc = service_reset(&service, NULL); + 
zassert_equal(rc, -EALREADY, + "unexpected reset: %d\n", rc); + + service.onoff_request_rv = -3; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + + zassert_true(service_has_error(&service), + "missing error"); + + rc = service_reset(&service, NULL); + zassert_equal(rc, -ENOTSUP, + "unexpected reset: %d\n", rc); +} + +static void test_reset(void) +{ + struct operation operation; + struct operation *op = &operation; + int rc = 0; + + reset_service(true); + + zassert_false(service_has_error(&service), + "missing error"); + rc = service_reset(&service, NULL); + zassert_equal(rc, -EALREADY, + "unexpected reset: %d\n", rc); + + service.onoff_request_rv = -3; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + + zassert_true(service_has_error(&service), + "missing error"); + + rc = service_reset(&service, NULL); + zassert_true(rc >= 0, + "unexpected reset: %d\n", rc); + + zassert_false(service_has_error(&service), + "reset failed"); +} + +static void test_notifying_reset(void) +{ + struct operation operation; + struct operation *op = &operation; + int res; + int rc = 0; + struct onoff_client oocli; + + reset_service(true); + + memset(&oocli, 0, sizeof(oocli)); + sys_notify_init_spinwait(&oocli.notify); + + zassert_false(service_has_error(&service), + "missing error"); + rc = service_reset(&service, &oocli); + zassert_equal(rc, -EALREADY, + "unexpected reset: %d\n", rc); + + service.onoff_request_rv = -3; + + operation_init_spinwait(&operation); + rc = service_submit(&service, op, 0); + + zassert_true(service_has_error(&service), + "missing error"); + + service.async_onoff = true; + + rc = service_reset(&service, &oocli); + zassert_true(rc >= 0, + "unexpected reset: %d\n", rc); + + zassert_true(service_has_error(&service), + "missing error"); + + rc = sys_notify_fetch_result(&oocli.notify, &res); + zassert_equal(rc, -EAGAIN, + "unexpected fetch async: %d", rc); + + int reset_res = 21; + + 
service_onoff_notify(reset_res); + + zassert_false(service_has_error(&service), + "reset failed"); + + rc = sys_notify_fetch_result(&oocli.notify, &res); + zassert_equal(rc, 0, + "unexpected fetch complete: %d", rc); + zassert_equal(res, reset_res, + "unexpected completion: %d", res); +} + +void test_main(void) +{ + ztest_test_suite(queued_operation_api, + ztest_unit_test(test_notification_spinwait), + ztest_unit_test(test_notification_signal), + ztest_unit_test(test_notification_callback), + ztest_unit_test(test_sync_priority), + ztest_unit_test(test_special_priority), + ztest_unit_test(test_resubmit_priority), + ztest_unit_test(test_missing_validation), + ztest_unit_test(test_success_validation), + ztest_unit_test(test_failed_validation), + ztest_unit_test(test_callback_validation), + ztest_unit_test(test_priority_validation), + ztest_unit_test(test_async_idle), + ztest_unit_test(test_cancel_active), + ztest_unit_test(test_cancel_inactive), + ztest_unit_test(test_onoff_success), + ztest_unit_test(test_onoff_start_sync_failure), + ztest_unit_test(test_onoff_start_failure), + ztest_unit_test(test_onoff_restart), + ztest_unit_test(test_onoff_stop_failure), + ztest_unit_test(test_has_error), + ztest_unit_test(test_reset_notsup), + ztest_unit_test(test_reset), + ztest_unit_test(test_notifying_reset) + ); + ztest_run_test_suite(queued_operation_api); +} diff --git a/tests/lib/queued_operation/testcase.yaml b/tests/lib/queued_operation/testcase.yaml new file mode 100644 index 0000000000000..8558dedb9892b --- /dev/null +++ b/tests/lib/queued_operation/testcase.yaml @@ -0,0 +1,3 @@ +tests: + libraries.queued_operation: + tags: queued_operation timer