
Commit 56ab584

kernel: thread: k_thread_foreach_unlocked: Implement

Implement thread foreach processing with limited locking. This allows
the per-thread processing to take more time, at the cost of possibly
missing the processing of some threads when the thread list is modified
while the iteration is in progress.

Signed-off-by: Radoslaw Koppel <radoslaw.koppel@nordicsemi.no>
1 parent 71adcf9 commit 56ab584

2 files changed: +52 −4 lines

include/kernel.h

Lines changed: 34 additions & 4 deletions
@@ -619,15 +619,45 @@ typedef void (*k_thread_user_cb_t)(const struct k_thread *thread,
  * @param user_data Pointer to user data.
  *
  * @note CONFIG_THREAD_MONITOR must be set for this function
- * to be effective. Also this API uses irq_lock to protect the
- * _kernel.threads list which means creation of new threads and
- * terminations of existing threads are blocked until this
- * API returns.
+ * to be effective.
+ * @note This API uses @ref k_spin_lock to protect the _kernel.threads
+ * list which means creation of new threads and terminations of existing
+ * threads are blocked until this API returns.
  *
  * @return N/A
  */
 extern void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data);
 
+/**
+ * @brief Iterate over all the threads in the system without locking.
+ *
+ * This routine works exactly the same like @ref k_thread_foreach
+ * but unlocks interrupts when user_cb is executed.
+ *
+ * @param user_cb Pointer to the user callback function.
+ * @param user_data Pointer to user data.
+ *
+ * @note CONFIG_THREAD_MONITOR must be set for this function
+ * to be effective.
+ * @note This API uses @ref k_spin_lock only when accessing the _kernel.threads
+ * queue elements. It unlocks it during user callback function processing.
+ * If a new task is created when this @c foreach function is in progress,
+ * the added new task would not be included in the enumeration.
+ * If a task is aborted during this enumeration, there would be a race here
+ * and there is a possibility that this aborted task would be included in the
+ * enumeration.
+ * @note If the task is aborted and the memory occupied by its @c k_thread
+ * structure is reused when this @c k_thread_foreach_unlocked is in progress
+ * it might even lead to the system behave unstable.
+ * This function may never return, as it would follow some @c next task
+ * pointers treating given pointer as a pointer to the k_thread structure
+ * while it is something different right now.
+ * Do not reuse the memory that was occupied by k_thread structure of aborted
+ * task if it was aborted after this function was called in any context.
+ */
+extern void k_thread_foreach_unlocked(
+	k_thread_user_cb_t user_cb, void *user_data);
+
 /** @} */
 
 /**
kernel/thread.c

Lines changed: 18 additions & 0 deletions
@@ -55,6 +55,24 @@ void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
 #endif
 }
 
+void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
+{
+#if defined(CONFIG_THREAD_MONITOR)
+	struct k_thread *thread;
+	k_spinlock_key_t key;
+
+	__ASSERT(user_cb != NULL, "user_cb can not be NULL");
+
+	key = k_spin_lock(&lock);
+	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
+		k_spin_unlock(&lock, key);
+		user_cb(thread, user_data);
+		key = k_spin_lock(&lock);
+	}
+	k_spin_unlock(&lock, key);
+#endif
+}
+
 bool k_is_in_isr(void)
 {
 	return arch_is_in_isr();
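For contrast, the existing k_thread_foreach() (whose closing lines appear as
context above) holds the list lock across every callback invocation, which is
why its documentation says thread creation and termination are blocked until
it returns. The sketch below paraphrases that documented behavior for
comparison; it is not copied from the tree, so treat names and details as
approximate.

/* Rough sketch of the locked variant's structure, for comparison only. */
void k_thread_foreach_locked_sketch(k_thread_user_cb_t user_cb, void *user_data)
{
#if defined(CONFIG_THREAD_MONITOR)
	struct k_thread *thread;
	k_spinlock_key_t key;

	key = k_spin_lock(&lock);
	/* The lock stays held for the whole walk: creation and termination
	 * of threads are blocked until every callback has returned.
	 */
	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		user_cb(thread, user_data);
	}
	k_spin_unlock(&lock, key);
#endif
}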
