Skip to content

Commit

Permalink
[nrf fromtree] kernel/queue: Remove interior use of k_poll()
Browse files Browse the repository at this point in the history
The k_queue data structure, when CONFIG_POLL was enabled, would
inexplicably use k_poll() as its blocking mechanism instead of the
original wait_q/pend() code.  This was actually racy, see commit
b173e43.  The code was structured as a condition variable: using
a spinlock around the queue data before deciding to block.  But unlike
pend_current_thread(), k_poll() cannot atomically release a lock.

A workaround had been in place for this, and then accidentally
reverted (both by me!) because the code looked "wrong".

This is just fragile, there's no reason to have two implementations of
k_queue_get().  Remove.

Note that this also removes a test case in the work_queue test where
(when CONFIG_POLL was enabled, but not otherwise) it was checking for
the ability to immediately cancel a delayed work item that was
submitted with a timeout of K_NO_WAIT (i.e. "queue it immediately").
This DOES NOT work with the original/non-poll queue backend, and has
never been a documented behavior of k_delayed_work_submit_to_queue()
under any circumstances.  I don't know why we were testing this.

Fixes zephyrproject-rtos#25904

Signed-off-by: Andy Ross <andrew.j.ross@intel.com>
(cherry picked from commit 99c2d2d)
Signed-off-by: Joakim Andersson <joakim.andersson@nordicsemi.no>
  • Loading branch information
Andy Ross authored and joerchan committed Jun 10, 2020
1 parent 3a31123 commit bbd71e2
Show file tree
Hide file tree
Showing 3 changed files with 8 additions and 70 deletions.
13 changes: 4 additions & 9 deletions include/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -2149,12 +2149,9 @@ static inline u32_t k_cycle_get_32(void)
struct k_queue {
sys_sflist_t data_q;
struct k_spinlock lock;
union {
_wait_q_t wait_q;

_POLL_EVENT;
};
_wait_q_t wait_q;

_POLL_EVENT;
_OBJECT_TRACING_NEXT_PTR(k_queue)
_OBJECT_TRACING_LINKED_FLAG
};
Expand All @@ -2163,10 +2160,8 @@ struct k_queue {
{ \
.data_q = SYS_SLIST_STATIC_INIT(&obj.data_q), \
.lock = { }, \
{ \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
_POLL_EVENT_OBJ_INIT(obj) \
}, \
.wait_q = Z_WAIT_Q_INIT(&obj.wait_q), \
_POLL_EVENT_OBJ_INIT(obj) \
_OBJECT_TRACING_INIT \
}

Expand Down
57 changes: 4 additions & 53 deletions kernel/queue.c
Original file line number Diff line number Diff line change
Expand Up @@ -100,36 +100,31 @@ static inline void z_vrfy_k_queue_init(struct k_queue *queue)
#include <syscalls/k_queue_init_mrsh.c>
#endif

#if !defined(CONFIG_POLL)
/* Wake a thread that was pending on the queue: stash @data as the
 * swap return value (retrieved by the waiter via _current->base.swap_data)
 * with a return code of 0, then move the thread to the ready queue.
 * Caller is expected to hold the queue lock and reschedule afterwards.
 */
static void prepare_thread_to_run(struct k_thread *thread, void *data)
{
z_thread_return_value_set_with_data(thread, 0, data);
z_ready_thread(thread);
}
#endif /* CONFIG_POLL */

#ifdef CONFIG_POLL
static inline void handle_poll_events(struct k_queue *queue, u32_t state)
{
#ifdef CONFIG_POLL
z_handle_obj_poll_events(&queue->poll_events, state);
}
#endif
}

void z_impl_k_queue_cancel_wait(struct k_queue *queue)
{
k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
struct k_thread *first_pending_thread;

first_pending_thread = z_unpend_first_thread(&queue->wait_q);

if (first_pending_thread != NULL) {
prepare_thread_to_run(first_pending_thread, NULL);
}
#else
handle_poll_events(queue, K_POLL_STATE_CANCELLED);
#endif /* !CONFIG_POLL */

handle_poll_events(queue, K_POLL_STATE_CANCELLED);
z_reschedule(&queue->lock, key);
}

Expand All @@ -146,7 +141,6 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
bool alloc)
{
k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
struct k_thread *first_pending_thread;

first_pending_thread = z_unpend_first_thread(&queue->wait_q);
Expand All @@ -156,7 +150,6 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
z_reschedule(&queue->lock, key);
return 0;
}
#endif /* !CONFIG_POLL */

/* Only need to actually allocate if no threads are pending */
if (alloc) {
Expand All @@ -173,12 +166,9 @@ static s32_t queue_insert(struct k_queue *queue, void *prev, void *data,
} else {
sys_sfnode_init(data, 0x0);
}
sys_sflist_insert(&queue->data_q, prev, data);

#if defined(CONFIG_POLL)
sys_sflist_insert(&queue->data_q, prev, data);
handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* CONFIG_POLL */

z_reschedule(&queue->lock, key);
return 0;
}
Expand Down Expand Up @@ -238,7 +228,6 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
}

k_spinlock_key_t key = k_spin_lock(&queue->lock);
#if !defined(CONFIG_POLL)
struct k_thread *thread = NULL;

if (head != NULL) {
Expand All @@ -255,13 +244,8 @@ int k_queue_append_list(struct k_queue *queue, void *head, void *tail)
sys_sflist_append_list(&queue->data_q, head, tail);
}

#else
sys_sflist_append_list(&queue->data_q, head, tail);
handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE);
#endif /* !CONFIG_POLL */

z_reschedule(&queue->lock, key);

return 0;
}

Expand Down Expand Up @@ -292,32 +276,6 @@ int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list)
return 0;
}

#if defined(CONFIG_POLL)
/* CONFIG_POLL backend for k_queue_get(): block on a k_poll event until
 * the queue signals data available (or the timeout expires), then pop
 * one element under the queue lock.
 *
 * NOTE(review): per the commit message above, this is racy — k_poll()
 * cannot atomically release the queue lock the way z_pend_curr() does,
 * so another thread may consume the element between k_poll() returning
 * and the spinlock being taken below; the peek can then yield NULL even
 * though the poll reported data.  This is why the function is being
 * removed.
 *
 * Returns the dequeued node's payload, or NULL on poll error/timeout or
 * if the queue was found empty after wakeup.
 */
static void *k_queue_poll(struct k_queue *queue, k_timeout_t timeout)
{
struct k_poll_event event;
int err;
k_spinlock_key_t key;
void *val;

k_poll_event_init(&event, K_POLL_TYPE_FIFO_DATA_AVAILABLE,
K_POLL_MODE_NOTIFY_ONLY, queue);

event.state = K_POLL_STATE_NOT_READY;
err = k_poll(&event, 1, timeout);

/* -EAGAIN is a timeout; fall through so an element that arrived
 * just before expiry can still be returned.  Any other error is
 * reported to the caller as NULL.
 */
if (err && err != -EAGAIN) {
return NULL;
}

/* Pop (get=true) the head node under the lock; may be NULL if the
 * data was consumed by another thread in the meantime (see NOTE).
 */
key = k_spin_lock(&queue->lock);
val = z_queue_node_peek(sys_sflist_get(&queue->data_q), true);
k_spin_unlock(&queue->lock, key);

return val;
}
#endif /* CONFIG_POLL */

void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
{
k_spinlock_key_t key = k_spin_lock(&queue->lock);
Expand All @@ -337,16 +295,9 @@ void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout)
return NULL;
}

#if defined(CONFIG_POLL)
k_spin_unlock(&queue->lock, key);

return k_queue_poll(queue, timeout);

#else
int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout);

return (ret != 0) ? NULL : _current->base.swap_data;
#endif /* CONFIG_POLL */
}

#ifdef CONFIG_USERSPACE
Expand Down
8 changes: 0 additions & 8 deletions tests/kernel/workq/work_queue/src/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -286,14 +286,6 @@ static void coop_delayed_work_cancel_main(int arg1, int arg2)

TC_PRINT(" - Cancel delayed work from coop thread\n");
k_delayed_work_cancel(&delayed_tests[1].work);

#if defined(CONFIG_POLL)
k_delayed_work_submit(&delayed_tests[2].work,
K_NO_WAIT /* Submit immediately */);

TC_PRINT(" - Cancel pending delayed work from coop thread\n");
k_delayed_work_cancel(&delayed_tests[2].work);
#endif
}

/**
Expand Down

0 comments on commit bbd71e2

Please sign in to comment.