scst.h: Refactor wait_event_locked() to enhance usability and clarity
1. Make waiters sleep in TASK_UNINTERRUPTIBLE state by default, since
   the current code never checks whether a sleeping process was
   interrupted by a signal.

2. Prefix all SCST wait_event-related macros with 'scst_'. This helps to
   distinguish SCST-specific macros from those provided by the Linux
   kernel itself.

3. Allow an error code to be returned when a process sleeping in an
   interruptible (non-TASK_UNINTERRUPTIBLE) state is interrupted by a
   signal.

4. Split the wait_event_locked() macro by lock type into the following
   new macros: scst_wait_event_lock(), scst_wait_event_lock_bh(), and
   scst_wait_event_lock_irq(). A typical call-site conversion is shown
   below.
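
For example, the istrd() conversion from nthread.c (shown in full in the
diff below):

	/* Before: lock type passed as a token; slept in TASK_INTERRUPTIBLE. */
	wait_event_locked(p->rd_waitQ, test_rd_list(p), lock_bh,
			  p->rd_lock);

	/* After: lock type is part of the macro name; sleeps in
	 * TASK_UNINTERRUPTIBLE by default. */
	scst_wait_event_lock_bh(p->rd_waitQ, test_rd_list(p), p->rd_lock);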
lnocturno committed Jun 20, 2023
1 parent 334d29c commit d8894cb
Showing 6 changed files with 136 additions and 53 deletions.
6 changes: 2 additions & 4 deletions iscsi-scst/kernel/nthread.c
@@ -960,8 +960,7 @@ int istrd(void *arg)
 
 	spin_lock_bh(&p->rd_lock);
 	while (!kthread_should_stop()) {
-		wait_event_locked(p->rd_waitQ, test_rd_list(p), lock_bh,
-				  p->rd_lock);
+		scst_wait_event_lock_bh(p->rd_waitQ, test_rd_list(p), p->rd_lock);
 		scst_do_job_rd(p);
 	}
 	spin_unlock_bh(&p->rd_lock);
@@ -1613,8 +1612,7 @@ int istwr(void *arg)
 
 	spin_lock_bh(&p->wr_lock);
 	while (!kthread_should_stop()) {
-		wait_event_locked(p->wr_waitQ, test_wr_list(p), lock_bh,
-				  p->wr_lock);
+		scst_wait_event_lock_bh(p->wr_waitQ, test_wr_list(p), p->wr_lock);
 		scst_do_job_wr(p);
 	}
 	spin_unlock_bh(&p->wr_lock);
150 changes: 118 additions & 32 deletions scst/include/scst.h
@@ -36,6 +36,9 @@
 #endif
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+#include <linux/sched/signal.h>
+#endif
 #include <linux/wait.h>
 #include <linux/cpumask.h>
 #include <linux/dlm.h>
@@ -5382,59 +5385,142 @@ void scst_dev_inquiry_data_changed(struct scst_device *dev);
  */
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0) && \
     !defined(CONFIG_SUSE_KERNEL)
-static inline void
+static inline long
 prepare_to_wait_exclusive_head(wait_queue_head_t *wq_head,
 			       wait_queue_t *wq_entry, int state)
 {
 	unsigned long flags;
+	long ret = 0;
 
 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+
 	spin_lock_irqsave(&wq_head->lock, flags);
-	if (list_empty(&wq_entry->task_list))
-		__add_wait_queue(wq_head, wq_entry);
-	set_current_state(state);
+	if (signal_pending_state(state, current)) {
+		/*
+		 * Exclusive waiter must not fail if it was selected by wakeup,
+		 * it should "consume" the condition we were waiting for.
+		 *
+		 * The caller will recheck the condition and return success if
+		 * we were already woken up, we can not miss the event because
+		 * wakeup locks/unlocks the same wq_head->lock.
+		 *
+		 * But we need to ensure that set-condition + wakeup after that
+		 * can't see us, it should wake up another exclusive waiter if
+		 * we fail.
+		 */
+		list_del_init(&wq_entry->task_list);
+		ret = -ERESTARTSYS;
+	} else {
+		if (list_empty(&wq_entry->task_list))
+			__add_wait_queue(wq_head, wq_entry);
+		set_current_state(state);
+	}
 	spin_unlock_irqrestore(&wq_head->lock, flags);
+
+	return ret;
 }
 #else
-static inline void
+static inline long
 prepare_to_wait_exclusive_head(struct wait_queue_head *wq_head,
 			       struct wait_queue_entry *wq_entry, int state)
 {
 	unsigned long flags;
+	long ret = 0;
 
 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
+
 	spin_lock_irqsave(&wq_head->lock, flags);
-	if (list_empty(&wq_entry->entry))
-		__add_wait_queue(wq_head, wq_entry);
-	set_current_state(state);
+	if (signal_pending_state(state, current)) {
+		/*
+		 * Exclusive waiter must not fail if it was selected by wakeup,
+		 * it should "consume" the condition we were waiting for.
+		 *
+		 * The caller will recheck the condition and return success if
+		 * we were already woken up, we can not miss the event because
+		 * wakeup locks/unlocks the same wq_head->lock.
+		 *
+		 * But we need to ensure that set-condition + wakeup after that
+		 * can't see us, it should wake up another exclusive waiter if
+		 * we fail.
+		 */
+		list_del_init(&wq_entry->entry);
+		ret = -ERESTARTSYS;
+	} else {
+		if (list_empty(&wq_entry->entry))
+			__add_wait_queue(wq_head, wq_entry);
+		set_current_state(state);
+	}
 	spin_unlock_irqrestore(&wq_head->lock, flags);
+
+	return ret;
 }
 #endif
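
Both variants now report -ERESTARTSYS when the sleep is aborted by a
signal, and whether that can happen at all is decided by
signal_pending_state(), newly pulled in via <linux/sched/signal.h>
above. For reference, a sketch of its upstream definition (the exact
signature varies across kernel versions):

	static inline int signal_pending_state(unsigned int state,
					       struct task_struct *p)
	{
		/* Uninterruptible sleeps are never aborted by signals. */
		if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
			return 0;
		if (!signal_pending(p))
			return 0;
		/* Killable sleeps abort only on fatal signals. */
		return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
	}

This is why the TASK_UNINTERRUPTIBLE wrappers defined below can safely
discard the helper's return value.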

-/**
- * wait_event_locked() - Wait until a condition becomes true.
- * @wq: Wait queue to wait on if @condition is false.
- * @condition: Condition to wait for. Can be any C expression.
- * @lock_type: One of lock, lock_bh or lock_irq.
- * @lock: A spinlock.
- *
- * Caller must hold lock of type @lock_type on @lock.
- */
-#define wait_event_locked(wq, condition, lock_type, lock) do {		\
-	if (!(condition)) {						\
-		DEFINE_WAIT(__wait);					\
-									\
-		do {							\
-			prepare_to_wait_exclusive_head(&(wq), &__wait,	\
-						       TASK_INTERRUPTIBLE); \
-			if (condition)					\
-				break;					\
-			spin_un ## lock_type(&(lock));			\
-			schedule();					\
-			spin_ ## lock_type(&(lock));			\
-		} while (!(condition));					\
-		finish_wait(&(wq), &__wait);				\
-	}								\
-} while (0)
+#define ___scst_wait_is_interruptible(state)				\
+	(!__builtin_constant_p(state) ||				\
+	 (state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
+
+#define ___scst_wait_event(wq_head, condition, state, ret, cmd)	\
+({									\
+	__label__ __out;						\
+	DEFINE_WAIT(__wq_entry);					\
+	long __ret = ret;	/* explicit shadow */			\
+									\
+	for (;;) {							\
+		long __int = prepare_to_wait_exclusive_head(&wq_head, &__wq_entry,\
+							    state);	\
+									\
+		if (condition)						\
+			break;						\
+									\
+		if (___scst_wait_is_interruptible(state) && __int) {	\
+			__ret = __int;					\
+			goto __out;					\
+		}							\
+									\
+		cmd;							\
+	}								\
+	finish_wait(&wq_head, &__wq_entry);				\
+__out:	__ret;								\
+})
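
___scst_wait_event() is a GCC statement expression that evaluates to
__ret, so interruptible variants can be layered on top of it even
though this commit only defines uninterruptible wrappers. A
hypothetical sketch (not part of this change) of what such a variant
could look like:

	/*
	 * Hypothetical interruptible flavor, NOT defined by this commit:
	 * evaluates to 0 once @condition is true, or to -ERESTARTSYS if a
	 * signal arrives first.
	 */
	#define scst_wait_event_interruptible_lock_bh(wq_head, condition, lock)\
	({								\
		long __res = 0;						\
									\
		if (!(condition))					\
			__res = ___scst_wait_event(wq_head, condition,	\
						   TASK_INTERRUPTIBLE, 0,\
						   spin_unlock_bh(&lock);\
						   schedule();		\
						   spin_lock_bh(&lock));\
		__res;							\
	})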

+#define __scst_wait_event_lock(wq_head, condition, lock)		\
+	(void)___scst_wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0,\
+				 spin_unlock(&lock);			\
+				 schedule();				\
+				 spin_lock(&lock))
+
+#define scst_wait_event_lock(wq_head, condition, lock)			\
+do {									\
+	if (condition)							\
+		break;							\
+	__scst_wait_event_lock(wq_head, condition, lock);		\
+} while (0)
+
+#define __scst_wait_event_lock_bh(wq_head, condition, lock)		\
+	(void)___scst_wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0,\
+				 spin_unlock_bh(&lock);			\
+				 schedule();				\
+				 spin_lock_bh(&lock))
+
+#define scst_wait_event_lock_bh(wq_head, condition, lock)		\
+do {									\
+	if (condition)							\
+		break;							\
+	__scst_wait_event_lock_bh(wq_head, condition, lock);		\
+} while (0)
+
+#define __scst_wait_event_lock_irq(wq_head, condition, lock)		\
+	(void)___scst_wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0,\
+				 spin_unlock_irq(&lock);		\
+				 schedule();				\
+				 spin_lock_irq(&lock))
+
+#define scst_wait_event_lock_irq(wq_head, condition, lock)		\
+do {									\
+	if (condition)							\
+		break;							\
+	__scst_wait_event_lock_irq(wq_head, condition, lock);		\
+} while (0)
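
All three wrappers keep the original wait_event_locked() contract: the
caller must already hold the lock, and a waker must make the condition
true and call wake_up() while holding that same lock, so the wakeup
cannot race with the condition check. A producer-side sketch matching
the istrd() waiter above (list field names are assumed, for
illustration only):

	spin_lock_bh(&p->rd_lock);
	/* Make the waiter's condition (test_rd_list(p)) true... */
	list_add_tail(&cmd->rd_list_entry, &p->rd_list);
	/* ...then wake the waiter under the same lock. */
	wake_up(&p->rd_waitQ);
	spin_unlock_bh(&p->rd_lock);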

 #if defined(CONFIG_SCST_DEBUG) || defined(CONFIG_SCST_TRACING)
10 changes: 5 additions & 5 deletions scst/src/dev_handlers/scst_user.c
@@ -2233,9 +2233,9 @@ static int dev_user_get_next_cmd(struct scst_user_dev *dev,
 	TRACE_ENTRY();
 
 	while (1) {
-		wait_event_locked(dev->udev_cmd_threads.cmd_list_waitQ,
-				  test_cmd_threads(dev, can_block), lock_irq,
-				  dev->udev_cmd_threads.cmd_list_lock);
+		scst_wait_event_lock_irq(dev->udev_cmd_threads.cmd_list_waitQ,
+					 test_cmd_threads(dev, can_block),
+					 dev->udev_cmd_threads.cmd_list_lock);
 
 		dev_user_process_scst_commands(dev);
 
@@ -4053,8 +4053,8 @@ static int dev_user_cleanup_thread(void *arg)
 
 	spin_lock(&cleanup_lock);
 	while (!kthread_should_stop()) {
-		wait_event_locked(cleanup_list_waitQ, test_cleanup_list(),
-				  lock, cleanup_lock);
+		scst_wait_event_lock(cleanup_list_waitQ, test_cleanup_list(),
+				     cleanup_lock);
 
 		/*
 		 * We have to poll devices, because commands can go from SCST
3 changes: 1 addition & 2 deletions scst/src/scst_lib.c
@@ -14978,8 +14978,7 @@ int scst_ext_block_dev(struct scst_device *dev, ext_blocker_done_fn_t done_fn,
 		b->ext_blocker_done_fn = scst_sync_ext_blocking_done;
 		*((void **)&b->ext_blocker_data[0]) = &w;
 
-		wait_event_locked(w, (dev->on_dev_cmd_count == 0),
-				  lock_bh, dev->dev_lock);
+		scst_wait_event_lock_bh(w, dev->on_dev_cmd_count == 0, dev->dev_lock);
 
 		spin_unlock_bh(&dev->dev_lock);
 	} else {
4 changes: 2 additions & 2 deletions scst/src/scst_sysfs.c
@@ -470,8 +470,8 @@ static int sysfs_work_thread_fn(void *arg)
 	while (!kthread_should_stop()) {
 		if (one_time_only && !test_sysfs_work_list())
 			break;
-		wait_event_locked(sysfs_work_waitQ, test_sysfs_work_list(),
-				  lock, sysfs_work_lock);
+		scst_wait_event_lock(sysfs_work_waitQ, test_sysfs_work_list(),
+				     sysfs_work_lock);
 		scst_process_sysfs_works();
 	}
 	spin_unlock(&sysfs_work_lock);
16 changes: 8 additions & 8 deletions scst/src/scst_targ.c
@@ -4510,9 +4510,9 @@ int scst_init_thread(void *arg)
 
 	spin_lock_irq(&scst_init_lock);
 	while (!kthread_should_stop()) {
-		wait_event_locked(scst_init_cmd_list_waitQ,
-				  test_init_cmd_list(),
-				  lock_irq, scst_init_lock);
+		scst_wait_event_lock_irq(scst_init_cmd_list_waitQ,
+					 test_init_cmd_list(),
+					 scst_init_lock);
 		scst_do_job_init();
 	}
 	spin_unlock_irq(&scst_init_lock);
@@ -6794,9 +6794,9 @@ int scst_tm_thread(void *arg)
 
 	spin_lock_irq(&scst_mcmd_lock);
 	while (!kthread_should_stop()) {
-		wait_event_locked(scst_mgmt_cmd_list_waitQ,
-				  test_mgmt_cmd_list(), lock_irq,
-				  scst_mcmd_lock);
+		scst_wait_event_lock_irq(scst_mgmt_cmd_list_waitQ,
+					 test_mgmt_cmd_list(),
+					 scst_mcmd_lock);
 
 		while (!list_empty(&scst_active_mgmt_cmd_list)) {
 			int rc;
@@ -7610,8 +7610,8 @@ int scst_global_mgmt_thread(void *arg)
 
 	spin_lock_irq(&scst_mgmt_lock);
 	while (!kthread_should_stop()) {
-		wait_event_locked(scst_mgmt_waitQ, test_mgmt_list(), lock_irq,
-				  scst_mgmt_lock);
+		scst_wait_event_lock_irq(scst_mgmt_waitQ, test_mgmt_list(),
+					 scst_mgmt_lock);
 
 		while (!list_empty(&scst_sess_init_list)) {
 			sess = list_first_entry(&scst_sess_init_list,
