Merge branch 'for-6.13/block' into for-next
* for-6.13/block:
  block: fix ordering between checking BLK_MQ_S_STOPPED request adding
  block: fix ordering between checking QUEUE_FLAG_QUIESCED request adding
  block: fix missing dispatching request when queue is started or unquiesced
axboe committed Oct 16, 2024
2 parents dcdcc9b + 1936f2e commit f82eab0
Showing 2 changed files with 55 additions and 13 deletions.
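All three fixes close variants of the same store-buffering race: a submitter publishes a request and then checks a queue-state flag (BLK_MQ_S_STOPPED or QUEUE_FLAG_QUIESCED), while a restarter clears the flag and then checks for pending requests. Unless each side puts a full memory barrier between its store and its load, both sides can read stale values and the request is never dispatched. A minimal user-space C11 sketch of the pattern (names are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool queue_stopped = true;	/* plays BLK_MQ_S_STOPPED  */
static atomic_int  dispatch_len  = 0;		/* plays the dispatch list */

/* Submitter: publish the request, then test the flag. */
static bool submitter_should_run(void)
{
	atomic_fetch_add_explicit(&dispatch_len, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() side */
	return !atomic_load_explicit(&queue_stopped, memory_order_relaxed);
}

/* Restarter: clear the flag, then test for pending work. */
static bool restarter_should_run(void)
{
	atomic_store_explicit(&queue_stopped, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() side */
	return atomic_load_explicit(&dispatch_len, memory_order_relaxed) > 0;
}

int main(void)
{
	/*
	 * Called sequentially here for brevity; the interesting case is
	 * two threads racing. With both fences, at least one function
	 * must return true, so somebody runs the queue. Drop either
	 * fence and both may return false, stranding the request.
	 */
	printf("submitter runs queue: %d\n", submitter_should_run());
	printf("restarter runs queue: %d\n", restarter_should_run());
	return 0;
}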
block/blk-mq.c (42 additions, 13 deletions)
@@ -2227,6 +2227,24 @@ void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
 
+static inline bool blk_mq_hw_queue_need_run(struct blk_mq_hw_ctx *hctx)
+{
+	bool need_run;
+
+	/*
+	 * When queue is quiesced, we may be switching io scheduler, or
+	 * updating nr_hw_queues, or other things, and we can't run queue
+	 * any more, even blk_mq_hctx_has_pending() can't be called safely.
+	 *
+	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
+	 * quiesced.
+	 */
+	__blk_mq_run_dispatch_ops(hctx->queue, false,
+		need_run = !blk_queue_quiesced(hctx->queue) &&
+		blk_mq_hctx_has_pending(hctx));
+	return need_run;
+}
+
 /**
  * blk_mq_run_hw_queue - Start to run a hardware queue.
  * @hctx: Pointer to the hardware queue to run.
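The lockless test is deliberately kept inside __blk_mq_run_dispatch_ops(): blk_mq_quiesce_queue() waits for an RCU grace period (SRCU for BLK_MQ_F_BLOCKING queues), so any checker still inside a dispatch-ops section is guaranteed to finish before quiescing completes.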
@@ -2247,20 +2265,23 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 
 	might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
 
-	/*
-	 * When queue is quiesced, we may be switching io scheduler, or
-	 * updating nr_hw_queues, or other things, and we can't run queue
-	 * any more, even __blk_mq_hctx_has_pending() can't be called safely.
-	 *
-	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
-	 * quiesced.
-	 */
-	__blk_mq_run_dispatch_ops(hctx->queue, false,
-		need_run = !blk_queue_quiesced(hctx->queue) &&
-		blk_mq_hctx_has_pending(hctx));
-
-	if (!need_run)
-		return;
+	need_run = blk_mq_hw_queue_need_run(hctx);
+	if (!need_run) {
+		unsigned long flags;
+
+		/*
+		 * Synchronize with blk_mq_unquiesce_queue(): because we check
+		 * if the hw queue is quiesced locklessly above, we need to use
+		 * ->queue_lock to make sure we see the up-to-date status and
+		 * do not miss rerunning the hw queue.
+		 */
+		spin_lock_irqsave(&hctx->queue->queue_lock, flags);
+		need_run = blk_mq_hw_queue_need_run(hctx);
+		spin_unlock_irqrestore(&hctx->queue->queue_lock, flags);
+
+		if (!need_run)
+			return;
+	}
 
 	if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
 		blk_mq_delay_run_hw_queue(hctx, 0);
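Only the negative answer is re-validated under the lock: a stale "need run" merely triggers a harmless extra queue run, while a stale "no need" would lose a dispatch. The comment names blk_mq_unquiesce_queue() as the other side; a simplified sketch of that side (assumed shape, condensed from the real helper, which also counts quiesce depth) shows why ->queue_lock is the right synchronization point:

/* Sketch, not verbatim kernel source: unquiesce clears the flag under
 * ->queue_lock, so a re-check that takes the same lock either observes
 * the cleared flag, or runs entirely before the clear, in which case
 * unquiesce itself reruns the queues afterwards. */
void sketch_unquiesce_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->queue_lock, flags);
	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
	spin_unlock_irqrestore(&q->queue_lock, flags);

	/* Dispatch requests that were inserted while quiesced. */
	blk_mq_run_hw_queues(q, true);
}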
@@ -2417,6 +2438,12 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 		return;
 
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
+	/*
+	 * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
+	 * clearing of BLK_MQ_S_STOPPED above and the checking of the
+	 * dispatch list in the subsequent routine.
+	 */
+	smp_mb__after_atomic();
 	blk_mq_run_hw_queue(hctx, async);
 }
 EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
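smp_mb__after_atomic() is used rather than a bare smp_mb() because it directly follows the atomic clear_bit(): on architectures whose atomic read-modify-write operations already imply full ordering it reduces to a compiler barrier, while elsewhere it emits the full barrier that the smp_mb() in blk_mq_hctx_stopped() pairs with.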
@@ -2647,6 +2674,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
 		blk_mq_insert_request(rq, 0);
+		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
 
@@ -2677,6 +2705,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
 		blk_mq_insert_request(rq, 0);
+		blk_mq_run_hw_queue(hctx, false);
 		return BLK_STS_OK;
 	}
 
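The two direct-issue hunks above are the third fix: previously a request hitting a stopped or quiesced queue was only parked via blk_mq_insert_request(), and if the flag was cleared concurrently the restarting side could already have sampled an empty dispatch list, leaving the request stranded. Calling blk_mq_run_hw_queue() after the insert closes that window, and the call is effectively a no-op while the queue is still stopped or quiesced, so it is cheap.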
block/blk-mq.h (13 additions, 0 deletions)
@@ -230,6 +230,19 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 
 static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
 {
+	/* Fast path: hardware queue is not stopped most of the time. */
+	if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+		return false;
+
+	/*
+	 * This barrier is used to order the adding to the dispatch list
+	 * above and the test of BLK_MQ_S_STOPPED below. Pairs with the
+	 * memory barrier in blk_mq_start_stopped_hw_queue() so that the
+	 * dispatch code either sees BLK_MQ_S_STOPPED cleared or sees a
+	 * non-empty dispatch list, avoiding missed dispatch of requests.
+	 */
+	smp_mb();
+
 	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
 