Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

created by mistake #277

Closed
wants to merge 29 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
29 commits
Select commit Hold shift + click to select a range
60dc958
block: add ability to flag write back caching on a device
axboe Apr 12, 2016
ee1cf78
sd: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
a614eb7
NVMe: switch to using blk_queue_write_cache()
axboe Apr 12, 2016
49c6b62
drbd: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
33b7035
loop: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
14937a3
mtip32xx: remove call to blk_queue_flush()
axboe Apr 12, 2016
488c6bb
nbd: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
ba09f79
osdblk: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
9810844
skd_main: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
af9a3b6
ps3disk: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
100a546
virtio_blk: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
59037cf
bcache: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
10b73da
dm: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
2ddefd2
xen-blkfront: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
3171764
ide-disk: update to using blk_queue_write_cache()
axboe Mar 30, 2016
a88a43c
md: update to using blk_queue_write_cache()
axboe Mar 30, 2016
6762c30
mmc/block: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
793e392
mtd: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
f7a85c4
um: switch to using blk_queue_write_cache()
axboe Mar 30, 2016
d7709d6
block: kill blk_queue_flush()
axboe Mar 30, 2016
c48fcd8
block: kill off q->flush_flags
axboe Apr 13, 2016
462ca58
block: add WRITE_BG
axboe Apr 14, 2016
cb9ee03
writeback: add wbc_to_write_cmd()
axboe Apr 14, 2016
047b239
writeback: use WRITE_BG for kupdate and background writeback
axboe Apr 14, 2016
a9b3027
writeback: track if we're sleeping on progress in balance_dirty_pages()
axboe Apr 14, 2016
944f7bd
block: add code to track actual device queue depth
axboe Mar 30, 2016
1d591a3
block: add scalable completion tracking of requests
axboe Apr 16, 2016
605f106
writeback: throttle buffered writeback
axboe Apr 19, 2016
46e8281
Merge branch 'wb-buf-throttle' of git://git.kernel.dk/linux-block int…
Apr 20, 2016
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions Documentation/block/queue-sysfs.txt
Original file line number Diff line number Diff line change
Expand Up @@ -141,6 +141,15 @@ control of this block device to that new IO scheduler. Note that writing
an IO scheduler name to this file will attempt to load that IO scheduler
module, if it isn't already present in the system.

write_cache (RW)
----------------
When read, this file will display whether the device has write back
caching enabled or not. It will return "write back" for the former
case, and "write through" for the latter. Writing to this file can
change the kernel's view of the device, but it doesn't alter the
device state. This means that it might not be safe to toggle the
setting from "write back" to "write through", since that will also
eliminate cache flushes issued by the kernel.


Jens Axboe <jens.axboe@oracle.com>, February 2009
4 changes: 2 additions & 2 deletions Documentation/block/writeback_cache_control.txt
Original file line number Diff line number Diff line change
Expand Up @@ -71,15 +71,15 @@ requests that have a payload. For devices with volatile write caches the
driver needs to tell the block layer that it supports flushing caches by
doing:

blk_queue_flush(sdkp->disk->queue, REQ_FLUSH);
blk_queue_write_cache(sdkp->disk->queue, true, false);

and handle empty REQ_FLUSH requests in its prep_fn/request_fn. Note that
REQ_FLUSH requests with a payload are automatically turned into a sequence
of an empty REQ_FLUSH request followed by the actual write by the block
layer. For devices that also support the FUA bit the block layer needs
to be told to pass through the REQ_FUA bit using:

blk_queue_flush(sdkp->disk->queue, REQ_FLUSH | REQ_FUA);
blk_queue_write_cache(sdkp->disk->queue, true, true);

and the driver must handle write requests that have the REQ_FUA bit set
in prep_fn/request_fn. If the FUA bit is not natively supported the block
Expand Down
2 changes: 1 addition & 1 deletion arch/um/drivers/ubd_kern.c
Original file line number Diff line number Diff line change
Expand Up @@ -862,7 +862,7 @@ static int ubd_add(int n, char **error_out)
goto out;
}
ubd_dev->queue->queuedata = ubd_dev;
blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
blk_queue_write_cache(ubd_dev->queue, true, false);

blk_queue_max_segments(ubd_dev->queue, MAX_SG);
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
Expand Down
2 changes: 1 addition & 1 deletion block/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o blk-wb.o \
blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/
Expand Down
22 changes: 21 additions & 1 deletion block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@

#include "blk.h"
#include "blk-mq.h"
#include "blk-wb.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
Expand Down Expand Up @@ -880,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,

fail:
blk_free_flush_queue(q->fq);
blk_wb_exit(q);
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
Expand Down Expand Up @@ -1395,6 +1397,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
blk_wb_requeue(q->rq_wb, rq);

if (rq->cmd_flags & REQ_QUEUED)
blk_queue_end_tag(q, rq);
Expand Down Expand Up @@ -1485,6 +1488,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
/* this is a bio leak */
WARN_ON(req->bio != NULL);

blk_wb_done(q->rq_wb, req);

/*
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
Expand Down Expand Up @@ -1714,6 +1719,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
struct request *req;
unsigned int request_count = 0;
bool wb_acct;

/*
* low level driver can indicate that it wants pages above a
Expand Down Expand Up @@ -1766,6 +1772,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
}

get_rq:
wb_acct = blk_wb_wait(q->rq_wb, bio, q->queue_lock);

/*
* This sync check and mask will be re-done in init_request_from_bio(),
* but we need to set it earlier to expose the sync flag to the
Expand All @@ -1781,11 +1789,16 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
*/
req = get_request(q, rw_flags, bio, GFP_NOIO);
if (IS_ERR(req)) {
if (wb_acct)
__blk_wb_done(q->rq_wb);
bio->bi_error = PTR_ERR(req);
bio_endio(bio);
goto out_unlock;
}

if (wb_acct)
req->cmd_flags |= REQ_BUF_INFLIGHT;

/*
* After dropping the lock and possibly sleeping here, our request
* may now be mergeable after it had proven unmergeable (above).
Expand Down Expand Up @@ -1963,7 +1976,8 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
if (!nr_sectors) {
err = 0;
Expand Down Expand Up @@ -2513,6 +2527,9 @@ void blk_start_request(struct request *req)
{
blk_dequeue_request(req);

req->issue_time = ktime_to_ns(ktime_get());
blk_wb_issue(req->q->rq_wb, req);

/*
* We are now handing the request to the hardware, initialize
* resid_len to full count and add the timeout handler.
Expand Down Expand Up @@ -2580,6 +2597,8 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)

trace_block_rq_complete(req->q, req, nr_bytes);

blk_stat_add(&req->q->rq_stats[rq_data_dir(req)], req);

if (!req->bio)
return false;

Expand Down Expand Up @@ -2746,6 +2765,7 @@ void blk_finish_request(struct request *req, int error)
blk_unprep_request(req);

blk_account_io_done(req);
blk_wb_done(req->q->rq_wb, req);

if (req->end_io)
req->end_io(req, error);
Expand Down
11 changes: 6 additions & 5 deletions block/blk-flush.c
Original file line number Diff line number Diff line change
Expand Up @@ -95,17 +95,18 @@ enum {
static bool blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
unsigned int policy = 0;

if (blk_rq_sectors(rq))
policy |= REQ_FSEQ_DATA;

if (fflags & REQ_FLUSH) {
if (fflags & (1UL << QUEUE_FLAG_WC)) {
if (rq->cmd_flags & REQ_FLUSH)
policy |= REQ_FSEQ_PREFLUSH;
if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
(rq->cmd_flags & REQ_FUA))
policy |= REQ_FSEQ_POSTFLUSH;
}
return policy;
Expand Down Expand Up @@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
void blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
unsigned int fflags = q->flush_flags; /* may change, cache */
unsigned long fflags = q->queue_flags; /* may change, cache */
unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

Expand All @@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
* REQ_FLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_FLUSH;
if (!(fflags & REQ_FUA))
if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
rq->cmd_flags &= ~REQ_FUA;

/*
Expand Down
47 changes: 47 additions & 0 deletions block/blk-mq-sysfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -247,6 +247,47 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
return ret;
}

static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	unsigned int i;

	/*
	 * Walk every software queue mapped to this hardware queue and
	 * reset both of its per-direction stat buckets (0 = read,
	 * 1 = write) back to their initial state.
	 */
	hctx_for_each_ctx(hctx, ctx, i) {
		int dir;

		for (dir = 0; dir < 2; dir++)
			blk_stat_init(&ctx->stat[dir]);
	}
}

/*
 * sysfs ->store handler for the per-hctx "stats" attribute: any write,
 * regardless of content, clears the accumulated statistics.  The written
 * bytes themselves are ignored; we consume them all by returning count.
 */
static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
					  const char *page, size_t count)
{
	blk_mq_stat_clear(hctx);
	return count;
}

/*
 * Format one blk_rq_stat into "page", prefixed by "pre" (e.g. "read :").
 * Returns the number of characters written so the caller can append the
 * next line at page + ret.
 *
 * Fix: the %llu conversion for nr_samples previously received a value
 * cast to (long long); a %llu specifier must be matched with an
 * unsigned long long argument, so cast accordingly.  The %lld fields
 * keep their signed casts.
 */
static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
			pre, (unsigned long long) stat->nr_samples,
			(long long) stat->mean, (long long) stat->min,
			(long long) stat->max);
}

/*
 * sysfs ->show handler for the per-hctx "stats" attribute: aggregate the
 * per-ctx statistics into a local snapshot and print one line per
 * direction.  Index 0 is reads, index 1 is writes.
 */
static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	struct blk_rq_stat snapshot[2];
	ssize_t written;
	int dir;

	for (dir = 0; dir < 2; dir++)
		blk_stat_init(&snapshot[dir]);

	blk_hctx_stat_get(hctx, snapshot);

	written = print_stat(page, &snapshot[0], "read :");
	written += print_stat(page + written, &snapshot[1], "write:");
	return written;
}

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
Expand Down Expand Up @@ -304,6 +345,11 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
.attr = {.name = "io_poll", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_poll_show,
};
/*
 * "stats" attribute: readable by everyone (dumps the read/write request
 * stats via blk_mq_hw_sysfs_stat_show), writable by root only (any write
 * clears the stats via blk_mq_hw_sysfs_stat_store).
 */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
	.attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
	.show = blk_mq_hw_sysfs_stat_show,
	.store = blk_mq_hw_sysfs_stat_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
Expand All @@ -314,6 +360,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
&blk_mq_hw_sysfs_poll.attr,
&blk_mq_hw_sysfs_stat.attr,
NULL,
};

Expand Down
Loading