From 3c08b0931eedd04c530040499fadeccab50ed646 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 14 Oct 2021 13:20:22 -1000 Subject: [PATCH 001/205] blk-cgroup: blk_cgroup_bio_start() should use irq-safe operations on blkg->iostat_cpu c3df5fb57fe8 ("cgroup: rstat: fix A-A deadlock on 32bit around u64_stats_sync") made u64_stats updates irq-safe to avoid A-A deadlocks. Unfortunately, the conversion missed one in blk_cgroup_bio_start(). Fix it. Fixes: 2d146aa3aa84 ("mm: memcontrol: switch to rstat") Cc: stable@vger.kernel.org # v5.13+ Reported-by: syzbot+9738c8815b375ce482a1@syzkaller.appspotmail.com Signed-off-by: Tejun Heo Link: https://lore.kernel.org/r/YWi7NrQdVlxD6J9W@slm.duckdns.org Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 38b9f7684952a..9a1c5839dd469 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio) { int rwd = blk_cgroup_io_type(bio), cpu; struct blkg_iostat_set *bis; + unsigned long flags; cpu = get_cpu(); bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu); - u64_stats_update_begin(&bis->sync); + flags = u64_stats_update_begin_irqsave(&bis->sync); /* * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split @@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio) } bis->cur.ios[rwd]++; - u64_stats_update_end(&bis->sync); + u64_stats_update_end_irqrestore(&bis->sync, flags); if (cgroup_subsys_on_dfl(io_cgrp_subsys)) cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu); put_cpu(); From 348332e000697b4ca82ef96719e02876434b8346 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:12 +0200 Subject: [PATCH 002/205] mm: don't include in blk-cgroup.h pulls in blkdev.h and thus pretty much all the block headers. Break this dependency chain by turning wbc_blkcg_css into a macro and dropping the blk-cgroup.h include. 
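As a macro, the expansion still refers to blkcg_root_css and the bdi_writeback layout, but only at the call sites, so any caller of wbc_blkcg_css() now has to include blk-cgroup.h itself. A minimal call-site sketch, not part of the patch: example_attach_wbc_css() is a made-up name, while wbc_blkcg_css() and bio_associate_blkg_from_css() are the real helpers involved.

    #include <linux/writeback.h>
    #include <linux/blk-cgroup.h>
    #include <linux/bio.h>

    static void example_attach_wbc_css(struct bio *bio,
                                       struct writeback_control *wbc)
    {
            /*
             * The macro expands here, where blk-cgroup.h (and thus
             * blkcg_root_css) is already visible, so writeback.h itself
             * no longer needs the heavy include.
             */
            bio_associate_blkg_from_css(bio, wbc_blkcg_css(wbc));
    }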
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-2-hch@lst.de Signed-off-by: Jens Axboe --- drivers/gpu/drm/i915/i915_utils.h | 1 + fs/btrfs/inode.c | 1 + fs/quota/quota.c | 1 + include/linux/writeback.h | 14 +++++--------- lib/random32.c | 1 + 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 5259edacde380..066a9118c3748 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -30,6 +30,7 @@ #include #include #include +#include struct drm_i915_private; struct timer_list; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 487533c35ddb6..4a9077c524448 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 2bcc9a6f1bfc0..052f143e2e0e1 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d1f65adf6a266..8eb165760752b 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -11,7 +11,6 @@ #include #include #include -#include struct bio; @@ -109,15 +108,12 @@ static inline int wbc_to_write_flags(struct writeback_control *wbc) return flags; } -static inline struct cgroup_subsys_state * -wbc_blkcg_css(struct writeback_control *wbc) -{ #ifdef CONFIG_CGROUP_WRITEBACK - if (wbc->wb) - return wbc->wb->blkcg_css; -#endif - return blkcg_root_css; -} +#define wbc_blkcg_css(wbc) \ + ((wbc)->wb ? (wbc)->wb->blkcg_css : blkcg_root_css) +#else +#define wbc_blkcg_css(wbc) (blkcg_root_css) +#endif /* CONFIG_CGROUP_WRITEBACK */ /* * A wb_domain represents a domain that wb's (bdi_writeback's) belong to diff --git a/lib/random32.c b/lib/random32.c index 4d0e05e471d72..a57a0e18819d0 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include From e41d12f539f7c8bac9b0897031fee6cc9158a6be Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:13 +0200 Subject: [PATCH 003/205] mm: don't include in There is no need to pull blk-cgroup.h and thus blkdev.h in here, so break the include chain. 
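The mechanism is the usual one for breaking an include chain: a header that only passes pointers to a type around needs a forward declaration, not the type's full definition, which moves down into the few .c files that actually dereference it. A hedged sketch of the pattern (example_header.h and example_offline() are made-up names; struct blkcg is the real type the hunk below forward-declares):

    /* example_header.h: only passes pointers around */
    struct blkcg;                           /* forward declaration is enough */
    void example_offline(struct blkcg *blkcg);

    /* example_user.c: the one place that needs the full definition */
    #include <linux/blk-cgroup.h>
    #include "example_header.h"

    void example_offline(struct blkcg *blkcg)
    {
            /* ... walk blkcg members here ... */
    }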
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-3-hch@lst.de Signed-off-by: Jens Axboe --- arch/powerpc/platforms/cell/spufs/inode.c | 1 + block/blk-iolatency.c | 1 + block/blk-mq.c | 1 + block/bounce.c | 1 + drivers/md/dm-ima.c | 1 + fs/btrfs/compression.c | 1 + fs/btrfs/ctree.c | 1 + fs/f2fs/compress.c | 1 + fs/orangefs/super.c | 1 + fs/ramfs/inode.c | 1 + include/linux/backing-dev.h | 3 ++- mm/backing-dev.c | 3 ++- mm/swapfile.c | 2 +- 13 files changed, 15 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index bed05b644c2c5..cb25acccd746e 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index c0545f9da549c..6593c7123b97e 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -74,6 +74,7 @@ #include #include #include +#include #include "blk-rq-qos.h" #include "blk-stat.h" #include "blk.h" diff --git a/block/blk-mq.c b/block/blk-mq.c index bc026372de439..0fefe00cebd4e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/block/bounce.c b/block/bounce.c index 05fc7148489d7..7af1a72835b99 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c index 2c5edfbd77117..957999998d703 100644 --- a/drivers/md/dm-ima.c +++ b/drivers/md/dm-ima.c @@ -12,6 +12,7 @@ #include "dm-ima.h" #include +#include #include #include #include diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 7869ad12bc6e1..ddc4f5436cc91 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84627cbd5b5b5..66290b214f2bc 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index c1bf9ad4c2207..20a083dc90423 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 2f2e430461b21..8bb0a53a658b0 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -11,6 +11,7 @@ #include #include +#include /* a cache for orangefs-inode objects (i.e. 
orangefs inode private data) */ static struct kmem_cache *orangefs_inode_cache; diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 65e7e56005b8f..e2302342a67f4 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "internal.h" struct ramfs_mount_opts { diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index ac7f231b88258..843bf4be675f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -15,10 +15,11 @@ #include #include #include -#include #include #include +struct blkcg; + static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi) { kref_get(&bdi->refcnt); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 4a9d4e27d0d9b..5245744437dc9 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -2,8 +2,9 @@ #include #include -#include #include +#include +#include #include #include #include diff --git a/mm/swapfile.c b/mm/swapfile.c index 22d10f7138487..30db362749c06 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include From ccdf774189b6466457ca9c7de1fe9ed18547d249 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:14 +0200 Subject: [PATCH 004/205] mm: don't include in Move inode_to_bdi out of line to avoid having to include blkdev.h. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-4-hch@lst.de Signed-off-by: Jens Axboe --- drivers/mtd/mtdsuper.c | 1 + fs/ntfs/file.c | 1 + fs/ntfs3/file.c | 1 + fs/orangefs/inode.c | 2 +- include/linux/backing-dev.h | 16 +--------------- mm/backing-dev.c | 16 ++++++++++++++++ 6 files changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c index 38b6aa849c638..5ff001140ef4a 100644 --- a/drivers/mtd/mtdsuper.c +++ b/drivers/mtd/mtdsuper.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "mtdcore.h" diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index ab4f3362466d0..373dbb6276570 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -5,6 +5,7 @@ * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc. */ +#include #include #include #include diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 43b1451bff539..a3cd3c3f091ec 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index c1bb4c4b5d672..e5e3e500ed462 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -10,7 +10,7 @@ * Linux VFS inode operations. 
*/ -#include +#include #include #include "protocol.h" #include "orangefs-kernel.h" diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 843bf4be675f3..4ac7ce0960139 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -134,20 +133,7 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb) return test_bit(WB_writeback_running, &wb->state); } -static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) -{ - struct super_block *sb; - - if (!inode) - return &noop_backing_dev_info; - - sb = inode->i_sb; -#ifdef CONFIG_BLOCK - if (sb_is_blkdev_sb(sb)) - return I_BDEV(inode)->bd_disk->bdi; -#endif - return sb->s_bdi; -} +struct backing_dev_info *inode_to_bdi(struct inode *inode); static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) { diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 5245744437dc9..c878d995af06e 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -978,6 +978,22 @@ void bdi_put(struct backing_dev_info *bdi) } EXPORT_SYMBOL(bdi_put); +struct backing_dev_info *inode_to_bdi(struct inode *inode) +{ + struct super_block *sb; + + if (!inode) + return &noop_backing_dev_info; + + sb = inode->i_sb; +#ifdef CONFIG_BLOCK + if (sb_is_blkdev_sb(sb)) + return I_BDEV(inode)->bd_disk->bdi; +#endif + return sb->s_bdi; +} +EXPORT_SYMBOL(inode_to_bdi); + const char *bdi_dev_name(struct backing_dev_info *bdi) { if (!bdi || !bdi->dev) From 518d55051a8c368f1ba8ba1bed837a582f27a584 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:15 +0200 Subject: [PATCH 005/205] mm: remove spurious blkdev.h includes Various files have acquired spurious includes of over time. Remove them. 
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-5-hch@lst.de Signed-off-by: Jens Axboe --- mm/filemap.c | 1 - mm/highmem.c | 1 - mm/mempool.c | 1 - mm/nommu.c | 1 - mm/readahead.c | 1 - mm/shmem.c | 1 - 6 files changed, 6 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index dae481293b5d9..44b4b551e430f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/highmem.c b/mm/highmem.c index 4212ad0e4a195..471d9779a7f47 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/mempool.c b/mm/mempool.c index 0b8afbec3e358..b933d0fc21b8b 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include "slab.h" diff --git a/mm/nommu.c b/mm/nommu.c index 02d2427b8f9e8..41ef204e74820 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/readahead.c b/mm/readahead.c index 41b75d76d36e1..e71e719e36c90 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/shmem.c b/mm/shmem.c index b5860f4a2738e..2ca4a341652c6 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -59,7 +59,6 @@ static struct vfsmount *shm_mnt; #include #include #include -#include #include #include #include From dcbfa221b57b6ddf794c6dfc3fd7813ede368052 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:16 +0200 Subject: [PATCH 006/205] arch: remove spurious blkdev.h includes Various files have acquired spurious includes of over time. Remove them. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-6-hch@lst.de Signed-off-by: Jens Axboe --- arch/mips/rb532/prom.c | 1 - arch/mips/sibyte/common/cfe.c | 1 - arch/mips/sibyte/swarm/setup.c | 1 - arch/openrisc/mm/init.c | 1 - 4 files changed, 4 deletions(-) diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c index 23ad8dd9aa5e0..b116937155474 100644 --- a/arch/mips/rb532/prom.c +++ b/arch/mips/rb532/prom.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c index a3323f8dcc1b8..1a504294d85f8 100644 --- a/arch/mips/sibyte/common/cfe.c +++ b/arch/mips/sibyte/common/cfe.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 538a2791b48ca..f07b15dd1c1a3 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index cfef61a7b6c2a..97305bde1b169 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -25,7 +25,6 @@ #include #include #include -#include /* for initrd_* */ #include #include From 545c6647d2d9f3bf8a086d5ff47fb85e5c5dca28 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:17 +0200 Subject: [PATCH 007/205] kernel: remove spurious blkdev.h includes Various files have acquired spurious includes of over time. Remove them. 
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-7-hch@lst.de Signed-off-by: Jens Axboe --- kernel/acct.c | 1 - kernel/exit.c | 1 - kernel/fork.c | 1 - 3 files changed, 3 deletions(-) diff --git a/kernel/acct.c b/kernel/acct.c index 23a7ab8e6cbc8..3df53cf1dcd5e 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -60,7 +60,6 @@ #include #include -#include /* sector_div */ #include #include diff --git a/kernel/exit.c b/kernel/exit.c index 91a43e57a32eb..a53863dafb3d7 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -48,7 +48,6 @@ #include #include /* for audit_free() */ #include -#include #include #include #include diff --git a/kernel/fork.c b/kernel/fork.c index 38681ad44c76b..67679e3933f2f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -76,7 +76,6 @@ #include #include #include -#include #include #include #include From 6a5850d12977eb0a814c4977da389cebdc558863 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:18 +0200 Subject: [PATCH 008/205] sched: move the include out of kernel/sched/sched.h Only core.c needs blkdev.h, so move the #include statement there. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-8-hch@lst.de Signed-off-by: Jens Axboe --- kernel/sched/core.c | 2 +- kernel/sched/sched.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1bba4128a3e68..92ef7b68198c4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -13,7 +13,7 @@ #include "sched.h" #include - +#include #include #include diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3d3e5793e1172..66128dfc05fb3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -37,7 +37,6 @@ #include #include -#include #include #include #include From 1d9433cdd04adf7cfd5e3ef81f5acb29d80aae9b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:19 +0200 Subject: [PATCH 009/205] block: remove the unused rq_end_sector macro Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-9-hch@lst.de Signed-off-by: Jens Axboe --- drivers/md/dm-rq.c | 1 - include/linux/elevator.h | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index a896dea9750e4..579ab6183d4d8 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -7,7 +7,6 @@ #include "dm-core.h" #include "dm-rq.h" -#include /* for rq_end_sector() */ #include #define DM_MSG_PREFIX "core-rq" diff --git a/include/linux/elevator.h b/include/linux/elevator.h index ef9ceead3db13..80633f3b600c2 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -162,7 +162,6 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define ELEVATOR_INSERT_FLUSH 5 #define ELEVATOR_INSERT_SORT_MERGE 6 -#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) From 90138237a56282f99d71130ab7c68903a2eba5a7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:20 +0200 Subject: [PATCH 010/205] block: remove the unused blk_queue_state enum Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-10-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 5 ----- 1 
file changed, 5 deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 12b9dbcc980ee..9e367509bea10 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -262,11 +262,6 @@ enum blk_eh_timer_return { BLK_EH_RESET_TIMER, /* reset timer and try again */ }; -enum blk_queue_state { - Queue_down, - Queue_up, -}; - #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ From 713e4e11088875bf7aee3df81b167c8b2f6c5d65 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:21 +0200 Subject: [PATCH 011/205] block: remove the cmd_size field from struct request_queue Entirely unused. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-11-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 9e367509bea10..6737e484280d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -544,8 +544,6 @@ struct request_queue { bool mq_sysfs_init_done; - size_t cmd_size; - #define BLK_MAX_WRITE_HINTS 5 u64 write_hints[BLK_MAX_WRITE_HINTS]; }; From 9778ac77c2027827ffdbb33d3e936b3a0ae9f0f9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:22 +0200 Subject: [PATCH 012/205] block: remove the struct blk_queue_ctx forward declaration This type doesn't exist at all, so no need to forward declare it. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-12-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 6737e484280d3..4bcbb1ae2d66a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -253,8 +253,6 @@ static inline unsigned short req_get_ioprio(struct request *req) #include -struct blk_queue_ctx; - struct bio_vec; enum blk_eh_timer_return { From 2e9bc3465ac54d282b855b073409c2c3a7d1ae00 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:23 +0200 Subject: [PATCH 013/205] block: move elevator.h to block/ Except for the features passed to blk_queue_required_elevator_features, elevator.h is only needed internally to the block layer. Move the ELEVATOR_F_* definitions to blkdev.h, and the move elevator.h to block/, dropping all the spurious includes outside of that. 
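The ELEVATOR_F_* flags stay visible in blkdev.h precisely because drivers pass them to blk_queue_required_elevator_features(). A short sketch of such a caller, not part of the patch (my_zoned_setup() is a made-up name; the function and the flag are the ones moved in the hunks below):

    #include <linux/blkdev.h>

    static void my_zoned_setup(struct request_queue *q)
    {
            /*
             * Zoned drivers ask for a scheduler that preserves sequential
             * write ordering; the flag now lives in blkdev.h, so callers
             * no longer need elevator.h at all.
             */
            blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
    }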
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-13-hch@lst.de Signed-off-by: Jens Axboe --- block/bfq-cgroup.c | 2 +- block/bfq-iosched.c | 2 +- block/blk-mq-sched.h | 1 + block/blk-mq-tag.h | 2 ++ block/blk.h | 2 ++ block/elevator.c | 2 +- {include/linux => block}/elevator.h | 20 +++----------------- block/kyber-iosched.c | 2 +- block/mq-deadline.c | 2 +- drivers/block/amiflop.c | 1 - drivers/scsi/lpfc/lpfc.h | 1 + include/linux/blkdev.h | 11 +++++++++-- init/main.c | 1 - 13 files changed, 23 insertions(+), 26 deletions(-) rename {include/linux => block}/elevator.h (93%) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 85b8e1c3a762d..882ec4bc51ad3 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -6,13 +6,13 @@ #include #include #include -#include #include #include #include #include #include +#include "elevator.h" #include "bfq-iosched.h" #ifdef CONFIG_BFQ_CGROUP_DEBUG diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 480e1a1348596..b5ef23f63d518 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -117,7 +117,6 @@ #include #include #include -#include #include #include #include @@ -127,6 +126,7 @@ #include +#include "elevator.h" #include "blk.h" #include "blk-mq.h" #include "blk-mq-tag.h" diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 5246ae0407047..5181487db7923 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -2,6 +2,7 @@ #ifndef BLK_MQ_SCHED_H #define BLK_MQ_SCHED_H +#include "elevator.h" #include "blk-mq.h" #include "blk-mq-tag.h" diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 8ed55af084273..f0a0ee758a556 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -2,6 +2,8 @@ #ifndef INT_BLK_MQ_TAG_H #define INT_BLK_MQ_TAG_H +struct blk_mq_alloc_data; + /* * Tag address space map. */ diff --git a/block/blk.h b/block/blk.h index 6c3c00a8fe19d..f245699809050 100644 --- a/block/blk.h +++ b/block/blk.h @@ -12,6 +12,8 @@ #include "blk-mq.h" #include "blk-mq-sched.h" +struct elevator_type; + /* Max future timer expiry for timeouts */ #define BLK_MAX_TIMEOUT (5 * HZ) diff --git a/block/elevator.c b/block/elevator.c index ff45d8388f487..57be09cd7f6dd 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -40,6 +39,7 @@ #include +#include "elevator.h" #include "blk.h" #include "blk-mq-sched.h" #include "blk-pm.h" diff --git a/include/linux/elevator.h b/block/elevator.h similarity index 93% rename from include/linux/elevator.h rename to block/elevator.h index 80633f3b600c2..16cd8bdedb7ea 100644 --- a/include/linux/elevator.h +++ b/block/elevator.h @@ -1,17 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_ELEVATOR_H -#define _LINUX_ELEVATOR_H +#ifndef _ELEVATOR_H +#define _ELEVATOR_H #include #include -#ifdef CONFIG_BLOCK - struct io_cq; struct elevator_type; -#ifdef CONFIG_BLK_DEBUG_FS struct blk_mq_debugfs_attr; -#endif /* * Return values from elevator merger @@ -167,14 +163,4 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); #define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) -/* - * Elevator features. 
- */ - -/* Supports zoned block devices sequential write constraint */ -#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) -/* Supports scheduling on multiple hardware queues */ -#define ELEVATOR_F_MQ_AWARE (1U << 1) - -#endif /* CONFIG_BLOCK */ -#endif +#endif /* _ELEVATOR_H */ diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index a0ffbabfac2c6..53dafc4bcd72f 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -9,12 +9,12 @@ #include #include #include -#include #include #include #include +#include "elevator.h" #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 7f3c3932b723e..47f042fa6a688 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -20,6 +19,7 @@ #include +#include "elevator.h" #include "blk.h" #include "blk-mq.h" #include "blk-mq-debugfs.h" diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 8b1714021498c..b892e5185d6fa 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -64,7 +64,6 @@ #include #include #include -#include #include #include diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index befeb7c342903..337e6ed248218 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -22,6 +22,7 @@ *******************************************************************/ #include +#include #include #include diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4bcbb1ae2d66a..d815238f61ed9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -251,8 +251,6 @@ static inline unsigned short req_get_ioprio(struct request *req) return req->ioprio; } -#include - struct bio_vec; enum blk_eh_timer_return { @@ -1124,6 +1122,15 @@ extern void blk_queue_dma_alignment(struct request_queue *, int); extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); + +/* + * Elevator features for blk_queue_required_elevator_features: + */ +/* Supports zoned block devices sequential write constraint */ +#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) +/* Supports scheduling on multiple hardware queues */ +#define ELEVATOR_F_MQ_AWARE (1U << 1) + extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, diff --git a/init/main.c b/init/main.c index 3c4054a955458..4162d7f3179fb 100644 --- a/init/main.c +++ b/init/main.c @@ -83,7 +83,6 @@ #include #include #include -#include #include #include #include From 3ab0bc78e96bd03a5096e4801550926d81b3e19d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:24 +0200 Subject: [PATCH 014/205] block: drop unused includes in Drop various include not actually used in blkdev.h itself. 
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-14-hch@lst.de Signed-off-by: Jens Axboe --- drivers/md/dm-ps-historical-service-time.c | 1 + drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 1 + include/linux/blkdev.h | 7 ------- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c index 1856a1b125cc1..875bca30a0dd5 100644 --- a/drivers/md/dm-ps-historical-service-time.c +++ b/drivers/md/dm-ps-historical-service-time.c @@ -27,6 +27,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "multipath historical-service-time" diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 3ab669dc806f6..27884f3106ab8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -3,6 +3,7 @@ * Copyright (c) 2017 Hisilicon Limited. */ +#include #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v3_hw" diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d815238f61ed9..46a703394f7f4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -3,8 +3,6 @@ #define _LINUX_BLKDEV_H #include -#include -#include #include #include #include @@ -12,17 +10,12 @@ #include #include #include -#include -#include #include -#include #include -#include #include #include #include #include -#include #include struct module; From b81e0c2372e65e5627864ba034433b64b2fc73f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:25 +0200 Subject: [PATCH 015/205] block: drop unused includes in Drop various include not actually used in genhd.h itself, and move the remaning includes closer together. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-15-hch@lst.de Signed-off-by: Jens Axboe --- arch/um/drivers/ubd_kern.c | 1 + block/genhd.c | 1 + block/holder.c | 1 + block/partitions/core.c | 1 + drivers/block/amiflop.c | 1 + drivers/block/ataflop.c | 1 + drivers/block/floppy.c | 1 + drivers/block/swim.c | 1 + drivers/block/xen-blkfront.c | 1 + drivers/md/md.c | 1 + drivers/s390/block/dasd_genhd.c | 1 + drivers/scsi/sd.c | 1 + drivers/scsi/sg.c | 1 + drivers/scsi/sr.c | 1 + drivers/scsi/st.c | 1 + include/linux/genhd.h | 14 ++------------ include/linux/part_stat.h | 1 + 17 files changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index cd9dc0556e913..fefd343412c79 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/block/genhd.c b/block/genhd.c index b49858550fa6e..ffbdb9b245556 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include diff --git a/block/holder.c b/block/holder.c index 9dc084182337f..27cddce1b4461 100644 --- a/block/holder.c +++ b/block/holder.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include +#include struct bd_holder_disk { struct list_head list; diff --git a/block/partitions/core.c b/block/partitions/core.c index 58c4c362c94f9..3a4898433c434 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -5,6 +5,7 @@ * Copyright (C) 2020 Christoph Hellwig */ #include +#include #include #include #include diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 
b892e5185d6fa..2909fd9e72fb8 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -61,6 +61,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index a093644ac39fb..58e921ab57298 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -68,6 +68,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index fef79ea52e3ed..6288ce8884147 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -184,6 +184,7 @@ static int print_unex = 1; #include #include #include +#include #include #include #include diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 7ccc8d2a41bc6..3911d0833e1b9 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 72902104f1112..df0deb9277601 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/md.c b/drivers/md/md.c index 6c0c3d0d905aa..1b9bc8dbd95d7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index fa966e0db6ca9..3a6f3af240fa7 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c @@ -14,6 +14,7 @@ #define KMSG_COMPONENT "dasd" #include +#include #include #include diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 523bf2fdc2532..d8f6add416c0a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 8f05248920e8e..3c98f08dc25d9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -31,6 +31,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ #include #include #include +#include #include #include #include diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 8b17b35283aa5..115f7ef7a5def 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index ae8636d3780b6..9933722acfd96 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -32,6 +32,7 @@ static const char *verstr = "20160209"; #include #include #include +#include #include #include #include diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 0f5315c2b5a34..0b48a0cf42624 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -12,12 +12,10 @@ #include #include -#include -#include -#include #include #include -#include +#include +#include extern const struct device_type disk_type; extern struct device_type part_type; @@ -26,14 +24,6 @@ extern struct class block_class; #define DISK_MAX_PARTS 256 #define DISK_NAME_LEN 32 -#include -#include -#include -#include -#include -#include -#include - #define PARTITION_META_INFO_VOLNAMELTH 64 /* * Enough for the string representation of any kind of UUID plus NULL. 
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index d2558121d48c0..6f7949b2fd8dc 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -3,6 +3,7 @@ #define _LINUX_PART_STAT_H #include +#include struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; From badf7f64378796d460c79eb0f182fa7282eb65d5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:26 +0200 Subject: [PATCH 016/205] block: move a few merge helpers out of These are block-layer internal helpers, so move them to block/blk.h and block/blk-merge.c. Also update a comment a bit to use better grammar. Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-16-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-merge.c | 24 ++++++++++++++++ block/blk.h | 38 +++++++++++++++++++++++++ include/linux/blkdev.h | 64 ------------------------------------------ 3 files changed, 62 insertions(+), 64 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index 7a5c81c02c800..39f210da399a6 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -558,6 +558,23 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq) return queue_max_segments(rq->q); } +static inline unsigned int blk_rq_get_max_sectors(struct request *rq, + sector_t offset) +{ + struct request_queue *q = rq->q; + + if (blk_rq_is_passthrough(rq)) + return q->limits.max_hw_sectors; + + if (!q->limits.chunk_sectors || + req_op(rq) == REQ_OP_DISCARD || + req_op(rq) == REQ_OP_SECURE_ERASE) + return blk_queue_get_max_sectors(q, req_op(rq)); + + return min(blk_max_size_offset(q, offset, 0), + blk_queue_get_max_sectors(q, req_op(rq))); +} + static inline int ll_new_hw_segment(struct request *req, struct bio *bio, unsigned int nr_phys_segs) { @@ -718,6 +735,13 @@ static enum elv_merge blk_try_req_merge(struct request *req, return ELEVATOR_NO_MERGE; } +static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) +{ + if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b)) + return true; + return false; +} + /* * For non-mq, this has to be called with the request spinlock acquired. * For mq with scheduling, the appropriate queue wide lock should be held. diff --git a/block/blk.h b/block/blk.h index f245699809050..fa90691cfdab1 100644 --- a/block/blk.h +++ b/block/blk.h @@ -96,6 +96,44 @@ static inline bool bvec_gap_to_prev(struct request_queue *q, return __bvec_gap_to_prev(q, bprv, offset); } +static inline bool rq_mergeable(struct request *rq) +{ + if (blk_rq_is_passthrough(rq)) + return false; + + if (req_op(rq) == REQ_OP_FLUSH) + return false; + + if (req_op(rq) == REQ_OP_WRITE_ZEROES) + return false; + + if (req_op(rq) == REQ_OP_ZONE_APPEND) + return false; + + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) + return false; + if (rq->rq_flags & RQF_NOMERGE_FLAGS) + return false; + + return true; +} + +/* + * There are two different ways to handle DISCARD merges: + * 1) If max_discard_segments > 1, the driver treats every bio as a range and + * send the bios to controller together. The ranges don't need to be + * contiguous. + * 2) Otherwise, the request will be normal read/write requests. The ranges + * need to be contiguous. 
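From a driver's point of view, case 1 of the reworded DISCARD comment below is opted into by advertising more than one discard segment. An illustrative sketch, not part of the patch (my_setup_discard() is a made-up name; blk_queue_max_discard_segments() is an existing queue-limits helper):

    #include <linux/blkdev.h>

    static void my_setup_discard(struct request_queue *q)
    {
            /*
             * With more than one discard segment per request the block
             * layer may merge non-contiguous discard bios into a single
             * request; with the default of one segment, discards merge
             * like ordinary contiguous reads and writes.
             */
            blk_queue_max_discard_segments(q, 256);
    }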
+ */ +static inline bool blk_discard_mergable(struct request *req) +{ + if (req_op(req) == REQ_OP_DISCARD && + queue_max_discard_segments(req->q) > 1) + return true; + return false; +} + #ifdef CONFIG_BLK_DEV_INTEGRITY void blk_flush_integrity(void); bool __bio_integrity_endio(struct bio *); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 46a703394f7f4..be534040ca9c3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -745,37 +745,6 @@ static inline bool rq_is_sync(struct request *rq) return op_is_sync(rq->cmd_flags); } -static inline bool rq_mergeable(struct request *rq) -{ - if (blk_rq_is_passthrough(rq)) - return false; - - if (req_op(rq) == REQ_OP_FLUSH) - return false; - - if (req_op(rq) == REQ_OP_WRITE_ZEROES) - return false; - - if (req_op(rq) == REQ_OP_ZONE_APPEND) - return false; - - if (rq->cmd_flags & REQ_NOMERGE_FLAGS) - return false; - if (rq->rq_flags & RQF_NOMERGE_FLAGS) - return false; - - return true; -} - -static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) -{ - if (bio_page(a) == bio_page(b) && - bio_offset(a) == bio_offset(b)) - return true; - - return false; -} - static inline unsigned int blk_queue_depth(struct request_queue *q) { if (q->queue_depth) @@ -1030,23 +999,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, return min(q->limits.max_sectors, chunk_sectors); } -static inline unsigned int blk_rq_get_max_sectors(struct request *rq, - sector_t offset) -{ - struct request_queue *q = rq->q; - - if (blk_rq_is_passthrough(rq)) - return q->limits.max_hw_sectors; - - if (!q->limits.chunk_sectors || - req_op(rq) == REQ_OP_DISCARD || - req_op(rq) == REQ_OP_SECURE_ERASE) - return blk_queue_get_max_sectors(q, req_op(rq)); - - return min(blk_max_size_offset(q, offset, 0), - blk_queue_get_max_sectors(q, req_op(rq))); -} - static inline unsigned int blk_rq_count_bios(struct request *rq) { unsigned int nr_bios = 0; @@ -1490,22 +1442,6 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return offset << SECTOR_SHIFT; } -/* - * Two cases of handling DISCARD merge: - * If max_discard_segments > 1, the driver takes every bio - * as a range and send them to controller together. The ranges - * needn't to be contiguous. - * Otherwise, the bios/requests will be handled as same as - * others which should be contiguous. - */ -static inline bool blk_discard_mergable(struct request *req) -{ - if (req_op(req) == REQ_OP_DISCARD && - queue_max_discard_segments(req->q) > 1) - return true; - return false; -} - static inline int bdev_discard_alignment(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); From fe45e630a1035aea94c29016f2598bbde149bbe3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:27 +0200 Subject: [PATCH 017/205] block: move integrity handling out of Split the integrity/metadata handling definitions out into a new header. 
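After the split, a driver that exposes protection information only needs the new header for the registration side. A minimal sketch of that usage, not part of the patch (my_setup_integrity() and the 512-byte interval choice are assumptions for the example; the registration helpers, flags and the T10 PI profile are existing kernel symbols):

    #include <linux/blk-integrity.h>
    #include <linux/genhd.h>
    #include <linux/t10-pi.h>

    static void my_setup_integrity(struct gendisk *disk)
    {
            struct blk_integrity bi = {
                    .profile      = &t10_pi_type1_crc,
                    .tuple_size   = sizeof(struct t10_pi_tuple),
                    .interval_exp = 9,      /* 512-byte data intervals */
                    .flags        = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
            };

            /* the template is copied, so a stack variable is fine */
            blk_integrity_register(disk, &bi);
            blk_queue_max_integrity_segments(disk->queue, 1);
    }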
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-17-hch@lst.de Signed-off-by: Jens Axboe --- block/bdev.c | 1 + block/bio-integrity.c | 2 +- block/blk-core.c | 1 + block/blk-integrity.c | 2 +- block/blk-merge.c | 1 + block/blk-mq.c | 1 + block/keyslot-manager.c | 1 + block/t10-pi.c | 2 +- drivers/md/dm-bio-record.h | 1 + drivers/md/dm-crypt.c | 1 + drivers/md/dm-table.c | 1 + drivers/md/md.c | 1 + drivers/nvdimm/core.c | 1 + drivers/nvme/host/core.c | 1 + drivers/nvme/host/pci.c | 1 + drivers/nvme/host/rdma.c | 1 + drivers/nvme/target/io-cmd-bdev.c | 1 + drivers/nvme/target/rdma.c | 1 + drivers/scsi/scsi_lib.c | 1 + drivers/scsi/sd_dif.c | 2 +- drivers/scsi/virtio_scsi.c | 1 + drivers/target/target_core_iblock.c | 1 + include/linux/blk-integrity.h | 183 ++++++++++++++++++++++++++++ include/linux/blkdev.h | 183 ---------------------------- 24 files changed, 205 insertions(+), 187 deletions(-) create mode 100644 include/linux/blk-integrity.h diff --git a/block/bdev.c b/block/bdev.c index 485a258b0ab37..93b1188d7e58d 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 6b47cddbbca17..21234ff966d9b 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -6,7 +6,7 @@ * Written by: Martin K. Petersen */ -#include +#include #include #include #include diff --git a/block/blk-core.c b/block/blk-core.c index 4d8f5fe915887..d2c6caadef891 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/block/blk-integrity.c b/block/blk-integrity.c index 16d5d5338392a..cef534a7cbc98 100644 --- a/block/blk-integrity.c +++ b/block/blk-integrity.c @@ -6,7 +6,7 @@ * Written by: Martin K. 
Petersen */ -#include +#include #include #include #include diff --git a/block/blk-merge.c b/block/blk-merge.c index 39f210da399a6..5b4f23014df8a 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/block/blk-mq.c b/block/blk-mq.c index 0fefe00cebd4e..2539ba976949c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c index 2c4a55bea6ca1..1792159d12d18 100644 --- a/block/keyslot-manager.c +++ b/block/keyslot-manager.c @@ -35,6 +35,7 @@ #include #include #include +#include struct blk_ksm_keyslot { atomic_t slot_refs; diff --git a/block/t10-pi.c b/block/t10-pi.c index 00c203b2a921e..25a52a2a09a88 100644 --- a/block/t10-pi.c +++ b/block/t10-pi.c @@ -5,7 +5,7 @@ */ #include -#include +#include #include #include #include diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index a3b71350eec84..745e3ab4aa0af 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -8,6 +8,7 @@ #define DM_BIO_RECORD_H #include +#include /* * There are lots of mutable fields in the bio struct that get diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 916b7da16de25..292f7896f7333 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2111daaacabaf..1fa4d5582dca5 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/md.c b/drivers/md/md.c index 1b9bc8dbd95d7..ec09083ff0eff 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 7de592d7eff45..6a45fa91e8a3e 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f8dd664b2eda5..3d444b13cd697 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 149ecf73df384..896328271471b 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 042c594bc57e2..40317e1b91835 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 0fc2781ab9708..6139e1de50a66 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include "nvmet.h" diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 891174ccd44bb..38d1f292ecc20 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -5,6 +5,7 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#include #include #include #include diff --git 
a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 572673873ddf8..33fd9a01330ce 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 4cadb26070a8f..349950616adc5 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -6,7 +6,7 @@ * Written by: Martin K. Petersen */ -#include +#include #include #include diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 07d0250f17c3a..b8455fcbf18b6 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 4069a1edcfa34..d39b87e2ed100 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h new file mode 100644 index 0000000000000..8a038ea0717e4 --- /dev/null +++ b/include/linux/blk-integrity.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BLK_INTEGRITY_H +#define _LINUX_BLK_INTEGRITY_H + +#include + +struct request; + +enum blk_integrity_flags { + BLK_INTEGRITY_VERIFY = 1 << 0, + BLK_INTEGRITY_GENERATE = 1 << 1, + BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, + BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, +}; + +struct blk_integrity_iter { + void *prot_buf; + void *data_buf; + sector_t seed; + unsigned int data_size; + unsigned short interval; + const char *disk_name; +}; + +typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); +typedef void (integrity_prepare_fn) (struct request *); +typedef void (integrity_complete_fn) (struct request *, unsigned int); + +struct blk_integrity_profile { + integrity_processing_fn *generate_fn; + integrity_processing_fn *verify_fn; + integrity_prepare_fn *prepare_fn; + integrity_complete_fn *complete_fn; + const char *name; +}; + +#ifdef CONFIG_BLK_DEV_INTEGRITY +void blk_integrity_register(struct gendisk *, struct blk_integrity *); +void blk_integrity_unregister(struct gendisk *); +int blk_integrity_compare(struct gendisk *, struct gendisk *); +int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, + struct scatterlist *); +int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); + +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + struct blk_integrity *bi = &disk->queue->integrity; + + if (!bi->profile) + return NULL; + + return bi; +} + +static inline struct blk_integrity * +bdev_get_integrity(struct block_device *bdev) +{ + return blk_get_integrity(bdev->bd_disk); +} + +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return q->integrity.profile; +} + +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ + q->limits.max_integrity_segments = segs; +} + +static inline unsigned short +queue_max_integrity_segments(const struct request_queue *q) +{ + return q->limits.max_integrity_segments; +} + +/** + * bio_integrity_intervals - Return number of integrity intervals for a bio + * @bi: blk_integrity profile for device + * @sectors: Size of the bio in 512-byte sectors + * + * Description: The block layer calculates everything in 512 byte + * sectors but integrity metadata is done in terms 
of the data integrity + * interval size of the storage device. Convert the block layer sectors + * to the appropriate number of integrity intervals. + */ +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return sectors >> (bi->interval_exp - 9); +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return bio_integrity_intervals(bi, sectors) * bi->tuple_size; +} + +static inline bool blk_integrity_rq(struct request *rq) +{ + return rq->cmd_flags & REQ_INTEGRITY; +} + +/* + * Return the first bvec that contains integrity data. Only drivers that are + * limited to a single integrity segment should use this helper. + */ +static inline struct bio_vec *rq_integrity_vec(struct request *rq) +{ + if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) + return NULL; + return rq->bio->bi_integrity->bip_vec; +} +#else /* CONFIG_BLK_DEV_INTEGRITY */ +static inline int blk_rq_count_integrity_sg(struct request_queue *q, + struct bio *b) +{ + return 0; +} +static inline int blk_rq_map_integrity_sg(struct request_queue *q, + struct bio *b, + struct scatterlist *s) +{ + return 0; +} +static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) +{ + return NULL; +} +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return NULL; +} +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return false; +} +static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) +{ + return 0; +} +static inline void blk_integrity_register(struct gendisk *d, + struct blk_integrity *b) +{ +} +static inline void blk_integrity_unregister(struct gendisk *d) +{ +} +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ +} +static inline unsigned short +queue_max_integrity_segments(const struct request_queue *q) +{ + return 0; +} + +static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} + +static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, + unsigned int sectors) +{ + return 0; +} +static inline int blk_integrity_rq(struct request *rq) +{ + return 0; +} + +static inline struct bio_vec *rq_integrity_vec(struct request *rq) +{ + return NULL; +} +#endif /* CONFIG_BLK_DEV_INTEGRITY */ +#endif /* _LINUX_BLK_INTEGRITY_H */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index be534040ca9c3..56e60e5c09d07 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1555,189 +1555,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned lo #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ MODULE_ALIAS("block-major-" __stringify(major) "-*") -#if defined(CONFIG_BLK_DEV_INTEGRITY) - -enum blk_integrity_flags { - BLK_INTEGRITY_VERIFY = 1 << 0, - BLK_INTEGRITY_GENERATE = 1 << 1, - BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2, - BLK_INTEGRITY_IP_CHECKSUM = 1 << 3, -}; - -struct blk_integrity_iter { - void *prot_buf; - void *data_buf; - sector_t seed; - unsigned int data_size; - unsigned short interval; - const char *disk_name; -}; - -typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *); -typedef void (integrity_prepare_fn) (struct request *); -typedef void (integrity_complete_fn) (struct request *, unsigned int); - -struct blk_integrity_profile { - integrity_processing_fn *generate_fn; - integrity_processing_fn *verify_fn; - 
integrity_prepare_fn *prepare_fn; - integrity_complete_fn *complete_fn; - const char *name; -}; - -extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); -extern void blk_integrity_unregister(struct gendisk *); -extern int blk_integrity_compare(struct gendisk *, struct gendisk *); -extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, - struct scatterlist *); -extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); - -static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) -{ - struct blk_integrity *bi = &disk->queue->integrity; - - if (!bi->profile) - return NULL; - - return bi; -} - -static inline -struct blk_integrity *bdev_get_integrity(struct block_device *bdev) -{ - return blk_get_integrity(bdev->bd_disk); -} - -static inline bool -blk_integrity_queue_supports_integrity(struct request_queue *q) -{ - return q->integrity.profile; -} - -static inline bool blk_integrity_rq(struct request *rq) -{ - return rq->cmd_flags & REQ_INTEGRITY; -} - -static inline void blk_queue_max_integrity_segments(struct request_queue *q, - unsigned int segs) -{ - q->limits.max_integrity_segments = segs; -} - -static inline unsigned short -queue_max_integrity_segments(const struct request_queue *q) -{ - return q->limits.max_integrity_segments; -} - -/** - * bio_integrity_intervals - Return number of integrity intervals for a bio - * @bi: blk_integrity profile for device - * @sectors: Size of the bio in 512-byte sectors - * - * Description: The block layer calculates everything in 512 byte - * sectors but integrity metadata is done in terms of the data integrity - * interval size of the storage device. Convert the block layer sectors - * to the appropriate number of integrity intervals. - */ -static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, - unsigned int sectors) -{ - return sectors >> (bi->interval_exp - 9); -} - -static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, - unsigned int sectors) -{ - return bio_integrity_intervals(bi, sectors) * bi->tuple_size; -} - -/* - * Return the first bvec that contains integrity data. Only drivers that are - * limited to a single integrity segment should use this helper. 
- */ -static inline struct bio_vec *rq_integrity_vec(struct request *rq) -{ - if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) - return NULL; - return rq->bio->bi_integrity->bip_vec; -} - -#else /* CONFIG_BLK_DEV_INTEGRITY */ - -struct bio; -struct block_device; -struct gendisk; -struct blk_integrity; - -static inline int blk_integrity_rq(struct request *rq) -{ - return 0; -} -static inline int blk_rq_count_integrity_sg(struct request_queue *q, - struct bio *b) -{ - return 0; -} -static inline int blk_rq_map_integrity_sg(struct request_queue *q, - struct bio *b, - struct scatterlist *s) -{ - return 0; -} -static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) -{ - return NULL; -} -static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) -{ - return NULL; -} -static inline bool -blk_integrity_queue_supports_integrity(struct request_queue *q) -{ - return false; -} -static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) -{ - return 0; -} -static inline void blk_integrity_register(struct gendisk *d, - struct blk_integrity *b) -{ -} -static inline void blk_integrity_unregister(struct gendisk *d) -{ -} -static inline void blk_queue_max_integrity_segments(struct request_queue *q, - unsigned int segs) -{ -} -static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) -{ - return 0; -} - -static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi, - unsigned int sectors) -{ - return 0; -} - -static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi, - unsigned int sectors) -{ - return 0; -} - -static inline struct bio_vec *rq_integrity_vec(struct request *rq) -{ - return NULL; -} - -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - #ifdef CONFIG_BLK_INLINE_ENCRYPTION bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); From 24b83deb29b7f06a5573b65f2ce96f5482755d43 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Sep 2021 14:33:28 +0200 Subject: [PATCH 018/205] block: move struct request to blk-mq.h struct request is only used by blk-mq drivers, so move it and all related declarations to blk-mq.h. 
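Those drivers already include blk-mq.h for their blk_mq_ops, so hosting the definition there costs them nothing. A hedged sketch of the consumer side, not part of the patch (my_queue_rq() and my_mq_ops are made-up names; the hook signature and the start/end helpers are the normal blk-mq entry points):

    #include <linux/blk-mq.h>       /* now also provides struct request */

    static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;

            blk_mq_start_request(rq);
            /* a real driver would issue blk_rq_pos(rq)/blk_rq_bytes(rq) here */
            blk_mq_end_request(rq, BLK_STS_OK);
            return BLK_STS_OK;
    }

    static const struct blk_mq_ops my_mq_ops = {
            .queue_rq = my_queue_rq,
    };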
Signed-off-by: Christoph Hellwig Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20210920123328.1399408-18-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-crypto-fallback.c | 1 + block/blk-crypto-internal.h | 2 +- drivers/block/rnbd/rnbd-proto.h | 2 +- drivers/md/dm-verity-target.c | 1 + drivers/mmc/core/sd.c | 1 + drivers/target/target_core_file.c | 1 + drivers/target/target_core_iblock.c | 1 + include/linux/blk-mq.h | 465 +++++++++++++++++++++++++++ include/linux/blk_types.h | 2 - include/linux/blkdev.h | 469 +--------------------------- include/linux/blktrace_api.h | 2 +- include/linux/t10-pi.h | 2 +- include/scsi/scsi_device.h | 2 +- 13 files changed, 476 insertions(+), 475 deletions(-) diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c index c322176a1e099..ec4c7823541c8 100644 --- a/block/blk-crypto-fallback.c +++ b/block/blk-crypto-fallback.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "blk-crypto-internal.h" diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h index 0d36aae538d7b..2fb0d65a464ca 100644 --- a/block/blk-crypto-internal.h +++ b/block/blk-crypto-internal.h @@ -7,7 +7,7 @@ #define __LINUX_BLK_CRYPTO_INTERNAL_H #include -#include +#include /* Represents a crypto mode supported by blk-crypto */ struct blk_crypto_mode { diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h index c1bc5c0fef71d..de5d5a8df81d7 100644 --- a/drivers/block/rnbd/rnbd-proto.h +++ b/drivers/block/rnbd/rnbd-proto.h @@ -10,7 +10,7 @@ #define RNBD_PROTO_H #include -#include +#include #include #include #include diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 88288c8d6bc8c..aae48a8b1a048 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -18,6 +18,7 @@ #include "dm-verity-verify-sig.h" #include #include +#include #define DM_MSG_PREFIX "verity" diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 4646b7a03db6b..c9db24e16af13 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index ef4a8e189fba0..02f64453b4c5f 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index d39b87e2ed100..31df20abe141f 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 13ba1861e688f..bd4086a6f28e0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -6,10 +6,218 @@ #include #include #include +#include struct blk_mq_tags; struct blk_flush_queue; +#define BLKDEV_MIN_RQ 4 +#define BLKDEV_MAX_RQ 128 /* Default maximum */ + +typedef void (rq_end_io_fn)(struct request *, blk_status_t); + +/* + * request flags */ +typedef __u32 __bitwise req_flags_t; + +/* drive already may have started this one */ +#define RQF_STARTED ((__force req_flags_t)(1 << 1)) +/* may not be passed by ioscheduler */ +#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) +/* request for flush sequence */ +#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) +/* merge of different types, fail separately */ +#define RQF_MIXED_MERGE ((__force 
req_flags_t)(1 << 5)) +/* track inflight for MQ */ +#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) +/* don't call prep for this one */ +#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) +/* vaguely specified driver internal error. Ignored by the block layer */ +#define RQF_FAILED ((__force req_flags_t)(1 << 10)) +/* don't warn about errors */ +#define RQF_QUIET ((__force req_flags_t)(1 << 11)) +/* elevator private data attached */ +#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) +/* account into disk and partition IO statistics */ +#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) +/* runtime pm request */ +#define RQF_PM ((__force req_flags_t)(1 << 15)) +/* on IO scheduler merge hash */ +#define RQF_HASHED ((__force req_flags_t)(1 << 16)) +/* track IO completion time */ +#define RQF_STATS ((__force req_flags_t)(1 << 17)) +/* Look at ->special_vec for the actual data payload instead of the + bio chain. */ +#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) +/* The per-zone write lock is held for this request */ +#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) +/* already slept for hybrid poll */ +#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) +/* ->timeout has been called, don't expire again */ +#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) + +/* flags that prevent us from merging requests: */ +#define RQF_NOMERGE_FLAGS \ + (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) + +enum mq_rq_state { + MQ_RQ_IDLE = 0, + MQ_RQ_IN_FLIGHT = 1, + MQ_RQ_COMPLETE = 2, +}; + +/* + * Try to put the fields that are referenced together in the same cacheline. + * + * If you modify this structure, make sure to update blk_rq_init() and + * especially blk_mq_rq_ctx_init() to take care of the added fields. + */ +struct request { + struct request_queue *q; + struct blk_mq_ctx *mq_ctx; + struct blk_mq_hw_ctx *mq_hctx; + + unsigned int cmd_flags; /* op and common flags */ + req_flags_t rq_flags; + + int tag; + int internal_tag; + + /* the following two fields are internal, NEVER access directly */ + unsigned int __data_len; /* total data len */ + sector_t __sector; /* sector cursor */ + + struct bio *bio; + struct bio *biotail; + + struct list_head queuelist; + + /* + * The hash is used inside the scheduler, and killed once the + * request reaches the dispatch list. The ipi_list is only used + * to queue the request for softirq completion, which is long + * after the request has been unhashed (and even removed from + * the dispatch list). + */ + union { + struct hlist_node hash; /* merge hash */ + struct llist_node ipi_list; + }; + + /* + * The rb_node is only used inside the io scheduler, requests + * are pruned when moved to the dispatch queue. So let the + * completion_data share space with the rb_node. + */ + union { + struct rb_node rb_node; /* sort/lookup */ + struct bio_vec special_vec; + void *completion_data; + int error_count; /* for legacy drivers, don't use */ + }; + + /* + * Three pointers are available for the IO schedulers, if they need + * more they have to dynamically allocate it. Flush requests are + * never put on the IO scheduler. So let the flush fields share + * space with the elevator data. 
+ */ + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + + struct gendisk *rq_disk; + struct block_device *part; +#ifdef CONFIG_BLK_RQ_ALLOC_TIME + /* Time that the first bio started allocating this request. */ + u64 alloc_time_ns; +#endif + /* Time that this request was allocated for this IO. */ + u64 start_time_ns; + /* Time that I/O was submitted to the device. */ + u64 io_start_time_ns; + +#ifdef CONFIG_BLK_WBT + unsigned short wbt_flags; +#endif + /* + * rq sectors used for blk stats. It has the same value + * with blk_rq_sectors(rq), except that it never be zeroed + * by completion. + */ + unsigned short stats_sectors; + + /* + * Number of scatter-gather DMA addr+len pairs after + * physical address coalescing is performed. + */ + unsigned short nr_phys_segments; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + unsigned short nr_integrity_segments; +#endif + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct bio_crypt_ctx *crypt_ctx; + struct blk_ksm_keyslot *crypt_keyslot; +#endif + + unsigned short write_hint; + unsigned short ioprio; + + enum mq_rq_state state; + refcount_t ref; + + unsigned int timeout; + unsigned long deadline; + + union { + struct __call_single_data csd; + u64 fifo_time; + }; + + /* + * completion callback. + */ + rq_end_io_fn *end_io; + void *end_io_data; +}; + +#define req_op(req) \ + ((req)->cmd_flags & REQ_OP_MASK) + +static inline bool blk_rq_is_passthrough(struct request *rq) +{ + return blk_op_is_passthrough(req_op(rq)); +} + +static inline unsigned short req_get_ioprio(struct request *req) +{ + return req->ioprio; +} + +#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) + +#define rq_dma_dir(rq) \ + (op_is_write(req_op(rq)) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE) + +enum blk_eh_timer_return { + BLK_EH_DONE, /* drivers has completed the command */ + BLK_EH_RESET_TIMER, /* reset timer and try again */ +}; + +#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ +#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ + /** * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware * block device @@ -637,4 +845,261 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio); void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, struct lock_class_key *key); +static inline bool rq_is_sync(struct request *rq) +{ + return op_is_sync(rq->cmd_flags); +} + +void blk_rq_init(struct request_queue *q, struct request *rq); +void blk_put_request(struct request *rq); +struct request *blk_get_request(struct request_queue *q, unsigned int op, + blk_mq_req_flags_t flags); +int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), void *data); +void blk_rq_unprep_clone(struct request *rq); +blk_status_t blk_insert_cloned_request(struct request_queue *q, + struct request *rq); + +struct rq_map_data { + struct page **pages; + int page_order; + int nr_entries; + unsigned long offset; + int null_mapped; + int from_user; +}; + +int blk_rq_map_user(struct request_queue *, struct request *, + struct rq_map_data *, void __user *, unsigned long, gfp_t); +int blk_rq_map_user_iov(struct request_queue *, struct request *, + struct rq_map_data *, const struct iov_iter *, gfp_t); +int blk_rq_unmap_user(struct bio *); +int blk_rq_map_kern(struct request_queue *, struct request *, void *, + unsigned int, gfp_t); +int blk_rq_append_bio(struct request *rq, struct bio *bio); +void blk_execute_rq_nowait(struct gendisk *, struct request *, int, + rq_end_io_fn *); +blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, + int at_head); + +struct req_iterator { + struct bvec_iter iter; + struct bio *bio; +}; + +#define __rq_for_each_bio(_bio, rq) \ + if ((rq->bio)) \ + for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) + +#define rq_for_each_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_segment(bvl, _iter.bio, _iter.iter) + +#define rq_for_each_bvec(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bio_for_each_bvec(bvl, _iter.bio, _iter.iter) + +#define rq_iter_last(bvec, _iter) \ + (_iter.bio->bi_next == NULL && \ + bio_iter_last(bvec, _iter.iter)) + +/* + * blk_rq_pos() : the current sector + * blk_rq_bytes() : bytes left in the entire request + * blk_rq_cur_bytes() : bytes left in the current segment + * blk_rq_err_bytes() : bytes left till the next error boundary + * blk_rq_sectors() : sectors left in the entire request + * blk_rq_cur_sectors() : sectors left in the current segment + * blk_rq_stats_sectors() : sectors of the entire request used for stats + */ +static inline sector_t blk_rq_pos(const struct request *rq) +{ + return rq->__sector; +} + +static inline unsigned int blk_rq_bytes(const struct request *rq) +{ + return rq->__data_len; +} + +static inline int blk_rq_cur_bytes(const struct request *rq) +{ + return rq->bio ? 
bio_cur_bytes(rq->bio) : 0; +} + +unsigned int blk_rq_err_bytes(const struct request *rq); + +static inline unsigned int blk_rq_sectors(const struct request *rq) +{ + return blk_rq_bytes(rq) >> SECTOR_SHIFT; +} + +static inline unsigned int blk_rq_cur_sectors(const struct request *rq) +{ + return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; +} + +static inline unsigned int blk_rq_stats_sectors(const struct request *rq) +{ + return rq->stats_sectors; +} + +/* + * Some commands like WRITE SAME have a payload or data transfer size which + * is different from the size of the request. Any driver that supports such + * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to + * calculate the data transfer size. + */ +static inline unsigned int blk_rq_payload_bytes(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec.bv_len; + return blk_rq_bytes(rq); +} + +/* + * Return the first full biovec in the request. The caller needs to check that + * there are any bvecs before calling this helper. + */ +static inline struct bio_vec req_bvec(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return rq->special_vec; + return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); +} + +static inline unsigned int blk_rq_count_bios(struct request *rq) +{ + unsigned int nr_bios = 0; + struct bio *bio; + + __rq_for_each_bio(bio, rq) + nr_bios++; + + return nr_bios; +} + +void blk_steal_bios(struct bio_list *list, struct request *rq); + +/* + * Request completion related functions. + * + * blk_update_request() completes given number of bytes and updates + * the request without completing it. + */ +bool blk_update_request(struct request *rq, blk_status_t error, + unsigned int nr_bytes); +void blk_abort_request(struct request *); + +/* + * Number of physical segments as sent to the device. + * + * Normally this is the number of discontiguous data segments sent by the + * submitter. But for data-less command like discard we might have no + * actual data segments submitted, but the driver might have to add it's + * own special payload. In that case we still return 1 here so that this + * special payload will be mapped. + */ +static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) +{ + if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) + return 1; + return rq->nr_phys_segments; +} + +/* + * Number of discard segments (or ranges) the driver needs to fill in. + * Each discard bio merged into a request is counted as one segment. 
+ */ +static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) +{ + return max_t(unsigned short, rq->nr_phys_segments, 1); +} + +int __blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist, struct scatterlist **last_sg); +static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *last_sg = NULL; + + return __blk_rq_map_sg(q, rq, sglist, &last_sg); +} +void blk_dump_rq_flags(struct request *, char *); + +#ifdef CONFIG_BLK_DEV_ZONED +static inline unsigned int blk_rq_zone_no(struct request *rq) +{ + return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); +} + +static inline unsigned int blk_rq_zone_is_seq(struct request *rq) +{ + return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); +} + +bool blk_req_needs_zone_write_lock(struct request *rq); +bool blk_req_zone_write_trylock(struct request *rq); +void __blk_req_zone_write_lock(struct request *rq); +void __blk_req_zone_write_unlock(struct request *rq); + +static inline void blk_req_zone_write_lock(struct request *rq) +{ + if (blk_req_needs_zone_write_lock(rq)) + __blk_req_zone_write_lock(rq); +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ + if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) + __blk_req_zone_write_unlock(rq); +} + +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return rq->q->seq_zones_wlock && + test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + if (!blk_req_needs_zone_write_lock(rq)) + return true; + return !blk_req_zone_is_write_locked(rq); +} +#else /* CONFIG_BLK_DEV_ZONED */ +static inline bool blk_req_needs_zone_write_lock(struct request *rq) +{ + return false; +} + +static inline void blk_req_zone_write_lock(struct request *rq) +{ +} + +static inline void blk_req_zone_write_unlock(struct request *rq) +{ +} +static inline bool blk_req_zone_is_write_locked(struct request *rq) +{ + return false; +} + +static inline bool blk_req_can_dispatch_to_zone(struct request *rq) +{ + return true; +} +#endif /* CONFIG_BLK_DEV_ZONED */ + +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" #endif +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +void rq_flush_dcache_pages(struct request *rq); +#else +static inline void rq_flush_dcache_pages(struct request *rq) +{ +} +#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */ +#endif /* BLK_MQ_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index be622b5a21ed5..3b967053e9f5a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -431,8 +431,6 @@ enum stat_group { #define bio_op(bio) \ ((bio)->bi_opf & REQ_OP_MASK) -#define req_op(req) \ - ((req)->cmd_flags & REQ_OP_MASK) /* obsolete, don't use in new code */ static inline void bio_set_op_attrs(struct bio *bio, unsigned op, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 56e60e5c09d07..0e960d74615ec 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -32,9 +31,6 @@ struct blk_queue_stats; struct blk_stat_callback; struct blk_keyslot_manager; -#define BLKDEV_MIN_RQ 4 -#define BLKDEV_MAX_RQ 128 /* Default maximum */ - /* Must be consistent with blk_mq_poll_stats_bkt() */ #define BLK_MQ_POLL_STATS_BKTS 16 @@ -47,213 +43,12 @@ struct blk_keyslot_manager; */ #define BLKCG_MAX_POLS 6 -typedef void 
(rq_end_io_fn)(struct request *, blk_status_t); - -/* - * request flags */ -typedef __u32 __bitwise req_flags_t; - -/* drive already may have started this one */ -#define RQF_STARTED ((__force req_flags_t)(1 << 1)) -/* may not be passed by ioscheduler */ -#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3)) -/* request for flush sequence */ -#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4)) -/* merge of different types, fail separately */ -#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5)) -/* track inflight for MQ */ -#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6)) -/* don't call prep for this one */ -#define RQF_DONTPREP ((__force req_flags_t)(1 << 7)) -/* vaguely specified driver internal error. Ignored by the block layer */ -#define RQF_FAILED ((__force req_flags_t)(1 << 10)) -/* don't warn about errors */ -#define RQF_QUIET ((__force req_flags_t)(1 << 11)) -/* elevator private data attached */ -#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12)) -/* account into disk and partition IO statistics */ -#define RQF_IO_STAT ((__force req_flags_t)(1 << 13)) -/* runtime pm request */ -#define RQF_PM ((__force req_flags_t)(1 << 15)) -/* on IO scheduler merge hash */ -#define RQF_HASHED ((__force req_flags_t)(1 << 16)) -/* track IO completion time */ -#define RQF_STATS ((__force req_flags_t)(1 << 17)) -/* Look at ->special_vec for the actual data payload instead of the - bio chain. */ -#define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -/* The per-zone write lock is held for this request */ -#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) -/* already slept for hybrid poll */ -#define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) -/* ->timeout has been called, don't expire again */ -#define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) - -/* flags that prevent us from merging requests: */ -#define RQF_NOMERGE_FLAGS \ - (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD) - -/* - * Request state for blk-mq. - */ -enum mq_rq_state { - MQ_RQ_IDLE = 0, - MQ_RQ_IN_FLIGHT = 1, - MQ_RQ_COMPLETE = 2, -}; - -/* - * Try to put the fields that are referenced together in the same cacheline. - * - * If you modify this structure, make sure to update blk_rq_init() and - * especially blk_mq_rq_ctx_init() to take care of the added fields. - */ -struct request { - struct request_queue *q; - struct blk_mq_ctx *mq_ctx; - struct blk_mq_hw_ctx *mq_hctx; - - unsigned int cmd_flags; /* op and common flags */ - req_flags_t rq_flags; - - int tag; - int internal_tag; - - /* the following two fields are internal, NEVER access directly */ - unsigned int __data_len; /* total data len */ - sector_t __sector; /* sector cursor */ - - struct bio *bio; - struct bio *biotail; - - struct list_head queuelist; - - /* - * The hash is used inside the scheduler, and killed once the - * request reaches the dispatch list. The ipi_list is only used - * to queue the request for softirq completion, which is long - * after the request has been unhashed (and even removed from - * the dispatch list). - */ - union { - struct hlist_node hash; /* merge hash */ - struct llist_node ipi_list; - }; - - /* - * The rb_node is only used inside the io scheduler, requests - * are pruned when moved to the dispatch queue. So let the - * completion_data share space with the rb_node. 
- */ - union { - struct rb_node rb_node; /* sort/lookup */ - struct bio_vec special_vec; - void *completion_data; - int error_count; /* for legacy drivers, don't use */ - }; - - /* - * Three pointers are available for the IO schedulers, if they need - * more they have to dynamically allocate it. Flush requests are - * never put on the IO scheduler. So let the flush fields share - * space with the elevator data. - */ - union { - struct { - struct io_cq *icq; - void *priv[2]; - } elv; - - struct { - unsigned int seq; - struct list_head list; - rq_end_io_fn *saved_end_io; - } flush; - }; - - struct gendisk *rq_disk; - struct block_device *part; -#ifdef CONFIG_BLK_RQ_ALLOC_TIME - /* Time that the first bio started allocating this request. */ - u64 alloc_time_ns; -#endif - /* Time that this request was allocated for this IO. */ - u64 start_time_ns; - /* Time that I/O was submitted to the device. */ - u64 io_start_time_ns; - -#ifdef CONFIG_BLK_WBT - unsigned short wbt_flags; -#endif - /* - * rq sectors used for blk stats. It has the same value - * with blk_rq_sectors(rq), except that it never be zeroed - * by completion. - */ - unsigned short stats_sectors; - - /* - * Number of scatter-gather DMA addr+len pairs after - * physical address coalescing is performed. - */ - unsigned short nr_phys_segments; - -#if defined(CONFIG_BLK_DEV_INTEGRITY) - unsigned short nr_integrity_segments; -#endif - -#ifdef CONFIG_BLK_INLINE_ENCRYPTION - struct bio_crypt_ctx *crypt_ctx; - struct blk_ksm_keyslot *crypt_keyslot; -#endif - - unsigned short write_hint; - unsigned short ioprio; - - enum mq_rq_state state; - refcount_t ref; - - unsigned int timeout; - unsigned long deadline; - - union { - struct __call_single_data csd; - u64 fifo_time; - }; - - /* - * completion callback. - */ - rq_end_io_fn *end_io; - void *end_io_data; -}; - static inline bool blk_op_is_passthrough(unsigned int op) { op &= REQ_OP_MASK; return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT; } -static inline bool blk_rq_is_passthrough(struct request *rq) -{ - return blk_op_is_passthrough(req_op(rq)); -} - -static inline unsigned short req_get_ioprio(struct request *req) -{ - return req->ioprio; -} - -struct bio_vec; - -enum blk_eh_timer_return { - BLK_EH_DONE, /* drivers has completed the command */ - BLK_EH_RESET_TIMER, /* reset timer and try again */ -}; - -#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ -#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ - /* * Zoned block device models (zoned limit). * @@ -620,11 +415,6 @@ extern void blk_clear_pm_only(struct request_queue *q); #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) -#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) - -#define rq_dma_dir(rq) \ - (op_is_write(req_op(rq)) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE) - #define dma_map_bvec(dev, bv, dir, attrs) \ dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ (dir), (attrs)) @@ -740,11 +530,6 @@ static inline unsigned int queue_max_active_zones(const struct request_queue *q) } #endif /* CONFIG_BLK_DEV_ZONED */ -static inline bool rq_is_sync(struct request *rq) -{ - return op_is_sync(rq->cmd_flags); -} - static inline unsigned int blk_queue_depth(struct request_queue *q) { if (q->queue_depth) @@ -759,83 +544,20 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) #define BLK_MIN_SG_TIMEOUT (7 * HZ) -struct rq_map_data { - struct page **pages; - int page_order; - int nr_entries; - unsigned long offset; - int null_mapped; - int from_user; -}; - -struct req_iterator { - struct bvec_iter iter; - struct bio *bio; -}; - /* This should not be used directly - use rq_for_each_segment */ #define for_each_bio(_bio) \ for (; _bio; _bio = _bio->bi_next) -#define __rq_for_each_bio(_bio, rq) \ - if ((rq->bio)) \ - for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) - -#define rq_for_each_segment(bvl, _rq, _iter) \ - __rq_for_each_bio(_iter.bio, _rq) \ - bio_for_each_segment(bvl, _iter.bio, _iter.iter) -#define rq_for_each_bvec(bvl, _rq, _iter) \ - __rq_for_each_bio(_iter.bio, _rq) \ - bio_for_each_bvec(bvl, _iter.bio, _iter.iter) - -#define rq_iter_last(bvec, _iter) \ - (_iter.bio->bi_next == NULL && \ - bio_iter_last(bvec, _iter.iter)) - -#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" -#endif -#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE -extern void rq_flush_dcache_pages(struct request *rq); -#else -static inline void rq_flush_dcache_pages(struct request *rq) -{ -} -#endif extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); blk_qc_t submit_bio_noacct(struct bio *bio); -extern void blk_rq_init(struct request_queue *q, struct request *rq); -extern void blk_put_request(struct request *); -extern struct request *blk_get_request(struct request_queue *, unsigned int op, - blk_mq_req_flags_t flags); + extern int blk_lld_busy(struct request_queue *q); -extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, - struct bio_set *bs, gfp_t gfp_mask, - int (*bio_ctr)(struct bio *, struct bio *, void *), - void *data); -extern void blk_rq_unprep_clone(struct request *rq); -extern blk_status_t blk_insert_cloned_request(struct request_queue *q, - struct request *rq); -int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_queue_split(struct bio **); extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); extern void blk_queue_exit(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); -extern int blk_rq_map_user(struct request_queue *, struct request *, - struct rq_map_data *, void __user *, unsigned long, - gfp_t); -extern int blk_rq_unmap_user(struct bio *); -extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t); -extern int blk_rq_map_user_iov(struct request_queue *, struct request *, - struct rq_map_data *, const struct iov_iter *, - gfp_t); -extern void blk_execute_rq_nowait(struct gendisk *, - struct request *, int, rq_end_io_fn *); - -blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, - int at_head); /* Helper to convert REQ_OP_XXX to its string format XXX */ extern const char 
*blk_op_str(unsigned int op); @@ -867,47 +589,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) #define SECTOR_MASK (PAGE_SECTORS - 1) -/* - * blk_rq_pos() : the current sector - * blk_rq_bytes() : bytes left in the entire request - * blk_rq_cur_bytes() : bytes left in the current segment - * blk_rq_err_bytes() : bytes left till the next error boundary - * blk_rq_sectors() : sectors left in the entire request - * blk_rq_cur_sectors() : sectors left in the current segment - * blk_rq_stats_sectors() : sectors of the entire request used for stats - */ -static inline sector_t blk_rq_pos(const struct request *rq) -{ - return rq->__sector; -} - -static inline unsigned int blk_rq_bytes(const struct request *rq) -{ - return rq->__data_len; -} - -static inline int blk_rq_cur_bytes(const struct request *rq) -{ - return rq->bio ? bio_cur_bytes(rq->bio) : 0; -} - -extern unsigned int blk_rq_err_bytes(const struct request *rq); - -static inline unsigned int blk_rq_sectors(const struct request *rq) -{ - return blk_rq_bytes(rq) >> SECTOR_SHIFT; -} - -static inline unsigned int blk_rq_cur_sectors(const struct request *rq) -{ - return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT; -} - -static inline unsigned int blk_rq_stats_sectors(const struct request *rq) -{ - return rq->stats_sectors; -} - #ifdef CONFIG_BLK_DEV_ZONED /* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */ @@ -924,42 +605,8 @@ static inline unsigned int bio_zone_is_seq(struct bio *bio) return blk_queue_zone_is_seq(bdev_get_queue(bio->bi_bdev), bio->bi_iter.bi_sector); } - -static inline unsigned int blk_rq_zone_no(struct request *rq) -{ - return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); -} - -static inline unsigned int blk_rq_zone_is_seq(struct request *rq) -{ - return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); -} #endif /* CONFIG_BLK_DEV_ZONED */ -/* - * Some commands like WRITE SAME have a payload or data transfer size which - * is different from the size of the request. Any driver that supports such - * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to - * calculate the data transfer size. - */ -static inline unsigned int blk_rq_payload_bytes(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return rq->special_vec.bv_len; - return blk_rq_bytes(rq); -} - -/* - * Return the first full biovec in the request. The caller needs to check that - * there are any bvecs before calling this helper. - */ -static inline struct bio_vec req_bvec(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return rq->special_vec; - return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter); -} - static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, int op) { @@ -999,30 +646,6 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q, return min(q->limits.max_sectors, chunk_sectors); } -static inline unsigned int blk_rq_count_bios(struct request *rq) -{ - unsigned int nr_bios = 0; - struct bio *bio; - - __rq_for_each_bio(bio, rq) - nr_bios++; - - return nr_bios; -} - -void blk_steal_bios(struct bio_list *list, struct request *rq); - -/* - * Request completion related functions. - * - * blk_update_request() completes given number of bytes and updates - * the request without completing it. 
- */ -extern bool blk_update_request(struct request *rq, blk_status_t error, - unsigned int nr_bytes); - -extern void blk_abort_request(struct request *); - /* * Access functions for manipulating queue properties */ @@ -1081,42 +704,6 @@ extern void blk_queue_required_elevator_features(struct request_queue *q, extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev); -/* - * Number of physical segments as sent to the device. - * - * Normally this is the number of discontiguous data segments sent by the - * submitter. But for data-less command like discard we might have no - * actual data segments submitted, but the driver might have to add it's - * own special payload. In that case we still return 1 here so that this - * special payload will be mapped. - */ -static inline unsigned short blk_rq_nr_phys_segments(struct request *rq) -{ - if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) - return 1; - return rq->nr_phys_segments; -} - -/* - * Number of discard segments (or ranges) the driver needs to fill in. - * Each discard bio merged into a request is counted as one segment. - */ -static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) -{ - return max_t(unsigned short, rq->nr_phys_segments, 1); -} - -int __blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist, struct scatterlist **last_sg); -static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, - struct scatterlist *sglist) -{ - struct scatterlist *last_sg = NULL; - - return __blk_rq_map_sg(q, rq, sglist, &last_sg); -} -extern void blk_dump_rq_flags(struct request *, char *); - bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); @@ -1613,60 +1200,6 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); -#ifdef CONFIG_BLK_DEV_ZONED -bool blk_req_needs_zone_write_lock(struct request *rq); -bool blk_req_zone_write_trylock(struct request *rq); -void __blk_req_zone_write_lock(struct request *rq); -void __blk_req_zone_write_unlock(struct request *rq); - -static inline void blk_req_zone_write_lock(struct request *rq) -{ - if (blk_req_needs_zone_write_lock(rq)) - __blk_req_zone_write_lock(rq); -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ - if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) - __blk_req_zone_write_unlock(rq); -} - -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return rq->q->seq_zones_wlock && - test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - if (!blk_req_needs_zone_write_lock(rq)) - return true; - return !blk_req_zone_is_write_locked(rq); -} -#else -static inline bool blk_req_needs_zone_write_lock(struct request *rq) -{ - return false; -} - -static inline void blk_req_zone_write_lock(struct request *rq) -{ -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ -} -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return false; -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - return true; -} -#endif /* CONFIG_BLK_DEV_ZONED */ - static inline void blk_wake_io_task(struct task_struct *waiter) { /* diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 
a083e15df6087..22501a293fa54 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -2,7 +2,7 @@ #ifndef BLKTRACE_H #define BLKTRACE_H -#include +#include #include #include #include diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index 96305a64a5a7b..c635c2e014e3d 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -3,7 +3,7 @@ #define _LINUX_T10_PI_H #include -#include +#include /* * A T10 PI-capable target device can be formatted with different diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index b97e142a7ca92..430b73bd02ac1 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -5,7 +5,7 @@ #include #include #include -#include +#include #include #include #include From e2c7275dc0fe3cba6a3de5a5f4c98f923e3d9d14 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:25 -0700 Subject: [PATCH 019/205] block/mq-deadline: Improve request accounting further The scheduler .insert_requests() callback is called when a request is queued for the first time and also when it is requeued. Only count a request the first time it is queued. Additionally, since the mq-deadline scheduler only performs zone locking for requests that have been inserted, skip the zone unlock code for requests that have not been inserted into the mq-deadline scheduler. Fixes: 38ba64d12d4c ("block/mq-deadline: Track I/O statistics") Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Link: https://lore.kernel.org/r/20210927220328.1410161-2-bvanassche@acm.org Signed-off-by: Jens Axboe --- block/mq-deadline.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 47f042fa6a688..c27b4347ca911 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -677,8 +677,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_req_zone_write_unlock(rq); prio = ioprio_class_to_prio[ioprio_class]; - dd_count(dd, inserted, prio); - rq->elv.priv[0] = (void *)(uintptr_t)1; + if (!rq->elv.priv[0]) { + dd_count(dd, inserted, prio); + rq->elv.priv[0] = (void *)(uintptr_t)1; + } if (blk_mq_sched_try_insert_merge(q, rq, &free)) { blk_mq_free_requests(&free); @@ -759,12 +761,13 @@ static void dd_finish_request(struct request *rq) /* * The block layer core may call dd_finish_request() without having - * called dd_insert_requests(). Hence only update statistics for - * requests for which dd_insert_requests() has been called. See also - * blk_mq_request_bypass_insert(). + * called dd_insert_requests(). Skip requests that bypassed I/O + * scheduling. See also blk_mq_request_bypass_insert(). */ - if (rq->elv.priv[0]) - dd_count(dd, completed, prio); + if (!rq->elv.priv[0]) + return; + + dd_count(dd, completed, prio); if (blk_queue_is_zoned(q)) { unsigned long flags; From 32f64cad97187f4aed50aca3ed1b5a51a00f848b Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:26 -0700 Subject: [PATCH 020/205] block/mq-deadline: Add an invariant check Check a statistics invariant at module unload time. When running blktests, the invariant is verified every time a request queue is removed and hence is verified at least once per test. 
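Concretely, the invariant is that, for each priority level, every request counted as inserted has also been counted as completed by the time the scheduler is torn down. A minimal sketch of that style of check follows; the struct and function names are illustrative only, and the real check operates on the mq-deadline statistics shown in the diff below.

#include <linux/bug.h>
#include <linux/types.h>

struct example_io_stats {		/* illustrative, not kernel API */
	u32 inserted;
	u32 completed;
};

static void example_check_drained(const struct example_io_stats *s, int prio)
{
	u32 queued = s->inserted - s->completed;	/* u32 arithmetic tolerates wraparound */

	WARN_ONCE(queued != 0,
		  "priority %d: %u requests still queued at teardown\n",
		  prio, queued);
}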
Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Link: https://lore.kernel.org/r/20210927220328.1410161-3-bvanassche@acm.org Signed-off-by: Jens Axboe --- block/mq-deadline.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index c27b4347ca911..2586b3f8c7e9e 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -270,6 +270,12 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, deadline_remove_request(rq->q, per_prio, rq); } +/* Number of requests queued for a given priority level. */ +static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) +{ + return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); +} + /* * deadline_check_fifo returns 0 if there are no expired requests on the fifo, * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) @@ -539,6 +545,12 @@ static void dd_exit_sched(struct elevator_queue *e) WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ])); WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); + WARN_ONCE(dd_queued(dd, prio) != 0, + "statistics for priority %d: i %u m %u d %u c %u\n", + prio, dd_sum(dd, inserted, prio), + dd_sum(dd, merged, prio), + dd_sum(dd, dispatched, prio), + dd_sum(dd, completed, prio)); } free_percpu(dd->stats); @@ -950,12 +962,6 @@ static int dd_async_depth_show(void *data, struct seq_file *m) return 0; } -/* Number of requests queued for a given priority level. */ -static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) -{ - return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); -} - static int dd_queued_show(void *data, struct seq_file *m) { struct request_queue *q = data; From bce0363ed84af8e8e27df464b2082f4b5f58b57e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:27 -0700 Subject: [PATCH 021/205] block/mq-deadline: Stop using per-CPU counters Calculating the sum over all CPUs of per-CPU counters frequently is inefficient. Hence switch from per-CPU to individual counters. Three counters are protected by the mq-deadline spinlock since these are only accessed from contexts that already hold that spinlock. The fourth counter is atomic because protecting it with the mq-deadline spinlock would trigger lock contention. Reviewed-by: Damien Le Moal Reviewed-by: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Link: https://lore.kernel.org/r/20210927220328.1410161-4-bvanassche@acm.org Signed-off-by: Jens Axboe --- block/mq-deadline.c | 124 ++++++++++++++++++++------------------------ 1 file changed, 56 insertions(+), 68 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 2586b3f8c7e9e..b262f40f32c07 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -51,17 +51,16 @@ enum dd_prio { enum { DD_PRIO_COUNT = 3 }; -/* I/O statistics per I/O priority. */ +/* + * I/O statistics per I/O priority. It is fine if these counters overflow. + * What matters is that these counters are at least as wide as + * log2(max_outstanding_requests). + */ struct io_stats_per_prio { - local_t inserted; - local_t merged; - local_t dispatched; - local_t completed; -}; - -/* I/O statistics for all I/O priorities (enum dd_prio). 
*/ -struct io_stats { - struct io_stats_per_prio stats[DD_PRIO_COUNT]; + uint32_t inserted; + uint32_t merged; + uint32_t dispatched; + atomic_t completed; }; /* @@ -74,6 +73,7 @@ struct dd_per_prio { struct list_head fifo_list[DD_DIR_COUNT]; /* Next request in FIFO order. Read, write or both are NULL. */ struct request *next_rq[DD_DIR_COUNT]; + struct io_stats_per_prio stats; }; struct deadline_data { @@ -88,8 +88,6 @@ struct deadline_data { unsigned int batching; /* number of sequential requests made */ unsigned int starved; /* times reads have starved writes */ - struct io_stats __percpu *stats; - /* * settings that change how the i/o scheduler behaves */ @@ -103,33 +101,6 @@ struct deadline_data { spinlock_t zone_lock; }; -/* Count one event of type 'event_type' and with I/O priority 'prio' */ -#define dd_count(dd, event_type, prio) do { \ - struct io_stats *io_stats = get_cpu_ptr((dd)->stats); \ - \ - BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ - BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ - local_inc(&io_stats->stats[(prio)].event_type); \ - put_cpu_ptr(io_stats); \ -} while (0) - -/* - * Returns the total number of dd_count(dd, event_type, prio) calls across all - * CPUs. No locking or barriers since it is fine if the returned sum is slightly - * outdated. - */ -#define dd_sum(dd, event_type, prio) ({ \ - unsigned int cpu; \ - u32 sum = 0; \ - \ - BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ - BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ - for_each_present_cpu(cpu) \ - sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \ - stats[(prio)].event_type); \ - sum; \ -}) - /* Maps an I/O priority class to a deadline scheduler priority. */ static const enum dd_prio ioprio_class_to_prio[] = { [IOPRIO_CLASS_NONE] = DD_BE_PRIO, @@ -233,7 +204,9 @@ static void dd_merged_requests(struct request_queue *q, struct request *req, const u8 ioprio_class = dd_rq_ioclass(next); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; - dd_count(dd, merged, prio); + lockdep_assert_held(&dd->lock); + + dd->per_prio[prio].stats.merged++; /* * if next expires before rq, assign its expire time to rq @@ -273,7 +246,11 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, /* Number of requests queued for a given priority level. */ static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) { - return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); + const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats; + + lockdep_assert_held(&dd->lock); + + return stats->inserted - atomic_read(&stats->completed); } /* @@ -463,7 +440,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, done: ioprio_class = dd_rq_ioclass(rq); prio = ioprio_class_to_prio[ioprio_class]; - dd_count(dd, dispatched, prio); + dd->per_prio[prio].stats.dispatched++; /* * If the request needs its target zone locked, do it. 
*/ @@ -542,19 +519,22 @@ static void dd_exit_sched(struct elevator_queue *e) for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; + const struct io_stats_per_prio *stats = &per_prio->stats; + uint32_t queued; WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ])); WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); - WARN_ONCE(dd_queued(dd, prio) != 0, + + spin_lock(&dd->lock); + queued = dd_queued(dd, prio); + spin_unlock(&dd->lock); + + WARN_ONCE(queued != 0, "statistics for priority %d: i %u m %u d %u c %u\n", - prio, dd_sum(dd, inserted, prio), - dd_sum(dd, merged, prio), - dd_sum(dd, dispatched, prio), - dd_sum(dd, completed, prio)); + prio, stats->inserted, stats->merged, + stats->dispatched, atomic_read(&stats->completed)); } - free_percpu(dd->stats); - kfree(dd); } @@ -578,11 +558,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) eq->elevator_data = dd; - dd->stats = alloc_percpu_gfp(typeof(*dd->stats), - GFP_KERNEL | __GFP_ZERO); - if (!dd->stats) - goto free_dd; - for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; @@ -604,9 +579,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) q->elevator = eq; return 0; -free_dd: - kfree(dd); - put_eq: kobject_put(&eq->kobj); return ret; @@ -689,8 +661,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_req_zone_write_unlock(rq); prio = ioprio_class_to_prio[ioprio_class]; + per_prio = &dd->per_prio[prio]; if (!rq->elv.priv[0]) { - dd_count(dd, inserted, prio); + per_prio->stats.inserted++; rq->elv.priv[0] = (void *)(uintptr_t)1; } @@ -701,7 +674,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, trace_block_rq_insert(rq); - per_prio = &dd->per_prio[prio]; if (at_head) { list_add(&rq->queuelist, &per_prio->dispatch); } else { @@ -779,7 +751,7 @@ static void dd_finish_request(struct request *rq) if (!rq->elv.priv[0]) return; - dd_count(dd, completed, prio); + atomic_inc(&per_prio->stats.completed); if (blk_queue_is_zoned(q)) { unsigned long flags; @@ -966,28 +938,44 @@ static int dd_queued_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; + u32 rt, be, idle; + + spin_lock(&dd->lock); + rt = dd_queued(dd, DD_RT_PRIO); + be = dd_queued(dd, DD_BE_PRIO); + idle = dd_queued(dd, DD_IDLE_PRIO); + spin_unlock(&dd->lock); + + seq_printf(m, "%u %u %u\n", rt, be, idle); - seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO), - dd_queued(dd, DD_BE_PRIO), - dd_queued(dd, DD_IDLE_PRIO)); return 0; } /* Number of requests owned by the block driver for a given priority. 
*/ static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio) { - return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio) - - dd_sum(dd, completed, prio); + const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats; + + lockdep_assert_held(&dd->lock); + + return stats->dispatched + stats->merged - + atomic_read(&stats->completed); } static int dd_owned_by_driver_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct deadline_data *dd = q->elevator->elevator_data; + u32 rt, be, idle; + + spin_lock(&dd->lock); + rt = dd_owned_by_driver(dd, DD_RT_PRIO); + be = dd_owned_by_driver(dd, DD_BE_PRIO); + idle = dd_owned_by_driver(dd, DD_IDLE_PRIO); + spin_unlock(&dd->lock); + + seq_printf(m, "%u %u %u\n", rt, be, idle); - seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO), - dd_owned_by_driver(dd, DD_BE_PRIO), - dd_owned_by_driver(dd, DD_IDLE_PRIO)); return 0; } From 322cff70d46c6c230a722684fd65ae6b5f57436e Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 27 Sep 2021 15:03:28 -0700 Subject: [PATCH 022/205] block/mq-deadline: Prioritize high-priority requests In addition to reverting commit 7b05bf771084 ("Revert "block/mq-deadline: Prioritize high-priority requests""), this patch uses 'jiffies' instead of ktime_get() in the code for aging lower priority requests. This patch has been tested as follows: Measured QD=1/jobs=1 IOPS for nullb with the mq-deadline scheduler. Result without and with this patch: 555 K IOPS. Measured QD=1/jobs=8 IOPS for nullb with the mq-deadline scheduler. Result without and with this patch: about 380 K IOPS. Ran the following script: set -e scriptdir=$(dirname "$0") if [ -e /sys/module/scsi_debug ]; then modprobe -r scsi_debug; fi modprobe scsi_debug ndelay=1000000 max_queue=16 sd='' while [ -z "$sd" ]; do sd=$(basename /sys/bus/pseudo/drivers/scsi_debug/adapter*/host*/target*/*/block/*) done echo $((100*1000)) > "/sys/block/$sd/queue/iosched/prio_aging_expire" if [ -e /sys/fs/cgroup/io.prio.class ]; then cd /sys/fs/cgroup echo restrict-to-be >io.prio.class echo +io > cgroup.subtree_control else cd /sys/fs/cgroup/blkio/ echo restrict-to-be >blkio.prio.class fi echo $$ >cgroup.procs mkdir -p hipri cd hipri if [ -e io.prio.class ]; then echo none-to-rt >io.prio.class else echo none-to-rt >blkio.prio.class fi { "${scriptdir}/max-iops" -a1 -d32 -j1 -e mq-deadline "/dev/$sd" >& ~/low-pri.txt & } echo $$ >cgroup.procs "${scriptdir}/max-iops" -a1 -d32 -j1 -e mq-deadline "/dev/$sd" >& ~/hi-pri.txt Result: * 11000 IOPS for the high-priority job * 40 IOPS for the low-priority job If the prio aging expiry time is changed from 100s into 0, the IOPS results change into 6712 and 6796 IOPS. 
The max-iops script is a script that runs fio with the following arguments: --bs=4K --gtod_reduce=1 --ioengine=libaio --ioscheduler=${arg_e} --runtime=60 --norandommap --rw=read --thread --buffered=0 --numjobs=${arg_j} --iodepth=${arg_d} --iodepth_batch_submit=${arg_a} --iodepth_batch_complete=$((arg_d / 2)) --name=${positional_argument_1} --filename=${positional_argument_1} Cc: Damien Le Moal Cc: Niklas Cassel Cc: Hannes Reinecke Signed-off-by: Bart Van Assche Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20210927220328.1410161-5-bvanassche@acm.org [axboe: @latest -> @latest_start] Signed-off-by: Jens Axboe --- block/mq-deadline.c | 77 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 73 insertions(+), 4 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index b262f40f32c07..2001ac186f5cd 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -31,6 +31,11 @@ */ static const int read_expire = HZ / 2; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ +/* + * Time after which to dispatch lower priority requests even if higher + * priority requests are pending. + */ +static const int prio_aging_expire = 10 * HZ; static const int writes_starved = 2; /* max times reads can starve a write */ static const int fifo_batch = 16; /* # of sequential requests treated as one by the above parameters. For throughput. */ @@ -96,6 +101,7 @@ struct deadline_data { int writes_starved; int front_merges; u32 async_depth; + int prio_aging_expire; spinlock_t lock; spinlock_t zone_lock; @@ -338,12 +344,27 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, return rq; } +/* + * Returns true if and only if @rq started after @latest_start where + * @latest_start is in jiffies. + */ +static bool started_after(struct deadline_data *dd, struct request *rq, + unsigned long latest_start) +{ + unsigned long start_time = (unsigned long)rq->fifo_time; + + start_time -= dd->fifo_expire[rq_data_dir(rq)]; + + return time_after(start_time, latest_start); +} + /* * deadline_dispatch_requests selects the best request according to - * read/write expire, fifo_batch, etc + * read/write expire, fifo_batch, etc and with a start time <= @latest_start. */ static struct request *__dd_dispatch_request(struct deadline_data *dd, - struct dd_per_prio *per_prio) + struct dd_per_prio *per_prio, + unsigned long latest_start) { struct request *rq, *next_rq; enum dd_data_dir data_dir; @@ -355,6 +376,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, if (!list_empty(&per_prio->dispatch)) { rq = list_first_entry(&per_prio->dispatch, struct request, queuelist); + if (started_after(dd, rq, latest_start)) + return NULL; list_del_init(&rq->queuelist); goto done; } @@ -432,6 +455,9 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, dd->batching = 0; dispatch_request: + if (started_after(dd, rq, latest_start)) + return NULL; + /* * rq is the selected appropriate request. */ @@ -449,6 +475,34 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, return rq; } +/* + * Check whether there are any requests with priority other than DD_RT_PRIO + * that were inserted more than prio_aging_expire jiffies ago. 
+ */ +static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd, + unsigned long now) +{ + struct request *rq; + enum dd_prio prio; + int prio_cnt; + + lockdep_assert_held(&dd->lock); + + prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) + + !!dd_queued(dd, DD_IDLE_PRIO); + if (prio_cnt < 2) + return NULL; + + for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) { + rq = __dd_dispatch_request(dd, &dd->per_prio[prio], + now - dd->prio_aging_expire); + if (rq) + return rq; + } + + return NULL; +} + /* * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests(). * @@ -460,15 +514,26 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; + const unsigned long now = jiffies; struct request *rq; enum dd_prio prio; spin_lock(&dd->lock); + rq = dd_dispatch_prio_aged_requests(dd, now); + if (rq) + goto unlock; + + /* + * Next, dispatch requests in priority order. Ignore lower priority + * requests if any higher priority requests are pending. + */ for (prio = 0; prio <= DD_PRIO_MAX; prio++) { - rq = __dd_dispatch_request(dd, &dd->per_prio[prio]); - if (rq) + rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now); + if (rq || dd_queued(dd, prio)) break; } + +unlock: spin_unlock(&dd->lock); return rq; @@ -573,6 +638,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) dd->front_merges = 1; dd->last_dir = DD_WRITE; dd->fifo_batch = fifo_batch; + dd->prio_aging_expire = prio_aging_expire; spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); @@ -796,6 +862,7 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR)) SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]); SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); +SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire); SHOW_INT(deadline_writes_starved_show, dd->writes_starved); SHOW_INT(deadline_front_merges_show, dd->front_merges); SHOW_INT(deadline_async_depth_show, dd->front_merges); @@ -825,6 +892,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies) STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX); STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); +STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX); STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX); @@ -843,6 +911,7 @@ static struct elv_fs_entry deadline_attrs[] = { DD_ATTR(front_merges), DD_ATTR(async_depth), DD_ATTR(fifo_batch), + DD_ATTR(prio_aging_expire), __ATTR_NULL }; From 8a3ee6778ef1a9a3bfd3b1134ea29e95d3568271 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Sep 2021 07:27:55 +0200 Subject: [PATCH 023/205] block: print the current process in handle_bad_sector Make the bad sector information a little more useful by printing current->comm to identify the caller. 
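For illustration, a rejected access is now reported along these lines (the process name, device name and numbers below are made-up placeholders; the exact format string is the one in the hunk that follows):

	fio: attempt to access beyond end of device
	sda1: rw=2049, want=8396816, limit=2097152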
Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Chaitanya Kulkarni Link: https://lore.kernel.org/r/20210928052755.113016-1-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/blk-core.c b/block/blk-core.c index d2c6caadef891..4695508452443 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -655,8 +655,9 @@ static void handle_bad_sector(struct bio *bio, sector_t maxsector) { char b[BDEVNAME_SIZE]; - pr_info_ratelimited("attempt to access beyond end of device\n" + pr_info_ratelimited("%s: attempt to access beyond end of device\n" "%s: rw=%d, want=%llu, limit=%llu\n", + current->comm, bio_devname(bio, b), bio->bi_opf, bio_end_sector(bio), maxsector); } From 65de57bb2e66f1fbede166c1307570ebd09eae83 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:26 +0800 Subject: [PATCH 024/205] blk-mq: Change rqs check in blk_mq_free_rqs() The original code in commit 24d2f90309b23 ("blk-mq: split out tag initialization, support shared tags") would check tags->rqs is non-NULL and then dereference tags->rqs[]. Then in commit 2af8cbe30531 ("blk-mq: split tag ->rqs[] into two"), we started to dereference tags->static_rqs[], but continued to check non-NULL tags->rqs. Check tags->static_rqs as non-NULL instead, which is more logical. Signed-off-by: John Garry Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-2-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 2539ba976949c..03cbde75756d2 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2349,7 +2349,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, { struct page *page; - if (tags->rqs && set->ops->exit_request) { + if (tags->static_rqs && set->ops->exit_request) { int i; for (i = 0; i < tags->nr_tags; i++) { From d2a27964e60ff18e637334864042af54de383902 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:27 +0800 Subject: [PATCH 025/205] block: Rename BLKDEV_MAX_RQ -> BLKDEV_DEFAULT_RQ It is a bit confusing that there is BLKDEV_MAX_RQ and MAX_SCHED_RQ, as the name BLKDEV_MAX_RQ would imply the max requests always, which it is not. Rename to BLKDEV_MAX_RQ to BLKDEV_DEFAULT_RQ, matching its usage - that being the default number of requests assigned when allocating a request queue. Signed-off-by: John Garry Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-3-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq-sched.c | 2 +- block/blk-mq-sched.h | 2 +- drivers/block/rbd.c | 2 +- include/linux/blk-mq.h | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 4695508452443..2590e77249909 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -587,7 +587,7 @@ struct request_queue *blk_alloc_queue(int node_id) blk_queue_dma_alignment(q, 511); blk_set_default_limits(&q->limits); - q->nr_requests = BLKDEV_MAX_RQ; + q->nr_requests = BLKDEV_DEFAULT_RQ; return q; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 0f006cabfd914..2231fb0d4c35e 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -606,7 +606,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) * Additionally, this is a per-hw queue depth. 
*/ q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, - BLKDEV_MAX_RQ); + BLKDEV_DEFAULT_RQ); queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_sched_alloc_tags(q, hctx, i); diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index 5181487db7923..f6a1ce9e31f38 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -6,7 +6,7 @@ #include "blk-mq.h" #include "blk-mq-tag.h" -#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ) +#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ) void blk_mq_sched_assign_ioc(struct request *rq); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e65c9d706f6fb..bf60aebd0cfbe 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -836,7 +836,7 @@ struct rbd_options { u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ }; -#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ +#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024) #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ #define RBD_READ_ONLY_DEFAULT false diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index bd4086a6f28e0..31cc41dfef6b4 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -12,7 +12,7 @@ struct blk_mq_tags; struct blk_flush_queue; #define BLKDEV_MIN_RQ 4 -#define BLKDEV_MAX_RQ 128 /* Default maximum */ +#define BLKDEV_DEFAULT_RQ 128 typedef void (rq_end_io_fn)(struct request *, blk_status_t); From 8fa044640f128c0cd015f72cda42989a664889fc Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:28 +0800 Subject: [PATCH 026/205] blk-mq: Relocate shared sbitmap resize in blk_mq_update_nr_requests() For shared sbitmap, if the call to blk_mq_tag_update_depth() was successful for any hctx when hctx->sched_tags is not set, then it would be successful for all (due to nature in which blk_mq_tag_update_depth() fails). As such, there is no need to call blk_mq_tag_resize_shared_sbitmap() for each hctx. So relocate the call until after the hctx iteration under the !q->elevator check, which is equivalent (to !hctx->sched_tags). 
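In condensed form (a sketch of the resulting flow only; the authoritative change is the diff below), blk_mq_update_nr_requests() now resizes the shared sbitmap once, after the hctx loop:

  queue_for_each_hw_ctx(q, hctx, i) {
          /* per-hctx depth update only, no shared sbitmap resize here */
          ret = blk_mq_tag_update_depth(hctx, ...);
          if (ret)
                  break;
  }
  if (!ret) {
          q->nr_requests = nr;
          if (blk_mq_is_sbitmap_shared(set->flags)) {
                  if (q->elevator)
                          sbitmap_queue_resize(&q->sched_bitmap_tags,
                                               nr - set->reserved_tags);
                  else
                          blk_mq_tag_resize_shared_sbitmap(set, nr);
          }
  }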
Signed-off-by: John Garry Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-4-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 03cbde75756d2..240c04aaa3f8f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3625,8 +3625,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) if (!hctx->sched_tags) { ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, false); - if (!ret && blk_mq_is_sbitmap_shared(set->flags)) - blk_mq_tag_resize_shared_sbitmap(set, nr); } else { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); @@ -3644,9 +3642,13 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) } if (!ret) { q->nr_requests = nr; - if (q->elevator && blk_mq_is_sbitmap_shared(set->flags)) - sbitmap_queue_resize(&q->sched_bitmap_tags, - nr - set->reserved_tags); + if (blk_mq_is_sbitmap_shared(set->flags)) { + if (q->elevator) + sbitmap_queue_resize(&q->sched_bitmap_tags, + nr - set->reserved_tags); + else + blk_mq_tag_resize_shared_sbitmap(set, nr); + } } blk_mq_unquiesce_queue(q); From f6adcef5f317c78a899ca9766e90e47026834158 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:29 +0800 Subject: [PATCH 027/205] blk-mq: Invert check in blk_mq_update_nr_requests() It's easier to read: if (x) X; else Y; over: if (!x) Y; else X; No functional change intended. Signed-off-by: John Garry Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-5-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 240c04aaa3f8f..ca0e65117064e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3622,18 +3622,18 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) * If we're using an MQ scheduler, just update the scheduler * queue depth. This is similar to what the old code would do. */ - if (!hctx->sched_tags) { - ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, - false); - } else { + if (hctx->sched_tags) { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, - nr, true); + nr, true); if (blk_mq_is_sbitmap_shared(set->flags)) { hctx->sched_tags->bitmap_tags = &q->sched_bitmap_tags; hctx->sched_tags->breserved_tags = &q->sched_breserved_tags; } + } else { + ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, + false); } if (ret) break; From d99a6bb337677d812d5bef7795c9fcf17f1ccebe Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:30 +0800 Subject: [PATCH 028/205] blk-mq-sched: Rename blk_mq_sched_alloc_{tags -> map_and_rqs}() Function blk_mq_sched_alloc_tags() does same as __blk_mq_alloc_map_and_request(), so give a similar name to be consistent. Similarly rename label err_free_tags -> err_free_map_and_rqs. 
Signed-off-by: John Garry Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-6-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 2231fb0d4c35e..644b6d554d725 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -515,9 +515,9 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx, percpu_ref_put(&q->q_usage_counter); } -static int blk_mq_sched_alloc_tags(struct request_queue *q, - struct blk_mq_hw_ctx *hctx, - unsigned int hctx_idx) +static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, + struct blk_mq_hw_ctx *hctx, + unsigned int hctx_idx) { struct blk_mq_tag_set *set = q->tag_set; int ret; @@ -609,15 +609,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) BLKDEV_DEFAULT_RQ); queue_for_each_hw_ctx(q, hctx, i) { - ret = blk_mq_sched_alloc_tags(q, hctx, i); + ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i); if (ret) - goto err_free_tags; + goto err_free_map_and_rqs; } if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { ret = blk_mq_init_sched_shared_sbitmap(q); if (ret) - goto err_free_tags; + goto err_free_map_and_rqs; } ret = e->ops.init_sched(q, e); @@ -645,7 +645,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) err_free_sbitmap: if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) blk_mq_exit_sched_shared_sbitmap(q); -err_free_tags: +err_free_map_and_rqs: blk_mq_sched_free_requests(q); blk_mq_sched_tags_teardown(q); q->elevator = NULL; From 1820f4f0a5e75633ff384167027bf45ed1b10234 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:31 +0800 Subject: [PATCH 029/205] blk-mq-sched: Rename blk_mq_sched_free_{requests -> rqs}() To be more concise and consistent in naming, rename blk_mq_sched_free_requests() -> blk_mq_sched_free_rqs(). 
Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-7-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq-sched.c | 6 +++--- block/blk-mq-sched.h | 2 +- block/blk.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 2590e77249909..0ed17547bfedd 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -403,7 +403,7 @@ void blk_cleanup_queue(struct request_queue *q) */ mutex_lock(&q->sysfs_lock); if (q->elevator) - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); mutex_unlock(&q->sysfs_lock); percpu_ref_exit(&q->q_usage_counter); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 644b6d554d725..bdbb6c31b4339 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -631,7 +631,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) ret = e->ops.init_hctx(hctx, i); if (ret) { eq = q->elevator; - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); blk_mq_exit_sched(q, eq); kobject_put(&eq->kobj); return ret; @@ -646,7 +646,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) blk_mq_exit_sched_shared_sbitmap(q); err_free_map_and_rqs: - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); blk_mq_sched_tags_teardown(q); q->elevator = NULL; return ret; @@ -656,7 +656,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) * called in either blk_queue_cleanup or elevator_switch, tagset * is required for freeing requests */ -void blk_mq_sched_free_requests(struct request_queue *q) +void blk_mq_sched_free_rqs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; int i; diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index f6a1ce9e31f38..e4d367e0b2a30 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -29,7 +29,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); -void blk_mq_sched_free_requests(struct request_queue *q); +void blk_mq_sched_free_rqs(struct request_queue *q); static inline bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, diff --git a/block/blk.h b/block/blk.h index fa90691cfdab1..8cdee4844d8a3 100644 --- a/block/blk.h +++ b/block/blk.h @@ -242,7 +242,7 @@ static inline void elevator_exit(struct request_queue *q, { lockdep_assert_held(&q->sysfs_lock); - blk_mq_sched_free_requests(q); + blk_mq_sched_free_rqs(q); __elevator_exit(q, e); } From f32e4eafaf29aa493bdea3123dac09ad135b599b Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:32 +0800 Subject: [PATCH 030/205] blk-mq: Pass driver tags to blk_mq_clear_rq_mapping() Function blk_mq_clear_rq_mapping() will be used for shared sbitmap tags in future, so pass a driver tags pointer instead of the tagset container and HW queue index. 
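In prototype terms this is (sketch, condensed from the diff below):

  -static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
  -             struct blk_mq_tags *tags, unsigned int hctx_idx)
  +static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
  +             struct blk_mq_tags *tags)

with blk_mq_free_rqs() looking up drv_tags = set->tags[hctx_idx] once and passing it down, and the clearing loop now bounded by drv_tags->nr_tags instead of set->queue_depth.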
Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-8-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index ca0e65117064e..0619298d82b4b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2311,10 +2311,9 @@ static size_t order_to_size(unsigned int order) } /* called before freeing request pool in @tags */ -static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set, - struct blk_mq_tags *tags, unsigned int hctx_idx) +static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, + struct blk_mq_tags *tags) { - struct blk_mq_tags *drv_tags = set->tags[hctx_idx]; struct page *page; unsigned long flags; @@ -2323,7 +2322,7 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set, unsigned long end = start + order_to_size(page->private); int i; - for (i = 0; i < set->queue_depth; i++) { + for (i = 0; i < drv_tags->nr_tags; i++) { struct request *rq = drv_tags->rqs[i]; unsigned long rq_addr = (unsigned long)rq; @@ -2347,8 +2346,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set, void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { + struct blk_mq_tags *drv_tags; struct page *page; + drv_tags = set->tags[hctx_idx]; + if (tags->static_rqs && set->ops->exit_request) { int i; @@ -2362,7 +2364,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, } } - blk_mq_clear_rq_mapping(set, tags, hctx_idx); + blk_mq_clear_rq_mapping(drv_tags, tags); while (!list_empty(&tags->page_list)) { page = list_first_entry(&tags->page_list, struct page, lru); From 4f245d5bf0f7432c881e22a77066160a6cba8e03 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:33 +0800 Subject: [PATCH 031/205] blk-mq: Don't clear driver tags own mapping Function blk_mq_clear_rq_mapping() is required to clear the sched tags mappings in driver tags rqs[]. But there is no need for a driver tags to clear its own mapping, so skip clearing the mapping in this scenario. Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-9-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/block/blk-mq.c b/block/blk-mq.c index 0619298d82b4b..38cffe362a29e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2317,6 +2317,10 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags, struct page *page; unsigned long flags; + /* There is no need to clear a driver tags own mapping */ + if (drv_tags == tags) + return; + list_for_each_entry(page, &tags->page_list, lru) { unsigned long start = (unsigned long)page_address(page); unsigned long end = start + order_to_size(page->private); From a7e7388dced47a10ca13ae95ca975ea2830f196b Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:34 +0800 Subject: [PATCH 032/205] blk-mq: Add blk_mq_tag_update_sched_shared_sbitmap() Put the functionality to update the sched shared sbitmap size in a common function. Since the same formula is always used to resize, and it can be got from the request queue argument, so just pass the request queue pointer. 
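In sketch form (the exact helper is in the diff below), the common function just applies the usual formula:

  void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q)
  {
          sbitmap_queue_resize(&q->sched_bitmap_tags,
                               q->nr_requests - q->tag_set->reserved_tags);
  }

so both blk_mq_init_sched_shared_sbitmap() and blk_mq_update_nr_requests() can call it instead of open-coding the resize.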
Signed-off-by: John Garry Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-10-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 3 +-- block/blk-mq-tag.c | 6 ++++++ block/blk-mq-tag.h | 1 + block/blk-mq.c | 3 +-- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index bdbb6c31b4339..6c15f6e98e2e8 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -575,8 +575,7 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) &queue->sched_breserved_tags; } - sbitmap_queue_resize(&queue->sched_bitmap_tags, - queue->nr_requests - set->reserved_tags); + blk_mq_tag_update_sched_shared_sbitmap(queue); return 0; } diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index ff5caeb825429..55b5a226dcc09 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -634,6 +634,12 @@ void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int s sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags); } +void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q) +{ + sbitmap_queue_resize(&q->sched_bitmap_tags, + q->nr_requests - q->tag_set->reserved_tags); +} + /** * blk_mq_unique_tag() - return a tag that is unique queue-wide * @rq: request for which to compute a unique tag diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index f0a0ee758a556..a9f5f18248198 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -50,6 +50,7 @@ extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, unsigned int depth, bool can_grow); extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size); +extern void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, diff --git a/block/blk-mq.c b/block/blk-mq.c index 38cffe362a29e..59bbfc935d874 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -3650,8 +3650,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) q->nr_requests = nr; if (blk_mq_is_sbitmap_shared(set->flags)) { if (q->elevator) - sbitmap_queue_resize(&q->sched_bitmap_tags, - nr - set->reserved_tags); + blk_mq_tag_update_sched_shared_sbitmap(q); else blk_mq_tag_resize_shared_sbitmap(set, nr); } From 63064be150e4b1ba1e4af594ef5aa81adf21a52d Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:35 +0800 Subject: [PATCH 033/205] blk-mq: Add blk_mq_alloc_map_and_rqs() Add a function to combine allocating tags and the associated requests, and factor out common patterns to use this new function. Some function only call blk_mq_alloc_map_and_rqs() now, but more functionality will be added later. 
Also make blk_mq_alloc_rq_map() and blk_mq_alloc_rqs() static since they are only used in blk-mq.c, and finally rename some functions for conciseness and consistency with other function names: - __blk_mq_alloc_map_and_{request -> rqs}() - blk_mq_alloc_{map_and_requests -> set_map_and_rqs}() Suggested-by: Ming Lei Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-11-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 15 +++-------- block/blk-mq-tag.c | 9 +------ block/blk-mq.c | 62 +++++++++++++++++++++++++------------------- block/blk-mq.h | 9 ++----- 4 files changed, 42 insertions(+), 53 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 6c15f6e98e2e8..d1b56bb9ac645 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -519,21 +519,12 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - struct blk_mq_tag_set *set = q->tag_set; - int ret; + hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx, + q->nr_requests); - hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, - set->reserved_tags, set->flags); if (!hctx->sched_tags) return -ENOMEM; - - ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); - if (ret) { - blk_mq_free_rq_map(hctx->sched_tags, set->flags); - hctx->sched_tags = NULL; - } - - return ret; + return 0; } /* called in queue's release handler, tagset has gone away */ diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 55b5a226dcc09..db99f1246795a 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -592,7 +592,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth > tags->nr_tags) { struct blk_mq_tag_set *set = hctx->queue->tag_set; struct blk_mq_tags *new; - bool ret; if (!can_grow) return -EINVAL; @@ -604,15 +603,9 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth > MAX_SCHED_RQ) return -EINVAL; - new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, - tags->nr_reserved_tags, set->flags); + new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth); if (!new) return -ENOMEM; - ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); - if (ret) { - blk_mq_free_rq_map(new, set->flags); - return -ENOMEM; - } blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); blk_mq_free_rq_map(*tagsptr, set->flags); diff --git a/block/blk-mq.c b/block/blk-mq.c index 59bbfc935d874..ab3a831baa9d0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2392,11 +2392,11 @@ void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags) blk_mq_free_tags(tags, flags); } -struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, - unsigned int hctx_idx, - unsigned int nr_tags, - unsigned int reserved_tags, - unsigned int flags) +static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, + unsigned int hctx_idx, + unsigned int nr_tags, + unsigned int reserved_tags, + unsigned int flags) { struct blk_mq_tags *tags; int node; @@ -2444,8 +2444,9 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, return 0; } -int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, - unsigned int hctx_idx, unsigned int depth) +static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, + unsigned int hctx_idx, unsigned int depth) { unsigned int i, j, entries_per_page, max_order = 4; size_t rq_size, left; 
@@ -2856,25 +2857,34 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, } } -static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set, - int hctx_idx) +struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, + unsigned int hctx_idx, + unsigned int depth) { - unsigned int flags = set->flags; - int ret = 0; + struct blk_mq_tags *tags; + int ret; - set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx, - set->queue_depth, set->reserved_tags, flags); - if (!set->tags[hctx_idx]) - return false; + tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags, + set->flags); + if (!tags) + return NULL; - ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx, - set->queue_depth); - if (!ret) - return true; + ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); + if (ret) { + blk_mq_free_rq_map(tags, set->flags); + return NULL; + } - blk_mq_free_rq_map(set->tags[hctx_idx], flags); - set->tags[hctx_idx] = NULL; - return false; + return tags; +} + +static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, + int hctx_idx) +{ + set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, + set->queue_depth); + + return set->tags[hctx_idx]; } static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, @@ -2919,7 +2929,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx_idx = set->map[j].mq_map[i]; /* unmapped hw queue can be remapped after CPU topo changed */ if (!set->tags[hctx_idx] && - !__blk_mq_alloc_map_and_request(set, hctx_idx)) { + !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) { /* * If tags initialization fail for some hctx, * that hctx won't be brought online. In this @@ -3352,7 +3362,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) int i; for (i = 0; i < set->nr_hw_queues; i++) { - if (!__blk_mq_alloc_map_and_request(set, i)) + if (!__blk_mq_alloc_map_and_rqs(set, i)) goto out_unwind; cond_resched(); } @@ -3371,7 +3381,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) * may reduce the depth asked for, if memory is tight. set->queue_depth * will be updated to reflect the allocated depth. 
*/ -static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set) +static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set) { unsigned int depth; int err; @@ -3537,7 +3547,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (ret) goto out_free_mq_map; - ret = blk_mq_alloc_map_and_requests(set); + ret = blk_mq_alloc_set_map_and_rqs(set); if (ret) goto out_free_mq_map; diff --git a/block/blk-mq.h b/block/blk-mq.h index d08779f77a265..83585a344568e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -55,13 +55,8 @@ void blk_mq_put_rq_ref(struct request *rq); void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx); void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags); -struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, - unsigned int hctx_idx, - unsigned int nr_tags, - unsigned int reserved_tags, - unsigned int flags); -int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, - unsigned int hctx_idx, unsigned int depth); +struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, + unsigned int hctx_idx, unsigned int depth); /* * Internal helpers for request insertion into sw queues From 645db34e50501aac141713fb47a315e5202ff890 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:36 +0800 Subject: [PATCH 034/205] blk-mq: Refactor and rename blk_mq_free_map_and_{requests->rqs}() Refactor blk_mq_free_map_and_requests() such that it can be used at many sites at which the tag map and rqs are freed. Also rename to blk_mq_free_map_and_rqs(), which is shorter and matches the alloc equivalent. Suggested-by: Ming Lei Signed-off-by: John Garry Reviewed-by: Hannes Reinecke Link: https://lore.kernel.org/r/1633429419-228500-12-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 3 +-- block/blk-mq.c | 40 ++++++++++++++++++++++++---------------- block/blk-mq.h | 4 +++- 3 files changed, 28 insertions(+), 19 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index db99f1246795a..a0ecc6d88f84a 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -607,8 +607,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (!new) return -ENOMEM; - blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); - blk_mq_free_rq_map(*tagsptr, set->flags); + blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num); *tagsptr = new; } else { /* diff --git a/block/blk-mq.c b/block/blk-mq.c index ab3a831baa9d0..c27591a04c4f0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2887,15 +2887,15 @@ static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, return set->tags[hctx_idx]; } -static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set, - unsigned int hctx_idx) +void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, + unsigned int hctx_idx) { unsigned int flags = set->flags; - if (set->tags && set->tags[hctx_idx]) { - blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx); - blk_mq_free_rq_map(set->tags[hctx_idx], flags); - set->tags[hctx_idx] = NULL; + if (tags) { + blk_mq_free_rqs(set, tags, hctx_idx); + blk_mq_free_rq_map(tags, flags); } } @@ -2976,8 +2976,10 @@ static void blk_mq_map_swqueue(struct request_queue *q) * fallback in case of a new remap fails * allocation */ - if (i && set->tags[i]) - blk_mq_free_map_and_requests(set, i); + if (i && set->tags[i]) { + blk_mq_free_map_and_rqs(set, set->tags[i], i); + set->tags[i] = NULL; + } hctx->tags = NULL; continue; @@ -3273,8 
+3275,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx = hctxs[j]; if (hctx) { - if (hctx->tags) - blk_mq_free_map_and_requests(set, j); + blk_mq_free_map_and_rqs(set, set->tags[j], j); + set->tags[j] = NULL; blk_mq_exit_hctx(q, set, hctx, j); hctxs[j] = NULL; } @@ -3370,8 +3372,10 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) return 0; out_unwind: - while (--i >= 0) - blk_mq_free_map_and_requests(set, i); + while (--i >= 0) { + blk_mq_free_map_and_rqs(set, set->tags[i], i); + set->tags[i] = NULL; + } return -ENOMEM; } @@ -3566,8 +3570,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) return 0; out_free_mq_rq_maps: - for (i = 0; i < set->nr_hw_queues; i++) - blk_mq_free_map_and_requests(set, i); + for (i = 0; i < set->nr_hw_queues; i++) { + blk_mq_free_map_and_rqs(set, set->tags[i], i); + set->tags[i] = NULL; + } out_free_mq_map: for (i = 0; i < set->nr_maps; i++) { kfree(set->map[i].mq_map); @@ -3599,8 +3605,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) { int i, j; - for (i = 0; i < set->nr_hw_queues; i++) - blk_mq_free_map_and_requests(set, i); + for (i = 0; i < set->nr_hw_queues; i++) { + blk_mq_free_map_and_rqs(set, set->tags[i], i); + set->tags[i] = NULL; + } if (blk_mq_is_sbitmap_shared(set->flags)) blk_mq_exit_shared_sbitmap(set); diff --git a/block/blk-mq.h b/block/blk-mq.h index 83585a344568e..bcb0ca89d37a6 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -57,7 +57,9 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags); struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int depth); - +void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, + struct blk_mq_tags *tags, + unsigned int hctx_idx); /* * Internal helpers for request insertion into sw queues */ From e155b0c238b20f0a866f4334d292656665836c8a Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:37 +0800 Subject: [PATCH 035/205] blk-mq: Use shared tags for shared sbitmap support Currently we use separate sbitmap pairs and active_queues atomic_t for shared sbitmap support. However a full sets of static requests are used per HW queue, which is quite wasteful, considering that the total number of requests usable at any given time across all HW queues is limited by the shared sbitmap depth. As such, it is considerably more memory efficient in the case of shared sbitmap to allocate a set of static rqs per tag set or request queue, and not per HW queue. So replace the sbitmap pairs and active_queues atomic_t with a shared tags per tagset and request queue, which will hold a set of shared static rqs. Since there is now no valid HW queue index to be passed to the blk_mq_ops .init and .exit_request callbacks, pass an invalid index token. This changes the semantics of the APIs, such that the callback would need to validate the HW queue index before using it. Currently no user of shared sbitmap actually uses the HW queue index (as would be expected). 
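For a driver that did want to use the index, the changed contract would look roughly like the following hypothetical .init_request callback (not taken from any in-tree driver, shown only to illustrate the new semantics):

  static int foo_init_request(struct blk_mq_tag_set *set, struct request *rq,
                              unsigned int hctx_idx, unsigned int numa_node)
  {
          /*
           * With shared tags the static requests are allocated once for the
           * whole set, so this callback runs with hctx_idx set to the invalid
           * token BLK_MQ_NO_HCTX_IDX and must not use it to index per-hctx
           * data.
           */
          if (hctx_idx == BLK_MQ_NO_HCTX_IDX)
                  return 0;

          /* per-hctx initialisation would go here */
          return 0;
  }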
Signed-off-by: John Garry Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/1633429419-228500-13-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 82 ++++++++++++++++----------------- block/blk-mq-tag.c | 63 ++++++++----------------- block/blk-mq-tag.h | 6 +-- block/blk-mq.c | 101 +++++++++++++++++++++-------------------- block/blk-mq.h | 7 ++- include/linux/blk-mq.h | 15 +++--- include/linux/blkdev.h | 3 +- 7 files changed, 125 insertions(+), 152 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index d1b56bb9ac645..428da4949d803 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -519,6 +519,11 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { + hctx->sched_tags = q->shared_sbitmap_tags; + return 0; + } + hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx, q->nr_requests); @@ -527,61 +532,54 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, return 0; } +static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) +{ + blk_mq_free_rq_map(queue->shared_sbitmap_tags); + queue->shared_sbitmap_tags = NULL; +} + /* called in queue's release handler, tagset has gone away */ -static void blk_mq_sched_tags_teardown(struct request_queue *q) +static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags) { struct blk_mq_hw_ctx *hctx; int i; queue_for_each_hw_ctx(q, hctx, i) { if (hctx->sched_tags) { - blk_mq_free_rq_map(hctx->sched_tags, hctx->flags); + if (!blk_mq_is_sbitmap_shared(q->tag_set->flags)) + blk_mq_free_rq_map(hctx->sched_tags); hctx->sched_tags = NULL; } } + + if (blk_mq_is_sbitmap_shared(flags)) + blk_mq_exit_sched_shared_sbitmap(q); } static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) { struct blk_mq_tag_set *set = queue->tag_set; - int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); - struct blk_mq_hw_ctx *hctx; - int ret, i; /* * Set initial depth at max so that we don't need to reallocate for * updating nr_requests. 
*/ - ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags, - &queue->sched_breserved_tags, - MAX_SCHED_RQ, set->reserved_tags, - set->numa_node, alloc_policy); - if (ret) - return ret; - - queue_for_each_hw_ctx(queue, hctx, i) { - hctx->sched_tags->bitmap_tags = - &queue->sched_bitmap_tags; - hctx->sched_tags->breserved_tags = - &queue->sched_breserved_tags; - } + queue->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + BLK_MQ_NO_HCTX_IDX, + MAX_SCHED_RQ); + if (!queue->shared_sbitmap_tags) + return -ENOMEM; blk_mq_tag_update_sched_shared_sbitmap(queue); return 0; } -static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) -{ - sbitmap_queue_free(&queue->sched_bitmap_tags); - sbitmap_queue_free(&queue->sched_breserved_tags); -} - int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) { + unsigned int i, flags = q->tag_set->flags; struct blk_mq_hw_ctx *hctx; struct elevator_queue *eq; - unsigned int i; int ret; if (!e) { @@ -598,21 +596,21 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, BLKDEV_DEFAULT_RQ); - queue_for_each_hw_ctx(q, hctx, i) { - ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i); + if (blk_mq_is_sbitmap_shared(flags)) { + ret = blk_mq_init_sched_shared_sbitmap(q); if (ret) - goto err_free_map_and_rqs; + return ret; } - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { - ret = blk_mq_init_sched_shared_sbitmap(q); + queue_for_each_hw_ctx(q, hctx, i) { + ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i); if (ret) goto err_free_map_and_rqs; } ret = e->ops.init_sched(q, e); if (ret) - goto err_free_sbitmap; + goto err_free_map_and_rqs; blk_mq_debugfs_register_sched(q); @@ -632,12 +630,10 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) return 0; -err_free_sbitmap: - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) - blk_mq_exit_sched_shared_sbitmap(q); err_free_map_and_rqs: blk_mq_sched_free_rqs(q); - blk_mq_sched_tags_teardown(q); + blk_mq_sched_tags_teardown(q, flags); + q->elevator = NULL; return ret; } @@ -651,9 +647,15 @@ void blk_mq_sched_free_rqs(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) { - if (hctx->sched_tags) - blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); + if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { + blk_mq_free_rqs(q->tag_set, q->shared_sbitmap_tags, + BLK_MQ_NO_HCTX_IDX); + } else { + queue_for_each_hw_ctx(q, hctx, i) { + if (hctx->sched_tags) + blk_mq_free_rqs(q->tag_set, + hctx->sched_tags, i); + } } } @@ -674,8 +676,6 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) blk_mq_debugfs_unregister_sched(q); if (e->type->ops.exit_sched) e->type->ops.exit_sched(e); - blk_mq_sched_tags_teardown(q); - if (blk_mq_is_sbitmap_shared(flags)) - blk_mq_exit_sched_shared_sbitmap(q); + blk_mq_sched_tags_teardown(q, flags); q->elevator = NULL; } diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index a0ecc6d88f84a..0e10e8404bf0e 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -26,11 +26,10 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { if (blk_mq_is_sbitmap_shared(hctx->flags)) { struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) && !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) - atomic_inc(&set->active_queues_shared_sbitmap); + atomic_inc(&hctx->tags->active_queues); } else { if (!test_bit(BLK_MQ_S_TAG_ACTIVE, 
&hctx->state) && !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) @@ -57,14 +56,14 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) { struct blk_mq_tags *tags = hctx->tags; - struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; if (blk_mq_is_sbitmap_shared(hctx->flags)) { + struct request_queue *q = hctx->queue; + if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return; - atomic_dec(&set->active_queues_shared_sbitmap); + atomic_dec(&tags->active_queues); } else { if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return; @@ -510,38 +509,10 @@ static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, return 0; } -int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set) -{ - int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags); - int i, ret; - - ret = blk_mq_init_bitmaps(&set->__bitmap_tags, &set->__breserved_tags, - set->queue_depth, set->reserved_tags, - set->numa_node, alloc_policy); - if (ret) - return ret; - - for (i = 0; i < set->nr_hw_queues; i++) { - struct blk_mq_tags *tags = set->tags[i]; - - tags->bitmap_tags = &set->__bitmap_tags; - tags->breserved_tags = &set->__breserved_tags; - } - - return 0; -} - -void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set) -{ - sbitmap_queue_free(&set->__bitmap_tags); - sbitmap_queue_free(&set->__breserved_tags); -} - struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, - int node, unsigned int flags) + int node, int alloc_policy) { - int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags); struct blk_mq_tags *tags; if (total_tags > BLK_MQ_TAG_MAX) { @@ -557,9 +528,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, tags->nr_reserved_tags = reserved_tags; spin_lock_init(&tags->lock); - if (blk_mq_is_sbitmap_shared(flags)) - return tags; - if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) { kfree(tags); return NULL; @@ -567,12 +535,10 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, return tags; } -void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags) +void blk_mq_free_tags(struct blk_mq_tags *tags) { - if (!blk_mq_is_sbitmap_shared(flags)) { - sbitmap_queue_free(tags->bitmap_tags); - sbitmap_queue_free(tags->breserved_tags); - } + sbitmap_queue_free(tags->bitmap_tags); + sbitmap_queue_free(tags->breserved_tags); kfree(tags); } @@ -603,6 +569,13 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, if (tdepth > MAX_SCHED_RQ) return -EINVAL; + /* + * Only the sbitmap needs resizing since we allocated the max + * initially. 
+ */ + if (blk_mq_is_sbitmap_shared(set->flags)) + return 0; + new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth); if (!new) return -ENOMEM; @@ -623,12 +596,14 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size) { - sbitmap_queue_resize(&set->__bitmap_tags, size - set->reserved_tags); + struct blk_mq_tags *tags = set->shared_sbitmap_tags; + + sbitmap_queue_resize(&tags->__bitmap_tags, size - set->reserved_tags); } void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q) { - sbitmap_queue_resize(&q->sched_bitmap_tags, + sbitmap_queue_resize(q->shared_sbitmap_tags->bitmap_tags, q->nr_requests - q->tag_set->reserved_tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index a9f5f18248198..e43f7589b96c0 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -32,16 +32,14 @@ struct blk_mq_tags { extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, - int node, unsigned int flags); -extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags); + int node, int alloc_policy); +extern void blk_mq_free_tags(struct blk_mq_tags *tags); extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, struct sbitmap_queue *breserved_tags, unsigned int queue_depth, unsigned int reserved, int node, int alloc_policy); -extern int blk_mq_init_shared_sbitmap(struct blk_mq_tag_set *set); -extern void blk_mq_exit_shared_sbitmap(struct blk_mq_tag_set *set); extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); diff --git a/block/blk-mq.c b/block/blk-mq.c index c27591a04c4f0..5537375f6400e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2353,7 +2353,10 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, struct blk_mq_tags *drv_tags; struct page *page; - drv_tags = set->tags[hctx_idx]; + if (blk_mq_is_sbitmap_shared(set->flags)) + drv_tags = set->shared_sbitmap_tags; + else + drv_tags = set->tags[hctx_idx]; if (tags->static_rqs && set->ops->exit_request) { int i; @@ -2382,21 +2385,20 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, } } -void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags) +void blk_mq_free_rq_map(struct blk_mq_tags *tags) { kfree(tags->rqs); tags->rqs = NULL; kfree(tags->static_rqs); tags->static_rqs = NULL; - blk_mq_free_tags(tags, flags); + blk_mq_free_tags(tags); } static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int nr_tags, - unsigned int reserved_tags, - unsigned int flags) + unsigned int reserved_tags) { struct blk_mq_tags *tags; int node; @@ -2405,7 +2407,8 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, if (node == NUMA_NO_NODE) node = set->numa_node; - tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags); + tags = blk_mq_init_tags(nr_tags, reserved_tags, node, + BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); if (!tags) return NULL; @@ -2413,7 +2416,7 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node); if (!tags->rqs) { - blk_mq_free_tags(tags, flags); + blk_mq_free_tags(tags); return NULL; } @@ -2422,7 +2425,7 @@ static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, node); if (!tags->static_rqs) { kfree(tags->rqs); - blk_mq_free_tags(tags, 
flags); + blk_mq_free_tags(tags); return NULL; } @@ -2864,14 +2867,13 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags; int ret; - tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags, - set->flags); + tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags); if (!tags) return NULL; ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth); if (ret) { - blk_mq_free_rq_map(tags, set->flags); + blk_mq_free_rq_map(tags); return NULL; } @@ -2881,6 +2883,12 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, int hctx_idx) { + if (blk_mq_is_sbitmap_shared(set->flags)) { + set->tags[hctx_idx] = set->shared_sbitmap_tags; + + return true; + } + set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx, set->queue_depth); @@ -2891,14 +2899,21 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx) { - unsigned int flags = set->flags; - if (tags) { blk_mq_free_rqs(set, tags, hctx_idx); - blk_mq_free_rq_map(tags, flags); + blk_mq_free_rq_map(tags); } } +static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, + unsigned int hctx_idx) +{ + if (!blk_mq_is_sbitmap_shared(set->flags)) + blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); + + set->tags[hctx_idx] = NULL; +} + static void blk_mq_map_swqueue(struct request_queue *q) { unsigned int i, j, hctx_idx; @@ -2976,10 +2991,8 @@ static void blk_mq_map_swqueue(struct request_queue *q) * fallback in case of a new remap fails * allocation */ - if (i && set->tags[i]) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; - } + if (i) + __blk_mq_free_map_and_rqs(set, i); hctx->tags = NULL; continue; @@ -3275,8 +3288,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, struct blk_mq_hw_ctx *hctx = hctxs[j]; if (hctx) { - blk_mq_free_map_and_rqs(set, set->tags[j], j); - set->tags[j] = NULL; + __blk_mq_free_map_and_rqs(set, j); blk_mq_exit_hctx(q, set, hctx, j); hctxs[j] = NULL; } @@ -3363,6 +3375,14 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; + if (blk_mq_is_sbitmap_shared(set->flags)) { + set->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + BLK_MQ_NO_HCTX_IDX, + set->queue_depth); + if (!set->shared_sbitmap_tags) + return -ENOMEM; + } + for (i = 0; i < set->nr_hw_queues; i++) { if (!__blk_mq_alloc_map_and_rqs(set, i)) goto out_unwind; @@ -3372,9 +3392,12 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) return 0; out_unwind: - while (--i >= 0) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; + while (--i >= 0) + __blk_mq_free_map_and_rqs(set, i); + + if (blk_mq_is_sbitmap_shared(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + BLK_MQ_NO_HCTX_IDX); } return -ENOMEM; @@ -3555,25 +3578,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) if (ret) goto out_free_mq_map; - if (blk_mq_is_sbitmap_shared(set->flags)) { - atomic_set(&set->active_queues_shared_sbitmap, 0); - - if (blk_mq_init_shared_sbitmap(set)) { - ret = -ENOMEM; - goto out_free_mq_rq_maps; - } - } - mutex_init(&set->tag_list_lock); INIT_LIST_HEAD(&set->tag_list); return 0; -out_free_mq_rq_maps: - for (i = 0; i < set->nr_hw_queues; i++) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; - } out_free_mq_map: for (i = 0; i < set->nr_maps; i++) { kfree(set->map[i].mq_map); @@ -3605,13 +3614,13 @@ void 
blk_mq_free_tag_set(struct blk_mq_tag_set *set) { int i, j; - for (i = 0; i < set->nr_hw_queues; i++) { - blk_mq_free_map_and_rqs(set, set->tags[i], i); - set->tags[i] = NULL; - } + for (i = 0; i < set->nr_hw_queues; i++) + __blk_mq_free_map_and_rqs(set, i); - if (blk_mq_is_sbitmap_shared(set->flags)) - blk_mq_exit_shared_sbitmap(set); + if (blk_mq_is_sbitmap_shared(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + BLK_MQ_NO_HCTX_IDX); + } for (j = 0; j < set->nr_maps; j++) { kfree(set->map[j].mq_map); @@ -3649,12 +3658,6 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) if (hctx->sched_tags) { ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, nr, true); - if (blk_mq_is_sbitmap_shared(set->flags)) { - hctx->sched_tags->bitmap_tags = - &q->sched_bitmap_tags; - hctx->sched_tags->breserved_tags = - &q->sched_breserved_tags; - } } else { ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, false); diff --git a/block/blk-mq.h b/block/blk-mq.h index bcb0ca89d37a6..8824ae03215a1 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -54,7 +54,7 @@ void blk_mq_put_rq_ref(struct request *rq); */ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, unsigned int hctx_idx); -void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags); +void blk_mq_free_rq_map(struct blk_mq_tags *tags); struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx, unsigned int depth); void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, @@ -330,17 +330,16 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, if (blk_mq_is_sbitmap_shared(hctx->flags)) { struct request_queue *q = hctx->queue; - struct blk_mq_tag_set *set = q->tag_set; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return true; - users = atomic_read(&set->active_queues_shared_sbitmap); } else { if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return true; - users = atomic_read(&hctx->tags->active_queues); } + users = atomic_read(&hctx->tags->active_queues); + if (!users) return true; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 31cc41dfef6b4..faa20a19bfcc4 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -440,13 +440,11 @@ enum hctx_type { * @flags: Zero or more BLK_MQ_F_* flags. * @driver_data: Pointer to data owned by the block driver that created this * tag set. - * @active_queues_shared_sbitmap: - * number of active request queues per tag set. - * @__bitmap_tags: A shared tags sbitmap, used over all hctx's - * @__breserved_tags: - * A shared reserved tags sbitmap, used over all hctx's * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues * elements. + * @shared_sbitmap_tags: + * Shared sbitmap set of tags. Has @nr_hw_queues elements. If + * set, shared by all @tags. * @tag_list_lock: Serializes tag_list accesses. * @tag_list: List of the request queues that use this tag set. See also * request_queue.tag_set_list. 
@@ -463,12 +461,11 @@ struct blk_mq_tag_set { unsigned int timeout; unsigned int flags; void *driver_data; - atomic_t active_queues_shared_sbitmap; - struct sbitmap_queue __bitmap_tags; - struct sbitmap_queue __breserved_tags; struct blk_mq_tags **tags; + struct blk_mq_tags *shared_sbitmap_tags; + struct mutex tag_list_lock; struct list_head tag_list; }; @@ -640,6 +637,8 @@ enum { ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ << BLK_MQ_F_ALLOC_POLICY_START_BIT) +#define BLK_MQ_NO_HCTX_IDX (-1U) + struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, struct lock_class_key *lkclass); #define blk_mq_alloc_disk(set, queuedata) \ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0e960d74615ec..cf92c13eb80ed 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -238,8 +238,7 @@ struct request_queue { atomic_t nr_active_requests_shared_sbitmap; - struct sbitmap_queue sched_bitmap_tags; - struct sbitmap_queue sched_breserved_tags; + struct blk_mq_tags *shared_sbitmap_tags; struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP From ae0f1a732f4a5db284e2af02c305255734efd19c Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:38 +0800 Subject: [PATCH 036/205] blk-mq: Stop using pointers for blk_mq_tags bitmap tags Now that we use shared tags for shared sbitmap support, we don't require the tags sbitmap pointers, so drop them. This essentially reverts commit 222a5ae03cdd ("blk-mq: Use pointers for blk_mq_tags bitmap tags"). Function blk_mq_init_bitmap_tags() is removed also, since it would be only a wrappper for blk_mq_init_bitmaps(). Reviewed-by: Ming Lei Reviewed-by: Hannes Reinecke Signed-off-by: John Garry Link: https://lore.kernel.org/r/1633429419-228500-14-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/bfq-iosched.c | 4 +-- block/blk-mq-debugfs.c | 8 +++--- block/blk-mq-tag.c | 56 +++++++++++++++--------------------------- block/blk-mq-tag.h | 7 ++---- block/blk-mq.c | 8 +++--- block/kyber-iosched.c | 4 +-- block/mq-deadline.c | 2 +- 7 files changed, 35 insertions(+), 54 deletions(-) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index b5ef23f63d518..fec18118dc309 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -6884,8 +6884,8 @@ static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) struct blk_mq_tags *tags = hctx->sched_tags; unsigned int min_shallow; - min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags); - sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow); + min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); } static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 3b38d15723de1..3daea160d670e 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -453,11 +453,11 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m, atomic_read(&tags->active_queues)); seq_puts(m, "\nbitmap_tags:\n"); - sbitmap_queue_show(tags->bitmap_tags, m); + sbitmap_queue_show(&tags->bitmap_tags, m); if (tags->nr_reserved_tags) { seq_puts(m, "\nbreserved_tags:\n"); - sbitmap_queue_show(tags->breserved_tags, m); + sbitmap_queue_show(&tags->breserved_tags, m); } } @@ -488,7 +488,7 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m) if (res) goto out; if (hctx->tags) - sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m); + sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m); 
mutex_unlock(&q->sysfs_lock); out: @@ -522,7 +522,7 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m) if (res) goto out; if (hctx->sched_tags) - sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m); + sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m); mutex_unlock(&q->sysfs_lock); out: diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 0e10e8404bf0e..211068a5f6769 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -44,9 +44,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) */ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) { - sbitmap_queue_wake_all(tags->bitmap_tags); + sbitmap_queue_wake_all(&tags->bitmap_tags); if (include_reserve) - sbitmap_queue_wake_all(tags->breserved_tags); + sbitmap_queue_wake_all(&tags->breserved_tags); } /* @@ -100,10 +100,10 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) WARN_ON_ONCE(1); return BLK_MQ_NO_TAG; } - bt = tags->breserved_tags; + bt = &tags->breserved_tags; tag_offset = 0; } else { - bt = tags->bitmap_tags; + bt = &tags->bitmap_tags; tag_offset = tags->nr_reserved_tags; } @@ -149,9 +149,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) data->ctx); tags = blk_mq_tags_from_data(data); if (data->flags & BLK_MQ_REQ_RESERVED) - bt = tags->breserved_tags; + bt = &tags->breserved_tags; else - bt = tags->bitmap_tags; + bt = &tags->bitmap_tags; /* * If destination hw queue is changed, fake wake up on @@ -185,10 +185,10 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, const int real_tag = tag - tags->nr_reserved_tags; BUG_ON(real_tag >= tags->nr_tags); - sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu); + sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu); } else { BUG_ON(tag >= tags->nr_reserved_tags); - sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu); + sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu); } } @@ -339,9 +339,9 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags, WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED); if (tags->nr_reserved_tags) - bt_tags_for_each(tags, tags->breserved_tags, fn, priv, + bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, flags | BT_TAG_ITER_RESERVED); - bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags); + bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags); } /** @@ -458,8 +458,8 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, continue; if (tags->nr_reserved_tags) - bt_for_each(hctx, tags->breserved_tags, fn, priv, true); - bt_for_each(hctx, tags->bitmap_tags, fn, priv, false); + bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); + bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); } blk_queue_exit(q); } @@ -491,24 +491,6 @@ int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, return -ENOMEM; } -static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags, - int node, int alloc_policy) -{ - int ret; - - ret = blk_mq_init_bitmaps(&tags->__bitmap_tags, - &tags->__breserved_tags, - tags->nr_tags, tags->nr_reserved_tags, - node, alloc_policy); - if (ret) - return ret; - - tags->bitmap_tags = &tags->__bitmap_tags; - tags->breserved_tags = &tags->__breserved_tags; - - return 0; -} - struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, unsigned int reserved_tags, int node, int alloc_policy) @@ -528,7 +510,9 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, tags->nr_reserved_tags = reserved_tags; spin_lock_init(&tags->lock); - if 
(blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) { + if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags, + total_tags, reserved_tags, node, + alloc_policy) < 0) { kfree(tags); return NULL; } @@ -537,8 +521,8 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags, void blk_mq_free_tags(struct blk_mq_tags *tags) { - sbitmap_queue_free(tags->bitmap_tags); - sbitmap_queue_free(tags->breserved_tags); + sbitmap_queue_free(&tags->bitmap_tags); + sbitmap_queue_free(&tags->breserved_tags); kfree(tags); } @@ -587,7 +571,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * Don't need (or can't) update reserved tags here, they * remain static and should never need resizing. */ - sbitmap_queue_resize(tags->bitmap_tags, + sbitmap_queue_resize(&tags->bitmap_tags, tdepth - tags->nr_reserved_tags); } @@ -598,12 +582,12 @@ void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int s { struct blk_mq_tags *tags = set->shared_sbitmap_tags; - sbitmap_queue_resize(&tags->__bitmap_tags, size - set->reserved_tags); + sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags); } void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q) { - sbitmap_queue_resize(q->shared_sbitmap_tags->bitmap_tags, + sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags, q->nr_requests - q->tag_set->reserved_tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index e43f7589b96c0..1052d69147ba7 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -13,11 +13,8 @@ struct blk_mq_tags { atomic_t active_queues; - struct sbitmap_queue *bitmap_tags; - struct sbitmap_queue *breserved_tags; - - struct sbitmap_queue __bitmap_tags; - struct sbitmap_queue __breserved_tags; + struct sbitmap_queue bitmap_tags; + struct sbitmap_queue breserved_tags; struct request **rqs; struct request **static_rqs; diff --git a/block/blk-mq.c b/block/blk-mq.c index 5537375f6400e..e026e5ebe2c9e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1071,14 +1071,14 @@ static inline unsigned int queued_to_index(unsigned int queued) static bool __blk_mq_get_driver_tag(struct request *rq) { - struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags; + struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; int tag; blk_mq_tag_busy(rq->mq_hctx); if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { - bt = rq->mq_hctx->tags->breserved_tags; + bt = &rq->mq_hctx->tags->breserved_tags; tag_offset = 0; } else { if (!hctx_may_queue(rq->mq_hctx, bt)) @@ -1121,7 +1121,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, struct sbitmap_queue *sbq; list_del_init(&wait->entry); - sbq = hctx->tags->bitmap_tags; + sbq = &hctx->tags->bitmap_tags; atomic_dec(&sbq->ws_active); } spin_unlock(&hctx->dispatch_wait_lock); @@ -1139,7 +1139,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct sbitmap_queue *sbq = hctx->tags->bitmap_tags; + struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags; struct wait_queue_head *wq; wait_queue_entry_t *wait; bool ret; diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 53dafc4bcd72f..fdd74a4df56f1 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -453,11 +453,11 @@ static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx) { struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data; struct 
blk_mq_tags *tags = hctx->sched_tags; - unsigned int shift = tags->bitmap_tags->sb.shift; + unsigned int shift = tags->bitmap_tags.sb.shift; kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U; - sbitmap_queue_min_shallow_depth(tags->bitmap_tags, kqd->async_depth); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth); } static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 2001ac186f5cd..85d919bf60c78 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -567,7 +567,7 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) dd->async_depth = max(1UL, 3 * q->nr_requests / 4); - sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth); + sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth); } /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */ From 079a2e3e862548087041a1873bbffceb41a72a33 Mon Sep 17 00:00:00 2001 From: John Garry Date: Tue, 5 Oct 2021 18:23:39 +0800 Subject: [PATCH 037/205] blk-mq: Change shared sbitmap naming to shared tags Now that shared sbitmap support really means shared tags, rename symbols to match that. Signed-off-by: John Garry Link: https://lore.kernel.org/r/1633429419-228500-15-git-send-email-john.garry@huawei.com Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-mq-sched.c | 32 ++++++++++++++++---------------- block/blk-mq-tag.c | 18 +++++++++--------- block/blk-mq-tag.h | 4 ++-- block/blk-mq.c | 32 ++++++++++++++++---------------- block/blk-mq.h | 16 ++++++++-------- block/elevator.c | 2 +- include/linux/blk-mq.h | 8 ++++---- include/linux/blkdev.h | 4 ++-- 9 files changed, 59 insertions(+), 59 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 0ed17547bfedd..b08aed26e6bb6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -554,7 +554,7 @@ struct request_queue *blk_alloc_queue(int node_id) q->node = node_id; - atomic_set(&q->nr_active_requests_shared_sbitmap, 0); + atomic_set(&q->nr_active_requests_shared_tags, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); INIT_WORK(&q->timeout_work, blk_timeout_work); diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 428da4949d803..27312da7d638a 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -519,8 +519,8 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { - hctx->sched_tags = q->shared_sbitmap_tags; + if (blk_mq_is_shared_tags(q->tag_set->flags)) { + hctx->sched_tags = q->sched_shared_tags; return 0; } @@ -532,10 +532,10 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q, return 0; } -static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue) +static void blk_mq_exit_sched_shared_tags(struct request_queue *queue) { - blk_mq_free_rq_map(queue->shared_sbitmap_tags); - queue->shared_sbitmap_tags = NULL; + blk_mq_free_rq_map(queue->sched_shared_tags); + queue->sched_shared_tags = NULL; } /* called in queue's release handler, tagset has gone away */ @@ -546,17 +546,17 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int fla queue_for_each_hw_ctx(q, hctx, i) { if (hctx->sched_tags) { - if (!blk_mq_is_sbitmap_shared(q->tag_set->flags)) + if (!blk_mq_is_shared_tags(q->tag_set->flags)) blk_mq_free_rq_map(hctx->sched_tags); hctx->sched_tags = NULL; } } - if (blk_mq_is_sbitmap_shared(flags)) - 
blk_mq_exit_sched_shared_sbitmap(q); + if (blk_mq_is_shared_tags(flags)) + blk_mq_exit_sched_shared_tags(q); } -static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) +static int blk_mq_init_sched_shared_tags(struct request_queue *queue) { struct blk_mq_tag_set *set = queue->tag_set; @@ -564,13 +564,13 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue) * Set initial depth at max so that we don't need to reallocate for * updating nr_requests. */ - queue->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, MAX_SCHED_RQ); - if (!queue->shared_sbitmap_tags) + if (!queue->sched_shared_tags) return -ENOMEM; - blk_mq_tag_update_sched_shared_sbitmap(queue); + blk_mq_tag_update_sched_shared_tags(queue); return 0; } @@ -596,8 +596,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, BLKDEV_DEFAULT_RQ); - if (blk_mq_is_sbitmap_shared(flags)) { - ret = blk_mq_init_sched_shared_sbitmap(q); + if (blk_mq_is_shared_tags(flags)) { + ret = blk_mq_init_sched_shared_tags(q); if (ret) return ret; } @@ -647,8 +647,8 @@ void blk_mq_sched_free_rqs(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; - if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) { - blk_mq_free_rqs(q->tag_set, q->shared_sbitmap_tags, + if (blk_mq_is_shared_tags(q->tag_set->flags)) { + blk_mq_free_rqs(q->tag_set, q->sched_shared_tags, BLK_MQ_NO_HCTX_IDX); } else { queue_for_each_hw_ctx(q, hctx, i) { diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 211068a5f6769..72a2724a4eeea 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -24,7 +24,7 @@ */ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) && @@ -57,19 +57,19 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) { struct blk_mq_tags *tags = hctx->tags; - if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) return; - atomic_dec(&tags->active_queues); } else { if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) return; - atomic_dec(&tags->active_queues); } + atomic_dec(&tags->active_queues); + blk_mq_tag_wakeup_all(tags, false); } @@ -557,7 +557,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, * Only the sbitmap needs resizing since we allocated the max * initially. 
*/ - if (blk_mq_is_sbitmap_shared(set->flags)) + if (blk_mq_is_shared_tags(set->flags)) return 0; new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth); @@ -578,16 +578,16 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, return 0; } -void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size) +void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size) { - struct blk_mq_tags *tags = set->shared_sbitmap_tags; + struct blk_mq_tags *tags = set->shared_tags; sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags); } -void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q) +void blk_mq_tag_update_sched_shared_tags(struct request_queue *q) { - sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags, + sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags, q->nr_requests - q->tag_set->reserved_tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 1052d69147ba7..d8ce89fa1686c 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -43,9 +43,9 @@ extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tags, unsigned int depth, bool can_grow); -extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, +extern void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size); -extern void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q); +extern void blk_mq_tag_update_sched_shared_tags(struct request_queue *q); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, diff --git a/block/blk-mq.c b/block/blk-mq.c index e026e5ebe2c9e..f7428e11b109f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2235,7 +2235,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) blk_insert_flush(rq); blk_mq_run_hw_queue(data.hctx, true); } else if (plug && (q->nr_hw_queues == 1 || - blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) || + blk_mq_is_shared_tags(rq->mq_hctx->flags) || q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { /* * Use plugging if we have a ->commit_rqs() hook as well, as @@ -2353,8 +2353,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, struct blk_mq_tags *drv_tags; struct page *page; - if (blk_mq_is_sbitmap_shared(set->flags)) - drv_tags = set->shared_sbitmap_tags; + if (blk_mq_is_shared_tags(set->flags)) + drv_tags = set->shared_tags; else drv_tags = set->tags[hctx_idx]; @@ -2883,8 +2883,8 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set, int hctx_idx) { - if (blk_mq_is_sbitmap_shared(set->flags)) { - set->tags[hctx_idx] = set->shared_sbitmap_tags; + if (blk_mq_is_shared_tags(set->flags)) { + set->tags[hctx_idx] = set->shared_tags; return true; } @@ -2908,7 +2908,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set, unsigned int hctx_idx) { - if (!blk_mq_is_sbitmap_shared(set->flags)) + if (!blk_mq_is_shared_tags(set->flags)) blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx); set->tags[hctx_idx] = NULL; @@ -3375,11 +3375,11 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) { int i; - if (blk_mq_is_sbitmap_shared(set->flags)) { - set->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set, + if (blk_mq_is_shared_tags(set->flags)) { + set->shared_tags 
= blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX, set->queue_depth); - if (!set->shared_sbitmap_tags) + if (!set->shared_tags) return -ENOMEM; } @@ -3395,8 +3395,8 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) while (--i >= 0) __blk_mq_free_map_and_rqs(set, i); - if (blk_mq_is_sbitmap_shared(set->flags)) { - blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + if (blk_mq_is_shared_tags(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } @@ -3617,8 +3617,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) for (i = 0; i < set->nr_hw_queues; i++) __blk_mq_free_map_and_rqs(set, i); - if (blk_mq_is_sbitmap_shared(set->flags)) { - blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags, + if (blk_mq_is_shared_tags(set->flags)) { + blk_mq_free_map_and_rqs(set, set->shared_tags, BLK_MQ_NO_HCTX_IDX); } @@ -3669,11 +3669,11 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) } if (!ret) { q->nr_requests = nr; - if (blk_mq_is_sbitmap_shared(set->flags)) { + if (blk_mq_is_shared_tags(set->flags)) { if (q->elevator) - blk_mq_tag_update_sched_shared_sbitmap(q); + blk_mq_tag_update_sched_shared_tags(q); else - blk_mq_tag_resize_shared_sbitmap(set, nr); + blk_mq_tag_resize_shared_tags(set, nr); } } diff --git a/block/blk-mq.h b/block/blk-mq.h index 8824ae03215a1..171e8cdcff543 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -157,7 +157,7 @@ struct blk_mq_alloc_data { struct blk_mq_hw_ctx *hctx; }; -static inline bool blk_mq_is_sbitmap_shared(unsigned int flags) +static inline bool blk_mq_is_shared_tags(unsigned int flags) { return flags & BLK_MQ_F_TAG_HCTX_SHARED; } @@ -217,24 +217,24 @@ static inline int blk_mq_get_rq_budget_token(struct request *rq) static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) - atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap); + if (blk_mq_is_shared_tags(hctx->flags)) + atomic_inc(&hctx->queue->nr_active_requests_shared_tags); else atomic_inc(&hctx->nr_active); } static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) - atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap); + if (blk_mq_is_shared_tags(hctx->flags)) + atomic_dec(&hctx->queue->nr_active_requests_shared_tags); else atomic_dec(&hctx->nr_active); } static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx) { - if (blk_mq_is_sbitmap_shared(hctx->flags)) - return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap); + if (blk_mq_is_shared_tags(hctx->flags)) + return atomic_read(&hctx->queue->nr_active_requests_shared_tags); return atomic_read(&hctx->nr_active); } static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx, @@ -328,7 +328,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, if (bt->sb.depth == 1) return true; - if (blk_mq_is_sbitmap_shared(hctx->flags)) { + if (blk_mq_is_shared_tags(hctx->flags)) { struct request_queue *q = hctx->queue; if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) diff --git a/block/elevator.c b/block/elevator.c index 57be09cd7f6dd..1f39f6e8ebb96 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -637,7 +637,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q) return NULL; if (q->nr_hw_queues != 1 && - !blk_mq_is_sbitmap_shared(q->tag_set->flags)) + !blk_mq_is_shared_tags(q->tag_set->flags)) return NULL; return elevator_get(q, "mq-deadline", false); diff --git 
a/include/linux/blk-mq.h b/include/linux/blk-mq.h index faa20a19bfcc4..75d75657df212 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -442,9 +442,9 @@ enum hctx_type { * tag set. * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues * elements. - * @shared_sbitmap_tags: - * Shared sbitmap set of tags. Has @nr_hw_queues elements. If - * set, shared by all @tags. + * @shared_tags: + * Shared set of tags. Has @nr_hw_queues elements. If set, + * shared by all @tags. * @tag_list_lock: Serializes tag_list accesses. * @tag_list: List of the request queues that use this tag set. See also * request_queue.tag_set_list. @@ -464,7 +464,7 @@ struct blk_mq_tag_set { struct blk_mq_tags **tags; - struct blk_mq_tags *shared_sbitmap_tags; + struct blk_mq_tags *shared_tags; struct mutex tag_list_lock; struct list_head tag_list; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index cf92c13eb80ed..b19172db7eef3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -236,9 +236,9 @@ struct request_queue { struct timer_list timeout; struct work_struct timeout_work; - atomic_t nr_active_requests_shared_sbitmap; + atomic_t nr_active_requests_shared_tags; - struct blk_mq_tags *shared_sbitmap_tags; + struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; #ifdef CONFIG_BLK_CGROUP From a7b36ee6ba299ffa5c3b36af187b4d0fb32a557c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 5 Oct 2021 09:11:56 -0600 Subject: [PATCH 038/205] block: move blk-throtl fast path inline Even if no policies are defined, we spend ~2% of the total IO time checking. Move the fast path inline. Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 1 + block/blk-core.c | 1 + block/blk-merge.c | 1 + block/blk-sysfs.c | 1 + block/blk-throttle.c | 161 +------------------------------------- block/blk-throttle.h | 182 +++++++++++++++++++++++++++++++++++++++++++ block/blk.h | 16 ---- 7 files changed, 189 insertions(+), 174 deletions(-) create mode 100644 block/blk-throttle.h diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 9a1c5839dd469..166104d92b5ec 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -32,6 +32,7 @@ #include #include "blk.h" #include "blk-ioprio.h" +#include "blk-throttle.h" /* * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation. 
diff --git a/block/blk-core.c b/block/blk-core.c index b08aed26e6bb6..3b5ee3f7cc1e7 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -50,6 +50,7 @@ #include "blk-mq.h" #include "blk-mq-sched.h" #include "blk-pm.h" +#include "blk-throttle.h" struct dentry *blk_debugfs_root; diff --git a/block/blk-merge.c b/block/blk-merge.c index 5b4f23014df8a..bf7aedaad8f8e 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -13,6 +13,7 @@ #include "blk.h" #include "blk-rq-qos.h" +#include "blk-throttle.h" static inline bool bio_will_gap(struct request_queue *q, struct request *prev_rq, struct bio *prev, struct bio *next) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 614d9d47de36b..da883efcba33a 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -17,6 +17,7 @@ #include "blk-mq.h" #include "blk-mq-debugfs.h" #include "blk-wbt.h" +#include "blk-throttle.h" struct queue_sysfs_entry { struct attribute attr; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 7c4e7993ba970..8cefd14deed5a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -13,6 +13,7 @@ #include #include "blk.h" #include "blk-cgroup-rwstat.h" +#include "blk-throttle.h" /* Max dispatch from a group in 1 round */ #define THROTL_GRP_QUANTUM 8 @@ -37,60 +38,9 @@ */ #define LATENCY_FILTERED_HD (1000L) /* 1ms */ -static struct blkcg_policy blkcg_policy_throtl; - /* A workqueue to queue throttle related work */ static struct workqueue_struct *kthrotld_workqueue; -/* - * To implement hierarchical throttling, throtl_grps form a tree and bios - * are dispatched upwards level by level until they reach the top and get - * issued. When dispatching bios from the children and local group at each - * level, if the bios are dispatched into a single bio_list, there's a risk - * of a local or child group which can queue many bios at once filling up - * the list starving others. - * - * To avoid such starvation, dispatched bios are queued separately - * according to where they came from. When they are again dispatched to - * the parent, they're popped in round-robin order so that no single source - * hogs the dispatch window. - * - * throtl_qnode is used to keep the queued bios separated by their sources. - * Bios are queued to throtl_qnode which in turn is queued to - * throtl_service_queue and then dispatched in round-robin order. - * - * It's also used to track the reference counts on blkg's. A qnode always - * belongs to a throtl_grp and gets queued on itself or the parent, so - * incrementing the reference of the associated throtl_grp when a qnode is - * queued and decrementing when dequeued is enough to keep the whole blkg - * tree pinned while bios are in flight. - */ -struct throtl_qnode { - struct list_head node; /* service_queue->queued[] */ - struct bio_list bios; /* queued bios */ - struct throtl_grp *tg; /* tg this qnode belongs to */ -}; - -struct throtl_service_queue { - struct throtl_service_queue *parent_sq; /* the parent service_queue */ - - /* - * Bios queued directly to this service_queue or dispatched from - * children throtl_grp's. - */ - struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ - unsigned int nr_queued[2]; /* number of queued bios */ - - /* - * RB tree of active children throtl_grp's, which are sorted by - * their ->disptime. 
- */ - struct rb_root_cached pending_tree; /* RB tree of active tgs */ - unsigned int nr_pending; /* # queued in the tree */ - unsigned long first_pending_disptime; /* disptime of the first tg */ - struct timer_list pending_timer; /* fires on first_pending_disptime */ -}; - enum tg_state_flags { THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */ @@ -98,93 +48,6 @@ enum tg_state_flags { #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) -enum { - LIMIT_LOW, - LIMIT_MAX, - LIMIT_CNT, -}; - -struct throtl_grp { - /* must be the first member */ - struct blkg_policy_data pd; - - /* active throtl group service_queue member */ - struct rb_node rb_node; - - /* throtl_data this group belongs to */ - struct throtl_data *td; - - /* this group's service queue */ - struct throtl_service_queue service_queue; - - /* - * qnode_on_self is used when bios are directly queued to this - * throtl_grp so that local bios compete fairly with bios - * dispatched from children. qnode_on_parent is used when bios are - * dispatched from this throtl_grp into its parent and will compete - * with the sibling qnode_on_parents and the parent's - * qnode_on_self. - */ - struct throtl_qnode qnode_on_self[2]; - struct throtl_qnode qnode_on_parent[2]; - - /* - * Dispatch time in jiffies. This is the estimated time when group - * will unthrottle and is ready to dispatch more bio. It is used as - * key to sort active groups in service tree. - */ - unsigned long disptime; - - unsigned int flags; - - /* are there any throtl rules between this group and td? */ - bool has_rules[2]; - - /* internally used bytes per second rate limits */ - uint64_t bps[2][LIMIT_CNT]; - /* user configured bps limits */ - uint64_t bps_conf[2][LIMIT_CNT]; - - /* internally used IOPS limits */ - unsigned int iops[2][LIMIT_CNT]; - /* user configured IOPS limits */ - unsigned int iops_conf[2][LIMIT_CNT]; - - /* Number of bytes dispatched in current slice */ - uint64_t bytes_disp[2]; - /* Number of bio's dispatched in current slice */ - unsigned int io_disp[2]; - - unsigned long last_low_overflow_time[2]; - - uint64_t last_bytes_disp[2]; - unsigned int last_io_disp[2]; - - unsigned long last_check_time; - - unsigned long latency_target; /* us */ - unsigned long latency_target_conf; /* us */ - /* When did we start a new slice */ - unsigned long slice_start[2]; - unsigned long slice_end[2]; - - unsigned long last_finish_time; /* ns / 1024 */ - unsigned long checked_last_finish_time; /* ns / 1024 */ - unsigned long avg_idletime; /* ns / 1024 */ - unsigned long idletime_threshold; /* us */ - unsigned long idletime_threshold_conf; /* us */ - - unsigned int bio_cnt; /* total bios */ - unsigned int bad_bio_cnt; /* bios exceeding latency threshold */ - unsigned long bio_cnt_reset_time; - - atomic_t io_split_cnt[2]; - atomic_t last_io_split_cnt[2]; - - struct blkg_rwstat stat_bytes; - struct blkg_rwstat stat_ios; -}; - /* We measure latency for request size from <= 4k to >= 1M */ #define LATENCY_BUCKET_SIZE 9 @@ -231,16 +94,6 @@ struct throtl_data static void throtl_pending_timer_fn(struct timer_list *t); -static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd) -{ - return pd ? 
container_of(pd, struct throtl_grp, pd) : NULL; -} - -static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) -{ - return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); -} - static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) { return pd_to_blkg(&tg->pd); @@ -1794,7 +1647,7 @@ static void throtl_shutdown_wq(struct request_queue *q) cancel_work_sync(&td->dispatch_work); } -static struct blkcg_policy blkcg_policy_throtl = { +struct blkcg_policy blkcg_policy_throtl = { .dfl_cftypes = throtl_files, .legacy_cftypes = throtl_legacy_files, @@ -2208,7 +2061,7 @@ void blk_throtl_charge_bio_split(struct bio *bio) } while (parent); } -bool blk_throtl_bio(struct bio *bio) +bool __blk_throtl_bio(struct bio *bio) { struct request_queue *q = bio->bi_bdev->bd_disk->queue; struct blkcg_gq *blkg = bio->bi_blkg; @@ -2221,19 +2074,12 @@ bool blk_throtl_bio(struct bio *bio) rcu_read_lock(); - /* see throtl_charge_bio() */ - if (bio_flagged(bio, BIO_THROTTLED)) - goto out; - if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) { blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size); blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1); } - if (!tg->has_rules[rw]) - goto out; - spin_lock_irq(&q->queue_lock); throtl_update_latency_buckets(td); @@ -2317,7 +2163,6 @@ bool blk_throtl_bio(struct bio *bio) out_unlock: spin_unlock_irq(&q->queue_lock); -out: bio_set_flag(bio, BIO_THROTTLED); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW diff --git a/block/blk-throttle.h b/block/blk-throttle.h new file mode 100644 index 0000000000000..175f03abd9e41 --- /dev/null +++ b/block/blk-throttle.h @@ -0,0 +1,182 @@ +#ifndef BLK_THROTTLE_H +#define BLK_THROTTLE_H + +#include "blk-cgroup-rwstat.h" + +/* + * To implement hierarchical throttling, throtl_grps form a tree and bios + * are dispatched upwards level by level until they reach the top and get + * issued. When dispatching bios from the children and local group at each + * level, if the bios are dispatched into a single bio_list, there's a risk + * of a local or child group which can queue many bios at once filling up + * the list starving others. + * + * To avoid such starvation, dispatched bios are queued separately + * according to where they came from. When they are again dispatched to + * the parent, they're popped in round-robin order so that no single source + * hogs the dispatch window. + * + * throtl_qnode is used to keep the queued bios separated by their sources. + * Bios are queued to throtl_qnode which in turn is queued to + * throtl_service_queue and then dispatched in round-robin order. + * + * It's also used to track the reference counts on blkg's. A qnode always + * belongs to a throtl_grp and gets queued on itself or the parent, so + * incrementing the reference of the associated throtl_grp when a qnode is + * queued and decrementing when dequeued is enough to keep the whole blkg + * tree pinned while bios are in flight. + */ +struct throtl_qnode { + struct list_head node; /* service_queue->queued[] */ + struct bio_list bios; /* queued bios */ + struct throtl_grp *tg; /* tg this qnode belongs to */ +}; + +struct throtl_service_queue { + struct throtl_service_queue *parent_sq; /* the parent service_queue */ + + /* + * Bios queued directly to this service_queue or dispatched from + * children throtl_grp's. + */ + struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ + unsigned int nr_queued[2]; /* number of queued bios */ + + /* + * RB tree of active children throtl_grp's, which are sorted by + * their ->disptime. 
+ */ + struct rb_root_cached pending_tree; /* RB tree of active tgs */ + unsigned int nr_pending; /* # queued in the tree */ + unsigned long first_pending_disptime; /* disptime of the first tg */ + struct timer_list pending_timer; /* fires on first_pending_disptime */ +}; + +enum { + LIMIT_LOW, + LIMIT_MAX, + LIMIT_CNT, +}; + +struct throtl_grp { + /* must be the first member */ + struct blkg_policy_data pd; + + /* active throtl group service_queue member */ + struct rb_node rb_node; + + /* throtl_data this group belongs to */ + struct throtl_data *td; + + /* this group's service queue */ + struct throtl_service_queue service_queue; + + /* + * qnode_on_self is used when bios are directly queued to this + * throtl_grp so that local bios compete fairly with bios + * dispatched from children. qnode_on_parent is used when bios are + * dispatched from this throtl_grp into its parent and will compete + * with the sibling qnode_on_parents and the parent's + * qnode_on_self. + */ + struct throtl_qnode qnode_on_self[2]; + struct throtl_qnode qnode_on_parent[2]; + + /* + * Dispatch time in jiffies. This is the estimated time when group + * will unthrottle and is ready to dispatch more bio. It is used as + * key to sort active groups in service tree. + */ + unsigned long disptime; + + unsigned int flags; + + /* are there any throtl rules between this group and td? */ + bool has_rules[2]; + + /* internally used bytes per second rate limits */ + uint64_t bps[2][LIMIT_CNT]; + /* user configured bps limits */ + uint64_t bps_conf[2][LIMIT_CNT]; + + /* internally used IOPS limits */ + unsigned int iops[2][LIMIT_CNT]; + /* user configured IOPS limits */ + unsigned int iops_conf[2][LIMIT_CNT]; + + /* Number of bytes dispatched in current slice */ + uint64_t bytes_disp[2]; + /* Number of bio's dispatched in current slice */ + unsigned int io_disp[2]; + + unsigned long last_low_overflow_time[2]; + + uint64_t last_bytes_disp[2]; + unsigned int last_io_disp[2]; + + unsigned long last_check_time; + + unsigned long latency_target; /* us */ + unsigned long latency_target_conf; /* us */ + /* When did we start a new slice */ + unsigned long slice_start[2]; + unsigned long slice_end[2]; + + unsigned long last_finish_time; /* ns / 1024 */ + unsigned long checked_last_finish_time; /* ns / 1024 */ + unsigned long avg_idletime; /* ns / 1024 */ + unsigned long idletime_threshold; /* us */ + unsigned long idletime_threshold_conf; /* us */ + + unsigned int bio_cnt; /* total bios */ + unsigned int bad_bio_cnt; /* bios exceeding latency threshold */ + unsigned long bio_cnt_reset_time; + + atomic_t io_split_cnt[2]; + atomic_t last_io_split_cnt[2]; + + struct blkg_rwstat stat_bytes; + struct blkg_rwstat stat_ios; +}; + +extern struct blkcg_policy blkcg_policy_throtl; + +static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd) +{ + return pd ? 
container_of(pd, struct throtl_grp, pd) : NULL; +} + +static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) +{ + return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl)); +} + +/* + * Internal throttling interface + */ +#ifndef CONFIG_BLK_DEV_THROTTLING +static inline int blk_throtl_init(struct request_queue *q) { return 0; } +static inline void blk_throtl_exit(struct request_queue *q) { } +static inline void blk_throtl_register_queue(struct request_queue *q) { } +static inline void blk_throtl_charge_bio_split(struct bio *bio) { } +static inline bool blk_throtl_bio(struct bio *bio) { return false; } +#else /* CONFIG_BLK_DEV_THROTTLING */ +int blk_throtl_init(struct request_queue *q); +void blk_throtl_exit(struct request_queue *q); +void blk_throtl_register_queue(struct request_queue *q); +void blk_throtl_charge_bio_split(struct bio *bio); +bool __blk_throtl_bio(struct bio *bio); +static inline bool blk_throtl_bio(struct bio *bio) +{ + struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg); + + if (bio_flagged(bio, BIO_THROTTLED)) + return false; + if (!tg->has_rules[bio_data_dir(bio)]) + return false; + + return __blk_throtl_bio(bio); +} +#endif /* CONFIG_BLK_DEV_THROTTLING */ + +#endif diff --git a/block/blk.h b/block/blk.h index 8cdee4844d8a3..7aaccbc26db48 100644 --- a/block/blk.h +++ b/block/blk.h @@ -325,22 +325,6 @@ void ioc_clear_queue(struct request_queue *q); int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node); -/* - * Internal throttling interface - */ -#ifdef CONFIG_BLK_DEV_THROTTLING -extern int blk_throtl_init(struct request_queue *q); -extern void blk_throtl_exit(struct request_queue *q); -extern void blk_throtl_register_queue(struct request_queue *q); -extern void blk_throtl_charge_bio_split(struct bio *bio); -bool blk_throtl_bio(struct bio *bio); -#else /* CONFIG_BLK_DEV_THROTTLING */ -static inline int blk_throtl_init(struct request_queue *q) { return 0; } -static inline void blk_throtl_exit(struct request_queue *q) { } -static inline void blk_throtl_register_queue(struct request_queue *q) { } -static inline void blk_throtl_charge_bio_split(struct bio *bio) { } -static inline bool blk_throtl_bio(struct bio *bio) { return false; } -#endif /* CONFIG_BLK_DEV_THROTTLING */ #ifdef CONFIG_BLK_DEV_THROTTLING_LOW extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page); extern ssize_t blk_throtl_sample_time_store(struct request_queue *q, From 0006707723233cb2a9a23ca19fc3d0864835704c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 5 Oct 2021 09:23:59 -0600 Subject: [PATCH 039/205] block: inherit request start time from bio for BLK_CGROUP Doing high IOPS testing with blk-cgroups enabled spends ~15-20% of the time just doing ktime_get_ns() -> readtsc. We essentially read and set the start time twice, one for the bio and then again when that bio is mapped to a request. Given that the time between the two is very short, inherit the bio start time instead of reading it again. This cuts 1/3rd of the overhead of the time keeping. 
Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- block/blk-mq.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index f7428e11b109f..8e41d88fcb8a2 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -725,7 +725,14 @@ void blk_mq_start_request(struct request *rq) trace_block_rq_issue(rq); if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { - rq->io_start_time_ns = ktime_get_ns(); + u64 start_time; +#ifdef CONFIG_BLK_CGROUP + if (rq->bio) + start_time = bio_issue_time(&rq->bio->bi_issue); + else +#endif + start_time = ktime_get_ns(); + rq->io_start_time_ns = start_time; rq->stats_sectors = blk_rq_sectors(rq); rq->rq_flags |= RQF_STATS; rq_qos_issue(q, rq); From ba0ffdd8ce48ad7f7e85191cd29f9674caca3745 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Oct 2021 12:01:07 -0600 Subject: [PATCH 040/205] block: bump max plugged deferred size from 16 to 32 Particularly for NVMe with efficient deferred submission for many requests, there are nice benefits to be seen by bumping the default max plug count from 16 to 32. This is especially true for virtualized setups, where the submit part is more expensive. But can be noticed even on native hardware. Reduce the multiple queue factor from 4 to 2, since we're changing the default size. While changing it, move the defines into the block layer private header. These aren't values that anyone outside of the block layer uses, or should use. Signed-off-by: Jens Axboe --- block/blk-mq.c | 4 ++-- block/blk.h | 6 ++++++ include/linux/blkdev.h | 2 -- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 8e41d88fcb8a2..d861a969b2e0a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2152,14 +2152,14 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) } /* - * Allow 4x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple + * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple * queues. This is important for md arrays to benefit from merging * requests. */ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) { if (plug->multiple_queues) - return BLK_MAX_REQUEST_COUNT * 4; + return BLK_MAX_REQUEST_COUNT * 2; return BLK_MAX_REQUEST_COUNT; } diff --git a/block/blk.h b/block/blk.h index 7aaccbc26db48..019c50a140bab 100644 --- a/block/blk.h +++ b/block/blk.h @@ -224,6 +224,12 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, void blk_account_io_start(struct request *req); void blk_account_io_done(struct request *req, u64 now); +/* + * Plug flush limits + */ +#define BLK_MAX_REQUEST_COUNT 32 +#define BLK_PLUG_FLUSH_SIZE (128 * 1024) + /* * Internal elevator interface */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b19172db7eef3..472b4ab007c6b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -727,8 +727,6 @@ struct blk_plug { bool multiple_queues; bool nowait; }; -#define BLK_MAX_REQUEST_COUNT 16 -#define BLK_PLUG_FLUSH_SIZE (128 * 1024) struct blk_plug_cb; typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool); From 47c122e35d7e43b14129ceb9ed3a7e67599978fa Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Oct 2021 06:34:11 -0600 Subject: [PATCH 041/205] block: pre-allocate requests if plug is started and is a batch The caller typically has a good (or even exact) idea of how many requests it needs to submit. 
We can make the request/tag allocation a lot more efficient if we just allocate N requests/tags upfront when we queue the first bio from the batch. Provide a new plug start helper that allows the caller to specify how many IOs are expected. This sets plug->nr_ios, and we can use that for smarter request allocation. The plug provides a holding spot for requests, and request allocation will check it before calling into the normal request allocation path. The blk_finish_plug() is called, check if there are unused requests and free them. This should not happen in normal operations. The exception is if we get merging, then we may be left with requests that need freeing when done. This raises the per-core performance on my setup from ~5.8M to ~6.1M IOPS. Signed-off-by: Jens Axboe --- block/blk-core.c | 47 ++++++++++++++++------------ block/blk-mq.c | 70 ++++++++++++++++++++++++++++++++++-------- block/blk-mq.h | 5 +++ include/linux/blk-mq.h | 5 ++- include/linux/blkdev.h | 15 ++++++++- 5 files changed, 109 insertions(+), 33 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 3b5ee3f7cc1e7..e25a1c3f8b760 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1632,6 +1632,31 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, } EXPORT_SYMBOL(kblockd_mod_delayed_work_on); +void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios) +{ + struct task_struct *tsk = current; + + /* + * If this is a nested plug, don't actually assign it. + */ + if (tsk->plug) + return; + + INIT_LIST_HEAD(&plug->mq_list); + plug->cached_rq = NULL; + plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT); + plug->rq_count = 0; + plug->multiple_queues = false; + plug->nowait = false; + INIT_LIST_HEAD(&plug->cb_list); + + /* + * Store ordering should not be needed here, since a potential + * preempt will imply a full memory barrier + */ + tsk->plug = plug; +} + /** * blk_start_plug - initialize blk_plug and track it inside the task_struct * @plug: The &struct blk_plug that needs to be initialized @@ -1657,25 +1682,7 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on); */ void blk_start_plug(struct blk_plug *plug) { - struct task_struct *tsk = current; - - /* - * If this is a nested plug, don't actually assign it. - */ - if (tsk->plug) - return; - - INIT_LIST_HEAD(&plug->mq_list); - INIT_LIST_HEAD(&plug->cb_list); - plug->rq_count = 0; - plug->multiple_queues = false; - plug->nowait = false; - - /* - * Store ordering should not be needed here, since a potential - * preempt will imply a full memory barrier - */ - tsk->plug = plug; + blk_start_plug_nr_ios(plug, 1); } EXPORT_SYMBOL(blk_start_plug); @@ -1727,6 +1734,8 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) if (!list_empty(&plug->mq_list)) blk_mq_flush_plug_list(plug, from_schedule); + if (unlikely(!from_schedule && plug->cached_rq)) + blk_mq_free_plug_rqs(plug); } /** diff --git a/block/blk-mq.c b/block/blk-mq.c index d861a969b2e0a..d9f14d3c2b8cf 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -359,6 +359,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) struct request_queue *q = data->q; struct elevator_queue *e = q->elevator; u64 alloc_time_ns = 0; + struct request *rq; unsigned int tag; /* alloc_time includes depth and tag waits */ @@ -392,10 +393,21 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) * case just retry the hctx assignment and tag allocation as CPU hotplug * should have migrated us to an online CPU by now. 
*/ - tag = blk_mq_get_tag(data); - if (tag == BLK_MQ_NO_TAG) { + do { + tag = blk_mq_get_tag(data); + if (tag != BLK_MQ_NO_TAG) { + rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + if (!--data->nr_tags) + return rq; + if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) + return rq; + rq->rq_next = *data->cached_rq; + *data->cached_rq = rq; + data->flags |= BLK_MQ_REQ_NOWAIT; + continue; + } if (data->flags & BLK_MQ_REQ_NOWAIT) - return NULL; + break; /* * Give up the CPU and sleep for a random short time to ensure @@ -404,8 +416,15 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) */ msleep(3); goto retry; + } while (1); + + if (data->cached_rq) { + rq = *data->cached_rq; + *data->cached_rq = rq->rq_next; + return rq; } - return blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + + return NULL; } struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, @@ -415,6 +434,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, .q = q, .flags = flags, .cmd_flags = op, + .nr_tags = 1, }; struct request *rq; int ret; @@ -443,6 +463,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, .q = q, .flags = flags, .cmd_flags = op, + .nr_tags = 1, }; u64 alloc_time_ns = 0; unsigned int cpu; @@ -544,6 +565,18 @@ void blk_mq_free_request(struct request *rq) } EXPORT_SYMBOL_GPL(blk_mq_free_request); +void blk_mq_free_plug_rqs(struct blk_plug *plug) +{ + while (plug->cached_rq) { + struct request *rq; + + rq = plug->cached_rq; + plug->cached_rq = rq->rq_next; + percpu_ref_get(&rq->q->q_usage_counter); + blk_mq_free_request(rq); + } +} + inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { u64 now = 0; @@ -2185,6 +2218,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) const int is_flush_fua = op_is_flush(bio->bi_opf); struct blk_mq_alloc_data data = { .q = q, + .nr_tags = 1, }; struct request *rq; struct blk_plug *plug; @@ -2211,13 +2245,26 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) hipri = bio->bi_opf & REQ_HIPRI; - data.cmd_flags = bio->bi_opf; - rq = __blk_mq_alloc_request(&data); - if (unlikely(!rq)) { - rq_qos_cleanup(q, bio); - if (bio->bi_opf & REQ_NOWAIT) - bio_wouldblock_error(bio); - goto queue_exit; + plug = blk_mq_plug(q, bio); + if (plug && plug->cached_rq) { + rq = plug->cached_rq; + plug->cached_rq = rq->rq_next; + INIT_LIST_HEAD(&rq->queuelist); + data.hctx = rq->mq_hctx; + } else { + data.cmd_flags = bio->bi_opf; + if (plug) { + data.nr_tags = plug->nr_ios; + plug->nr_ios = 1; + data.cached_rq = &plug->cached_rq; + } + rq = __blk_mq_alloc_request(&data); + if (unlikely(!rq)) { + rq_qos_cleanup(q, bio); + if (bio->bi_opf & REQ_NOWAIT) + bio_wouldblock_error(bio); + goto queue_exit; + } } trace_block_getrq(bio); @@ -2236,7 +2283,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) return BLK_QC_T_NONE; } - plug = blk_mq_plug(q, bio); if (unlikely(is_flush_fua)) { /* Bypass scheduler for flush requests */ blk_insert_flush(rq); diff --git a/block/blk-mq.h b/block/blk-mq.h index 171e8cdcff543..5da970bb88659 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -125,6 +125,7 @@ extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q); extern int blk_mq_sysfs_register(struct request_queue *q); extern void blk_mq_sysfs_unregister(struct request_queue *q); extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx); +void blk_mq_free_plug_rqs(struct blk_plug *plug); void blk_mq_release(struct request_queue *q); @@ -152,6 +153,10 @@ struct blk_mq_alloc_data { 
unsigned int shallow_depth; unsigned int cmd_flags; + /* allocate multiple requests/tags in one go */ + unsigned int nr_tags; + struct request **cached_rq; + /* input & output parameter */ struct blk_mq_ctx *ctx; struct blk_mq_hw_ctx *hctx; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 75d75657df212..0e941f2175784 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -90,7 +90,10 @@ struct request { struct bio *bio; struct bio *biotail; - struct list_head queuelist; + union { + struct list_head queuelist; + struct request *rq_next; + }; /* * The hash is used inside the scheduler, and killed once the diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 472b4ab007c6b..17705c970d7e1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -722,10 +722,17 @@ extern void blk_set_queue_dying(struct request_queue *); */ struct blk_plug { struct list_head mq_list; /* blk-mq requests */ - struct list_head cb_list; /* md requires an unplug callback */ + + /* if ios_left is > 1, we can batch tag/rq allocations */ + struct request *cached_rq; + unsigned short nr_ios; + unsigned short rq_count; + bool multiple_queues; bool nowait; + + struct list_head cb_list; /* md requires an unplug callback */ }; struct blk_plug_cb; @@ -738,6 +745,7 @@ struct blk_plug_cb { extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data, int size); extern void blk_start_plug(struct blk_plug *); +extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short); extern void blk_finish_plug(struct blk_plug *); extern void blk_flush_plug_list(struct blk_plug *, bool); @@ -772,6 +780,11 @@ long nr_blockdev_pages(void); struct blk_plug { }; +static inline void blk_start_plug_nr_ios(struct blk_plug *plug, + unsigned short nr_ios) +{ +} + static inline void blk_start_plug(struct blk_plug *plug) { } From b90cfaed3789ecdc5580027fc91e3056bc6b3216 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:40:44 +0200 Subject: [PATCH 042/205] blk-mq: cleanup and rename __blk_mq_alloc_request The newly added loop for the cached requests in __blk_mq_alloc_request is a little too convoluted for my taste, so unwind it a bit. Also rename the function to __blk_mq_alloc_requests now that it can allocate more than a single request. 
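For readers who want the shape of that loop without the surrounding
block-layer detail, here is a simplified userspace sketch of batched
allocation with a cached singly linked list. All names are invented and this
is not the kernel implementation, just the general structure.

  #include <stdio.h>
  #include <stdlib.h>

  struct demo_rq { struct demo_rq *next; int tag; };
  struct demo_plug { struct demo_rq *cached; unsigned short nr_ios; };

  /* allocate one "request", caching up to nr - 1 extras on the plug */
  static struct demo_rq *alloc_requests(struct demo_plug *plug, unsigned int nr)
  {
          struct demo_rq *first = NULL;
          unsigned int i;

          for (i = 0; i < nr; i++) {
                  struct demo_rq *rq = malloc(sizeof(*rq));

                  if (!rq)
                          break;
                  rq->tag = (int)i;
                  rq->next = NULL;
                  if (!first) {
                          first = rq;                /* hand this one back */
                  } else {
                          rq->next = plug->cached;   /* stash the rest */
                          plug->cached = rq;
                  }
          }
          return first;
  }

  /* later allocations pop from the cache before hitting the allocator */
  static struct demo_rq *get_request(struct demo_plug *plug)
  {
          struct demo_rq *rq = plug->cached;

          if (rq) {
                  plug->cached = rq->next;
                  return rq;
          }
          return alloc_requests(plug, plug->nr_ios);
  }

  int main(void)
  {
          struct demo_plug plug = { .cached = NULL, .nr_ios = 4 };
          struct demo_rq *a = get_request(&plug);
          struct demo_rq *b = get_request(&plug);    /* served from the cache */

          printf("%d %d\n", a ? a->tag : -1, b ? b->tag : -1);
          free(a);
          free(b);
          while (plug.cached) {              /* drop the unused cached entries */
                  struct demo_rq *tmp = plug.cached;

                  plug.cached = tmp->next;
                  free(tmp);
          }
          return 0;
  }

Dropping the leftover cached entries at the end mirrors the way unused
requests are freed when the plug is finished.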
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104045.658051-2-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 56 +++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index d9f14d3c2b8cf..98a5d0850b959 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -354,7 +354,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, return rq; } -static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) +static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) { struct request_queue *q = data->q; struct elevator_queue *e = q->elevator; @@ -395,36 +395,36 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data) */ do { tag = blk_mq_get_tag(data); - if (tag != BLK_MQ_NO_TAG) { - rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); - if (!--data->nr_tags) - return rq; - if (e || data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) - return rq; - rq->rq_next = *data->cached_rq; - *data->cached_rq = rq; - data->flags |= BLK_MQ_REQ_NOWAIT; - continue; + if (tag == BLK_MQ_NO_TAG) { + if (data->flags & BLK_MQ_REQ_NOWAIT) + break; + /* + * Give up the CPU and sleep for a random short time to + * ensure that thread using a realtime scheduling class + * are migrated off the CPU, and thus off the hctx that + * is going away. + */ + msleep(3); + goto retry; } - if (data->flags & BLK_MQ_REQ_NOWAIT) - break; - /* - * Give up the CPU and sleep for a random short time to ensure - * that thread using a realtime scheduling class are migrated - * off the CPU, and thus off the hctx that is going away. - */ - msleep(3); - goto retry; + rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + if (!--data->nr_tags || e || + (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) + return rq; + + /* link into the cached list */ + rq->rq_next = *data->cached_rq; + *data->cached_rq = rq; + data->flags |= BLK_MQ_REQ_NOWAIT; } while (1); - if (data->cached_rq) { - rq = *data->cached_rq; - *data->cached_rq = rq->rq_next; - return rq; - } + if (!data->cached_rq) + return NULL; - return NULL; + rq = *data->cached_rq; + *data->cached_rq = rq->rq_next; + return rq; } struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, @@ -443,7 +443,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, if (ret) return ERR_PTR(ret); - rq = __blk_mq_alloc_request(&data); + rq = __blk_mq_alloc_requests(&data); if (!rq) goto out_queue_exit; rq->__data_len = 0; @@ -2258,7 +2258,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) plug->nr_ios = 1; data.cached_rq = &plug->cached_rq; } - rq = __blk_mq_alloc_request(&data); + rq = __blk_mq_alloc_requests(&data); if (unlikely(!rq)) { rq_qos_cleanup(q, bio); if (bio->bi_opf & REQ_NOWAIT) From 0f38d76646157357fcfa02f50575ea044830c494 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:40:45 +0200 Subject: [PATCH 043/205] blk-mq: cleanup blk_mq_submit_bio Move the blk_mq_alloc_data stack allocation only into the branch that actually needs it, and use rq->mq_hctx instead of data.hctx to refer to the hctx. 
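A toy sketch of the scoping idea, with invented types rather than the block
code: the setup-only struct lives inside the branch that needs it, and later
code reaches the hardware context through the request itself.

  #include <stdio.h>

  struct hctx { int busy; };
  struct request { struct hctx *mq_hctx; };

  static void submit(struct request *rq, struct hctx *hw, int have_cached)
  {
          if (!have_cached) {
                  /* setup state is only needed on this path, so scope it here */
                  struct { struct hctx *hctx; int nr_tags; } data = {
                          .hctx = hw, .nr_tags = 1,
                  };

                  rq->mq_hctx = data.hctx;
                  data.hctx->busy = data.nr_tags;
          }
          /* afterwards, reach the hctx through rq->mq_hctx, not through data */
          printf("busy=%d\n", rq->mq_hctx->busy);
  }

  int main(void)
  {
          struct hctx hw = { 0 };
          struct request rq = { .mq_hctx = &hw };

          submit(&rq, &hw, 0);
          return 0;
  }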
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104045.658051-3-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 98a5d0850b959..87dc2debedfbf 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2216,10 +2216,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) struct request_queue *q = bio->bi_bdev->bd_disk->queue; const int is_sync = op_is_sync(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf); - struct blk_mq_alloc_data data = { - .q = q, - .nr_tags = 1, - }; struct request *rq; struct blk_plug *plug; struct request *same_queue_rq = NULL; @@ -2250,9 +2246,13 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq = plug->cached_rq; plug->cached_rq = rq->rq_next; INIT_LIST_HEAD(&rq->queuelist); - data.hctx = rq->mq_hctx; } else { - data.cmd_flags = bio->bi_opf; + struct blk_mq_alloc_data data = { + .q = q, + .nr_tags = 1, + .cmd_flags = bio->bi_opf, + }; + if (plug) { data.nr_tags = plug->nr_ios; plug->nr_ios = 1; @@ -2271,7 +2271,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_track(q, rq, bio); - cookie = request_to_qc_t(data.hctx, rq); + cookie = request_to_qc_t(rq->mq_hctx, rq); blk_mq_bio_to_request(rq, bio, nr_segs); @@ -2286,7 +2286,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) if (unlikely(is_flush_fua)) { /* Bypass scheduler for flush requests */ blk_insert_flush(rq); - blk_mq_run_hw_queue(data.hctx, true); + blk_mq_run_hw_queue(rq->mq_hctx, true); } else if (plug && (q->nr_hw_queues == 1 || blk_mq_is_shared_tags(rq->mq_hctx->flags) || q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { @@ -2333,18 +2333,17 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) trace_block_plug(q); if (same_queue_rq) { - data.hctx = same_queue_rq->mq_hctx; trace_block_unplug(q, 1, true); - blk_mq_try_issue_directly(data.hctx, same_queue_rq, - &cookie); + blk_mq_try_issue_directly(same_queue_rq->mq_hctx, + same_queue_rq, &cookie); } } else if ((q->nr_hw_queues > 1 && is_sync) || - !data.hctx->dispatch_busy) { + !rq->mq_hctx->dispatch_busy) { /* * There is no scheduler and we can try to send directly * to the hardware. */ - blk_mq_try_issue_directly(data.hctx, rq, &cookie); + blk_mq_try_issue_directly(rq->mq_hctx, rq, &cookie); } else { /* Default case. */ blk_mq_sched_insert_request(rq, false, true, true); From 4a60f360a5c9533d77db011db6766448f763c86a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 16 Oct 2021 07:34:49 -0600 Subject: [PATCH 044/205] block: don't dereference request after flush insertion We could have a race here, where the request gets freed before we call into blk_mq_run_hw_queue(). If this happens, we cannot rely on the state of the request. Grab the hardware context before inserting the flush. 
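The hazard being avoided is the classic one of touching an object after
handing it to code that may free it. A minimal userspace sketch follows, with
consume() standing in for such an operation; the names are invented and this
is not the block-layer code.

  #include <stdio.h>
  #include <stdlib.h>

  struct queue { const char *name; };
  struct job { struct queue *q; };

  static void consume(struct job *j)
  {
          /* ownership passes here; the job may be freed at any point after */
          free(j);
  }

  int main(void)
  {
          struct queue q = { .name = "hw0" };
          struct job *j = malloc(sizeof(*j));

          if (!j)
                  return 1;
          j->q = &q;

          /* grab what we still need *before* handing the job off ... */
          struct queue *saved = j->q;

          consume(j);

          /* ... so we never touch j again once it may have been freed */
          printf("run queue %s\n", saved->name);
          return 0;
  }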
Fixes: 0f38d7664615 ("blk-mq: cleanup blk_mq_submit_bio") Reviewed-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 87dc2debedfbf..b7b8437f0a046 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2284,9 +2284,10 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) } if (unlikely(is_flush_fua)) { + struct blk_mq_hw_ctx *hctx = rq->mq_hctx; /* Bypass scheduler for flush requests */ blk_insert_flush(rq); - blk_mq_run_hw_queue(rq->mq_hctx, true); + blk_mq_run_hw_queue(hctx, true); } else if (plug && (q->nr_hw_queues == 1 || blk_mq_is_shared_tags(rq->mq_hctx->flags) || q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { From fea349b037869beda6db5ed0c150f8a2a9a317f6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:44:48 +0200 Subject: [PATCH 045/205] block: unexport blkdev_ioctl With the raw driver gone, there is no modular user left. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104450.659013-2-hch@lst.de Signed-off-by: Jens Axboe --- block/ioctl.c | 1 - 1 file changed, 1 deletion(-) diff --git a/block/ioctl.c b/block/ioctl.c index eb0491e90b9a0..0f823444cc557 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -588,7 +588,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return -ENOTTY; return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg); } -EXPORT_SYMBOL_GPL(blkdev_ioctl); /* for /dev/raw */ #ifdef CONFIG_COMPAT From 84b8514b46b417e299746b1297d3d0899e8539f3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:44:49 +0200 Subject: [PATCH 046/205] block: move the *blkdev_ioctl declarations out of blkdev.h These are only used inside of block/. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104450.659013-3-hch@lst.de Signed-off-by: Jens Axboe --- block/blk.h | 4 ++++ include/linux/genhd.h | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/block/blk.h b/block/blk.h index 019c50a140bab..eadb2a640a695 100644 --- a/block/blk.h +++ b/block/blk.h @@ -405,6 +405,10 @@ static inline void bio_clear_hipri(struct bio *bio) bio->bi_opf &= ~REQ_HIPRI; } +int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + unsigned long arg); +long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); + extern const struct address_space_operations def_blk_aops; #endif /* BLK_INTERNAL_H */ diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 0b48a0cf42624..cd4038fd57436 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -281,10 +281,6 @@ bool bdev_check_media_change(struct block_device *bdev); int __invalidate_device(struct block_device *bdev, bool kill_dirty); void set_capacity(struct gendisk *disk, sector_t size); -/* for drivers/char/raw.c: */ -int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); -long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); - #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); From 8a709512eae7ccd5ba73e8634474caadbc9ebe76 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 12:44:50 +0200 Subject: [PATCH 047/205] block: merge block_ioctl into blkdev_ioctl Simplify the ioctl path and match the code structure on the compat side. 
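The per-call O_NDELAY handling that moves in this patch follows a simple
rule: recompute derived mode bits from the mutable file flags on every entry,
because fcntl(F_SETFL) can change them after open. Below is an illustrative
sketch only, with invented flag names rather than the real fmode_t and
FMODE_NDELAY definitions.

  #include <stdio.h>

  #define O_NDELAY_FLAG   (1u << 0)    /* can change later via fcntl() */
  #define MODE_NDELAY     (1u << 4)

  struct file_state { unsigned int f_flags; unsigned int f_mode; };

  static unsigned int ioctl_mode(const struct file_state *f)
  {
          unsigned int mode = f->f_mode;

          /* recompute on each entry; an open-time value may be stale */
          if (f->f_flags & O_NDELAY_FLAG)
                  mode |= MODE_NDELAY;
          else
                  mode &= ~MODE_NDELAY;
          return mode;
  }

  int main(void)
  {
          struct file_state f = { .f_flags = 0, .f_mode = 0 };

          printf("%u\n", ioctl_mode(&f));
          f.f_flags |= O_NDELAY_FLAG;       /* e.g. fcntl(fd, F_SETFL, ...) */
          printf("%u\n", ioctl_mode(&f));
          return 0;
  }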
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012104450.659013-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk.h | 3 +-- block/fops.c | 19 +------------------ block/ioctl.c | 18 ++++++++++++++---- 3 files changed, 16 insertions(+), 24 deletions(-) diff --git a/block/blk.h b/block/blk.h index eadb2a640a695..618482b296712 100644 --- a/block/blk.h +++ b/block/blk.h @@ -405,8 +405,7 @@ static inline void bio_clear_hipri(struct bio *bio) bio->bi_opf &= ~REQ_HIPRI; } -int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, - unsigned long arg); +long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); extern const struct address_space_operations def_blk_aops; diff --git a/block/fops.c b/block/fops.c index 1e970c247e0eb..7bb9581a146cf 100644 --- a/block/fops.c +++ b/block/fops.c @@ -461,23 +461,6 @@ static int blkdev_close(struct inode *inode, struct file *filp) return 0; } -static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) -{ - struct block_device *bdev = I_BDEV(bdev_file_inode(file)); - fmode_t mode = file->f_mode; - - /* - * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have - * to updated it before every ioctl. - */ - if (file->f_flags & O_NDELAY) - mode |= FMODE_NDELAY; - else - mode &= ~FMODE_NDELAY; - - return blkdev_ioctl(bdev, mode, cmd, arg); -} - /* * Write data to the block device. Only intended for the block device itself * and the raw driver which basically is a fake block device. @@ -621,7 +604,7 @@ const struct file_operations def_blk_fops = { .iopoll = blkdev_iopoll, .mmap = generic_file_mmap, .fsync = blkdev_fsync, - .unlocked_ioctl = block_ioctl, + .unlocked_ioctl = blkdev_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = compat_blkdev_ioctl, #endif diff --git a/block/ioctl.c b/block/ioctl.c index 0f823444cc557..77b1b2453f395 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -538,12 +538,22 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode, * * New commands must be compatible and go into blkdev_common_ioctl */ -int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, - unsigned long arg) +long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) { - int ret; - loff_t size; + struct block_device *bdev = I_BDEV(file->f_mapping->host); void __user *argp = (void __user *)arg; + fmode_t mode = file->f_mode; + loff_t size; + int ret; + + /* + * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have + * to updated it before every ioctl. + */ + if (file->f_flags & O_NDELAY) + mode |= FMODE_NDELAY; + else + mode &= ~FMODE_NDELAY; switch (cmd) { /* These need separate implementations for the data structure */ From be6bfe36db1795babe9d92178a47b2e02193cb0f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 13:25:41 +0100 Subject: [PATCH 048/205] block: inline hot paths of blk_account_io_*() Extract hot paths of __blk_account_io_start() and __blk_account_io_done() into inline functions, so we don't always pay for function calls. 
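The split is the usual inline-fast-path/out-of-line-slow-path arrangement:
the cheap flag test is inlined at the call site, and only the rare accounting
work costs a function call. A toy sketch with simplified, invented request
and accounting types (not the kernel functions themselves):

  #include <stdio.h>

  struct req { int do_stat; long bytes; };

  static long total_bytes;

  /* the rarely needed slow path stays a real, out-of-line function */
  static void account_done_slow(const struct req *r)
  {
          total_bytes += r->bytes;
  }

  /* the hot path is just a flag test, inlined at every call site */
  static inline void account_done(const struct req *r)
  {
          if (r->do_stat)
                  account_done_slow(r);
  }

  int main(void)
  {
          struct req a = { .do_stat = 1, .bytes = 4096 };
          struct req b = { .do_stat = 0, .bytes = 512 };

          account_done(&a);
          account_done(&b);       /* no call into the slow path here */
          printf("%ld\n", total_bytes);
          return 0;
  }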
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b0662a636bd4cc7b4f84c9d0a41efa46a688ef13.1633781740.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-core.c | 30 +++++++++--------------------- block/blk.h | 24 +++++++++++++++++++++--- 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index e25a1c3f8b760..80fa7aa394c7e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1180,8 +1180,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request * if (blk_crypto_insert_cloned_request(rq)) return BLK_STS_IOERR; - if (blk_queue_io_stat(q)) - blk_account_io_start(rq); + blk_account_io_start(rq); /* * Since we have a scheduler attached on the top device, @@ -1260,30 +1259,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) } } -void blk_account_io_done(struct request *req, u64 now) +void __blk_account_io_done(struct request *req, u64 now) { - /* - * Account IO completion. flush_rq isn't accounted as a - * normal IO on queueing nor completion. Accounting the - * containing request is enough. - */ - if (req->part && blk_do_io_stat(req) && - !(req->rq_flags & RQF_FLUSH_SEQ)) { - const int sgrp = op_stat_group(req_op(req)); + const int sgrp = op_stat_group(req_op(req)); - part_stat_lock(); - update_io_ticks(req->part, jiffies, true); - part_stat_inc(req->part, ios[sgrp]); - part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); - part_stat_unlock(); - } + part_stat_lock(); + update_io_ticks(req->part, jiffies, true); + part_stat_inc(req->part, ios[sgrp]); + part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); + part_stat_unlock(); } -void blk_account_io_start(struct request *rq) +void __blk_account_io_start(struct request *rq) { - if (!blk_do_io_stat(rq)) - return; - /* passthrough requests can hold bios that do not have ->bi_bdev set */ if (rq->bio && rq->bio->bi_bdev) rq->part = rq->bio->bi_bdev; diff --git a/block/blk.h b/block/blk.h index 618482b296712..cab8d659d8a69 100644 --- a/block/blk.h +++ b/block/blk.h @@ -221,8 +221,8 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); -void blk_account_io_start(struct request *req); -void blk_account_io_done(struct request *req, u64 now); +void __blk_account_io_start(struct request *req); +void __blk_account_io_done(struct request *req, u64 now); /* * Plug flush limits @@ -286,7 +286,25 @@ int blk_dev_init(void); */ static inline bool blk_do_io_stat(struct request *rq) { - return rq->rq_disk && (rq->rq_flags & RQF_IO_STAT); + return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk; +} + +static inline void blk_account_io_done(struct request *req, u64 now) +{ + /* + * Account IO completion. flush_rq isn't accounted as a + * normal IO on queueing nor completion. Accounting the + * containing request is enough. 
+ */ + if (blk_do_io_stat(req) && req->part && + !(req->rq_flags & RQF_FLUSH_SEQ)) + __blk_account_io_done(req, now); +} + +static inline void blk_account_io_start(struct request *req) +{ + if (blk_do_io_stat(req)) + __blk_account_io_start(req); } static inline void req_set_nomerge(struct request_queue *q, struct request *req) From e9ea15963f3b9d979e1d6d758b8e407775ae6588 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 13:25:42 +0100 Subject: [PATCH 049/205] blk-mq: inline hot part of __blk_mq_sched_restart Extract a fast check out of __block_mq_sched_restart() and inline it for performance reasons. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/894abaa0998e5999f2fe18f271e5efdfc2c32bd2.1633781740.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-mq-sched.c | 4 +--- block/blk-mq-sched.h | 8 +++++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 27312da7d638a..efc1374b88318 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -57,10 +57,8 @@ void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) } EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx); -void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) +void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) { - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) - return; clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); /* diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index e4d367e0b2a30..c97b816c38005 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -17,7 +17,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq, struct list_head *free); void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx); -void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); +void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx); void blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue, bool async); @@ -31,6 +31,12 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); void blk_mq_sched_free_rqs(struct request_queue *q); +static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) +{ + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) + __blk_mq_sched_restart(hctx); +} + static inline bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) From 9e8c0d0d4d21d2c2c60132e2c18a73d08a230b42 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:17:57 +0200 Subject: [PATCH 050/205] block: remove BIO_BUG_ON BIO_DEBUG is always defined, so just switch the two instances to use BUG_ON directly. 
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-2-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 4 ++-- include/linux/bio.h | 8 -------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/block/bio.c b/block/bio.c index a6fb6a0b42955..35b875563c8ba 100644 --- a/block/bio.c +++ b/block/bio.c @@ -156,7 +156,7 @@ static void bio_put_slab(struct bio_set *bs) void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) { - BIO_BUG_ON(nr_vecs > BIO_MAX_VECS); + BUG_ON(nr_vecs > BIO_MAX_VECS); if (nr_vecs == BIO_MAX_VECS) mempool_free(bv, pool); @@ -677,7 +677,7 @@ static void bio_alloc_cache_destroy(struct bio_set *bs) void bio_put(struct bio *bio) { if (unlikely(bio_flagged(bio, BIO_REFFED))) { - BIO_BUG_ON(!atomic_read(&bio->__bi_cnt)); + BUG_ON(!atomic_read(&bio->__bi_cnt)); if (!atomic_dec_and_test(&bio->__bi_cnt)) return; } diff --git a/include/linux/bio.h b/include/linux/bio.h index 00952e92eae1b..65a356fa71109 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -11,14 +11,6 @@ #include #include -#define BIO_DEBUG - -#ifdef BIO_DEBUG -#define BIO_BUG_ON BUG_ON -#else -#define BIO_BUG_ON -#endif - #define BIO_MAX_VECS 256U static inline unsigned int bio_max_segs(unsigned int nr_segs) From 11d9cab1ca6ed08747c6c5a274c6a8e8307aefbc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:17:58 +0200 Subject: [PATCH 051/205] block: don't include in bio.h doesn't need any of the definitions from ioprio.h. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-3-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/bio.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/bio.h b/include/linux/bio.h index 65a356fa71109..ae94794d07c9d 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -6,7 +6,6 @@ #define __LINUX_BIO_H #include -#include /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include #include From 8addffd657a956944c1b8a74a1c9aabcfc5b4530 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:17:59 +0200 Subject: [PATCH 052/205] block: move bio_mergeable out of bio.h bio_mergeable is only needed by I/O schedulers, so move it to blk-mq-sched.h. 
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-4-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq-sched.h | 5 +++++ include/linux/bio.h | 8 -------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index c97b816c38005..fe252278ed9a5 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -37,6 +37,11 @@ static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx) __blk_mq_sched_restart(hctx); } +static inline bool bio_mergeable(struct bio *bio) +{ + return !(bio->bi_opf & REQ_NOMERGE_FLAGS); +} + static inline bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) diff --git a/include/linux/bio.h b/include/linux/bio.h index ae94794d07c9d..ec255f2829948 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -69,14 +69,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio) bio_op(bio) == REQ_OP_WRITE_ZEROES; } -static inline bool bio_mergeable(struct bio *bio) -{ - if (bio->bi_opf & REQ_NOMERGE_FLAGS) - return false; - - return true; -} - static inline unsigned int bio_cur_bytes(struct bio *bio) { if (bio_has_data(bio)) From b6559d8f9fdd7f0e139161cffea2645bd8d084c6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:00 +0200 Subject: [PATCH 053/205] block: fold bio_cur_bytes into blk_rq_cur_bytes Fold bio_cur_bytes into the only caller. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-5-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/bio.h | 8 -------- include/linux/blk-mq.h | 6 +++++- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/include/linux/bio.h b/include/linux/bio.h index ec255f2829948..cffd5eba401ed 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -69,14 +69,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio) bio_op(bio) == REQ_OP_WRITE_ZEROES; } -static inline unsigned int bio_cur_bytes(struct bio *bio) -{ - if (bio_has_data(bio)) - return bio_iovec(bio).bv_len; - else /* dataless requests such as discard */ - return bio->bi_iter.bi_size; -} - static inline void *bio_data(struct bio *bio) { if (bio_has_data(bio)) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0e941f2175784..2219e92771186 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -927,7 +927,11 @@ static inline unsigned int blk_rq_bytes(const struct request *rq) static inline int blk_rq_cur_bytes(const struct request *rq) { - return rq->bio ? bio_cur_bytes(rq->bio) : 0; + if (!rq->bio) + return 0; + if (!bio_has_data(rq->bio)) /* dataless requests such as discard */ + return rq->bio->bi_iter.bi_size; + return bio_iovec(rq->bio).bv_len; } unsigned int blk_rq_err_bytes(const struct request *rq); From 9a6083becbe113ed1e28059ce659dc8ae71b33c3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:01 +0200 Subject: [PATCH 054/205] block: move bio_full out of bio.h bio_full is only used in bio.c, so move it there. 
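One detail worth noting about the helper being moved below: its size check is written as bi_size > UINT_MAX - len rather than bi_size + len > UINT_MAX, so the test cannot be defeated by unsigned wrap-around. A tiny standalone demonstration of the difference (generic code, not the kernel's):

#include <limits.h>
#include <stdio.h>

static int full_safe(unsigned int size, unsigned int len)
{
	return size > UINT_MAX - len;		/* no overflow possible */
}

static int full_naive(unsigned int size, unsigned int len)
{
	return size + len > UINT_MAX;		/* wraps, always false */
}

int main(void)
{
	unsigned int size = UINT_MAX - 10, len = 100;

	printf("safe : %d\n", full_safe(size, len));	/* prints 1 */
	printf("naive: %d\n", full_naive(size, len));	/* prints 0 */
	return 0;
}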
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-6-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 17 +++++++++++++++++ include/linux/bio.h | 19 ------------------- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/block/bio.c b/block/bio.c index 35b875563c8ba..7e2899071de26 100644 --- a/block/bio.c +++ b/block/bio.c @@ -772,6 +772,23 @@ const char *bio_devname(struct bio *bio, char *buf) } EXPORT_SYMBOL(bio_devname); +/** + * bio_full - check if the bio is full + * @bio: bio to check + * @len: length of one segment to be added + * + * Return true if @bio is full and one segment with @len bytes can't be + * added to the bio, otherwise return false + */ +static inline bool bio_full(struct bio *bio, unsigned len) +{ + if (bio->bi_vcnt >= bio->bi_max_vecs) + return true; + if (bio->bi_iter.bi_size > UINT_MAX - len) + return true; + return false; +} + static inline bool page_is_mergeable(const struct bio_vec *bv, struct page *page, unsigned int len, unsigned int off, bool *same_page) diff --git a/include/linux/bio.h b/include/linux/bio.h index cffd5eba401ed..2ffc7c768091c 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -77,25 +77,6 @@ static inline void *bio_data(struct bio *bio) return NULL; } -/** - * bio_full - check if the bio is full - * @bio: bio to check - * @len: length of one segment to be added - * - * Return true if @bio is full and one segment with @len bytes can't be - * added to the bio, otherwise return false - */ -static inline bool bio_full(struct bio *bio, unsigned len) -{ - if (bio->bi_vcnt >= bio->bi_max_vecs) - return true; - - if (bio->bi_iter.bi_size > UINT_MAX - len) - return true; - - return false; -} - static inline bool bio_next_segment(const struct bio *bio, struct bvec_iter_all *iter) { From 9774b39175fe3606ce2a836a47134c53b75681c9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:02 +0200 Subject: [PATCH 055/205] block: mark __bio_try_merge_page static Mark __bio_try_merge_page static and move it up a bit to avoid the need for a forward declaration. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-7-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 77 ++++++++++++++++++++++----------------------- include/linux/bio.h | 2 -- 2 files changed, 38 insertions(+), 41 deletions(-) diff --git a/block/bio.c b/block/bio.c index 7e2899071de26..c89e402356a59 100644 --- a/block/bio.c +++ b/block/bio.c @@ -808,6 +808,44 @@ static inline bool page_is_mergeable(const struct bio_vec *bv, return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE); } +/** + * __bio_try_merge_page - try appending data to an existing bvec. + * @bio: destination bio + * @page: start page to add + * @len: length of the data to add + * @off: offset of the data relative to @page + * @same_page: return if the segment has been merged inside the same page + * + * Try to add the data at @page + @off to the last bvec of @bio. This is a + * useful optimisation for file systems with a block size smaller than the + * page size. + * + * Warn if (@len, @off) crosses pages in case that @same_page is true. + * + * Return %true on success or %false on failure. 
+ */ +static bool __bio_try_merge_page(struct bio *bio, struct page *page, + unsigned int len, unsigned int off, bool *same_page) +{ + if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) + return false; + + if (bio->bi_vcnt > 0) { + struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; + + if (page_is_mergeable(bv, page, len, off, same_page)) { + if (bio->bi_iter.bi_size > UINT_MAX - len) { + *same_page = false; + return false; + } + bv->bv_len += len; + bio->bi_iter.bi_size += len; + return true; + } + } + return false; +} + /* * Try to merge a page into a segment, while obeying the hardware segment * size limit. This is not for normal read/write bios, but for passthrough @@ -939,45 +977,6 @@ int bio_add_zone_append_page(struct bio *bio, struct page *page, } EXPORT_SYMBOL_GPL(bio_add_zone_append_page); -/** - * __bio_try_merge_page - try appending data to an existing bvec. - * @bio: destination bio - * @page: start page to add - * @len: length of the data to add - * @off: offset of the data relative to @page - * @same_page: return if the segment has been merged inside the same page - * - * Try to add the data at @page + @off to the last bvec of @bio. This is a - * useful optimisation for file systems with a block size smaller than the - * page size. - * - * Warn if (@len, @off) crosses pages in case that @same_page is true. - * - * Return %true on success or %false on failure. - */ -bool __bio_try_merge_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int off, bool *same_page) -{ - if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) - return false; - - if (bio->bi_vcnt > 0) { - struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; - - if (page_is_mergeable(bv, page, len, off, same_page)) { - if (bio->bi_iter.bi_size > UINT_MAX - len) { - *same_page = false; - return false; - } - bv->bv_len += len; - bio->bi_iter.bi_size += len; - return true; - } - } - return false; -} -EXPORT_SYMBOL_GPL(__bio_try_merge_page); - /** * __bio_add_page - add page(s) to a bio in a new segment * @bio: destination bio diff --git a/include/linux/bio.h b/include/linux/bio.h index 2ffc7c768091c..faa0a2fabaf08 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -425,8 +425,6 @@ extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); int bio_add_zone_append_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset); -bool __bio_try_merge_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int off, bool *same_page); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); From ff18d77b5f0cf3e25aa13741eed4d788a13c04d7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:03 +0200 Subject: [PATCH 056/205] block: move bio_get_{first,last}_bvec out of bio.h bio_get_first_bvec and bio_get_last_bvec are only used in blk-merge.c, so move them there. 
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-8-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-merge.c | 31 +++++++++++++++++++++++++++++++ include/linux/bio.h | 31 ------------------------------- 2 files changed, 31 insertions(+), 31 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index bf7aedaad8f8e..14ce19607cd8a 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -15,6 +15,37 @@ #include "blk-rq-qos.h" #include "blk-throttle.h" +static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) +{ + *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); +} + +static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) +{ + struct bvec_iter iter = bio->bi_iter; + int idx; + + bio_get_first_bvec(bio, bv); + if (bv->bv_len == bio->bi_iter.bi_size) + return; /* this bio only has a single bvec */ + + bio_advance_iter(bio, &iter, iter.bi_size); + + if (!iter.bi_bvec_done) + idx = iter.bi_idx - 1; + else /* in the middle of bvec */ + idx = iter.bi_idx; + + *bv = bio->bi_io_vec[idx]; + + /* + * iter.bi_bvec_done records actual length of the last bvec + * if this bio ends in the middle of one io vector + */ + if (iter.bi_bvec_done) + bv->bv_len = iter.bi_bvec_done; +} + static inline bool bio_will_gap(struct request_queue *q, struct request *prev_rq, struct bio *prev, struct bio *next) { diff --git a/include/linux/bio.h b/include/linux/bio.h index faa0a2fabaf08..4c5106f2ed57e 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -221,37 +221,6 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) bio->bi_flags &= ~(1U << bit); } -static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) -{ - *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); -} - -static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) -{ - struct bvec_iter iter = bio->bi_iter; - int idx; - - bio_get_first_bvec(bio, bv); - if (bv->bv_len == bio->bi_iter.bi_size) - return; /* this bio only has a single bvec */ - - bio_advance_iter(bio, &iter, iter.bi_size); - - if (!iter.bi_bvec_done) - idx = iter.bi_idx - 1; - else /* in the middle of bvec */ - idx = iter.bi_idx; - - *bv = bio->bi_io_vec[idx]; - - /* - * iter.bi_bvec_done records actual length of the last bvec - * if this bio ends in the middle of one io vector - */ - if (iter.bi_bvec_done) - bv->bv_len = iter.bi_bvec_done; -} - static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); From 4f7ab09a1ca0bc955635705c359ab3021b405fd0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 18:18:04 +0200 Subject: [PATCH 057/205] block: mark bio_truncate static bio_truncate is only used in bio.c, so mark it static. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012161804.991559-9-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 2 +- include/linux/bio.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/block/bio.c b/block/bio.c index c89e402356a59..d5120451c36a3 100644 --- a/block/bio.c +++ b/block/bio.c @@ -546,7 +546,7 @@ EXPORT_SYMBOL(zero_fill_bio); * REQ_OP_READ, zero the truncated part. This function should only * be used for handling corner cases, such as bio eod. 
*/ -void bio_truncate(struct bio *bio, unsigned new_size) +static void bio_truncate(struct bio *bio, unsigned new_size) { struct bio_vec bv; struct bvec_iter iter; diff --git a/include/linux/bio.h b/include/linux/bio.h index 4c5106f2ed57e..d8d27742a75f9 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -405,7 +405,6 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); extern void bio_free_pages(struct bio *bio); -void bio_truncate(struct bio *bio, unsigned new_size); void guard_bio_eod(struct bio *bio); void zero_fill_bio(struct bio *bio); From 8971a3b7f1bf2b3096e558a0362a469dee77f426 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 13 Oct 2021 09:57:13 +0100 Subject: [PATCH 058/205] blk-mq: optimise *end_request non-stat path We already have a blk_mq_need_time_stamp() check in __blk_mq_end_request() to get a timestamp, hide all the statistics accounting under it. It cuts some cycles for requests that don't need stats, and is free otherwise. Signed-off-by: Pavel Begunkov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/e0f2ea812e93a8adcd07101212e7d7e70ca304e7.1634115360.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index b7b8437f0a046..568773de9a7fe 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -579,20 +579,18 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug) inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { - u64 now = 0; + if (blk_mq_need_time_stamp(rq)) { + u64 now = ktime_get_ns(); - if (blk_mq_need_time_stamp(rq)) - now = ktime_get_ns(); + if (rq->rq_flags & RQF_STATS) { + blk_mq_poll_stats_start(rq->q); + blk_stat_add(rq, now); + } - if (rq->rq_flags & RQF_STATS) { - blk_mq_poll_stats_start(rq->q); - blk_stat_add(rq, now); + blk_mq_sched_completed_request(rq, now); + blk_account_io_done(rq, now); } - blk_mq_sched_completed_request(rq, now); - - blk_account_io_done(rq, now); - if (rq->end_io) { rq_qos_done(rq->q, rq); rq->end_io(rq, error); From 9672b0d43782047b1825a96bafee1b6aefa35bc2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 9 Oct 2021 13:02:23 -0600 Subject: [PATCH 059/205] sbitmap: add __sbitmap_queue_get_batch() The block layer tag allocation batching still calls into sbitmap to get each tag, but we can improve on that. Add __sbitmap_queue_get_batch(), which returns a mask of tags all at once, along with an offset for those tags. An example return would be 0xff, where bits 0..7 are set, with tag_offset == 128. The valid tags in this case would be 128..135. A batch is specific to an individual sbitmap_map, hence it cannot be larger than that. The requested number of tags is automatically reduced to the max that can be satisfied with a single map. On failure, 0 is returned. 
Caller should fall back to single tag allocation at that point/ Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 13 +++++++++++ lib/sbitmap.c | 51 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 2713e689ad66d..e30b56023eadf 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -426,6 +426,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth); */ int __sbitmap_queue_get(struct sbitmap_queue *sbq); +/** + * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits + * @sbq: Bitmap queue to allocate from. + * @nr_tags: number of tags requested + * @offset: offset to add to returned bits + * + * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is + * a bit in the mask returned, and the caller must add @offset to the value to + * get the absolute tag value. + */ +unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, + unsigned int *offset); + /** * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct * sbitmap_queue, limiting the depth used from each word, with preemption diff --git a/lib/sbitmap.c b/lib/sbitmap.c index b25db9be938af..f398e0ae548e1 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -489,6 +489,57 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq) } EXPORT_SYMBOL_GPL(__sbitmap_queue_get); +unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, + unsigned int *offset) +{ + struct sbitmap *sb = &sbq->sb; + unsigned int hint, depth; + unsigned long index, nr; + int i; + + if (unlikely(sb->round_robin)) + return 0; + + depth = READ_ONCE(sb->depth); + hint = update_alloc_hint_before_get(sb, depth); + + index = SB_NR_TO_INDEX(sb, hint); + + for (i = 0; i < sb->map_nr; i++) { + struct sbitmap_word *map = &sb->map[index]; + unsigned long get_mask; + + sbitmap_deferred_clear(map); + if (map->word == (1UL << (map->depth - 1)) - 1) + continue; + + nr = find_first_zero_bit(&map->word, map->depth); + if (nr + nr_tags <= map->depth) { + atomic_long_t *ptr = (atomic_long_t *) &map->word; + int map_tags = min_t(int, nr_tags, map->depth); + unsigned long val, ret; + + get_mask = ((1UL << map_tags) - 1) << nr; + do { + val = READ_ONCE(map->word); + ret = atomic_long_cmpxchg(ptr, val, get_mask | val); + } while (ret != val); + get_mask = (get_mask & ~ret) >> nr; + if (get_mask) { + *offset = nr + (index << sb->shift); + update_alloc_hint_after_get(sb, depth, hint, + *offset + map_tags - 1); + return get_mask; + } + } + /* Jump to next index. */ + if (++index >= sb->map_nr) + index = 0; + } + + return 0; +} + int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq, unsigned int shallow_depth) { From 349302da83529539040d2516de1deec1e09f491c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 9 Oct 2021 13:10:39 -0600 Subject: [PATCH 060/205] block: improve batched tag allocation Add a blk_mq_get_tags() helper, which uses the new sbitmap API for allocating a batch of tags all at once. This both simplifies the block code for batched allocation, and it is also more efficient than just doing repeated calls into __sbitmap_queue_get(). This reduces the sbitmap overhead in peak runs from ~3% to ~1% and yields a performanc increase from 6.6M IOPS to 6.8M IOPS for a single CPU core. 
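To make the mask-plus-offset convention concrete, here is a minimal userspace sketch of how a caller walks the bitmap handed back by __sbitmap_queue_get_batch()/blk_mq_get_tags(): each set bit i corresponds to absolute tag offset + i. The 0xff mask and offset of 128 are the example values from the sbitmap commit message above.

#include <stdio.h>

int main(void)
{
	unsigned long tags = 0xff;	/* bits 0..7 set */
	unsigned int offset = 128;
	int i;

	for (i = 0; tags; i++) {
		if (!(tags & (1UL << i)))
			continue;
		tags &= ~(1UL << i);
		printf("allocated tag %u\n", offset + i);	/* 128..135 */
	}
	return 0;
}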
Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 15 ++++++++ block/blk-mq-tag.h | 2 ++ block/blk-mq.c | 87 +++++++++++++++++++++++++++++----------------- 3 files changed, 73 insertions(+), 31 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 72a2724a4eeea..c43b972011617 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -86,6 +86,21 @@ static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, return __sbitmap_queue_get(bt); } +unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, + unsigned int *offset) +{ + struct blk_mq_tags *tags = blk_mq_tags_from_data(data); + struct sbitmap_queue *bt = &tags->bitmap_tags; + unsigned long ret; + + if (data->shallow_depth ||data->flags & BLK_MQ_REQ_RESERVED || + data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) + return 0; + ret = __sbitmap_queue_get_batch(bt, nr_tags, offset); + *offset += tags->nr_reserved_tags; + return ret; +} + unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) { struct blk_mq_tags *tags = blk_mq_tags_from_data(data); diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index d8ce89fa1686c..71c2f7d8e9b74 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -38,6 +38,8 @@ extern int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags, int node, int alloc_policy); extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); +unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, + unsigned int *offset); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, diff --git a/block/blk-mq.c b/block/blk-mq.c index 568773de9a7fe..97b911866de1d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -354,6 +354,38 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, return rq; } +static inline struct request * +__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, + u64 alloc_time_ns) +{ + unsigned int tag, tag_offset; + struct request *rq; + unsigned long tags; + int i, nr = 0; + + tags = blk_mq_get_tags(data, data->nr_tags, &tag_offset); + if (unlikely(!tags)) + return NULL; + + for (i = 0; tags; i++) { + if (!(tags & (1UL << i))) + continue; + tag = tag_offset + i; + tags &= ~(1UL << i); + rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); + rq->rq_next = *data->cached_rq; + *data->cached_rq = rq; + } + data->nr_tags -= nr; + + if (!data->cached_rq) + return NULL; + + rq = *data->cached_rq; + *data->cached_rq = rq->rq_next; + return rq; +} + static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) { struct request_queue *q = data->q; @@ -388,43 +420,36 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) if (!e) blk_mq_tag_busy(data->hctx); + /* + * Try batched alloc if we want more than 1 tag. + */ + if (data->nr_tags > 1) { + rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns); + if (rq) + return rq; + data->nr_tags = 1; + } + /* * Waiting allocations only fail because of an inactive hctx. In that * case just retry the hctx assignment and tag allocation as CPU hotplug * should have migrated us to an online CPU by now. */ - do { - tag = blk_mq_get_tag(data); - if (tag == BLK_MQ_NO_TAG) { - if (data->flags & BLK_MQ_REQ_NOWAIT) - break; - /* - * Give up the CPU and sleep for a random short time to - * ensure that thread using a realtime scheduling class - * are migrated off the CPU, and thus off the hctx that - * is going away. 
- */ - msleep(3); - goto retry; - } - - rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); - if (!--data->nr_tags || e || - (data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) - return rq; - - /* link into the cached list */ - rq->rq_next = *data->cached_rq; - *data->cached_rq = rq; - data->flags |= BLK_MQ_REQ_NOWAIT; - } while (1); - - if (!data->cached_rq) - return NULL; + tag = blk_mq_get_tag(data); + if (tag == BLK_MQ_NO_TAG) { + if (data->flags & BLK_MQ_REQ_NOWAIT) + return NULL; + /* + * Give up the CPU and sleep for a random short time to + * ensure that thread using a realtime scheduling class + * are migrated off the CPU, and thus off the hctx that + * is going away. + */ + msleep(3); + goto retry; + } - rq = *data->cached_rq; - *data->cached_rq = rq->rq_next; - return rq; + return blk_mq_rq_ctx_init(data, tag, alloc_time_ns); } struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, From df252bde82ac19324b26192ea5e7527fbc1b6033 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 22:59:57 +0900 Subject: [PATCH 061/205] block: remove redundant =y from BLK_CGROUP dependency CONFIG_BLK_CGROUP is a boolean option, that is, its value is 'y' or 'n'. The comparison to 'y' is redundant. Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-2-masahiroy@kernel.org Signed-off-by: Jens Axboe --- block/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index 8e28ae7718bd2..1d83504749e75 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -73,7 +73,7 @@ config BLK_DEV_ZONED config BLK_DEV_THROTTLING bool "Block layer bio throttling support" - depends on BLK_CGROUP=y + depends on BLK_CGROUP select BLK_CGROUP_RWSTAT help Block layer bio throttling support. It can be used to limit @@ -112,7 +112,7 @@ config BLK_WBT_MQ config BLK_CGROUP_IOLATENCY bool "Enable support for latency based cgroup IO protection" - depends on BLK_CGROUP=y + depends on BLK_CGROUP help Enabling this option enables the .latency interface for IO throttling. The IO controller will attempt to maintain average IO latencies below @@ -132,7 +132,7 @@ config BLK_CGROUP_FC_APPID config BLK_CGROUP_IOCOST bool "Enable support for cost model based cgroup IO controller" - depends on BLK_CGROUP=y + depends on BLK_CGROUP select BLK_RQ_IO_DATA_LEN select BLK_RQ_ALLOC_TIME help From c50fca55d4395ae27a57dee820f6df9b6a26c295 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 22:59:58 +0900 Subject: [PATCH 062/205] block: simplify Kconfig files Everything under block/ depends on BLOCK. BLOCK_HOLDER_DEPRECATED is selected from drivers/md/Kconfig, which is entirely dependent on BLOCK. Extend the 'if BLOCK' ... 'endif' so it covers the whole block/Kconfig. Also, clean up the definition of BLOCK_COMPAT and BLK_MQ_PCI because COMPAT and PCI are boolean. 
Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-3-masahiroy@kernel.org Signed-off-by: Jens Axboe --- block/Kconfig | 18 +++++++----------- block/Kconfig.iosched | 4 ---- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index 1d83504749e75..c4d35829ea4f1 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -196,33 +196,29 @@ source "block/partitions/Kconfig" endmenu -endif # BLOCK - config BLOCK_COMPAT - bool - depends on BLOCK && COMPAT - default y + def_bool COMPAT config BLK_MQ_PCI - bool - depends on BLOCK && PCI - default y + def_bool PCI config BLK_MQ_VIRTIO bool - depends on BLOCK && VIRTIO + depends on VIRTIO default y config BLK_MQ_RDMA bool - depends on BLOCK && INFINIBAND + depends on INFINIBAND default y config BLK_PM - def_bool BLOCK && PM + def_bool PM # do not use in new code config BLOCK_HOLDER_DEPRECATED bool source "block/Kconfig.iosched" + +endif # BLOCK diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 2f2158e05a91c..885fee86dfcae 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -1,6 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -if BLOCK - menu "IO Schedulers" config MQ_IOSCHED_DEADLINE @@ -45,5 +43,3 @@ config BFQ_CGROUP_DEBUG files in a cgroup which can be useful for debugging. endmenu - -endif From b8b98a6225c9140ff5c2b3dce99a70ffba98e6d3 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 22:59:59 +0900 Subject: [PATCH 063/205] block: move menu "Partition type" to block/partitions/Kconfig Move the menu to the relevant place. Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-4-masahiroy@kernel.org Signed-off-by: Jens Axboe --- block/Kconfig | 4 ---- block/partitions/Kconfig | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index c4d35829ea4f1..c6ce41a5e5b27 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -190,12 +190,8 @@ config BLK_INLINE_ENCRYPTION_FALLBACK by falling back to the kernel crypto API when inline encryption hardware is not present. -menu "Partition Types" - source "block/partitions/Kconfig" -endmenu - config BLOCK_COMPAT def_bool COMPAT diff --git a/block/partitions/Kconfig b/block/partitions/Kconfig index 278593b8e4e95..7aff4eb81c60f 100644 --- a/block/partitions/Kconfig +++ b/block/partitions/Kconfig @@ -2,6 +2,8 @@ # # Partition configuration # +menu "Partition Types" + config PARTITION_ADVANCED bool "Advanced partition selection" help @@ -267,3 +269,5 @@ config CMDLINE_PARTITION help Say Y here if you want to read the partition table from bootargs. The format for the command line is just like mtdparts. + +endmenu From 4c928904ff771a8e830773b71a080047365324a5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 27 Sep 2021 23:00:00 +0900 Subject: [PATCH 064/205] block: move CONFIG_BLOCK guard to top Makefile Every object under block/ depends on CONFIG_BLOCK. Move the guard to the top Makefile since there is no point to descend into block/ if CONFIG_BLOCK=n. 
Signed-off-by: Masahiro Yamada Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20210927140000.866249-5-masahiroy@kernel.org Signed-off-by: Jens Axboe --- Makefile | 3 ++- block/Makefile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 91297670da8e0..95ba2c34dc336 100644 --- a/Makefile +++ b/Makefile @@ -1115,7 +1115,8 @@ export MODORDER := $(extmod_prefix)modules.order export MODULES_NSDEPS := $(extmod_prefix)modules.nsdeps ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ +core-$(CONFIG_BLOCK) += block/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ diff --git a/block/Makefile b/block/Makefile index 41aa1ba69c900..74df168729ecb 100644 --- a/block/Makefile +++ b/block/Makefile @@ -3,7 +3,7 @@ # Makefile for the kernel block layer # -obj-$(CONFIG_BLOCK) := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ +obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \ blk-flush.o blk-settings.o blk-ioc.o blk-map.o \ blk-exec.o blk-merge.o blk-timeout.o \ blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \ From d38a9c04c0d5637a828269dccb9703d42d40d42b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 07:24:07 -0600 Subject: [PATCH 065/205] block: only check previous entry for plug merge attempt Currently we scan the entire plug list, which is potentially very expensive. In an IOPS bound workload, we can drive about 5.6M IOPS with merging enabled, and profiling shows that the plug merge check is the (by far) most expensive thing we're doing: Overhead Command Shared Object Symbol + 20.89% io_uring [kernel.vmlinux] [k] blk_attempt_plug_merge + 4.98% io_uring [kernel.vmlinux] [k] io_submit_sqes + 4.78% io_uring [kernel.vmlinux] [k] blkdev_direct_IO + 4.61% io_uring [kernel.vmlinux] [k] blk_mq_submit_bio Instead of browsing the whole list, just check the previously inserted entry. That is enough for a naive merge check and will catch most cases, and for devices that need full merging, the IO scheduler attached to such devices will do that anyway. The plug merge is meant to be an inexpensive check to avoid getting a request, but if we repeatedly scan the list for every single insert, it is very much not a cheap check. With this patch, the workload instead runs at ~7.0M IOPS, providing a 25% improvement. Disabling merging entirely yields another 5% improvement. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-merge.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index 14ce19607cd8a..9b77b4d6c2a1f 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1084,8 +1084,8 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, * another request associated with @q is found on the plug list * (optional, may be %NULL) * - * Determine whether @bio being queued on @q can be merged with a request - * on %current's plugged list. Returns %true if merge was successful, + * Determine whether @bio being queued on @q can be merged with the previous + * request on %current's plugged list. Returns %true if merge was successful, * otherwise %false. 
* * Plugging coalesces IOs from the same issuer for the same purpose without @@ -1102,32 +1102,22 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, { struct blk_plug *plug; struct request *rq; - struct list_head *plug_list; plug = blk_mq_plug(q, bio); - if (!plug) + if (!plug || list_empty(&plug->mq_list)) return false; - plug_list = &plug->mq_list; - - list_for_each_entry_reverse(rq, plug_list, queuelist) { - if (rq->q == q && same_queue_rq) { - /* - * Only blk-mq multiple hardware queues case checks the - * rq in the same queue, there should be only one such - * rq in a queue - **/ - *same_queue_rq = rq; - } - - if (rq->q != q) - continue; - - if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == - BIO_MERGE_OK) - return true; + /* check the previously added entry for a quick merge attempt */ + rq = list_last_entry(&plug->mq_list, struct request, queuelist); + if (rq->q == q && same_queue_rq) { + /* + * Only blk-mq multiple hardware queues case checks the rq in + * the same queue, there should be only one such rq in a queue + */ + *same_queue_rq = rq; } - + if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == BIO_MERGE_OK) + return true; return false; } From 94c2ed58d0d856a35c04365bdb39fee6e77547de Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:11 +0200 Subject: [PATCH 066/205] direct-io: remove blk_poll support The polling support in the legacy direct-io support is a little crufty. It already doesn't support the asynchronous polling needed for io_uring polling, and is hard to adopt to upcoming changes in the polling interfaces. Given that all the major file systems already use the iomap direct I/O code, just drop the polling support. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-2-hch@lst.de Signed-off-by: Jens Axboe --- fs/direct-io.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/fs/direct-io.c b/fs/direct-io.c index b2e86e739d7a1..453dcff0e7f56 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -119,7 +119,6 @@ struct dio { int flags; /* doesn't change */ int op; int op_flags; - blk_qc_t bio_cookie; struct gendisk *bio_disk; struct inode *inode; loff_t i_size; /* i_size when submitted */ @@ -438,11 +437,10 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) dio->bio_disk = bio->bi_bdev->bd_disk; - if (sdio->submit_io) { + if (sdio->submit_io) sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); - dio->bio_cookie = BLK_QC_T_NONE; - } else - dio->bio_cookie = submit_bio(bio); + else + submit_bio(bio); sdio->bio = NULL; sdio->boundary = 0; @@ -481,9 +479,7 @@ static struct bio *dio_await_one(struct dio *dio) __set_current_state(TASK_UNINTERRUPTIBLE); dio->waiter = current; spin_unlock_irqrestore(&dio->bio_lock, flags); - if (!(dio->iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true)) - blk_io_schedule(); + blk_io_schedule(); /* wake up sets us TASK_RUNNING */ spin_lock_irqsave(&dio->bio_lock, flags); dio->waiter = NULL; @@ -1214,8 +1210,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, } else { dio->op = REQ_OP_READ; } - if (iocb->ki_flags & IOCB_HIPRI) - dio->op_flags |= REQ_HIPRI; /* * For AIO O_(D)SYNC writes we need to defer completions to a workqueue From 71fc3f5e2c00c966e6a2ffebadfbcc6914249d32 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:12 +0200 Subject: [PATCH 067/205] block: don't try to poll 
multi-bio I/Os in __blkdev_direct_IO If an iocb is split into multiple bios we can't poll for both. So don't even bother to try to poll in that case. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012111226.760968-3-hch@lst.de Signed-off-by: Jens Axboe --- block/fops.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/block/fops.c b/block/fops.c index 7bb9581a146cf..15324f2e5a914 100644 --- a/block/fops.c +++ b/block/fops.c @@ -197,7 +197,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, struct blk_plug plug; struct blkdev_dio *dio; struct bio *bio; - bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; + bool do_poll = (iocb->ki_flags & IOCB_HIPRI); bool is_read = (iov_iter_rw(iter) == READ), is_sync; loff_t pos = iocb->ki_pos; blk_qc_t qc = BLK_QC_T_NONE; @@ -226,7 +226,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, * Don't plug for HIPRI/polled IO, as those should go straight * to issue */ - if (!is_poll) + if (!(iocb->ki_flags & IOCB_HIPRI)) blk_start_plug(&plug); for (;;) { @@ -260,20 +260,13 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS); if (!nr_pages) { - bool polled = false; - - if (iocb->ki_flags & IOCB_HIPRI) { + if (do_poll) bio_set_polled(bio, iocb); - polled = true; - } - qc = submit_bio(bio); - - if (polled) + if (do_poll) WRITE_ONCE(iocb->ki_cookie, qc); break; } - if (!dio->multi_bio) { /* * AIO needs an extra reference to ensure the dio @@ -284,6 +277,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio_get(bio); dio->multi_bio = true; atomic_set(&dio->ref, 2); + do_poll = false; } else { atomic_inc(&dio->ref); } @@ -292,7 +286,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio = bio_alloc(GFP_KERNEL, nr_pages); } - if (!is_poll) + if (!(iocb->ki_flags & IOCB_HIPRI)) blk_finish_plug(&plug); if (!is_sync) @@ -303,8 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(bdev_get_queue(bdev), qc, true)) + if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From f79d474905fec0bfae1244e75571b7916fe02ea2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:13 +0200 Subject: [PATCH 068/205] iomap: don't try to poll multi-bio I/Os in __iomap_dio_rw If an iocb is split into multiple bios we can't poll for both. So don't bother to even try to poll in that case. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-4-hch@lst.de Signed-off-by: Jens Axboe --- fs/iomap/direct-io.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 4ecd255e0511c..560ae967f70e8 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -282,6 +282,13 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, if (!iov_iter_count(dio->submit.iter)) goto out; + /* + * We can only poll for single bio I/Os. 
+ */ + if (need_zeroout || + ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) + dio->iocb->ki_flags &= ~IOCB_HIPRI; + if (need_zeroout) { /* zero out from the start of the block to the write offset */ pad = pos & (fs_block_size - 1); @@ -339,6 +346,11 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS); + /* + * We can only poll for single bio I/Os. + */ + if (nr_pages) + dio->iocb->ki_flags &= ~IOCB_HIPRI; iomap_dio_submit_bio(iter, dio, bio, pos); pos += n; } while (nr_pages); @@ -565,8 +577,15 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, inode_dio_begin(inode); blk_start_plug(&plug); - while ((ret = iomap_iter(&iomi, ops)) > 0) + while ((ret = iomap_iter(&iomi, ops)) > 0) { iomi.processed = iomap_dio_iter(&iomi, dio); + + /* + * We can only poll for single bio I/Os. + */ + iocb->ki_flags &= ~IOCB_HIPRI; + } + blk_finish_plug(&plug); /* From 30da1b45b130c70945b033900f45c9d61d6f3b4a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:14 +0200 Subject: [PATCH 069/205] io_uring: fix a layering violation in io_iopoll_req_issued syscall-level code can't just poke into the details of the poll cookie, which is private information of the block layer. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20211012111226.760968-5-hch@lst.de Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index e68d27829bb21..d2e86788c8720 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2738,19 +2738,12 @@ static void io_iopoll_req_issued(struct io_kiocb *req) ctx->poll_multi_queue = false; } else if (!ctx->poll_multi_queue) { struct io_kiocb *list_req; - unsigned int queue_num0, queue_num1; list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, inflight_entry); - if (list_req->file != req->file) { + if (list_req->file != req->file) ctx->poll_multi_queue = true; - } else { - queue_num0 = blk_qc_t_to_queue_num(list_req->rw.kiocb.ki_cookie); - queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); - if (queue_num0 != queue_num1) - ctx->poll_multi_queue = true; - } } /* From f70299f0d58e0e21f7f5f5ab27e601e8d3f0373e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:15 +0200 Subject: [PATCH 070/205] blk-mq: factor out a blk_qc_to_hctx helper Add a helper to get the hctx from a request_queue and cookie, and fold the blk_qc_t_to_queue_num helper into it as no other callers are left. Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-6-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 +++++++- include/linux/blk_types.h | 5 ----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 97b911866de1d..35b2ab0a373ac 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -65,6 +65,12 @@ static int blk_mq_poll_stats_bkt(const struct request *rq) return bucket; } +static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, + blk_qc_t qc) +{ + return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT]; +} + /* * Check if any of the ctx, dispatch list or elevator * have pending work in this hardware queue. 
@@ -4071,7 +4077,7 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) if (current->plug) blk_flush_plug_list(current->plug, false); - hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; + hctx = blk_qc_to_hctx(q, cookie); /* * If we sleep, have the caller restart the poll loop to reset diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 3b967053e9f5a..000351c5312af 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -505,11 +505,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie) return cookie != BLK_QC_T_NONE; } -static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) -{ - return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT; -} - static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) { return cookie & ((1u << BLK_QC_T_SHIFT) - 1); From c6699d6fe0ffe4d9fdc652d1acf5a94b4f9627ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:16 +0200 Subject: [PATCH 071/205] blk-mq: factor out a "classic" poll helper Factor the code to do the classic full metal polling out of blk_poll into a separate blk_mq_poll_classic helper. Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-7-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 120 +++++++++++++++++++++++-------------------------- 1 file changed, 56 insertions(+), 64 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 35b2ab0a373ac..636773056874b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -71,6 +71,14 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT]; } +static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, + blk_qc_t qc) +{ + if (blk_qc_t_is_internal(qc)) + return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc)); + return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc)); +} + /* * Check if any of the ctx, dispatch list or elevator * have pending work in this hardware queue. @@ -3975,15 +3983,20 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q, return ret; } -static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, - struct request *rq) +static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc) { + struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc); + struct request *rq = blk_qc_to_rq(hctx, qc); struct hrtimer_sleeper hs; enum hrtimer_mode mode; unsigned int nsecs; ktime_t kt; - if (rq->rq_flags & RQF_MQ_POLL_SLEPT) + /* + * If a request has completed on queue that uses an I/O scheduler, we + * won't get back a request from blk_qc_to_rq. + */ + if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT)) return false; /* @@ -4025,32 +4038,48 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, __set_current_state(TASK_RUNNING); destroy_hrtimer_on_stack(&hs.timer); + + /* + * If we sleep, have the caller restart the poll loop to reset the + * state. Like for the other success return cases, the caller is + * responsible for checking if the IO completed. If the IO isn't + * complete, we'll get called again and will go straight to the busy + * poll loop. 
+ */ return true; } -static bool blk_mq_poll_hybrid(struct request_queue *q, - struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) +static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, + bool spin) { - struct request *rq; + struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); + long state = get_current_state(); + int ret; - if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) - return false; + hctx->poll_considered++; - if (!blk_qc_t_is_internal(cookie)) - rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); - else { - rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); - /* - * With scheduling, if the request has completed, we'll - * get a NULL return here, as we clear the sched tag when - * that happens. The request still remains valid, like always, - * so we should be safe with just the NULL check. - */ - if (!rq) - return false; - } + do { + hctx->poll_invoked++; - return blk_mq_poll_hybrid_sleep(q, rq); + ret = q->mq_ops->poll(hctx); + if (ret > 0) { + hctx->poll_success++; + __set_current_state(TASK_RUNNING); + return ret; + } + + if (signal_pending_state(state, current)) + __set_current_state(TASK_RUNNING); + if (task_is_running(current)) + return 1; + + if (ret < 0 || !spin) + break; + cpu_relax(); + } while (!need_resched()); + + __set_current_state(TASK_RUNNING); + return 0; } /** @@ -4067,9 +4096,6 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, */ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) { - struct blk_mq_hw_ctx *hctx; - unsigned int state; - if (!blk_qc_t_valid(cookie) || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return 0; @@ -4077,46 +4103,12 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) if (current->plug) blk_flush_plug_list(current->plug, false); - hctx = blk_qc_to_hctx(q, cookie); - - /* - * If we sleep, have the caller restart the poll loop to reset - * the state. Like for the other success return cases, the - * caller is responsible for checking if the IO completed. If - * the IO isn't complete, we'll get called again and will go - * straight to the busy poll loop. If specified not to spin, - * we also should not sleep. - */ - if (spin && blk_mq_poll_hybrid(q, hctx, cookie)) - return 1; - - hctx->poll_considered++; - - state = get_current_state(); - do { - int ret; - - hctx->poll_invoked++; - - ret = q->mq_ops->poll(hctx); - if (ret > 0) { - hctx->poll_success++; - __set_current_state(TASK_RUNNING); - return ret; - } - - if (signal_pending_state(state, current)) - __set_current_state(TASK_RUNNING); - - if (task_is_running(current)) + /* If specified not to spin, we also should not sleep. */ + if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { + if (blk_mq_poll_hybrid(q, cookie)) return 1; - if (ret < 0 || !spin) - break; - cpu_relax(); - } while (!need_resched()); - - __set_current_state(TASK_RUNNING); - return 0; + } + return blk_mq_poll_classic(q, cookie, spin); } EXPORT_SYMBOL_GPL(blk_poll); From efbabbe121f96d4b1a98a2c2ef5d2e8f7fb41c87 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:17 +0200 Subject: [PATCH 072/205] blk-mq: remove blk_qc_t_to_tag and blk_qc_t_is_internal Merge both functions into their only caller to keep the blk-mq tag to blk_qc_t mapping as private as possible in blk-mq.c. 
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-8-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 8 +++++--- include/linux/blk_types.h | 10 ---------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 636773056874b..24a10c973f79f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -74,9 +74,11 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, blk_qc_t qc) { - if (blk_qc_t_is_internal(qc)) - return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc)); - return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc)); + unsigned int tag = qc & ((1U << BLK_QC_T_SHIFT) - 1); + + if (qc & BLK_QC_T_INTERNAL) + return blk_mq_tag_to_rq(hctx->sched_tags, tag); + return blk_mq_tag_to_rq(hctx->tags, tag); } /* diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 000351c5312af..fb7c1477617b8 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -505,16 +505,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie) return cookie != BLK_QC_T_NONE; } -static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie) -{ - return cookie & ((1u << BLK_QC_T_SHIFT) - 1); -} - -static inline bool blk_qc_t_is_internal(blk_qc_t cookie) -{ - return (cookie & BLK_QC_T_INTERNAL) != 0; -} - struct blk_rq_stat { u64 mean; u64 min; From 28a1ae6b9daba6ac65700eeb38479bd6fadec089 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:18 +0200 Subject: [PATCH 073/205] blk-mq: remove blk_qc_t_valid Move the trivial check into the only caller. Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-9-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- include/linux/blk_types.h | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 24a10c973f79f..7d0d947921a6d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4098,7 +4098,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, */ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) { - if (!blk_qc_t_valid(cookie) || + if (cookie == BLK_QC_T_NONE || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return 0; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index fb7c1477617b8..5017ba8fc5392 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -500,11 +500,6 @@ typedef unsigned int blk_qc_t; #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) -static inline bool blk_qc_t_valid(blk_qc_t cookie) -{ - return cookie != BLK_QC_T_NONE; -} - struct blk_rq_stat { u64 mean; u64 min; From ef99b2d37666b7a600baab9e1c4944436652b0a2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:19 +0200 Subject: [PATCH 074/205] block: replace the spin argument to blk_iopoll with a flags argument Switch the boolean spin argument to blk_poll to passing a set of flags instead. This will allow to control polling behavior in a more fine grained way. 
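The flags word makes the caller's intent explicit instead of overloading a bool: 0 asks blk_poll() to keep polling until a completion shows up or the task has to reschedule, while BLK_POLL_ONESHOT (used in the hunk below) requests a single pass. A toy, self-contained model of just that loop shape follows; the flag value and the fake driver poll are illustrative only, and the real code also breaks out on signals and need_resched().

#include <stdio.h>

#define BLK_POLL_ONESHOT	(1U << 0)	/* illustrative value */

/* pretend the device "completes" after a few polls */
static int fake_driver_poll(int *pending)
{
	return --(*pending) <= 0;
}

static int poll_classic(int *pending, unsigned int flags, int *polls)
{
	*polls = 0;
	do {
		(*polls)++;
		if (fake_driver_poll(pending))
			return 1;		/* found a completion */
		if (flags & BLK_POLL_ONESHOT)
			break;			/* caller wanted one pass */
	} while (1);
	return 0;
}

int main(void)
{
	int a = 3, b = 3, polls, found;

	found = poll_classic(&a, 0, &polls);
	printf("flags=0:          found=%d after %d polls\n", found, polls);
	found = poll_classic(&b, BLK_POLL_ONESHOT, &polls);
	printf("BLK_POLL_ONESHOT: found=%d after %d polls\n", found, polls);
	return 0;
}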
Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de [axboe: adapt to changed io_uring iopoll] Signed-off-by: Jens Axboe --- block/blk-exec.c | 2 +- block/blk-mq.c | 17 +++++++---------- block/fops.c | 8 ++++---- fs/io_uring.c | 9 +++++---- fs/iomap/direct-io.c | 6 +++--- include/linux/blkdev.h | 4 +++- include/linux/fs.h | 2 +- include/linux/iomap.h | 2 +- mm/page_io.c | 2 +- 9 files changed, 26 insertions(+), 26 deletions(-) diff --git a/block/blk-exec.c b/block/blk-exec.c index d6cd501c0d348..1fa7f25e57262 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -71,7 +71,7 @@ static bool blk_rq_is_poll(struct request *rq) static void blk_rq_poll_completion(struct request *rq, struct completion *wait) { do { - blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true); + blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0); cond_resched(); } while (!completion_done(wait)); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 7d0d947921a6d..6609e10657a83 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4052,7 +4052,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc) } static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, - bool spin) + unsigned int flags) { struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); long state = get_current_state(); @@ -4075,7 +4075,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, if (task_is_running(current)) return 1; - if (ret < 0 || !spin) + if (ret < 0 || (flags & BLK_POLL_ONESHOT)) break; cpu_relax(); } while (!need_resched()); @@ -4088,15 +4088,13 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, * blk_poll - poll for IO completions * @q: the queue * @cookie: cookie passed back at IO submission time - * @spin: whether to spin for completions + * @flags: BLK_POLL_* flags that control the behavior * * Description: * Poll for completions on the passed in queue. Returns number of - * completed entries found. If @spin is true, then blk_poll will continue - * looping until at least one completion is found, unless the task is - * otherwise marked running (or we need to reschedule). + * completed entries found. */ -int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) +int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) { if (cookie == BLK_QC_T_NONE || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) @@ -4105,12 +4103,11 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) if (current->plug) blk_flush_plug_list(current->plug, false); - /* If specified not to spin, we also should not sleep. 
*/ - if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { + if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) return 1; } - return blk_mq_poll_classic(q, cookie, spin); + return blk_mq_poll_classic(q, cookie, flags); } EXPORT_SYMBOL_GPL(blk_poll); diff --git a/block/fops.c b/block/fops.c index 15324f2e5a914..db8f2fe68dd27 100644 --- a/block/fops.c +++ b/block/fops.c @@ -108,7 +108,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, if (!READ_ONCE(bio.bi_private)) break; if (!(iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(bdev_get_queue(bdev), qc, true)) + !blk_poll(bdev_get_queue(bdev), qc, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -141,12 +141,12 @@ struct blkdev_dio { static struct bio_set blkdev_dio_pool; -static int blkdev_iopoll(struct kiocb *kiocb, bool wait) +static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags) { struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); struct request_queue *q = bdev_get_queue(bdev); - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait); + return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); } static void blkdev_bio_end_io(struct bio *bio) @@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true)) + if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/fs/io_uring.c b/fs/io_uring.c index d2e86788c8720..541fec2bd49ad 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2457,14 +2457,15 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, long min) { struct io_kiocb *req, *tmp; + unsigned int poll_flags = 0; LIST_HEAD(done); - bool spin; /* * Only spin for completions if we don't have multiple devices hanging * off our complete list, and we're under the requested amount. 
*/ - spin = !ctx->poll_multi_queue && *nr_events < min; + if (ctx->poll_multi_queue || *nr_events >= min) + poll_flags |= BLK_POLL_ONESHOT; list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { struct kiocb *kiocb = &req->rw.kiocb; @@ -2482,11 +2483,11 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) break; - ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin); + ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags); if (unlikely(ret < 0)) return ret; else if (ret) - spin = false; + poll_flags |= BLK_POLL_ONESHOT; /* iopoll may have completed current req */ if (READ_ONCE(req->iopoll_completed)) diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 560ae967f70e8..236aba256cd17 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -49,13 +49,13 @@ struct iomap_dio { }; }; -int iomap_dio_iopoll(struct kiocb *kiocb, bool spin) +int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags) { struct request_queue *q = READ_ONCE(kiocb->private); if (!q) return 0; - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin); + return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); } EXPORT_SYMBOL_GPL(iomap_dio_iopoll); @@ -642,7 +642,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (!(iocb->ki_flags & IOCB_HIPRI) || !dio->submit.last_queue || !blk_poll(dio->submit.last_queue, - dio->submit.cookie, true)) + dio->submit.cookie, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 17705c970d7e1..e177346bc0208 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -564,7 +564,9 @@ extern const char *blk_op_str(unsigned int op); int blk_status_to_errno(blk_status_t status); blk_status_t errno_to_blk_status(int errno); -int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin); +/* only poll the hardware once, don't continue until a completion was found */ +#define BLK_POLL_ONESHOT (1 << 0) +int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { diff --git a/include/linux/fs.h b/include/linux/fs.h index e7a633353fd20..c443cddf414fd 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2075,7 +2075,7 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); - int (*iopoll)(struct kiocb *kiocb, bool spin); + int (*iopoll)(struct kiocb *kiocb, unsigned int flags); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 24f8489583ca7..1e86b65567c21 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -337,7 +337,7 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags); ssize_t iomap_dio_complete(struct iomap_dio *dio); -int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); +int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags); #ifdef CONFIG_SWAP struct file; diff --git a/mm/page_io.c b/mm/page_io.c index c493ce9ebcf50..5d5543fcefa4e 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -428,7 +428,7 @@ int swap_readpage(struct page *page, bool 
synchronous) if (!READ_ONCE(bio->bi_private)) break; - if (!blk_poll(disk->queue, qc, true)) + if (!blk_poll(disk->queue, qc, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From d729cf9acb9311956c8a37113dcfa0160a2d9665 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:20 +0200 Subject: [PATCH 075/205] io_uring: don't sleep when polling for I/O There is no point in sleeping for the expected I/O completion timeout in the io_uring async polling model as we never poll for a specific I/O. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-11-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-mq.c | 3 ++- fs/io_uring.c | 2 +- include/linux/blkdev.h | 2 ++ 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 6609e10657a83..97c24e461d0aa 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4103,7 +4103,8 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) if (current->plug) blk_flush_plug_list(current->plug, false); - if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) { + if (!(flags & BLK_POLL_NOSLEEP) && + q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) return 1; } diff --git a/fs/io_uring.c b/fs/io_uring.c index 541fec2bd49ad..c5066146b8de3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2457,7 +2457,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, long min) { struct io_kiocb *req, *tmp; - unsigned int poll_flags = 0; + unsigned int poll_flags = BLK_POLL_NOSLEEP; LIST_HEAD(done); /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e177346bc0208..2b80c98fc373e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -566,6 +566,8 @@ blk_status_t errno_to_blk_status(int errno); /* only poll the hardware once, don't continue until a completion was found */ #define BLK_POLL_ONESHOT (1 << 0) +/* do not sleep to wait for the expected completion time */ +#define BLK_POLL_NOSLEEP (1 << 1) int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) From 6ce913fe3eee14f40f778e85999c9e599dda8c6b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:21 +0200 Subject: [PATCH 076/205] block: rename REQ_HIPRI to REQ_POLLED Unlike the RWF_HIPRI userspace ABI which is intentionally kept vague, the bio flag is specific to the polling implementation, so rename and document it properly. 
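To make the distinction concrete, here is a minimal userspace sketch of the deliberately vague RWF_HIPRI hint that, for direct I/O on a poll-capable queue, ends up as a polled bio in the kernel (not part of the patch; the /dev/nullb0 path is an assumption, adjust for your setup):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        void *buf;
        struct iovec iov;
        /* Assumed test device: a null_blk instance with polling enabled. */
        int fd = open("/dev/nullb0", O_RDONLY | O_DIRECT);

        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                return 1;

        iov.iov_base = buf;
        iov.iov_len = 4096;

        /* RWF_HIPRI asks the kernel to poll for completion of this read. */
        if (preadv2(fd, &iov, 1, 0, RWF_HIPRI) < 0)
                perror("preadv2");

        free(buf);
        close(fd);
        return 0;
}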
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Reviewed-by: Chaitanya Kulkarni Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-12-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-merge.c | 3 +-- block/blk-mq-debugfs.c | 2 +- block/blk-mq.c | 4 ++-- block/blk-mq.h | 4 ++-- block/blk.h | 4 ++-- drivers/nvme/host/core.c | 2 +- drivers/scsi/scsi_debug.c | 10 +++++----- include/linux/bio.h | 2 +- include/linux/blk_types.h | 4 ++-- mm/page_io.c | 2 +- 11 files changed, 19 insertions(+), 20 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 80fa7aa394c7e..8eb0e08d5395c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -842,7 +842,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio) } if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) - bio_clear_hipri(bio); + bio_clear_polled(bio); switch (bio_op(bio)) { case REQ_OP_DISCARD: diff --git a/block/blk-merge.c b/block/blk-merge.c index 9b77b4d6c2a1f..f88d7863f997c 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -318,8 +318,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, * iopoll in direct IO routine. Given performance gain of iopoll for * big IO can be trival, disable iopoll when split needed. */ - bio_clear_hipri(bio); - + bio_clear_polled(bio); return bio_split(bio, sectors, GFP_NOIO, bs); } diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 3daea160d670e..409a8256d9ffa 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -287,7 +287,7 @@ static const char *const cmd_flag_name[] = { CMD_FLAG_NAME(BACKGROUND), CMD_FLAG_NAME(NOWAIT), CMD_FLAG_NAME(NOUNMAP), - CMD_FLAG_NAME(HIPRI), + CMD_FLAG_NAME(POLLED), }; #undef CMD_FLAG_NAME diff --git a/block/blk-mq.c b/block/blk-mq.c index 97c24e461d0aa..a34ffcf861c31 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -732,7 +732,7 @@ bool blk_mq_complete_request_remote(struct request *rq) * For a polled request, always complete locallly, it's pointless * to redirect the completion. */ - if (rq->cmd_flags & REQ_HIPRI) + if (rq->cmd_flags & REQ_POLLED) return false; if (blk_mq_complete_need_ipi(rq)) { @@ -2278,7 +2278,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_throttle(q, bio); - hipri = bio->bi_opf & REQ_HIPRI; + hipri = bio->bi_opf & REQ_POLLED; plug = blk_mq_plug(q, bio); if (plug && plug->cached_rq) { diff --git a/block/blk-mq.h b/block/blk-mq.h index 5da970bb88659..a9fe01e149516 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -106,9 +106,9 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, enum hctx_type type = HCTX_TYPE_DEFAULT; /* - * The caller ensure that if REQ_HIPRI, poll must be enabled. + * The caller ensure that if REQ_POLLED, poll must be enabled. 
*/ - if (flags & REQ_HIPRI) + if (flags & REQ_POLLED) type = HCTX_TYPE_POLL; else if ((flags & REQ_OP_MASK) == REQ_OP_READ) type = HCTX_TYPE_READ; diff --git a/block/blk.h b/block/blk.h index cab8d659d8a69..fa05d3f07976f 100644 --- a/block/blk.h +++ b/block/blk.h @@ -416,11 +416,11 @@ extern struct device_attribute dev_attr_events; extern struct device_attribute dev_attr_events_async; extern struct device_attribute dev_attr_events_poll_msecs; -static inline void bio_clear_hipri(struct bio *bio) +static inline void bio_clear_polled(struct bio *bio) { /* can't support alloc cache if we turn off polling */ bio_clear_flag(bio, BIO_PERCPU_CACHE); - bio->bi_opf &= ~REQ_HIPRI; + bio->bi_opf &= ~REQ_POLLED; } long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 3d444b13cd697..ae15cb7145963 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -632,7 +632,7 @@ static inline void nvme_init_request(struct request *req, req->cmd_flags |= REQ_FAILFAST_DRIVER; if (req->mq_hctx->type == HCTX_TYPE_POLL) - req->cmd_flags |= REQ_HIPRI; + req->cmd_flags |= REQ_POLLED; nvme_clear_nvme_request(req); memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 66f507469a31a..40b473eea357f 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5384,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, { bool new_sd_dp; bool inject = false; - bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI; + bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED; int k, num_in_q, qdepth; unsigned long iflags; u64 ns_from_boot = 0; @@ -5471,7 +5471,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (sdebug_host_max_queue) sd_dp->hc_idx = get_tag(cmnd); - if (hipri) + if (polled) ns_from_boot = ktime_get_boottime_ns(); /* one of the resp_*() response functions is called here */ @@ -5531,7 +5531,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, kt -= d; } } - if (hipri) { + if (polled) { sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt); spin_lock_irqsave(&sqp->qc_lock, iflags); if (!sd_dp->init_poll) { @@ -5562,7 +5562,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) && atomic_read(&sdeb_inject_pending))) sd_dp->aborted = true; - if (hipri) { + if (polled) { sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot); spin_lock_irqsave(&sqp->qc_lock, iflags); if (!sd_dp->init_poll) { @@ -7331,7 +7331,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) if (kt_from_boot < sd_dp->cmpl_ts) continue; - } else /* ignoring non REQ_HIPRI requests */ + } else /* ignoring non REQ_POLLED requests */ continue; devip = (struct sdebug_dev_info *)scp->device->hostdata; if (likely(devip)) diff --git a/include/linux/bio.h b/include/linux/bio.h index d8d27742a75f9..c7a2d880e927d 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -706,7 +706,7 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page, */ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) { - bio->bi_opf |= REQ_HIPRI; + bio->bi_opf |= REQ_POLLED; if (!is_sync_kiocb(kiocb)) bio->bi_opf |= REQ_NOWAIT; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 5017ba8fc5392..f8b9fce688346 100644 --- 
a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -384,7 +384,7 @@ enum req_flag_bits { /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ - __REQ_HIPRI, + __REQ_POLLED, /* caller polls for completion using blk_poll */ /* for driver use */ __REQ_DRV, @@ -409,7 +409,7 @@ enum req_flag_bits { #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) -#define REQ_HIPRI (1ULL << __REQ_HIPRI) +#define REQ_POLLED (1ULL << __REQ_POLLED) #define REQ_DRV (1ULL << __REQ_DRV) #define REQ_SWAP (1ULL << __REQ_SWAP) diff --git a/mm/page_io.c b/mm/page_io.c index 5d5543fcefa4e..ed2eded74f3ad 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -416,7 +416,7 @@ int swap_readpage(struct page *page, bool synchronous) * attempt to access it in the page fault retry time check. */ if (synchronous) { - bio->bi_opf |= REQ_HIPRI; + bio->bi_opf |= REQ_POLLED; get_task_struct(current); bio->bi_private = current; } From 1a7e76e4f130332b5d3b0c72c4f664e59deb1239 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:22 +0200 Subject: [PATCH 077/205] block: use SLAB_TYPESAFE_BY_RCU for the bio slab This flags ensures that the pages will not be reused for non-bio allocations before the end of an RCU grace period. With that we can safely use a RCU lookup for bio polling as long as we are fine with occasionally polling the wrong device. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-13-hch@lst.de Signed-off-by: Jens Axboe --- block/bio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/block/bio.c b/block/bio.c index d5120451c36a3..df45f4b996aca 100644 --- a/block/bio.c +++ b/block/bio.c @@ -87,7 +87,8 @@ static struct bio_slab *create_bio_slab(unsigned int size) snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); bslab->slab = kmem_cache_create(bslab->name, size, - ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL); + ARCH_KMALLOC_MINALIGN, + SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL); if (!bslab->slab) goto fail_alloc_slab; From 19416123ab3e1348b3532347af221d8f60838431 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 12 Oct 2021 13:12:23 +0200 Subject: [PATCH 078/205] block: define 'struct bvec_iter' as packed 'struct bvec_iter' is embedded into 'struct bio', define it as packed so that we can get one extra 4bytes for other uses without expanding bio. 'struct bvec_iter' is often allocated on stack, so making it packed doesn't affect performance. Also I have run io_uring on both nvme/null_blk, and not observe performance effect in this way. 
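A standalone illustration of where the 4 bytes come from (userspace stand-in types, not the kernel definition): the 64-bit sector field forces 8-byte struct alignment, so the 20 bytes of members pad out to 24 unless the structure is packed:

#include <stdint.h>
#include <stdio.h>

struct bvec_iter_padded {
        uint64_t        bi_sector;      /* device address in 512-byte sectors */
        unsigned int    bi_size;        /* residual I/O count */
        unsigned int    bi_idx;         /* current index into bvl_vec */
        unsigned int    bi_bvec_done;   /* bytes completed in current bvec */
};

struct bvec_iter_packed {
        uint64_t        bi_sector;
        unsigned int    bi_size;
        unsigned int    bi_idx;
        unsigned int    bi_bvec_done;
} __attribute__((packed));

int main(void)
{
        printf("default: %zu bytes\n", sizeof(struct bvec_iter_padded)); /* 24 */
        printf("packed:  %zu bytes\n", sizeof(struct bvec_iter_packed)); /* 20 */
        return 0;
}

Dropping back to 20 bytes is what leaves room for the bi_cookie field that the bio-based polling change later in this series adds to struct bio without growing it.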
Suggested-by: Christoph Hellwig Signed-off-by: Ming Lei Reviewed-by: Sagi Grimberg Reviewed-by: Hannes Reinecke Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-14-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/bvec.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 0e9bdd42dafb6..35c25dff651a5 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -44,7 +44,7 @@ struct bvec_iter { unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ -}; +} __packed; struct bvec_iter_all { struct bio_vec bv; From 3e08773c3841e9db7a520908cc2b136a77d275ff Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:24 +0200 Subject: [PATCH 079/205] block: switch polling to be bio based Replace the blk_poll interface that requires the caller to keep a queue and cookie from the submissions with polling based on the bio. Polling for the bio itself leads to a few advantages: - the cookie construction can made entirely private in blk-mq.c - the caller does not need to remember the request_queue and cookie separately and thus sidesteps their lifetime issues - keeping the device and the cookie inside the bio allows to trivially support polling BIOs remapping by stacking drivers - a lot of code to propagate the cookie back up the submission path can be removed entirely. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-15-hch@lst.de Signed-off-by: Jens Axboe --- arch/m68k/emu/nfblock.c | 3 +- arch/xtensa/platforms/iss/simdisk.c | 3 +- block/bio.c | 1 + block/blk-core.c | 127 +++++++++++++++++++++------- block/blk-exec.c | 10 ++- block/blk-mq.c | 72 +++++----------- block/blk-mq.h | 2 + block/fops.c | 25 ++---- drivers/block/brd.c | 12 ++- drivers/block/drbd/drbd_int.h | 2 +- drivers/block/drbd/drbd_req.c | 3 +- drivers/block/n64cart.c | 12 ++- drivers/block/null_blk/main.c | 3 +- drivers/block/pktcdvd.c | 7 +- drivers/block/ps3vram.c | 6 +- drivers/block/rsxx/dev.c | 7 +- drivers/block/zram/zram_drv.c | 10 +-- drivers/md/bcache/request.c | 13 ++- drivers/md/bcache/request.h | 4 +- drivers/md/dm.c | 28 +++--- drivers/md/md.c | 10 +-- drivers/nvdimm/blk.c | 5 +- drivers/nvdimm/btt.c | 5 +- drivers/nvdimm/pmem.c | 3 +- drivers/nvme/host/multipath.c | 6 +- drivers/s390/block/dcssblk.c | 7 +- fs/btrfs/inode.c | 8 +- fs/ext4/file.c | 2 +- fs/gfs2/file.c | 4 +- fs/iomap/direct-io.c | 36 +++----- fs/xfs/xfs_file.c | 2 +- fs/zonefs/super.c | 2 +- include/linux/bio.h | 2 +- include/linux/blk-mq.h | 15 +--- include/linux/blk_types.h | 12 ++- include/linux/blkdev.h | 8 +- include/linux/fs.h | 6 +- include/linux/iomap.h | 5 +- mm/page_io.c | 8 +- 39 files changed, 232 insertions(+), 264 deletions(-) diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 9a8394e96388a..4ef457ba52209 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -58,7 +58,7 @@ struct nfhd_device { struct gendisk *disk; }; -static blk_qc_t nfhd_submit_bio(struct bio *bio) +static void nfhd_submit_bio(struct bio *bio) { struct nfhd_device *dev = bio->bi_bdev->bd_disk->private_data; struct bio_vec bvec; @@ -76,7 +76,6 @@ static blk_qc_t nfhd_submit_bio(struct bio *bio) sec += len; } bio_endio(bio); - return BLK_QC_T_NONE; } static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c 
index 3cdfa00738e07..ddd1fe3db4744 100644 --- a/arch/xtensa/platforms/iss/simdisk.c +++ b/arch/xtensa/platforms/iss/simdisk.c @@ -100,7 +100,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector, spin_unlock(&dev->lock); } -static blk_qc_t simdisk_submit_bio(struct bio *bio) +static void simdisk_submit_bio(struct bio *bio) { struct simdisk *dev = bio->bi_bdev->bd_disk->private_data; struct bio_vec bvec; @@ -118,7 +118,6 @@ static blk_qc_t simdisk_submit_bio(struct bio *bio) } bio_endio(bio); - return BLK_QC_T_NONE; } static int simdisk_open(struct block_device *bdev, fmode_t mode) diff --git a/block/bio.c b/block/bio.c index df45f4b996aca..a3c9ff23a036f 100644 --- a/block/bio.c +++ b/block/bio.c @@ -282,6 +282,7 @@ void bio_init(struct bio *bio, struct bio_vec *table, atomic_set(&bio->__bi_remaining, 1); atomic_set(&bio->__bi_cnt, 1); + bio->bi_cookie = BLK_QC_T_NONE; bio->bi_max_vecs = max_vecs; bio->bi_io_vec = table; diff --git a/block/blk-core.c b/block/blk-core.c index 8eb0e08d5395c..f008c38ae9675 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -915,25 +915,22 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio) return false; } -static blk_qc_t __submit_bio(struct bio *bio) +static void __submit_bio(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; - blk_qc_t ret = BLK_QC_T_NONE; if (unlikely(bio_queue_enter(bio) != 0)) - return BLK_QC_T_NONE; + return; if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio)) goto queue_exit; - if (disk->fops->submit_bio) { - ret = disk->fops->submit_bio(bio); - goto queue_exit; + if (!disk->fops->submit_bio) { + blk_mq_submit_bio(bio); + return; } - return blk_mq_submit_bio(bio); - + disk->fops->submit_bio(bio); queue_exit: blk_queue_exit(disk->queue); - return ret; } /* @@ -955,10 +952,9 @@ static blk_qc_t __submit_bio(struct bio *bio) * bio_list_on_stack[1] contains bios that were submitted before the current * ->submit_bio_bio, but that haven't been processed yet. */ -static blk_qc_t __submit_bio_noacct(struct bio *bio) +static void __submit_bio_noacct(struct bio *bio) { struct bio_list bio_list_on_stack[2]; - blk_qc_t ret = BLK_QC_T_NONE; BUG_ON(bio->bi_next); @@ -975,7 +971,7 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio) bio_list_on_stack[1] = bio_list_on_stack[0]; bio_list_init(&bio_list_on_stack[0]); - ret = __submit_bio(bio); + __submit_bio(bio); /* * Sort new bios into those for a lower level and those for the @@ -998,22 +994,19 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio) } while ((bio = bio_list_pop(&bio_list_on_stack[0]))); current->bio_list = NULL; - return ret; } -static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) +static void __submit_bio_noacct_mq(struct bio *bio) { struct bio_list bio_list[2] = { }; - blk_qc_t ret; current->bio_list = bio_list; do { - ret = __submit_bio(bio); + __submit_bio(bio); } while ((bio = bio_list_pop(&bio_list[0]))); current->bio_list = NULL; - return ret; } /** @@ -1025,7 +1018,7 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) * systems and other upper level users of the block layer should use * submit_bio() instead. */ -blk_qc_t submit_bio_noacct(struct bio *bio) +void submit_bio_noacct(struct bio *bio) { /* * We only want one ->submit_bio to be active at a time, else stack @@ -1033,14 +1026,12 @@ blk_qc_t submit_bio_noacct(struct bio *bio) * to collect a list of requests submited by a ->submit_bio method while * it is active, and then process them after it returned. 
*/ - if (current->bio_list) { + if (current->bio_list) bio_list_add(¤t->bio_list[0], bio); - return BLK_QC_T_NONE; - } - - if (!bio->bi_bdev->bd_disk->fops->submit_bio) - return __submit_bio_noacct_mq(bio); - return __submit_bio_noacct(bio); + else if (!bio->bi_bdev->bd_disk->fops->submit_bio) + __submit_bio_noacct_mq(bio); + else + __submit_bio_noacct(bio); } EXPORT_SYMBOL(submit_bio_noacct); @@ -1057,10 +1048,10 @@ EXPORT_SYMBOL(submit_bio_noacct); * in @bio. The bio must NOT be touched by thecaller until ->bi_end_io() has * been called. */ -blk_qc_t submit_bio(struct bio *bio) +void submit_bio(struct bio *bio) { if (blkcg_punt_bio_submit(bio)) - return BLK_QC_T_NONE; + return; /* * If it's a regular read/write or a barrier with data attached, @@ -1092,19 +1083,91 @@ blk_qc_t submit_bio(struct bio *bio) if (unlikely(bio_op(bio) == REQ_OP_READ && bio_flagged(bio, BIO_WORKINGSET))) { unsigned long pflags; - blk_qc_t ret; psi_memstall_enter(&pflags); - ret = submit_bio_noacct(bio); + submit_bio_noacct(bio); psi_memstall_leave(&pflags); - - return ret; + return; } - return submit_bio_noacct(bio); + submit_bio_noacct(bio); } EXPORT_SYMBOL(submit_bio); +/** + * bio_poll - poll for BIO completions + * @bio: bio to poll for + * @flags: BLK_POLL_* flags that control the behavior + * + * Poll for completions on queue associated with the bio. Returns number of + * completed entries found. + * + * Note: the caller must either be the context that submitted @bio, or + * be in a RCU critical section to prevent freeing of @bio. + */ +int bio_poll(struct bio *bio, unsigned int flags) +{ + struct request_queue *q = bio->bi_bdev->bd_disk->queue; + blk_qc_t cookie = READ_ONCE(bio->bi_cookie); + int ret; + + if (cookie == BLK_QC_T_NONE || + !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + return 0; + + if (current->plug) + blk_flush_plug_list(current->plug, false); + + if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)) + return 0; + if (WARN_ON_ONCE(!queue_is_mq(q))) + ret = 0; /* not yet implemented, should not happen */ + else + ret = blk_mq_poll(q, cookie, flags); + blk_queue_exit(q); + return ret; +} +EXPORT_SYMBOL_GPL(bio_poll); + +/* + * Helper to implement file_operations.iopoll. Requires the bio to be stored + * in iocb->private, and cleared before freeing the bio. + */ +int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags) +{ + struct bio *bio; + int ret = 0; + + /* + * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can + * point to a freshly allocated bio at this point. If that happens + * we have a few cases to consider: + * + * 1) the bio is beeing initialized and bi_bdev is NULL. We can just + * simply nothing in this case + * 2) the bio points to a not poll enabled device. bio_poll will catch + * this and return 0 + * 3) the bio points to a poll capable device, including but not + * limited to the one that the original bio pointed to. In this + * case we will call into the actual poll method and poll for I/O, + * even if we don't need to, but it won't cause harm either. + * + * For cases 2) and 3) above the RCU grace period ensures that bi_bdev + * is still allocated. Because partitions hold a reference to the whole + * device bdev and thus disk, the disk is also still valid. Grabbing + * a reference to the queue in bio_poll() ensures the hctxs and requests + * are still valid as well. 
+ */ + rcu_read_lock(); + bio = READ_ONCE(kiocb->private); + if (bio && bio->bi_bdev) + ret = bio_poll(bio, flags); + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL_GPL(iocb_bio_iopoll); + /** * blk_cloned_rq_check_limits - Helper function to check a cloned request * for the new queue limits diff --git a/block/blk-exec.c b/block/blk-exec.c index 1fa7f25e57262..55f0cd34b37b9 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -65,13 +65,19 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); static bool blk_rq_is_poll(struct request *rq) { - return rq->mq_hctx && rq->mq_hctx->type == HCTX_TYPE_POLL; + if (!rq->mq_hctx) + return false; + if (rq->mq_hctx->type != HCTX_TYPE_POLL) + return false; + if (WARN_ON_ONCE(!rq->bio)) + return false; + return true; } static void blk_rq_poll_completion(struct request *rq, struct completion *wait) { do { - blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0); + bio_poll(rq->bio, 0); cond_resched(); } while (!completion_done(wait)); } diff --git a/block/blk-mq.c b/block/blk-mq.c index a34ffcf861c31..0860f622099f6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -65,6 +65,9 @@ static int blk_mq_poll_stats_bkt(const struct request *rq) return bucket; } +#define BLK_QC_T_SHIFT 16 +#define BLK_QC_T_INTERNAL (1U << 31) + static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q, blk_qc_t qc) { @@ -81,6 +84,13 @@ static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx, return blk_mq_tag_to_rq(hctx->tags, tag); } +static inline blk_qc_t blk_rq_to_qc(struct request *rq) +{ + return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) | + (rq->tag != -1 ? + rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL)); +} + /* * Check if any of the ctx, dispatch list or elevator * have pending work in this hardware queue. @@ -819,6 +829,8 @@ void blk_mq_start_request(struct request *rq) if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) q->integrity.profile->prepare_fn(rq); #endif + if (rq->bio && rq->bio->bi_opf & REQ_POLLED) + WRITE_ONCE(rq->bio->bi_cookie, blk_rq_to_qc(rq)); } EXPORT_SYMBOL(blk_mq_start_request); @@ -2045,19 +2057,15 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, } static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, - struct request *rq, - blk_qc_t *cookie, bool last) + struct request *rq, bool last) { struct request_queue *q = rq->q; struct blk_mq_queue_data bd = { .rq = rq, .last = last, }; - blk_qc_t new_cookie; blk_status_t ret; - new_cookie = request_to_qc_t(hctx, rq); - /* * For OK queue, we are done. For error, caller may kill it. 
* Any other error (busy), just add it to our list as we @@ -2067,7 +2075,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, switch (ret) { case BLK_STS_OK: blk_mq_update_dispatch_busy(hctx, false); - *cookie = new_cookie; break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE: @@ -2076,7 +2083,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, break; default: blk_mq_update_dispatch_busy(hctx, false); - *cookie = BLK_QC_T_NONE; break; } @@ -2085,7 +2091,6 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, struct request *rq, - blk_qc_t *cookie, bool bypass_insert, bool last) { struct request_queue *q = rq->q; @@ -2119,7 +2124,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, goto insert; } - return __blk_mq_issue_directly(hctx, rq, cookie, last); + return __blk_mq_issue_directly(hctx, rq, last); insert: if (bypass_insert) return BLK_STS_RESOURCE; @@ -2133,7 +2138,6 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, * blk_mq_try_issue_directly - Try to send a request directly to device driver. * @hctx: Pointer of the associated hardware queue. * @rq: Pointer to request to be sent. - * @cookie: Request queue cookie. * * If the device has enough resources to accept a new request now, send the * request directly to device driver. Else, insert at hctx->dispatch queue, so @@ -2141,7 +2145,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, * queue have higher priority. */ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, - struct request *rq, blk_qc_t *cookie) + struct request *rq) { blk_status_t ret; int srcu_idx; @@ -2150,7 +2154,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, hctx_lock(hctx, &srcu_idx); - ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); + ret = __blk_mq_try_issue_directly(hctx, rq, false, true); if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) blk_mq_request_bypass_insert(rq, false, true); else if (ret != BLK_STS_OK) @@ -2163,11 +2167,10 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) { blk_status_t ret; int srcu_idx; - blk_qc_t unused_cookie; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; hctx_lock(hctx, &srcu_idx); - ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); + ret = __blk_mq_try_issue_directly(hctx, rq, true, last); hctx_unlock(hctx, srcu_idx); return ret; @@ -2247,10 +2250,8 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) * * It will not queue the request if there is an error with the bio, or at the * request creation. - * - * Returns: Request queue cookie. 
*/ -blk_qc_t blk_mq_submit_bio(struct bio *bio) +void blk_mq_submit_bio(struct bio *bio) { struct request_queue *q = bio->bi_bdev->bd_disk->queue; const int is_sync = op_is_sync(bio->bi_opf); @@ -2259,9 +2260,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) struct blk_plug *plug; struct request *same_queue_rq = NULL; unsigned int nr_segs; - blk_qc_t cookie; blk_status_t ret; - bool hipri; blk_queue_bounce(q, &bio); __blk_queue_split(&bio, &nr_segs); @@ -2278,8 +2277,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_throttle(q, bio); - hipri = bio->bi_opf & REQ_POLLED; - plug = blk_mq_plug(q, bio); if (plug && plug->cached_rq) { rq = plug->cached_rq; @@ -2310,8 +2307,6 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) rq_qos_track(q, rq, bio); - cookie = request_to_qc_t(rq->mq_hctx, rq); - blk_mq_bio_to_request(rq, bio, nr_segs); ret = blk_crypto_init_request(rq); @@ -2319,7 +2314,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) bio->bi_status = ret; bio_endio(bio); blk_mq_free_request(rq); - return BLK_QC_T_NONE; + return; } if (unlikely(is_flush_fua)) { @@ -2375,7 +2370,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) if (same_queue_rq) { trace_block_unplug(q, 1, true); blk_mq_try_issue_directly(same_queue_rq->mq_hctx, - same_queue_rq, &cookie); + same_queue_rq); } } else if ((q->nr_hw_queues > 1 && is_sync) || !rq->mq_hctx->dispatch_busy) { @@ -2383,18 +2378,15 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio) * There is no scheduler and we can try to send directly * to the hardware. */ - blk_mq_try_issue_directly(rq->mq_hctx, rq, &cookie); + blk_mq_try_issue_directly(rq->mq_hctx, rq); } else { /* Default case. */ blk_mq_sched_insert_request(rq, false, true, true); } - if (!hipri) - return BLK_QC_T_NONE; - return cookie; + return; queue_exit: blk_queue_exit(q); - return BLK_QC_T_NONE; } static size_t order_to_size(unsigned int order) @@ -4084,25 +4076,8 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, return 0; } -/** - * blk_poll - poll for IO completions - * @q: the queue - * @cookie: cookie passed back at IO submission time - * @flags: BLK_POLL_* flags that control the behavior - * - * Description: - * Poll for completions on the passed in queue. Returns number of - * completed entries found. 
- */ -int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) { - if (cookie == BLK_QC_T_NONE || - !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) - return 0; - - if (current->plug) - blk_flush_plug_list(current->plug, false); - if (!(flags & BLK_POLL_NOSLEEP) && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) @@ -4110,7 +4085,6 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) } return blk_mq_poll_classic(q, cookie, flags); } -EXPORT_SYMBOL_GPL(blk_poll); unsigned int blk_mq_rq_cpu(struct request *rq) { diff --git a/block/blk-mq.h b/block/blk-mq.h index a9fe01e149516..8be447995106c 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -37,6 +37,8 @@ struct blk_mq_ctx { struct kobject kobj; } ____cacheline_aligned_in_smp; +void blk_mq_submit_bio(struct bio *bio); +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); void blk_mq_exit_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); diff --git a/block/fops.c b/block/fops.c index db8f2fe68dd27..ce1255529ba2f 100644 --- a/block/fops.c +++ b/block/fops.c @@ -61,7 +61,6 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, bool should_dirty = false; struct bio bio; ssize_t ret; - blk_qc_t qc; if ((pos | iov_iter_alignment(iter)) & (bdev_logical_block_size(bdev) - 1)) @@ -102,13 +101,12 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, if (iocb->ki_flags & IOCB_HIPRI) bio_set_polled(&bio, iocb); - qc = submit_bio(&bio); + submit_bio(&bio); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(bio.bi_private)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || - !blk_poll(bdev_get_queue(bdev), qc, 0)) + if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -141,14 +139,6 @@ struct blkdev_dio { static struct bio_set blkdev_dio_pool; -static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags) -{ - struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); - struct request_queue *q = bdev_get_queue(bdev); - - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); -} - static void blkdev_bio_end_io(struct bio *bio) { struct blkdev_dio *dio = bio->bi_private; @@ -162,6 +152,8 @@ static void blkdev_bio_end_io(struct bio *bio) struct kiocb *iocb = dio->iocb; ssize_t ret; + WRITE_ONCE(iocb->private, NULL); + if (likely(!dio->bio.bi_status)) { ret = dio->size; iocb->ki_pos += ret; @@ -200,7 +192,6 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bool do_poll = (iocb->ki_flags & IOCB_HIPRI); bool is_read = (iov_iter_rw(iter) == READ), is_sync; loff_t pos = iocb->ki_pos; - blk_qc_t qc = BLK_QC_T_NONE; int ret = 0; if ((pos | iov_iter_alignment(iter)) & @@ -262,9 +253,9 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!nr_pages) { if (do_poll) bio_set_polled(bio, iocb); - qc = submit_bio(bio); + submit_bio(bio); if (do_poll) - WRITE_ONCE(iocb->ki_cookie, qc); + WRITE_ONCE(iocb->private, bio); break; } if (!dio->multi_bio) { @@ -297,7 +288,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0)) + if (!do_poll || !bio_poll(bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ 
-594,7 +585,7 @@ const struct file_operations def_blk_fops = { .llseek = blkdev_llseek, .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, - .iopoll = blkdev_iopoll, + .iopoll = iocb_bio_iopoll, .mmap = generic_file_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = blkdev_ioctl, diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 530b312402031..aa0472718dcec 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -282,7 +282,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, return err; } -static blk_qc_t brd_submit_bio(struct bio *bio) +static void brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; sector_t sector = bio->bi_iter.bi_sector; @@ -299,16 +299,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio) err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, bio_op(bio), sector); - if (err) - goto io_error; + if (err) { + bio_io_error(bio); + return; + } sector += len >> SECTOR_SHIFT; } bio_endio(bio); - return BLK_QC_T_NONE; -io_error: - bio_io_error(bio); - return BLK_QC_T_NONE; } static int brd_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 5d9181382ce19..6674a0b883415 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1448,7 +1448,7 @@ extern void conn_free_crypto(struct drbd_connection *connection); /* drbd_req */ extern void do_submit(struct work_struct *ws); extern void __drbd_make_request(struct drbd_device *, struct bio *); -extern blk_qc_t drbd_submit_bio(struct bio *bio); +void drbd_submit_bio(struct bio *bio); extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req); extern int is_valid_ar_handle(struct drbd_request *, sector_t); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 5ca233644d705..3235532ae0778 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1596,7 +1596,7 @@ void do_submit(struct work_struct *ws) } } -blk_qc_t drbd_submit_bio(struct bio *bio) +void drbd_submit_bio(struct bio *bio) { struct drbd_device *device = bio->bi_bdev->bd_disk->private_data; @@ -1609,7 +1609,6 @@ blk_qc_t drbd_submit_bio(struct bio *bio) inc_ap_bio(device); __drbd_make_request(device, bio); - return BLK_QC_T_NONE; } static bool net_timeout_reached(struct drbd_request *net_req, diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c index 26798da661bd4..b168ca25b6c90 100644 --- a/drivers/block/n64cart.c +++ b/drivers/block/n64cart.c @@ -84,7 +84,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos) return true; } -static blk_qc_t n64cart_submit_bio(struct bio *bio) +static void n64cart_submit_bio(struct bio *bio) { struct bio_vec bvec; struct bvec_iter iter; @@ -92,16 +92,14 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio) u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT; bio_for_each_segment(bvec, bio, iter) { - if (!n64cart_do_bvec(dev, &bvec, pos)) - goto io_error; + if (!n64cart_do_bvec(dev, &bvec, pos)) { + bio_io_error(bio); + return; + } pos += bvec.bv_len; } bio_endio(bio); - return BLK_QC_T_NONE; -io_error: - bio_io_error(bio); - return BLK_QC_T_NONE; } static const struct block_device_operations n64cart_fops = { diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 187d779c8ca08..e5cbcf5822333 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1422,7 +1422,7 @@ static struct 
nullb_queue *nullb_to_queue(struct nullb *nullb) return &nullb->queues[index]; } -static blk_qc_t null_submit_bio(struct bio *bio) +static void null_submit_bio(struct bio *bio) { sector_t sector = bio->bi_iter.bi_sector; sector_t nr_sectors = bio_sectors(bio); @@ -1434,7 +1434,6 @@ static blk_qc_t null_submit_bio(struct bio *bio) cmd->bio = bio; null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio)); - return BLK_QC_T_NONE; } static bool should_timeout_request(struct request *rq) diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 0f26b2510a756..cb52cce6fb039 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2400,7 +2400,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio) } } -static blk_qc_t pkt_submit_bio(struct bio *bio) +static void pkt_submit_bio(struct bio *bio) { struct pktcdvd_device *pd; char b[BDEVNAME_SIZE]; @@ -2423,7 +2423,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio) */ if (bio_data_dir(bio) == READ) { pkt_make_request_read(pd, bio); - return BLK_QC_T_NONE; + return; } if (!test_bit(PACKET_WRITABLE, &pd->flags)) { @@ -2455,10 +2455,9 @@ static blk_qc_t pkt_submit_bio(struct bio *bio) pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split); } while (split != bio); - return BLK_QC_T_NONE; + return; end_io: bio_io_error(bio); - return BLK_QC_T_NONE; } static void pkt_init_queue(struct pktcdvd_device *pd) diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index c7b19e128b03c..d1ebf193cb9ab 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c @@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev, return next; } -static blk_qc_t ps3vram_submit_bio(struct bio *bio) +static void ps3vram_submit_bio(struct bio *bio) { struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data; struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); @@ -594,13 +594,11 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio) spin_unlock_irq(&priv->lock); if (busy) - return BLK_QC_T_NONE; + return; do { bio = ps3vram_do_bio(dev, bio); } while (bio); - - return BLK_QC_T_NONE; } static const struct block_device_operations ps3vram_fops = { diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index 1cc40b0ea7619..268252380e88f 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -50,7 +50,7 @@ struct rsxx_bio_meta { static struct kmem_cache *bio_meta_pool; -static blk_qc_t rsxx_submit_bio(struct bio *bio); +static void rsxx_submit_bio(struct bio *bio); /*----------------- Block Device Operations -----------------*/ static int rsxx_blkdev_ioctl(struct block_device *bdev, @@ -120,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card, } } -static blk_qc_t rsxx_submit_bio(struct bio *bio) +static void rsxx_submit_bio(struct bio *bio) { struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data; struct rsxx_bio_meta *bio_meta; @@ -169,7 +169,7 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio) if (st) goto queue_err; - return BLK_QC_T_NONE; + return; queue_err: kmem_cache_free(bio_meta_pool, bio_meta); @@ -177,7 +177,6 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio) if (st) bio->bi_status = st; bio_endio(bio); - return BLK_QC_T_NONE; } /*----------------- Device Setup -------------------*/ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index fcaf2750f68f7..a68297fb51a2f 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1598,22 +1598,18 @@ static 
void __zram_make_request(struct zram *zram, struct bio *bio) /* * Handler function for all zram I/O requests. */ -static blk_qc_t zram_submit_bio(struct bio *bio) +static void zram_submit_bio(struct bio *bio) { struct zram *zram = bio->bi_bdev->bd_disk->private_data; if (!valid_io_request(zram, bio->bi_iter.bi_sector, bio->bi_iter.bi_size)) { atomic64_inc(&zram->stats.invalid_io); - goto error; + bio_io_error(bio); + return; } __zram_make_request(zram, bio); - return BLK_QC_T_NONE; - -error: - bio_io_error(bio); - return BLK_QC_T_NONE; } static void zram_slot_free_notify(struct block_device *bdev, diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 6d1de889baeb1..23b28edae90fd 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1163,7 +1163,7 @@ static void quit_max_writeback_rate(struct cache_set *c, /* Cached devices - read & write stuff */ -blk_qc_t cached_dev_submit_bio(struct bio *bio) +void cached_dev_submit_bio(struct bio *bio) { struct search *s; struct block_device *orig_bdev = bio->bi_bdev; @@ -1176,7 +1176,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio) dc->io_disable)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); - return BLK_QC_T_NONE; + return; } if (likely(d->c)) { @@ -1222,8 +1222,6 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio) } else /* I/O request sent to backing device */ detached_dev_do_request(d, bio, orig_bdev, start_time); - - return BLK_QC_T_NONE; } static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, @@ -1273,7 +1271,7 @@ static void flash_dev_nodata(struct closure *cl) continue_at(cl, search_free, NULL); } -blk_qc_t flash_dev_submit_bio(struct bio *bio) +void flash_dev_submit_bio(struct bio *bio) { struct search *s; struct closure *cl; @@ -1282,7 +1280,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); - return BLK_QC_T_NONE; + return; } s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio)); @@ -1298,7 +1296,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) continue_at_nobarrier(&s->cl, flash_dev_nodata, bcache_wq); - return BLK_QC_T_NONE; + return; } else if (bio_data_dir(bio)) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &KEY(d->id, bio->bi_iter.bi_sector, 0), @@ -1314,7 +1312,6 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) } continue_at(cl, search_free, NULL); - return BLK_QC_T_NONE; } static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode, diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 82b38366a95de..38ab4856eaab0 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c); void bch_data_insert(struct closure *cl); void bch_cached_dev_request_init(struct cached_dev *dc); -blk_qc_t cached_dev_submit_bio(struct bio *bio); +void cached_dev_submit_bio(struct bio *bio); void bch_flash_dev_request_init(struct bcache_device *d); -blk_qc_t flash_dev_submit_bio(struct bio *bio); +void flash_dev_submit_bio(struct bio *bio); extern struct kmem_cache *bch_search_cache; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 76d9da49fda75..7870e6460633f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1183,14 +1183,13 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) mutex_unlock(&md->swap_bios_lock); } -static blk_qc_t __map_bio(struct dm_target_io *tio) +static void 
__map_bio(struct dm_target_io *tio) { int r; sector_t sector; struct bio *clone = &tio->clone; struct dm_io *io = tio->io; struct dm_target *ti = tio->ti; - blk_qc_t ret = BLK_QC_T_NONE; clone->bi_end_io = clone_endio; @@ -1226,7 +1225,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) case DM_MAPIO_REMAPPED: /* the bio has been remapped so dispatch it */ trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector); - ret = submit_bio_noacct(clone); + submit_bio_noacct(clone); break; case DM_MAPIO_KILL: if (unlikely(swap_bios_limit(ti, clone))) { @@ -1248,8 +1247,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) DMWARN("unimplemented target map return value: %d", r); BUG(); } - - return ret; } static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) @@ -1336,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, } } -static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, +static void __clone_and_map_simple_bio(struct clone_info *ci, struct dm_target_io *tio, unsigned *len) { struct bio *clone = &tio->clone; @@ -1346,8 +1343,7 @@ static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci, __bio_clone_fast(clone, ci->bio); if (len) bio_setup_sector(clone, ci->sector, *len); - - return __map_bio(tio); + __map_bio(tio); } static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, @@ -1361,7 +1357,7 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, while ((bio = bio_list_pop(&blist))) { tio = container_of(bio, struct dm_target_io, clone); - (void) __clone_and_map_simple_bio(ci, tio, len); + __clone_and_map_simple_bio(ci, tio, len); } } @@ -1405,7 +1401,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, free_tio(tio); return r; } - (void) __map_bio(tio); + __map_bio(tio); return 0; } @@ -1520,11 +1516,10 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md, /* * Entry point to split a bio into clones and submit them to the targets. 
*/ -static blk_qc_t __split_and_process_bio(struct mapped_device *md, +static void __split_and_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { struct clone_info ci; - blk_qc_t ret = BLK_QC_T_NONE; int error = 0; init_clone_info(&ci, md, map, bio); @@ -1567,19 +1562,17 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, bio_chain(b, bio); trace_block_split(b, bio->bi_iter.bi_sector); - ret = submit_bio_noacct(bio); + submit_bio_noacct(bio); } } /* drop the extra reference count */ dm_io_dec_pending(ci.io, errno_to_blk_status(error)); - return ret; } -static blk_qc_t dm_submit_bio(struct bio *bio) +static void dm_submit_bio(struct bio *bio) { struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; - blk_qc_t ret = BLK_QC_T_NONE; int srcu_idx; struct dm_table *map; @@ -1609,10 +1602,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio) if (is_abnormal_io(bio)) blk_queue_split(&bio); - ret = __split_and_process_bio(md, map, bio); + __split_and_process_bio(md, map, bio); out: dm_put_live_table(md, srcu_idx); - return ret; } /*----------------------------------------------------------------- diff --git a/drivers/md/md.c b/drivers/md/md.c index ec09083ff0eff..22310d5d8d418 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -443,19 +443,19 @@ void md_handle_request(struct mddev *mddev, struct bio *bio) } EXPORT_SYMBOL(md_handle_request); -static blk_qc_t md_submit_bio(struct bio *bio) +static void md_submit_bio(struct bio *bio) { const int rw = bio_data_dir(bio); struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; if (mddev == NULL || mddev->pers == NULL) { bio_io_error(bio); - return BLK_QC_T_NONE; + return; } if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { bio_io_error(bio); - return BLK_QC_T_NONE; + return; } blk_queue_split(&bio); @@ -464,15 +464,13 @@ static blk_qc_t md_submit_bio(struct bio *bio) if (bio_sectors(bio) != 0) bio->bi_status = BLK_STS_IOERR; bio_endio(bio); - return BLK_QC_T_NONE; + return; } /* bio could be mergeable after passing to underlayer */ bio->bi_opf &= ~REQ_NOMERGE; md_handle_request(mddev, bio); - - return BLK_QC_T_NONE; } /* mddev_suspend makes sure no new requests are submitted diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 088d3dd6f6fac..b6c6866f92592 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk, return err; } -static blk_qc_t nd_blk_submit_bio(struct bio *bio) +static void nd_blk_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip; struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data; @@ -173,7 +173,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio) bool do_acct; if (!bio_integrity_prep(bio)) - return BLK_QC_T_NONE; + return; bip = bio_integrity(bio); rw = bio_data_dir(bio); @@ -199,7 +199,6 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio) bio_end_io_acct(bio, start); bio_endio(bio); - return BLK_QC_T_NONE; } static int nsblk_rw_bytes(struct nd_namespace_common *ndns, diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 92dec49522972..4295fa8094209 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1440,7 +1440,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, return ret; } -static blk_qc_t btt_submit_bio(struct bio *bio) +static void btt_submit_bio(struct bio *bio) { struct bio_integrity_payload *bip = bio_integrity(bio); struct btt *btt = bio->bi_bdev->bd_disk->private_data; @@ 
-1451,7 +1451,7 @@ static blk_qc_t btt_submit_bio(struct bio *bio) bool do_acct; if (!bio_integrity_prep(bio)) - return BLK_QC_T_NONE; + return; do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue); if (do_acct) @@ -1483,7 +1483,6 @@ static blk_qc_t btt_submit_bio(struct bio *bio) bio_end_io_acct(bio, start); bio_endio(bio); - return BLK_QC_T_NONE; } static int btt_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index ef4950f808326..a67a3ad1d4137 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -190,7 +190,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem, return rc; } -static blk_qc_t pmem_submit_bio(struct bio *bio) +static void pmem_submit_bio(struct bio *bio) { int ret = 0; blk_status_t rc = 0; @@ -229,7 +229,6 @@ static blk_qc_t pmem_submit_bio(struct bio *bio) bio->bi_status = errno_to_blk_status(ret); bio_endio(bio); - return BLK_QC_T_NONE; } static int pmem_rw_page(struct block_device *bdev, sector_t sector, diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index fba06618c6c23..ab78aa5d28c6e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -312,12 +312,11 @@ static bool nvme_available_path(struct nvme_ns_head *head) return false; } -static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) +static void nvme_ns_head_submit_bio(struct bio *bio) { struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; struct device *dev = disk_to_dev(head->disk); struct nvme_ns *ns; - blk_qc_t ret = BLK_QC_T_NONE; int srcu_idx; /* @@ -334,7 +333,7 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) bio->bi_opf |= REQ_NVME_MPATH; trace_block_bio_remap(bio, disk_devt(ns->head->disk), bio->bi_iter.bi_sector); - ret = submit_bio_noacct(bio); + submit_bio_noacct(bio); } else if (nvme_available_path(head)) { dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); @@ -349,7 +348,6 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) } srcu_read_unlock(&head->srcu, srcu_idx); - return ret; } static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 5be3d1c39a78e..59e513d34b0f2 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -30,7 +30,7 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); -static blk_qc_t dcssblk_submit_bio(struct bio *bio); +static void dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); @@ -854,7 +854,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) up_write(&dcssblk_devices_sem); } -static blk_qc_t +static void dcssblk_submit_bio(struct bio *bio) { struct dcssblk_dev_info *dev_info; @@ -907,10 +907,9 @@ dcssblk_submit_bio(struct bio *bio) bytes_done += bvec.bv_len; } bio_endio(bio); - return BLK_QC_T_NONE; + return; fail: bio_io_error(bio); - return BLK_QC_T_NONE; } static long diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4a9077c524448..04090ba0ef737 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8248,7 +8248,7 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio, return dip; } -static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter, +static void btrfs_submit_direct(const struct iomap_iter *iter, struct bio *dio_bio, loff_t 
file_offset) { struct inode *inode = iter->inode; @@ -8278,7 +8278,7 @@ static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter, } dio_bio->bi_status = BLK_STS_RESOURCE; bio_endio(dio_bio); - return BLK_QC_T_NONE; + return; } if (!write) { @@ -8372,15 +8372,13 @@ static blk_qc_t btrfs_submit_direct(const struct iomap_iter *iter, free_extent_map(em); } while (submit_len > 0); - return BLK_QC_T_NONE; + return; out_err_em: free_extent_map(em); out_err: dip->dio_bio->bi_status = status; btrfs_dio_private_put(dip); - - return BLK_QC_T_NONE; } const struct iomap_ops btrfs_dio_iomap_ops = { diff --git a/fs/ext4/file.c b/fs/ext4/file.c index ac0e11bbb4450..9c5559faacdaa 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -915,7 +915,7 @@ const struct file_operations ext4_file_operations = { .llseek = ext4_llseek, .read_iter = ext4_file_read_iter, .write_iter = ext4_file_write_iter, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index c559827cb6f91..635f0e3f10ec7 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1353,7 +1353,7 @@ const struct file_operations gfs2_file_fops = { .llseek = gfs2_llseek, .read_iter = gfs2_file_read_iter, .write_iter = gfs2_file_write_iter, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = gfs2_ioctl, .compat_ioctl = gfs2_compat_ioctl, .mmap = gfs2_mmap, @@ -1386,7 +1386,7 @@ const struct file_operations gfs2_file_fops_nolock = { .llseek = gfs2_llseek, .read_iter = gfs2_file_read_iter, .write_iter = gfs2_file_write_iter, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = gfs2_ioctl, .compat_ioctl = gfs2_compat_ioctl, .mmap = gfs2_mmap, diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 236aba256cd17..8efab177011dd 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -38,8 +38,7 @@ struct iomap_dio { struct { struct iov_iter *iter; struct task_struct *waiter; - struct request_queue *last_queue; - blk_qc_t cookie; + struct bio *poll_bio; } submit; /* used for aio completion: */ @@ -49,29 +48,20 @@ struct iomap_dio { }; }; -int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags) -{ - struct request_queue *q = READ_ONCE(kiocb->private); - - if (!q) - return 0; - return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags); -} -EXPORT_SYMBOL_GPL(iomap_dio_iopoll); - static void iomap_dio_submit_bio(const struct iomap_iter *iter, struct iomap_dio *dio, struct bio *bio, loff_t pos) { atomic_inc(&dio->ref); - if (dio->iocb->ki_flags & IOCB_HIPRI) + if (dio->iocb->ki_flags & IOCB_HIPRI) { bio_set_polled(bio, dio->iocb); + dio->submit.poll_bio = bio; + } - dio->submit.last_queue = bdev_get_queue(iter->iomap.bdev); if (dio->dops && dio->dops->submit_io) - dio->submit.cookie = dio->dops->submit_io(iter, bio, pos); + dio->dops->submit_io(iter, bio, pos); else - dio->submit.cookie = submit_bio(bio); + submit_bio(bio); } ssize_t iomap_dio_complete(struct iomap_dio *dio) @@ -164,9 +154,11 @@ static void iomap_dio_bio_end_io(struct bio *bio) } else if (dio->flags & IOMAP_DIO_WRITE) { struct inode *inode = file_inode(dio->iocb->ki_filp); + WRITE_ONCE(dio->iocb->private, NULL); INIT_WORK(&dio->aio.work, iomap_dio_complete_work); queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work); } else { + WRITE_ONCE(dio->iocb->private, NULL); iomap_dio_complete_work(&dio->aio.work); } } @@ -497,8 +489,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, 
dio->submit.iter = iter; dio->submit.waiter = current; - dio->submit.cookie = BLK_QC_T_NONE; - dio->submit.last_queue = NULL; + dio->submit.poll_bio = NULL; if (iov_iter_rw(iter) == READ) { if (iomi.pos >= dio->i_size) @@ -611,8 +602,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (dio->flags & IOMAP_DIO_WRITE_FUA) dio->flags &= ~IOMAP_DIO_NEED_SYNC; - WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie); - WRITE_ONCE(iocb->private, dio->submit.last_queue); + WRITE_ONCE(iocb->private, dio->submit.poll_bio); /* * We are about to drop our additional submission reference, which @@ -639,10 +629,8 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->submit.waiter)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || - !dio->submit.last_queue || - !blk_poll(dio->submit.last_queue, - dio->submit.cookie, 0)) + if (!dio->submit.poll_bio || + !bio_poll(dio->submit.poll_bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 7aa943edfc02f..62e7fbe4e54c3 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1452,7 +1452,7 @@ const struct file_operations xfs_file_operations = { .write_iter = xfs_file_write_iter, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, .unlocked_ioctl = xfs_file_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = xfs_file_compat_ioctl, diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index ddc346a9df9ba..3ce5f47338cb6 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -1128,7 +1128,7 @@ static const struct file_operations zonefs_file_operations = { .write_iter = zonefs_file_write_iter, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, - .iopoll = iomap_dio_iopoll, + .iopoll = iocb_bio_iopoll, }; static struct kmem_cache *zonefs_inode_cachep; diff --git a/include/linux/bio.h b/include/linux/bio.h index c7a2d880e927d..62d684b7dd4c3 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -349,7 +349,7 @@ static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned short nr_iovecs) return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set); } -extern blk_qc_t submit_bio(struct bio *); +void submit_bio(struct bio *bio); extern void bio_endio(struct bio *); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 2219e92771186..a9c1d08825503 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -359,9 +359,9 @@ struct blk_mq_hw_ctx { /** @kobj: Kernel object for sysfs. */ struct kobject kobj; - /** @poll_considered: Count times blk_poll() was called. */ + /** @poll_considered: Count times blk_mq_poll() was called. */ unsigned long poll_considered; - /** @poll_invoked: Count how many requests blk_poll() polled. */ + /** @poll_invoked: Count how many requests blk_mq_poll() polled. */ unsigned long poll_invoked; /** @poll_success: Count how many polled requests were completed. 
*/ unsigned long poll_success; @@ -815,16 +815,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) for ((i) = 0; (i) < (hctx)->nr_ctx && \ ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++) -static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, - struct request *rq) -{ - if (rq->tag != -1) - return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT); - - return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) | - BLK_QC_T_INTERNAL; -} - static inline void blk_mq_cleanup_rq(struct request *rq) { if (rq->q->mq_ops->cleanup_rq) @@ -843,7 +833,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, rq->rq_disk = bio->bi_bdev->bd_disk; } -blk_qc_t blk_mq_submit_bio(struct bio *bio); void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, struct lock_class_key *key); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index f8b9fce688346..72736b4c057c6 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -208,6 +208,9 @@ static inline void bio_issue_init(struct bio_issue *issue, ((u64)size << BIO_ISSUE_SIZE_SHIFT)); } +typedef unsigned int blk_qc_t; +#define BLK_QC_T_NONE -1U + /* * main unit of I/O for the block layer and lower layers (ie drivers and * stacking drivers) @@ -227,8 +230,8 @@ struct bio { struct bvec_iter bi_iter; + blk_qc_t bi_cookie; bio_end_io_t *bi_end_io; - void *bi_private; #ifdef CONFIG_BLK_CGROUP /* @@ -384,7 +387,7 @@ enum req_flag_bits { /* command specific flags for REQ_OP_WRITE_ZEROES: */ __REQ_NOUNMAP, /* do not free blocks when zeroing */ - __REQ_POLLED, /* caller polls for completion using blk_poll */ + __REQ_POLLED, /* caller polls for completion using bio_poll */ /* for driver use */ __REQ_DRV, @@ -495,11 +498,6 @@ static inline int op_stat_group(unsigned int op) return op_is_write(op); } -typedef unsigned int blk_qc_t; -#define BLK_QC_T_NONE -1U -#define BLK_QC_T_SHIFT 16 -#define BLK_QC_T_INTERNAL (1U << 31) - struct blk_rq_stat { u64 mean; u64 min; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2b80c98fc373e..2a8689e949b4c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -25,6 +25,7 @@ struct request; struct sg_io_hdr; struct blkcg_gq; struct blk_flush_queue; +struct kiocb; struct pr_ops; struct rq_qos; struct blk_queue_stats; @@ -550,7 +551,7 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); -blk_qc_t submit_bio_noacct(struct bio *bio); +void submit_bio_noacct(struct bio *bio); extern int blk_lld_busy(struct request_queue *q); extern void blk_queue_split(struct bio **); @@ -568,7 +569,8 @@ blk_status_t errno_to_blk_status(int errno); #define BLK_POLL_ONESHOT (1 << 0) /* do not sleep to wait for the expected completion time */ #define BLK_POLL_NOSLEEP (1 << 1) -int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); +int bio_poll(struct bio *bio, unsigned int flags); +int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { @@ -1176,7 +1178,7 @@ static inline void blk_ksm_unregister(struct request_queue *q) { } struct block_device_operations { - blk_qc_t (*submit_bio) (struct bio *bio); + void (*submit_bio)(struct bio *bio); int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); diff --git 
a/include/linux/fs.h b/include/linux/fs.h index c443cddf414fd..f595f4097cb75 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -334,11 +334,7 @@ struct kiocb { int ki_flags; u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ - union { - unsigned int ki_cookie; /* for ->iopoll */ - struct wait_page_queue *ki_waitq; /* for async buffered IO */ - }; - + struct wait_page_queue *ki_waitq; /* for async buffered IO */ randomized_struct_fields_end }; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 1e86b65567c21..63f4ea4dac9b1 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -313,8 +313,8 @@ int iomap_writepages(struct address_space *mapping, struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); - blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio, - loff_t file_offset); + void (*submit_io)(const struct iomap_iter *iter, struct bio *bio, + loff_t file_offset); }; /* @@ -337,7 +337,6 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags); ssize_t iomap_dio_complete(struct iomap_dio *dio); -int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags); #ifdef CONFIG_SWAP struct file; diff --git a/mm/page_io.c b/mm/page_io.c index ed2eded74f3ad..a68faab5b3105 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -358,8 +358,6 @@ int swap_readpage(struct page *page, bool synchronous) struct bio *bio; int ret = 0; struct swap_info_struct *sis = page_swap_info(page); - blk_qc_t qc; - struct gendisk *disk; unsigned long pflags; VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page); @@ -409,8 +407,6 @@ int swap_readpage(struct page *page, bool synchronous) bio->bi_iter.bi_sector = swap_page_sector(page); bio->bi_end_io = end_swap_bio_read; bio_add_page(bio, page, thp_size(page), 0); - - disk = bio->bi_bdev->bd_disk; /* * Keep this task valid during swap readpage because the oom killer may * attempt to access it in the page fault retry time check. @@ -422,13 +418,13 @@ int swap_readpage(struct page *page, bool synchronous) } count_vm_event(PSWPIN); bio_get(bio); - qc = submit_bio(bio); + submit_bio(bio); while (synchronous) { set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(bio->bi_private)) break; - if (!blk_poll(disk->queue, qc, 0)) + if (!bio_poll(bio, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From a614dd2280356df0c79300c49d82b7e0c8b31f24 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:25 +0200 Subject: [PATCH 080/205] block: don't allow writing to the poll queue attribute The poll attribute is a historic artefact from before when we had explicit poll queues that require driver specific configuration. Just print a warning when writing to the attribute. 
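For context (an illustration, not part of this patch): with explicit poll queues, polled completion is requested per I/O by the submitter rather than enabled through this sysfs attribute. A minimal userspace sketch, assuming an O_DIRECT-capable block device at a hypothetical path:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov;
	void *buf;
	ssize_t ret;
	/* hypothetical device node; any O_DIRECT-capable block device works */
	int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	iov.iov_base = buf;
	iov.iov_len = 4096;

	/* RWF_HIPRI asks the kernel to poll for this read's completion */
	ret = preadv2(fd, &iov, 1, 0, RWF_HIPRI);
	printf("read %zd bytes\n", ret);
	close(fd);
	return ret < 0;
}

If the underlying queue has no poll queues, the expectation is that such a read simply falls back to interrupt-driven completion.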
Signed-off-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-16-hch@lst.de Signed-off-by: Jens Axboe --- block/blk-sysfs.c | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index da883efcba33a..36f14d658e816 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -433,26 +433,11 @@ static ssize_t queue_poll_show(struct request_queue *q, char *page) static ssize_t queue_poll_store(struct request_queue *q, const char *page, size_t count) { - unsigned long poll_on; - ssize_t ret; - - if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL || - !q->tag_set->map[HCTX_TYPE_POLL].nr_queues) + if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return -EINVAL; - - ret = queue_var_store(&poll_on, page, count); - if (ret < 0) - return ret; - - if (poll_on) { - blk_queue_flag_set(QUEUE_FLAG_POLL, q); - } else { - blk_mq_freeze_queue(q); - blk_queue_flag_clear(QUEUE_FLAG_POLL, q); - blk_mq_unfreeze_queue(q); - } - - return ret; + pr_info_ratelimited("writes to the poll attribute are ignored.\n"); + pr_info_ratelimited("please use driver specific parameters instead.\n"); + return count; } static ssize_t queue_io_timeout_show(struct request_queue *q, char *page) From c712dccc64357b94f93e57882373e1365f0e2a56 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 12 Oct 2021 13:12:26 +0200 Subject: [PATCH 081/205] nvme-multipath: enable polled I/O Set the poll queue flag to enable polling, given that the multipath node just dispatches the bios to a lower queue. Signed-off-by: Christoph Hellwig Tested-by: Mark Wunderlich Link: https://lore.kernel.org/r/20211012111226.760968-17-hch@lst.de Signed-off-by: Jens Axboe --- drivers/nvme/host/multipath.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index ab78aa5d28c6e..11440c86881e8 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -85,8 +85,13 @@ void nvme_failover_req(struct request *req) } spin_lock_irqsave(&ns->head->requeue_lock, flags); - for (bio = req->bio; bio; bio = bio->bi_next) + for (bio = req->bio; bio; bio = bio->bi_next) { bio_set_dev(bio, ns->head->disk->part0); + if (bio->bi_opf & REQ_POLLED) { + bio->bi_opf &= ~REQ_POLLED; + bio->bi_cookie = BLK_QC_T_NONE; + } + } blk_steal_bios(&ns->head->requeue_list, req); spin_unlock_irqrestore(&ns->head->requeue_lock, flags); @@ -477,6 +482,15 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); + /* + * This assumes all controllers that refer to a namespace either + * support poll queues or not. That is not a strict guarantee, + * but if the assumption is wrong the effect is only suboptimal + * performance but not correctness problem. 
+ */ + if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && + ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) + blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); /* set to a default value of 512 until the disk is validated */ blk_queue_logical_block_size(head->disk->queue, 512); From fac7c6d529acf2b5428ad08c1b1127e29e570790 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 13 Oct 2021 09:57:11 +0100 Subject: [PATCH 082/205] block: cache bdev in struct file for raw bdev IO bdev = &BDEV_I(file->f_mapping->host)->bdev Getting struct block_device from a file requires 2 memory dereferences as illustrated above, that takes a toll on performance, so cache it in yet unused file->private_data. That gives a noticeable peak performance improvement. Signed-off-by: Pavel Begunkov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/8415f9fe12e544b9da89593dfbca8de2b52efe03.1634115360.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/block/fops.c b/block/fops.c index ce1255529ba2f..551b71af6d907 100644 --- a/block/fops.c +++ b/block/fops.c @@ -17,7 +17,7 @@ #include #include "blk.h" -static struct inode *bdev_file_inode(struct file *file) +static inline struct inode *bdev_file_inode(struct file *file) { return file->f_mapping->host; } @@ -54,8 +54,7 @@ static void blkdev_bio_end_io_simple(struct bio *bio) static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, unsigned int nr_pages) { - struct file *file = iocb->ki_filp; - struct block_device *bdev = I_BDEV(bdev_file_inode(file)); + struct block_device *bdev = iocb->ki_filp->private_data; struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs; loff_t pos = iocb->ki_pos; bool should_dirty = false; @@ -183,9 +182,7 @@ static void blkdev_bio_end_io(struct bio *bio) static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, unsigned int nr_pages) { - struct file *file = iocb->ki_filp; - struct inode *inode = bdev_file_inode(file); - struct block_device *bdev = I_BDEV(inode); + struct block_device *bdev = iocb->ki_filp->private_data; struct blk_plug plug; struct blkdev_dio *dio; struct bio *bio; @@ -389,8 +386,7 @@ static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence) static int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { - struct inode *bd_inode = bdev_file_inode(filp); - struct block_device *bdev = I_BDEV(bd_inode); + struct block_device *bdev = filp->private_data; int error; error = file_write_and_wait_range(filp, start, end); @@ -432,6 +428,8 @@ static int blkdev_open(struct inode *inode, struct file *filp) bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp); if (IS_ERR(bdev)) return PTR_ERR(bdev); + + filp->private_data = bdev; filp->f_mapping = bdev->bd_inode->i_mapping; filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); return 0; @@ -439,7 +437,7 @@ static int blkdev_open(struct inode *inode, struct file *filp) static int blkdev_close(struct inode *inode, struct file *filp) { - struct block_device *bdev = I_BDEV(bdev_file_inode(filp)); + struct block_device *bdev = filp->private_data; blkdev_put(bdev, filp->f_mode); return 0; @@ -454,14 +452,14 @@ static int blkdev_close(struct inode *inode, struct file *filp) */ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct file *file = iocb->ki_filp; - struct inode *bd_inode = bdev_file_inode(file); + struct block_device *bdev = 
iocb->ki_filp->private_data; + struct inode *bd_inode = bdev->bd_inode; loff_t size = i_size_read(bd_inode); struct blk_plug plug; size_t shorted = 0; ssize_t ret; - if (bdev_read_only(I_BDEV(bd_inode))) + if (bdev_read_only(bdev)) return -EPERM; if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev)) @@ -493,9 +491,8 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct file *file = iocb->ki_filp; - struct inode *bd_inode = bdev_file_inode(file); - loff_t size = i_size_read(bd_inode); + struct block_device *bdev = iocb->ki_filp->private_data; + loff_t size = i_size_read(bdev->bd_inode); loff_t pos = iocb->ki_pos; size_t shorted = 0; ssize_t ret; From 09ce8744253a038eb658c14f9dc3b77fa021fc9f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 11:17:43 -0600 Subject: [PATCH 083/205] block: use flags instead of bit fields for blkdev_dio This generates a lot better code for me, and bumps performance from 7650K IOPS to 7750K IOPS. Looking at profiles for the run and running perf diff, it confirms that we're now sending a lot less time there: 6.38% -2.80% [kernel.vmlinux] [k] blkdev_direct_IO Taking it from the 2nd most cycle consumer to only the 9th most at 3.35% of the CPU time. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/fops.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/block/fops.c b/block/fops.c index 551b71af6d907..1d4f862950bbb 100644 --- a/block/fops.c +++ b/block/fops.c @@ -123,6 +123,12 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, return ret; } +enum { + DIO_MULTI_BIO = 1, + DIO_SHOULD_DIRTY = 2, + DIO_IS_SYNC = 4, +}; + struct blkdev_dio { union { struct kiocb *iocb; @@ -130,9 +136,7 @@ struct blkdev_dio { }; size_t size; atomic_t ref; - bool multi_bio : 1; - bool should_dirty : 1; - bool is_sync : 1; + unsigned int flags; struct bio bio; }; @@ -141,13 +145,13 @@ static struct bio_set blkdev_dio_pool; static void blkdev_bio_end_io(struct bio *bio) { struct blkdev_dio *dio = bio->bi_private; - bool should_dirty = dio->should_dirty; + bool should_dirty = dio->flags & DIO_SHOULD_DIRTY; if (bio->bi_status && !dio->bio.bi_status) dio->bio.bi_status = bio->bi_status; - if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) { - if (!dio->is_sync) { + if (!(dio->flags & DIO_MULTI_BIO) || atomic_dec_and_test(&dio->ref)) { + if (!(dio->flags & DIO_IS_SYNC)) { struct kiocb *iocb = dio->iocb; ssize_t ret; @@ -161,7 +165,7 @@ static void blkdev_bio_end_io(struct bio *bio) } dio->iocb->ki_complete(iocb, ret, 0); - if (dio->multi_bio) + if (dio->flags & DIO_MULTI_BIO) bio_put(&dio->bio); } else { struct task_struct *waiter = dio->waiter; @@ -198,17 +202,19 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); - dio->is_sync = is_sync = is_sync_kiocb(iocb); - if (dio->is_sync) { + is_sync = is_sync_kiocb(iocb); + if (is_sync) { + dio->flags = DIO_IS_SYNC; dio->waiter = current; bio_get(bio); } else { + dio->flags = 0; dio->iocb = iocb; } dio->size = 0; - dio->multi_bio = false; - dio->should_dirty = is_read && iter_is_iovec(iter); + if (is_read && iter_is_iovec(iter)) + dio->flags |= DIO_SHOULD_DIRTY; /* * Don't plug for HIPRI/polled IO, as those should go straight @@ -234,7 +240,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb 
*iocb, struct iov_iter *iter, if (is_read) { bio->bi_opf = REQ_OP_READ; - if (dio->should_dirty) + if (dio->flags & DIO_SHOULD_DIRTY) bio_set_pages_dirty(bio); } else { bio->bi_opf = dio_bio_write_op(iocb); @@ -255,7 +261,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, WRITE_ONCE(iocb->private, bio); break; } - if (!dio->multi_bio) { + if (!(dio->flags & DIO_MULTI_BIO)) { /* * AIO needs an extra reference to ensure the dio * structure which is embedded into the first bio @@ -263,7 +269,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, */ if (!is_sync) bio_get(bio); - dio->multi_bio = true; + dio->flags |= DIO_MULTI_BIO; atomic_set(&dio->ref, 2); do_poll = false; } else { From abd45c159df5fcb7ac820e2825dac85de7c01c21 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Oct 2021 12:43:41 -0600 Subject: [PATCH 084/205] block: handle fast path of bio splitting inline The fast path is no splitting needed. Separate the handling into a check part we can inline, and an out-of-line handling path if we do need to split. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-merge.c | 24 ++++++------------------ block/blk-mq.c | 5 +++-- block/blk.h | 27 ++++++++++++++++++++++++++- 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/block/blk-merge.c b/block/blk-merge.c index f88d7863f997c..ec727234ac482 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -324,6 +324,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, /** * __blk_queue_split - split a bio and submit the second half + * @q: [in] request_queue new bio is being queued at * @bio: [in, out] bio to be split * @nr_segs: [out] number of segments in the first bio * @@ -334,9 +335,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, * of the caller to ensure that q->bio_split is only released after processing * of the split bio has finished. */ -void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) +void __blk_queue_split(struct request_queue *q, struct bio **bio, + unsigned int *nr_segs) { - struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue; struct bio *split = NULL; switch (bio_op(*bio)) { @@ -353,21 +354,6 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) nr_segs); break; default: - /* - * All drivers must accept single-segments bios that are <= - * PAGE_SIZE. This is a quick and dirty check that relies on - * the fact that bi_io_vec[0] is always valid if a bio has data. - * The check might lead to occasional false negatives when bios - * are cloned, but compared to the performance impact of cloned - * bios themselves the loop below doesn't matter anyway. 
- */ - if (!q->limits.chunk_sectors && - (*bio)->bi_vcnt == 1 && - ((*bio)->bi_io_vec[0].bv_len + - (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) { - *nr_segs = 1; - break; - } split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); break; } @@ -397,9 +383,11 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) */ void blk_queue_split(struct bio **bio) { + struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue; unsigned int nr_segs; - __blk_queue_split(bio, &nr_segs); + if (blk_may_split(q, *bio)) + __blk_queue_split(q, bio, &nr_segs); } EXPORT_SYMBOL(blk_queue_split); diff --git a/block/blk-mq.c b/block/blk-mq.c index 0860f622099f6..09219080855f0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2259,11 +2259,12 @@ void blk_mq_submit_bio(struct bio *bio) struct request *rq; struct blk_plug *plug; struct request *same_queue_rq = NULL; - unsigned int nr_segs; + unsigned int nr_segs = 1; blk_status_t ret; blk_queue_bounce(q, &bio); - __blk_queue_split(&bio, &nr_segs); + if (blk_may_split(q, bio)) + __blk_queue_split(q, &bio, &nr_segs); if (!bio_integrity_prep(bio)) goto queue_exit; diff --git a/block/blk.h b/block/blk.h index fa05d3f07976f..447a2defe2c8b 100644 --- a/block/blk.h +++ b/block/blk.h @@ -266,7 +266,32 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ssize_t part_timeout_store(struct device *, struct device_attribute *, const char *, size_t); -void __blk_queue_split(struct bio **bio, unsigned int *nr_segs); +static inline bool blk_may_split(struct request_queue *q, struct bio *bio) +{ + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: + case REQ_OP_WRITE_SAME: + return true; /* non-trivial splitting decisions */ + default: + break; + } + + /* + * All drivers must accept single-segments bios that are <= PAGE_SIZE. + * This is a quick and dirty check that relies on the fact that + * bi_io_vec[0] is always valid if a bio has data. The check might + * lead to occasional false negatives when bios are cloned, but compared + * to the performance impact of cloned bios themselves the loop below + * doesn't matter anyway. + */ + return q->limits.chunk_sectors || bio->bi_vcnt != 1 || + bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; +} + +void __blk_queue_split(struct request_queue *q, struct bio **bio, + unsigned int *nr_segs); int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs); bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, From 17220ca5ce9606c1b015c4316fca18734c2df0bb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:26 +0100 Subject: [PATCH 085/205] block: cache request queue in bdev There are tons of places where we need to get a request_queue only having bdev, which turns into bdev->bd_disk->queue. There are probably a hundred of such places considering inline helpers, and enough of them are in hot paths. Cache queue pointer in struct block_device and make use of it in bdev_get_queue(). 
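As an illustration of the saving (simplified stand-in types, not the patch below):

/* Illustration only: the two lookup patterns the message above compares. */
struct request_queue;

struct gendisk {
	struct request_queue *queue;
};

struct block_device {
	struct gendisk *bd_disk;
	struct request_queue *bd_queue;	/* cached at bdev_alloc() time */
};

/* old path: two dependent loads, bdev->bd_disk and then ->queue */
struct request_queue *queue_via_disk(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/* new path: a single load straight from the block_device */
struct request_queue *queue_via_cache(struct block_device *bdev)
{
	return bdev->bd_queue;
}

In hot paths that only have the bdev at hand, this removes one pointer chase per lookup.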
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a3bfaecdd28956f03629d0ca5c63ebc096e1c809.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bdev.c | 1 + block/genhd.c | 4 +++- include/linux/blk_types.h | 1 + include/linux/blkdev.h | 2 +- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/block/bdev.c b/block/bdev.c index 93b1188d7e58d..fed8d0c041c74 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -493,6 +493,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) spin_lock_init(&bdev->bd_size_lock); bdev->bd_partno = partno; bdev->bd_inode = inode; + bdev->bd_queue = disk->queue; bdev->bd_stats = alloc_percpu(struct disk_stats); if (!bdev->bd_stats) { iput(inode); diff --git a/block/genhd.c b/block/genhd.c index ffbdb9b245556..d148c38450d70 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1267,6 +1267,9 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, if (!disk->bdi) goto out_free_disk; + /* bdev_alloc() might need the queue, set before the first call */ + disk->queue = q; + disk->part0 = bdev_alloc(disk, 0); if (!disk->part0) goto out_free_bdi; @@ -1282,7 +1285,6 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, disk_to_dev(disk)->type = &disk_type; device_initialize(disk_to_dev(disk)); inc_diskseq(disk); - disk->queue = q; q->disk = disk; lockdep_init_map(&disk->lockdep_map, "(bio completion)", lkclass, 0); #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 72736b4c057c6..1e370929c89eb 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -38,6 +38,7 @@ struct block_device { u8 bd_partno; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ struct gendisk * bd_disk; + struct request_queue * bd_queue; /* The counter of freeze processes */ int bd_fsfreeze_count; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2a8689e949b4c..d5b21fc8f49e5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -574,7 +574,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { - return bdev->bd_disk->queue; /* this is never NULL */ + return bdev->bd_queue; /* this is never NULL */ } /* From 025a38651ba6178a709ecf351ca90d11fa2908cd Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:27 +0100 Subject: [PATCH 086/205] block: use bdev_get_queue() in bdev.c Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster.
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a352936ce5d9ac719645b1e29b173d931ebcdc02.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bdev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/block/bdev.c b/block/bdev.c index fed8d0c041c74..cff0bb3a4578f 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -327,12 +327,12 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return result; - result = blk_queue_enter(bdev->bd_disk->queue, 0); + result = blk_queue_enter(bdev_get_queue(bdev), 0); if (result) return result; result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, REQ_OP_READ); - blk_queue_exit(bdev->bd_disk->queue); + blk_queue_exit(bdev_get_queue(bdev)); return result; } @@ -363,7 +363,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return -EOPNOTSUPP; - result = blk_queue_enter(bdev->bd_disk->queue, 0); + result = blk_queue_enter(bdev_get_queue(bdev), 0); if (result) return result; @@ -376,7 +376,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, clean_page_buffers(page); unlock_page(page); } - blk_queue_exit(bdev->bd_disk->queue); + blk_queue_exit(bdev_get_queue(bdev)); return result; } From 3caee4634be68e755d2fb130962f1623661dbd5b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:28 +0100 Subject: [PATCH 087/205] block: use bdev_get_queue() in bio.c Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/85c36ea784d285a5075baa10049e6b59e15fb484.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bio.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/block/bio.c b/block/bio.c index a3c9ff23a036f..5fb8092577bf3 100644 --- a/block/bio.c +++ b/block/bio.c @@ -965,7 +965,7 @@ EXPORT_SYMBOL(bio_add_pc_page); int bio_add_zone_append_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); bool same_page = false; if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) @@ -1070,7 +1070,7 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct iov_iter i = *iter; iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9); @@ -1148,7 +1148,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) { unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); unsigned int max_append_sectors = queue_max_zone_append_sectors(q); struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; @@ -1485,10 +1485,10 @@ void bio_endio(struct bio *bio) return; if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) - rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio); + rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio); if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { -
trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio); + trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); bio_clear_flag(bio, BIO_TRACE_COMPLETION); } From eab4e02733699cdf76fbe5e542d248c28454b3af Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:29 +0100 Subject: [PATCH 088/205] block: use bdev_get_queue() in blk-core.c Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/efc41f880262517c8dc32f932f1b23112f21b255.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/blk-core.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index f008c38ae9675..96ee996c05776 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -472,10 +472,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) static inline int bio_queue_enter(struct bio *bio) { - struct gendisk *disk = bio->bi_bdev->bd_disk; - struct request_queue *q = disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); while (!blk_try_enter_queue(q, false)) { + struct gendisk *disk = bio->bi_bdev->bd_disk; + if (bio->bi_opf & REQ_NOWAIT) { if (test_bit(GD_DEAD, &disk->state)) goto dead; @@ -800,7 +801,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q, static noinline_for_stack bool submit_bio_checks(struct bio *bio) { struct block_device *bdev = bio->bi_bdev; - struct request_queue *q = bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; struct blk_plug *plug; @@ -962,7 +963,7 @@ static void __submit_bio_noacct(struct bio *bio) current->bio_list = bio_list_on_stack; do { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct bio_list lower, same; /* @@ -980,7 +981,7 @@ static void __submit_bio_noacct(struct bio *bio) bio_list_init(&lower); bio_list_init(&same); while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) - if (q == bio->bi_bdev->bd_disk->queue) + if (q == bdev_get_queue(bio->bi_bdev)) bio_list_add(&same, bio); else bio_list_add(&lower, bio); @@ -1062,7 +1063,7 @@ void submit_bio(struct bio *bio) if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) count = queue_logical_block_size( - bio->bi_bdev->bd_disk->queue) >> 9; + bdev_get_queue(bio->bi_bdev)) >> 9; else count = bio_sectors(bio); From ed6cddefdfd361af23a25bdeaaa5e345ac714c38 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 15:03:30 +0100 Subject: [PATCH 089/205] block: convert the rest of block to bdev_get_queue Convert bdev->bd_disk->queue to bdev_get_queue(), which uses a cached queue pointer and so is faster.
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/addf6ea988c04213697ba3684c853e4ed7642a39.1634219547.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- block/bio-integrity.c | 2 +- block/blk-cgroup.c | 16 ++++++++-------- block/blk-crypto.c | 2 +- block/blk-iocost.c | 12 ++++++------ block/blk-mq.c | 2 +- block/blk-throttle.c | 2 +- block/genhd.c | 4 ++-- block/partitions/core.c | 4 ++-- 8 files changed, 22 insertions(+), 22 deletions(-) diff --git a/block/bio-integrity.c b/block/bio-integrity.c index 21234ff966d9b..d251147154592 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -134,7 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, iv = bip->bip_vec + bip->bip_vcnt; if (bip->bip_vcnt && - bvec_gap_to_prev(bio->bi_bdev->bd_disk->queue, + bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), &bip->bip_vec[bip->bip_vcnt - 1], offset)) return 0; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 166104d92b5ec..8908298d6ad3f 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -621,7 +621,7 @@ struct block_device *blkcg_conf_open_bdev(char **inputp) */ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, char *input, struct blkg_conf_ctx *ctx) - __acquires(rcu) __acquires(&bdev->bd_disk->queue->queue_lock) + __acquires(rcu) __acquires(&bdev->bd_queue->queue_lock) { struct block_device *bdev; struct request_queue *q; @@ -632,7 +632,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, if (IS_ERR(bdev)) return PTR_ERR(bdev); - q = bdev->bd_disk->queue; + q = bdev_get_queue(bdev); rcu_read_lock(); spin_lock_irq(&q->queue_lock); @@ -737,9 +737,9 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep); * with blkg_conf_prep(). */ void blkg_conf_finish(struct blkg_conf_ctx *ctx) - __releases(&ctx->bdev->bd_disk->queue->queue_lock) __releases(rcu) + __releases(&ctx->bdev->bd_queue->queue_lock) __releases(rcu) { - spin_unlock_irq(&ctx->bdev->bd_disk->queue->queue_lock); + spin_unlock_irq(&bdev_get_queue(ctx->bdev)->queue_lock); rcu_read_unlock(); blkdev_put_no_open(ctx->bdev); } @@ -842,7 +842,7 @@ static void blkcg_fill_root_iostats(void) while ((dev = class_dev_iter_next(&iter))) { struct block_device *bdev = dev_to_bdev(dev); struct blkcg_gq *blkg = - blk_queue_root_blkg(bdev->bd_disk->queue); + blk_queue_root_blkg(bdev_get_queue(bdev)); struct blkg_iostat tmp; int cpu; @@ -1801,7 +1801,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio, rcu_read_lock(); blkg = blkg_lookup_create(css_to_blkcg(css), - bio->bi_bdev->bd_disk->queue); + bdev_get_queue(bio->bi_bdev)); while (blkg) { if (blkg_tryget(blkg)) { ret_blkg = blkg; @@ -1837,8 +1837,8 @@ void bio_associate_blkg_from_css(struct bio *bio, if (css && css->parent) { bio->bi_blkg = blkg_tryget_closest(bio, css); } else { - blkg_get(bio->bi_bdev->bd_disk->queue->root_blkg); - bio->bi_blkg = bio->bi_bdev->bd_disk->queue->root_blkg; + blkg_get(bdev_get_queue(bio->bi_bdev)->root_blkg); + bio->bi_blkg = bdev_get_queue(bio->bi_bdev)->root_blkg; } } EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css); diff --git a/block/blk-crypto.c b/block/blk-crypto.c index 103c2e2d50d67..8f53f4a1f9e2d 100644 --- a/block/blk-crypto.c +++ b/block/blk-crypto.c @@ -280,7 +280,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr) * Success if device supports the encryption context, or if we succeeded * in falling back to the crypto API. 
*/ - if (blk_ksm_crypto_cfg_supported(bio->bi_bdev->bd_disk->queue->ksm, + if (blk_ksm_crypto_cfg_supported(bdev_get_queue(bio->bi_bdev)->ksm, &bc_key->crypto_cfg)) return true; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index b3880e4ba22a1..a5b37cc65b171 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3165,12 +3165,12 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, if (IS_ERR(bdev)) return PTR_ERR(bdev); - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); if (!ioc) { - ret = blk_iocost_init(bdev->bd_disk->queue); + ret = blk_iocost_init(bdev_get_queue(bdev)); if (ret) goto err; - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); } spin_lock_irq(&ioc->lock); @@ -3332,12 +3332,12 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, if (IS_ERR(bdev)) return PTR_ERR(bdev); - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); if (!ioc) { - ret = blk_iocost_init(bdev->bd_disk->queue); + ret = blk_iocost_init(bdev_get_queue(bdev)); if (ret) goto err; - ioc = q_to_ioc(bdev->bd_disk->queue); + ioc = q_to_ioc(bdev_get_queue(bdev)); } spin_lock_irq(&ioc->lock); diff --git a/block/blk-mq.c b/block/blk-mq.c index 09219080855f0..b58878221f177 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2253,7 +2253,7 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug) */ void blk_mq_submit_bio(struct bio *bio) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); const int is_sync = op_is_sync(bio->bi_opf); const int is_flush_fua = op_is_flush(bio->bi_opf); struct request *rq; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8cefd14deed5a..39bb6e68a9a29 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -2063,7 +2063,7 @@ void blk_throtl_charge_bio_split(struct bio *bio) bool __blk_throtl_bio(struct bio *bio) { - struct request_queue *q = bio->bi_bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blkcg_gq *blkg = bio->bi_blkg; struct throtl_qnode *qn = NULL; struct throtl_grp *tg = blkg_to_tg(blkg); diff --git a/block/genhd.c b/block/genhd.c index d148c38450d70..759bc06810f80 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -883,7 +883,7 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct block_device *bdev = dev_to_bdev(dev); - struct request_queue *q = bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bdev); struct disk_stats stat; unsigned int inflight; @@ -927,7 +927,7 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf) { struct block_device *bdev = dev_to_bdev(dev); - struct request_queue *q = bdev->bd_disk->queue; + struct request_queue *q = bdev_get_queue(bdev); unsigned int inflight[2]; if (queue_is_mq(q)) diff --git a/block/partitions/core.c b/block/partitions/core.c index 3a4898433c434..9dbddc355b403 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -204,7 +204,7 @@ static ssize_t part_alignment_offset_show(struct device *dev, struct block_device *bdev = dev_to_bdev(dev); return sprintf(buf, "%u\n", - queue_limit_alignment_offset(&bdev->bd_disk->queue->limits, + queue_limit_alignment_offset(&bdev_get_queue(bdev)->limits, bdev->bd_start_sect)); } @@ -214,7 +214,7 @@ static ssize_t part_discard_alignment_show(struct device *dev, struct block_device *bdev = dev_to_bdev(dev); 
return sprintf(buf, "%u\n", - queue_limit_discard_alignment(&bdev->bd_disk->queue->limits, + queue_limit_discard_alignment(&bdev_get_queue(bdev)->limits, bdev->bd_start_sect)); } From d4aa57a1cac3c99ffd641f7c8e0a7aff5656de0d Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Oct 2021 09:01:43 -0600 Subject: [PATCH 090/205] block: don't bother iter advancing a fully done bio If we're completing nbytes and nbytes is the size of the bio, don't bother with calling into the iterator increment helpers. Just clear the bio size and we're done. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/bio.c | 15 ++------------- include/linux/bio.h | 24 ++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/block/bio.c b/block/bio.c index 5fb8092577bf3..4f397ba47db53 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1278,18 +1278,7 @@ int submit_bio_wait(struct bio *bio) } EXPORT_SYMBOL(submit_bio_wait); -/** - * bio_advance - increment/complete a bio by some number of bytes - * @bio: bio to advance - * @bytes: number of bytes to complete - * - * This updates bi_sector, bi_size and bi_idx; if the number of bytes to - * complete doesn't align with a bvec boundary, then bv_len and bv_offset will - * be updated on the last bvec as well. - * - * @bio will then represent the remaining, uncompleted portion of the io. - */ -void bio_advance(struct bio *bio, unsigned bytes) +void __bio_advance(struct bio *bio, unsigned bytes) { if (bio_integrity(bio)) bio_integrity_advance(bio, bytes); @@ -1297,7 +1286,7 @@ void bio_advance(struct bio *bio, unsigned bytes) bio_crypt_advance(bio, bytes); bio_advance_iter(bio, &bio->bi_iter, bytes); } -EXPORT_SYMBOL(bio_advance); +EXPORT_SYMBOL(__bio_advance); void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter) diff --git a/include/linux/bio.h b/include/linux/bio.h index 62d684b7dd4c3..9538f20ffaa59 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -119,6 +119,28 @@ static inline void bio_advance_iter_single(const struct bio *bio, bvec_iter_advance_single(bio->bi_io_vec, iter, bytes); } +void __bio_advance(struct bio *, unsigned bytes); + +/** + * bio_advance - increment/complete a bio by some number of bytes + * @bio: bio to advance + * @bytes: number of bytes to complete + * + * This updates bi_sector, bi_size and bi_idx; if the number of bytes to + * complete doesn't align with a bvec boundary, then bv_len and bv_offset will + * be updated on the last bvec as well. + * + * @bio will then represent the remaining, uncompleted portion of the io. 
+ */ +static inline void bio_advance(struct bio *bio, unsigned int nbytes) +{ + if (nbytes == bio->bi_iter.bi_size) { + bio->bi_iter.bi_size = 0; + return; + } + __bio_advance(bio, nbytes); +} + #define __bio_for_each_segment(bvl, bio, iter, start) \ for (iter = (start); \ (iter).bi_size && \ @@ -381,8 +403,6 @@ static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) struct request_queue; extern int submit_bio_wait(struct bio *bio); -extern void bio_advance(struct bio *, unsigned); - extern void bio_init(struct bio *bio, struct bio_vec *table, unsigned short max_vecs); extern void bio_uninit(struct bio *); From c477b7977838ac97dd9d20625591a5d23c8079b7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 09:15:40 -0600 Subject: [PATCH 091/205] block: remove useless caller argument to print_req_error() We have exactly one caller of this, so just get rid of adding the useless function name to the output. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 96ee996c05776..b4094b31c99c7 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -216,8 +216,7 @@ int blk_status_to_errno(blk_status_t status) } EXPORT_SYMBOL_GPL(blk_status_to_errno); -static void print_req_error(struct request *req, blk_status_t status, - const char *caller) +static void print_req_error(struct request *req, blk_status_t status) { int idx = (__force int)status; @@ -225,9 +224,9 @@ static void print_req_error(struct request *req, blk_status_t status, return; printk_ratelimited(KERN_ERR - "%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x " + "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x " "phys_seg %u prio class %u\n", - caller, blk_errors[idx].name, + blk_errors[idx].name, req->rq_disk ? req->rq_disk->disk_name : "?", blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)), req->cmd_flags & ~REQ_OP_MASK, @@ -1471,7 +1470,7 @@ bool blk_update_request(struct request *req, blk_status_t error, if (unlikely(error && !blk_rq_is_passthrough(req) && !(req->rq_flags & RQF_QUIET))) - print_req_error(req, error, __func__); + print_req_error(req, error); blk_account_io_completion(req, nr_bytes); From 9be3e06fb75abcca00c955af740fabff46a13452 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 14 Oct 2021 09:17:01 -0600 Subject: [PATCH 092/205] block: move update request helpers into blk-mq.c For some reason we still have them in blk-core, with the rest of the request completion being in blk-mq. That causes an out-of-line call for each completion. Move them into blk-mq.c instead, where they belong.
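The out-of-line call refers to a cross-file call the compiler cannot inline; a generic sketch of the difference (illustration only, not the kernel code being moved):

/* Models a helper that lives in another translation unit: always a call. */
__attribute__((noinline))
static void account_bytes_far(unsigned long *total, unsigned int bytes)
{
	*total += bytes;
}

/* Same file as its caller: the compiler is free to fold it in. */
static inline void account_bytes_near(unsigned long *total, unsigned int bytes)
{
	*total += bytes;
}

void complete_one(unsigned long *total, unsigned int bytes)
{
	account_bytes_near(total, bytes);	/* typically inlined, no call */
	account_bytes_far(total, bytes);	/* emits a call instruction */
}

Keeping the update helpers in the same file as the completion path gives the compiler that same opportunity.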
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 146 +---------------------------------------------- block/blk-mq.c | 144 ++++++++++++++++++++++++++++++++++++++++++++++ block/blk.h | 1 + 3 files changed, 146 insertions(+), 145 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index b4094b31c99c7..20b6cc06461a6 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -216,7 +216,7 @@ int blk_status_to_errno(blk_status_t status) } EXPORT_SYMBOL_GPL(blk_status_to_errno); -static void print_req_error(struct request *req, blk_status_t status) +void blk_print_req_error(struct request *req, blk_status_t status) { int idx = (__force int)status; @@ -234,33 +234,6 @@ static void print_req_error(struct request *req, blk_status_t status) IOPRIO_PRIO_CLASS(req->ioprio)); } -static void req_bio_endio(struct request *rq, struct bio *bio, - unsigned int nbytes, blk_status_t error) -{ - if (error) - bio->bi_status = error; - - if (unlikely(rq->rq_flags & RQF_QUIET)) - bio_set_flag(bio, BIO_QUIET); - - bio_advance(bio, nbytes); - - if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) { - /* - * Partial zone append completions cannot be supported as the - * BIO fragments may end up not being written sequentially. - */ - if (bio->bi_iter.bi_size) - bio->bi_status = BLK_STS_IOERR; - else - bio->bi_iter.bi_sector = rq->__sector; - } - - /* don't actually finish bio if it's part of flush sequence */ - if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) - bio_endio(bio); -} - void blk_dump_rq_flags(struct request *rq, char *msg) { printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg, @@ -1311,17 +1284,6 @@ static void update_io_ticks(struct block_device *part, unsigned long now, } } -static void blk_account_io_completion(struct request *req, unsigned int bytes) -{ - if (req->part && blk_do_io_stat(req)) { - const int sgrp = op_stat_group(req_op(req)); - - part_stat_lock(); - part_stat_add(req->part, sectors[sgrp], bytes >> 9); - part_stat_unlock(); - } -} - void __blk_account_io_done(struct request *req, u64 now) { const int sgrp = op_stat_group(req_op(req)); @@ -1430,112 +1392,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq) } EXPORT_SYMBOL_GPL(blk_steal_bios); -/** - * blk_update_request - Complete multiple bytes without completing the request - * @req: the request being processed - * @error: block status code - * @nr_bytes: number of bytes to complete for @req - * - * Description: - * Ends I/O on a number of bytes attached to @req, but doesn't complete - * the request structure even if @req doesn't have leftover. - * If @req has leftover, sets it up for the next range of segments. - * - * Passing the result of blk_rq_bytes() as @nr_bytes guarantees - * %false return from this function. - * - * Note: - * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function - * except in the consistency check at the end of this function. 
- * - * Return: - * %false - this request doesn't have any more data - * %true - this request has more data - **/ -bool blk_update_request(struct request *req, blk_status_t error, - unsigned int nr_bytes) -{ - int total_bytes; - - trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); - - if (!req->bio) - return false; - -#ifdef CONFIG_BLK_DEV_INTEGRITY - if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && - error == BLK_STS_OK) - req->q->integrity.profile->complete_fn(req, nr_bytes); -#endif - - if (unlikely(error && !blk_rq_is_passthrough(req) && - !(req->rq_flags & RQF_QUIET))) - print_req_error(req, error); - - blk_account_io_completion(req, nr_bytes); - - total_bytes = 0; - while (req->bio) { - struct bio *bio = req->bio; - unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); - - if (bio_bytes == bio->bi_iter.bi_size) - req->bio = bio->bi_next; - - /* Completion has already been traced */ - bio_clear_flag(bio, BIO_TRACE_COMPLETION); - req_bio_endio(req, bio, bio_bytes, error); - - total_bytes += bio_bytes; - nr_bytes -= bio_bytes; - - if (!nr_bytes) - break; - } - - /* - * completely done - */ - if (!req->bio) { - /* - * Reset counters so that the request stacking driver - * can find how many bytes remain in the request - * later. - */ - req->__data_len = 0; - return false; - } - - req->__data_len -= total_bytes; - - /* update sector only for requests with clear definition of sector */ - if (!blk_rq_is_passthrough(req)) - req->__sector += total_bytes >> 9; - - /* mixed attributes always follow the first bio */ - if (req->rq_flags & RQF_MIXED_MERGE) { - req->cmd_flags &= ~REQ_FAILFAST_MASK; - req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; - } - - if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { - /* - * If total number of sectors is less than the first segment - * size, something has gone terribly wrong. - */ - if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { - blk_dump_rq_flags(req, "request botched"); - req->__data_len = blk_rq_cur_bytes(req); - } - - /* recalculate the number of segments */ - req->nr_phys_segments = blk_recalc_rq_segments(req); - } - - return true; -} -EXPORT_SYMBOL_GPL(blk_update_request); - #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE /** * rq_flush_dcache_pages - Helper function to flush all pages in a request diff --git a/block/blk-mq.c b/block/blk-mq.c index b58878221f177..9cff9e8eada40 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -628,6 +628,150 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug) } } +static void req_bio_endio(struct request *rq, struct bio *bio, + unsigned int nbytes, blk_status_t error) +{ + if (error) + bio->bi_status = error; + + if (unlikely(rq->rq_flags & RQF_QUIET)) + bio_set_flag(bio, BIO_QUIET); + + bio_advance(bio, nbytes); + + if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) { + /* + * Partial zone append completions cannot be supported as the + * BIO fragments may end up not being written sequentially. 
+ */ + if (bio->bi_iter.bi_size) + bio->bi_status = BLK_STS_IOERR; + else + bio->bi_iter.bi_sector = rq->__sector; + } + + /* don't actually finish bio if it's part of flush sequence */ + if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) + bio_endio(bio); +} + +static void blk_account_io_completion(struct request *req, unsigned int bytes) +{ + if (req->part && blk_do_io_stat(req)) { + const int sgrp = op_stat_group(req_op(req)); + + part_stat_lock(); + part_stat_add(req->part, sectors[sgrp], bytes >> 9); + part_stat_unlock(); + } +} + +/** + * blk_update_request - Complete multiple bytes without completing the request + * @req: the request being processed + * @error: block status code + * @nr_bytes: number of bytes to complete for @req + * + * Description: + * Ends I/O on a number of bytes attached to @req, but doesn't complete + * the request structure even if @req doesn't have leftover. + * If @req has leftover, sets it up for the next range of segments. + * + * Passing the result of blk_rq_bytes() as @nr_bytes guarantees + * %false return from this function. + * + * Note: + * The RQF_SPECIAL_PAYLOAD flag is ignored on purpose in this function + * except in the consistency check at the end of this function. + * + * Return: + * %false - this request doesn't have any more data + * %true - this request has more data + **/ +bool blk_update_request(struct request *req, blk_status_t error, + unsigned int nr_bytes) +{ + int total_bytes; + + trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes); + + if (!req->bio) + return false; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && + error == BLK_STS_OK) + req->q->integrity.profile->complete_fn(req, nr_bytes); +#endif + + if (unlikely(error && !blk_rq_is_passthrough(req) && + !(req->rq_flags & RQF_QUIET))) + blk_print_req_error(req, error); + + blk_account_io_completion(req, nr_bytes); + + total_bytes = 0; + while (req->bio) { + struct bio *bio = req->bio; + unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); + + if (bio_bytes == bio->bi_iter.bi_size) + req->bio = bio->bi_next; + + /* Completion has already been traced */ + bio_clear_flag(bio, BIO_TRACE_COMPLETION); + req_bio_endio(req, bio, bio_bytes, error); + + total_bytes += bio_bytes; + nr_bytes -= bio_bytes; + + if (!nr_bytes) + break; + } + + /* + * completely done + */ + if (!req->bio) { + /* + * Reset counters so that the request stacking driver + * can find how many bytes remain in the request + * later. + */ + req->__data_len = 0; + return false; + } + + req->__data_len -= total_bytes; + + /* update sector only for requests with clear definition of sector */ + if (!blk_rq_is_passthrough(req)) + req->__sector += total_bytes >> 9; + + /* mixed attributes always follow the first bio */ + if (req->rq_flags & RQF_MIXED_MERGE) { + req->cmd_flags &= ~REQ_FAILFAST_MASK; + req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK; + } + + if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) { + /* + * If total number of sectors is less than the first segment + * size, something has gone terribly wrong. 
+ */ + if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { + blk_dump_rq_flags(req, "request botched"); + req->__data_len = blk_rq_cur_bytes(req); + } + + /* recalculate the number of segments */ + req->nr_phys_segments = blk_recalc_rq_segments(req); + } + + return true; +} +EXPORT_SYMBOL_GPL(blk_update_request); + inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { if (blk_mq_need_time_stamp(rq)) { diff --git a/block/blk.h b/block/blk.h index 447a2defe2c8b..e80350327e6dc 100644 --- a/block/blk.h +++ b/block/blk.h @@ -215,6 +215,7 @@ static inline void blk_integrity_del(struct gendisk *disk) unsigned long blk_rq_timeout(unsigned long timeout); void blk_add_timer(struct request *req); +void blk_print_req_error(struct request *req, blk_status_t status); bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs, struct request **same_queue_rq); From b60876296847e6cd7f1da4b8b7f0f31399d59aa1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 Oct 2021 15:03:52 -0600 Subject: [PATCH 093/205] block: improve layout of struct request It's been a while since this was analyzed, move some members around to better flow with the use case. Initial state up top, and queued state after that. This improves my peak case by about 1.5%, from 7750K to 7900K IOPS. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- include/linux/blk-mq.h | 90 +++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 44 deletions(-) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index a9c1d08825503..8ca9728cc7f21 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -83,6 +83,8 @@ struct request { int tag; int internal_tag; + unsigned int timeout; + /* the following two fields are internal, NEVER access directly */ unsigned int __data_len; /* total data len */ sector_t __sector; /* sector cursor */ @@ -95,49 +97,6 @@ struct request { struct request *rq_next; }; - /* - * The hash is used inside the scheduler, and killed once the - * request reaches the dispatch list. The ipi_list is only used - * to queue the request for softirq completion, which is long - * after the request has been unhashed (and even removed from - * the dispatch list). - */ - union { - struct hlist_node hash; /* merge hash */ - struct llist_node ipi_list; - }; - - /* - * The rb_node is only used inside the io scheduler, requests - * are pruned when moved to the dispatch queue. So let the - * completion_data share space with the rb_node. - */ - union { - struct rb_node rb_node; /* sort/lookup */ - struct bio_vec special_vec; - void *completion_data; - int error_count; /* for legacy drivers, don't use */ - }; - - /* - * Three pointers are available for the IO schedulers, if they need - * more they have to dynamically allocate it. Flush requests are - * never put on the IO scheduler. So let the flush fields share - * space with the elevator data. - */ - union { - struct { - struct io_cq *icq; - void *priv[2]; - } elv; - - struct { - unsigned int seq; - struct list_head list; - rq_end_io_fn *saved_end_io; - } flush; - }; - struct gendisk *rq_disk; struct block_device *part; #ifdef CONFIG_BLK_RQ_ALLOC_TIME @@ -180,9 +139,52 @@ struct request { enum mq_rq_state state; refcount_t ref; - unsigned int timeout; unsigned long deadline; + /* + * The hash is used inside the scheduler, and killed once the + * request reaches the dispatch list. 
The ipi_list is only used + * to queue the request for softirq completion, which is long + * after the request has been unhashed (and even removed from + * the dispatch list). + */ + union { + struct hlist_node hash; /* merge hash */ + struct llist_node ipi_list; + }; + + /* + * The rb_node is only used inside the io scheduler, requests + * are pruned when moved to the dispatch queue. So let the + * completion_data share space with the rb_node. + */ + union { + struct rb_node rb_node; /* sort/lookup */ + struct bio_vec special_vec; + void *completion_data; + int error_count; /* for legacy drivers, don't use */ + }; + + + /* + * Three pointers are available for the IO schedulers, if they need + * more they have to dynamically allocate it. Flush requests are + * never put on the IO scheduler. So let the flush fields share + * space with the elevator data. + */ + union { + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + + struct { + unsigned int seq; + struct list_head list; + rq_end_io_fn *saved_end_io; + } flush; + }; + union { struct __call_single_data csd; u64 fifo_time; From 90b8faa0e8de1b02b619fb33f6c6e1e13e7d1d70 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 Oct 2021 20:06:18 -0600 Subject: [PATCH 094/205] block: only mark bio as tracked if it really is tracked We set BIO_TRACKED unconditionally when rq_qos_throttle() is called, even though we may not even have an rq_qos handler. Only mark it as TRACKED if it really is potentially tracked. This saves considerable time for the case where the bio isn't tracked: 2.64% -1.65% [kernel.vmlinux] [k] bio_endio Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-rq-qos.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index f000f83e0621c..3cfbc8668cba9 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -189,9 +189,10 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio) * BIO_TRACKED lets controllers know that a bio went through the * normal rq_qos path. */ - bio_set_flag(bio, BIO_TRACKED); - if (q->rq_qos) + if (q->rq_qos) { + bio_set_flag(bio, BIO_TRACKED); __rq_qos_throttle(q->rq_qos, bio); + } } static inline void rq_qos_track(struct request_queue *q, struct request *rq, From 2ff0682da6e09c1e0db63a2d2abcd4efb531c8db Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 15 Oct 2021 09:44:38 -0600 Subject: [PATCH 095/205] block: store elevator state in request Add an rq private RQF_ELV flag, which tells the block layer that this request was initialized on a queue that has an IO scheduler attached. This allows for faster checking in the fast path, rather than having to deference rq->q later on. Elevator switching does full quiesce of the queue before detaching an IO scheduler, so it's safe to cache this in the request itself. 
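To make the fast-path win concrete, here is a minimal stand-alone C sketch of the same pattern: snapshot a property of the parent object into a per-request flag at allocation time, so hot paths test one bit instead of dereferencing rq->q. The types and names (struct req, FLAG_ELV, req_wants_sched_hooks) are invented for the example and are not the kernel's definitions.

  #include <stdbool.h>
  #include <stdio.h>

  /* Simplified stand-ins for the kernel structures. */
  struct sched_queue { const char *name; };

  struct queue {
          struct sched_queue *elevator;   /* NULL if no IO scheduler */
  };

  #define FLAG_ELV (1u << 0)              /* queue had a scheduler at alloc time */

  struct req {
          struct queue *q;
          unsigned int flags;
  };

  /* Allocation-time init: cache the queue state in the request. */
  static void req_init(struct req *rq, struct queue *q)
  {
          rq->q = q;
          rq->flags = q->elevator ? FLAG_ELV : 0;
  }

  /* Hot path: a single flag test, no pointer chasing through rq->q. */
  static bool req_wants_sched_hooks(const struct req *rq)
  {
          return rq->flags & FLAG_ELV;
  }

  int main(void)
  {
          struct sched_queue sched = { "mq-deadline" };
          struct queue q_sched = { &sched }, q_none = { NULL };
          struct req a, b;

          req_init(&a, &q_sched);
          req_init(&b, &q_none);
          printf("a: %d, b: %d\n", req_wants_sched_hooks(&a),
                 req_wants_sched_hooks(&b));
          return 0;
  }

The cached bit stays correct only because, as noted above, elevator switching quiesces the queue before detaching the scheduler, so it cannot go stale while a request is in flight.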
Signed-off-by: Jens Axboe --- block/blk-mq-sched.h | 27 ++++++++++++++++----------- block/blk-mq.c | 20 +++++++++++--------- include/linux/blk-mq.h | 2 ++ 3 files changed, 29 insertions(+), 20 deletions(-) diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h index fe252278ed9a5..98836106b25fc 100644 --- a/block/blk-mq-sched.h +++ b/block/blk-mq-sched.h @@ -56,29 +56,34 @@ static inline bool blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, struct bio *bio) { - struct elevator_queue *e = q->elevator; - - if (e && e->type->ops.allow_merge) - return e->type->ops.allow_merge(q, rq, bio); + if (rq->rq_flags & RQF_ELV) { + struct elevator_queue *e = q->elevator; + if (e->type->ops.allow_merge) + return e->type->ops.allow_merge(q, rq, bio); + } return true; } static inline void blk_mq_sched_completed_request(struct request *rq, u64 now) { - struct elevator_queue *e = rq->q->elevator; + if (rq->rq_flags & RQF_ELV) { + struct elevator_queue *e = rq->q->elevator; - if (e && e->type->ops.completed_request) - e->type->ops.completed_request(rq, now); + if (e->type->ops.completed_request) + e->type->ops.completed_request(rq, now); + } } static inline void blk_mq_sched_requeue_request(struct request *rq) { - struct request_queue *q = rq->q; - struct elevator_queue *e = q->elevator; + if (rq->rq_flags & RQF_ELV) { + struct request_queue *q = rq->q; + struct elevator_queue *e = q->elevator; - if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request) - e->type->ops.requeue_request(rq); + if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request) + e->type->ops.requeue_request(rq); + } } static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx) diff --git a/block/blk-mq.c b/block/blk-mq.c index 9cff9e8eada40..28eb1f3c6f76f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -306,7 +306,7 @@ void blk_mq_wake_waiters(struct request_queue *q) */ static inline bool blk_mq_need_time_stamp(struct request *rq) { - return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; + return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); } static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, @@ -316,9 +316,11 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct request *rq = tags->static_rqs[tag]; if (data->q->elevator) { + rq->rq_flags = RQF_ELV; rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; } else { + rq->rq_flags = 0; rq->tag = tag; rq->internal_tag = BLK_MQ_NO_TAG; } @@ -327,7 +329,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->q = data->q; rq->mq_ctx = data->ctx; rq->mq_hctx = data->hctx; - rq->rq_flags = 0; rq->cmd_flags = data->cmd_flags; if (data->flags & BLK_MQ_REQ_PM) rq->rq_flags |= RQF_PM; @@ -363,11 +364,11 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; refcount_set(&rq->ref, 1); - if (!op_is_flush(data->cmd_flags)) { + if (!op_is_flush(data->cmd_flags) && (rq->rq_flags & RQF_ELV)) { struct elevator_queue *e = data->q->elevator; rq->elv.icq = NULL; - if (e && e->type->ops.prepare_request) { + if (e->type->ops.prepare_request) { if (e->type->icq_cache) blk_mq_sched_assign_ioc(rq); @@ -588,12 +589,13 @@ static void __blk_mq_free_request(struct request *rq) void blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; - struct elevator_queue *e = q->elevator; struct blk_mq_ctx *ctx = rq->mq_ctx; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; - if 
(rq->rq_flags & RQF_ELVPRIV) { - if (e && e->type->ops.finish_request) + if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) { + struct elevator_queue *e = q->elevator; + + if (e->type->ops.finish_request) e->type->ops.finish_request(rq); if (rq->elv.icq) { put_io_context(rq->elv.icq->ioc); @@ -2254,7 +2256,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, goto insert; } - if (q->elevator && !bypass_insert) + if ((rq->rq_flags & RQF_ELV) && !bypass_insert) goto insert; budget_token = blk_mq_get_dispatch_budget(q); @@ -2492,7 +2494,7 @@ void blk_mq_submit_bio(struct bio *bio) } blk_add_rq_to_plug(plug, rq); - } else if (q->elevator) { + } else if (rq->rq_flags & RQF_ELV) { /* Insert the request at the IO scheduler queue */ blk_mq_sched_insert_request(rq, false, true, true); } else if (plug && !blk_queue_nomerges(q)) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8ca9728cc7f21..95c3bd3a008e5 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -55,6 +55,8 @@ typedef __u32 __bitwise req_flags_t; #define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 20)) /* ->timeout has been called, don't expire again */ #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) +/* queue has elevator attached */ +#define RQF_ELV ((__force req_flags_t)(1 << 22)) /* flags that prevent us from merging requests: */ #define RQF_NOMERGE_FLAGS \ From 4f266f2be822eacd70aca2a7a53c4a111be79acb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 18 Oct 2021 21:37:27 +0100 Subject: [PATCH 096/205] block: skip elevator fields init for non-elv queue Don't init rq->hash and rq->rb_node in blk_mq_rq_ctx_init() if there is no elevator. Also, move some other initialisers that imply barriers to the end, so the compiler is free to rearrange and optimise other the rest of them. note: fold in a change from Jens leaving queue_list unconditional, as it might lead to problems otherwise. 
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-mq.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 28eb1f3c6f76f..1d2e2fd4043e3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -325,6 +325,10 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->internal_tag = BLK_MQ_NO_TAG; } + if (blk_mq_need_time_stamp(rq)) + rq->start_time_ns = ktime_get_ns(); + else + rq->start_time_ns = 0; /* csd/requeue_work/fifo_time is initialized before use */ rq->q = data->q; rq->mq_ctx = data->ctx; @@ -334,41 +338,37 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->rq_flags |= RQF_PM; if (blk_queue_io_stat(data->q)) rq->rq_flags |= RQF_IO_STAT; - INIT_LIST_HEAD(&rq->queuelist); - INIT_HLIST_NODE(&rq->hash); - RB_CLEAR_NODE(&rq->rb_node); rq->rq_disk = NULL; rq->part = NULL; #ifdef CONFIG_BLK_RQ_ALLOC_TIME rq->alloc_time_ns = alloc_time_ns; #endif - if (blk_mq_need_time_stamp(rq)) - rq->start_time_ns = ktime_get_ns(); - else - rq->start_time_ns = 0; rq->io_start_time_ns = 0; rq->stats_sectors = 0; rq->nr_phys_segments = 0; #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; #endif - blk_crypto_rq_set_defaults(rq); - /* tag was already set */ - WRITE_ONCE(rq->deadline, 0); - rq->timeout = 0; - rq->end_io = NULL; rq->end_io_data = NULL; data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; + blk_crypto_rq_set_defaults(rq); + INIT_LIST_HEAD(&rq->queuelist); + /* tag was already set */ + WRITE_ONCE(rq->deadline, 0); refcount_set(&rq->ref, 1); - if (!op_is_flush(data->cmd_flags) && (rq->rq_flags & RQF_ELV)) { + if (rq->rq_flags & RQF_ELV) { struct elevator_queue *e = data->q->elevator; rq->elv.icq = NULL; - if (e->type->ops.prepare_request) { + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); + + if (!op_is_flush(data->cmd_flags) && + e->type->ops.prepare_request) { if (e->type->icq_cache) blk_mq_sched_assign_ioc(rq); From 605f784e4f5faecf6c78070c6bf446e920104f9f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 18 Oct 2021 21:37:28 +0100 Subject: [PATCH 097/205] block: blk_mq_rq_ctx_init cache ctx/q/hctx We should have enough of registers in blk_mq_rq_ctx_init(), store them in local vars, so we don't keep reloading them. note: keeping q->elevator may look unnecessary, but it's also used inside inlined blk_mq_tags_from_data(). 
Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-mq.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 1d2e2fd4043e3..fa4de25c3bcbc 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -312,10 +312,14 @@ static inline bool blk_mq_need_time_stamp(struct request *rq) static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, unsigned int tag, u64 alloc_time_ns) { + struct blk_mq_ctx *ctx = data->ctx; + struct blk_mq_hw_ctx *hctx = data->hctx; + struct request_queue *q = data->q; + struct elevator_queue *e = q->elevator; struct blk_mq_tags *tags = blk_mq_tags_from_data(data); struct request *rq = tags->static_rqs[tag]; - if (data->q->elevator) { + if (e) { rq->rq_flags = RQF_ELV; rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; @@ -330,13 +334,13 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, else rq->start_time_ns = 0; /* csd/requeue_work/fifo_time is initialized before use */ - rq->q = data->q; - rq->mq_ctx = data->ctx; - rq->mq_hctx = data->hctx; + rq->q = q; + rq->mq_ctx = ctx; + rq->mq_hctx = hctx; rq->cmd_flags = data->cmd_flags; if (data->flags & BLK_MQ_REQ_PM) rq->rq_flags |= RQF_PM; - if (blk_queue_io_stat(data->q)) + if (blk_queue_io_stat(q)) rq->rq_flags |= RQF_IO_STAT; rq->rq_disk = NULL; rq->part = NULL; From 128459062bc994355027e190477c432ec5b5638a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 18 Oct 2021 21:37:29 +0100 Subject: [PATCH 098/205] block: cache rq_flags inside blk_mq_rq_ctx_init() Add a local variable for rq_flags, it helps to compile out some of rq_flags reloads. Signed-off-by: Pavel Begunkov Signed-off-by: Jens Axboe --- block/blk-mq.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index fa4de25c3bcbc..633d735807124 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -318,17 +318,23 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, struct elevator_queue *e = q->elevator; struct blk_mq_tags *tags = blk_mq_tags_from_data(data); struct request *rq = tags->static_rqs[tag]; + unsigned int rq_flags = 0; if (e) { - rq->rq_flags = RQF_ELV; + rq_flags = RQF_ELV; rq->tag = BLK_MQ_NO_TAG; rq->internal_tag = tag; } else { - rq->rq_flags = 0; rq->tag = tag; rq->internal_tag = BLK_MQ_NO_TAG; } + if (data->flags & BLK_MQ_REQ_PM) + rq_flags |= RQF_PM; + if (blk_queue_io_stat(q)) + rq_flags |= RQF_IO_STAT; + rq->rq_flags = rq_flags; + if (blk_mq_need_time_stamp(rq)) rq->start_time_ns = ktime_get_ns(); else @@ -338,10 +344,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->mq_ctx = ctx; rq->mq_hctx = hctx; rq->cmd_flags = data->cmd_flags; - if (data->flags & BLK_MQ_REQ_PM) - rq->rq_flags |= RQF_PM; - if (blk_queue_io_stat(q)) - rq->rq_flags |= RQF_IO_STAT; rq->rq_disk = NULL; rq->part = NULL; #ifdef CONFIG_BLK_RQ_ALLOC_TIME From 9a14d6ce4135fa72705a926c894218a0d6988924 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 16 Oct 2021 17:27:20 -0600 Subject: [PATCH 099/205] block: remove debugfs blk_mq_ctx dispatched/merged/completed attributes These were added as part of early days debugging for blk-mq, and they are not really useful anymore. Rather than spend cycles updating them, just get rid of them. As a bonus, this shrinks the per-cpu software queue size from 256b to 192b. That's a whole cacheline less. 
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 54 ------------------------------------------ block/blk-mq-sched.c | 5 +--- block/blk-mq.c | 3 --- block/blk-mq.h | 7 ------ 4 files changed, 1 insertion(+), 68 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 409a8256d9ffa..928a16af91758 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -663,57 +663,6 @@ CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT); CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ); CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL); -static int ctx_dispatched_show(void *data, struct seq_file *m) -{ - struct blk_mq_ctx *ctx = data; - - seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]); - return 0; -} - -static ssize_t ctx_dispatched_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_ctx *ctx = data; - - ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0; - return count; -} - -static int ctx_merged_show(void *data, struct seq_file *m) -{ - struct blk_mq_ctx *ctx = data; - - seq_printf(m, "%lu\n", ctx->rq_merged); - return 0; -} - -static ssize_t ctx_merged_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_ctx *ctx = data; - - ctx->rq_merged = 0; - return count; -} - -static int ctx_completed_show(void *data, struct seq_file *m) -{ - struct blk_mq_ctx *ctx = data; - - seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]); - return 0; -} - -static ssize_t ctx_completed_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_ctx *ctx = data; - - ctx->rq_completed[0] = ctx->rq_completed[1] = 0; - return count; -} - static int blk_mq_debugfs_show(struct seq_file *m, void *v) { const struct blk_mq_debugfs_attr *attr = m->private; @@ -803,9 +752,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops}, {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops}, {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops}, - {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write}, - {"merged", 0600, ctx_merged_show, ctx_merged_write}, - {"completed", 0600, ctx_completed_show, ctx_completed_write}, {}, }; diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index efc1374b88318..e85b7556b0961 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -387,13 +387,10 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, * potentially merge with. Currently includes a hand-wavy stop * count of 8, to not spend too much time checking for merges. 
*/ - if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { - ctx->rq_merged++; + if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) ret = true; - } spin_unlock(&ctx->lock); - return ret; } diff --git a/block/blk-mq.c b/block/blk-mq.c index 633d735807124..990d214a76581 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -359,7 +359,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->end_io = NULL; rq->end_io_data = NULL; - data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; blk_crypto_rq_set_defaults(rq); INIT_LIST_HEAD(&rq->queuelist); /* tag was already set */ @@ -595,7 +594,6 @@ static void __blk_mq_free_request(struct request *rq) void blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; - struct blk_mq_ctx *ctx = rq->mq_ctx; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) { @@ -609,7 +607,6 @@ void blk_mq_free_request(struct request *rq) } } - ctx->rq_completed[rq_is_sync(rq)]++; if (rq->rq_flags & RQF_MQ_INFLIGHT) __blk_mq_dec_active_requests(hctx); diff --git a/block/blk-mq.h b/block/blk-mq.h index 8be447995106c..1b91a3fdaa018 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -25,13 +25,6 @@ struct blk_mq_ctx { unsigned short index_hw[HCTX_MAX_TYPES]; struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; - /* incremented at dispatch time */ - unsigned long rq_dispatched[2]; - unsigned long rq_merged; - - /* incremented at completion time */ - unsigned long ____cacheline_aligned_in_smp rq_completed[2]; - struct request_queue *queue; struct blk_mq_ctxs *ctxs; struct kobject kobj; From afd7de03c5268f74202c1dd4780a8532a11f4c6b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 08:53:19 -0600 Subject: [PATCH 100/205] block: remove some blk_mq_hw_ctx debugfs entries Just like the blk_mq_ctx counterparts, we've got a bunch of counters in here that are only for debugfs and are of questionnable value. They are: - dispatched, index of how many requests were dispatched in one go - poll_{considered,invoked,success}, which track poll sucess rates. We're confident in the iopoll implementation at this point, don't bother tracking these. As a bonus, this shrinks each hardware queue from 576 bytes to 512 bytes, dropping a whole cacheline. 
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 67 ------------------------------------------ block/blk-mq.c | 16 ---------- include/linux/blk-mq.h | 10 ------- 3 files changed, 93 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 928a16af91758..68ca5d21cda77 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -529,70 +529,6 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m) return res; } -static int hctx_io_poll_show(void *data, struct seq_file *m) -{ - struct blk_mq_hw_ctx *hctx = data; - - seq_printf(m, "considered=%lu\n", hctx->poll_considered); - seq_printf(m, "invoked=%lu\n", hctx->poll_invoked); - seq_printf(m, "success=%lu\n", hctx->poll_success); - return 0; -} - -static ssize_t hctx_io_poll_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_hw_ctx *hctx = data; - - hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0; - return count; -} - -static int hctx_dispatched_show(void *data, struct seq_file *m) -{ - struct blk_mq_hw_ctx *hctx = data; - int i; - - seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]); - - for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) { - unsigned int d = 1U << (i - 1); - - seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]); - } - - seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]); - return 0; -} - -static ssize_t hctx_dispatched_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_hw_ctx *hctx = data; - int i; - - for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) - hctx->dispatched[i] = 0; - return count; -} - -static int hctx_queued_show(void *data, struct seq_file *m) -{ - struct blk_mq_hw_ctx *hctx = data; - - seq_printf(m, "%lu\n", hctx->queued); - return 0; -} - -static ssize_t hctx_queued_write(void *data, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct blk_mq_hw_ctx *hctx = data; - - hctx->queued = 0; - return count; -} - static int hctx_run_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; @@ -738,9 +674,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = { {"tags_bitmap", 0400, hctx_tags_bitmap_show}, {"sched_tags", 0400, hctx_sched_tags_show}, {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show}, - {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write}, - {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write}, - {"queued", 0600, hctx_queued_show, hctx_queued_write}, {"run", 0600, hctx_run_show, hctx_run_write}, {"active", 0400, hctx_active_show}, {"dispatch_busy", 0400, hctx_dispatch_busy_show}, diff --git a/block/blk-mq.c b/block/blk-mq.c index 990d214a76581..bd241fd7ee498 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -382,7 +382,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, } } - data->hctx->queued++; return rq; } @@ -1301,14 +1300,6 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, return data.rq; } -static inline unsigned int queued_to_index(unsigned int queued) -{ - if (!queued) - return 0; - - return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1); -} - static bool __blk_mq_get_driver_tag(struct request *rq) { struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags; @@ -1632,8 +1623,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, if (!list_empty(&zone_list)) list_splice_tail_init(&zone_list, list); - 
hctx->dispatched[queued_to_index(queued)]++; - /* If we didn't flush the entire list, we could have told the driver * there was more coming, but that turned out to be a lie. */ @@ -4200,14 +4189,9 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, long state = get_current_state(); int ret; - hctx->poll_considered++; - do { - hctx->poll_invoked++; - ret = q->mq_ops->poll(hctx); if (ret > 0) { - hctx->poll_success++; __set_current_state(TASK_RUNNING); return ret; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 95c3bd3a008e5..9fb8618fb957d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -341,9 +341,6 @@ struct blk_mq_hw_ctx { unsigned long queued; /** @run: Number of dispatched requests. */ unsigned long run; -#define BLK_MQ_MAX_DISPATCH_ORDER 7 - /** @dispatched: Number of dispatch requests by queue. */ - unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; /** @numa_node: NUMA node the storage adapter has been connected to. */ unsigned int numa_node; @@ -363,13 +360,6 @@ struct blk_mq_hw_ctx { /** @kobj: Kernel object for sysfs. */ struct kobject kobj; - /** @poll_considered: Count times blk_mq_poll() was called. */ - unsigned long poll_considered; - /** @poll_invoked: Count how many requests blk_mq_poll() polled. */ - unsigned long poll_invoked; - /** @poll_success: Count how many polled requests were completed. */ - unsigned long poll_success; - #ifdef CONFIG_BLK_DEBUG_FS /** * @debugfs_dir: debugfs directory for this hardware queue. Named From 013a7f95438144f4ab39a1017a0bff2765d2551a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 13 Oct 2021 07:58:52 -0600 Subject: [PATCH 101/205] block: provide helpers for rq_list manipulation Instead of open-coding the list additions, traversal, and removal, provide a basic set of helpers. 
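For readers unfamiliar with the helpers, the following stand-alone C sketch mirrors their shape on a mock request type and shows the intended usage (push, iterate, pop). The mock struct and the simplified rq_list_for_each are assumptions made for the example; the real macros added by this patch operate on struct request in blkdev.h. The pop helper uses a GNU C statement expression, as the kernel macro does.

  #include <stddef.h>
  #include <stdio.h>

  /* Mock request: only the intrusive next link and a tag for printing. */
  struct mock_rq {
          struct mock_rq *rq_next;
          int tag;
  };

  #define rq_list_add(listptr, rq) do {           \
          (rq)->rq_next = *(listptr);             \
          *(listptr) = rq;                        \
  } while (0)

  #define rq_list_pop(listptr)                    \
  ({                                              \
          struct mock_rq *__req = NULL;           \
          if ((listptr) && *(listptr)) {          \
                  __req = *(listptr);             \
                  *(listptr) = __req->rq_next;    \
          }                                       \
          __req;                                  \
  })

  #define rq_list_next(rq)        ((rq)->rq_next)
  #define rq_list_empty(list)     ((list) == NULL)
  #define rq_list_for_each(listptr, pos)          \
          for (pos = *(listptr); pos; pos = rq_list_next(pos))

  int main(void)
  {
          struct mock_rq a = { NULL, 1 }, b = { NULL, 2 }, c = { NULL, 3 };
          struct mock_rq *list = NULL, *rq;

          rq_list_add(&list, &a);                 /* list: 1 */
          rq_list_add(&list, &b);                 /* list: 2 -> 1 */
          rq_list_add(&list, &c);                 /* list: 3 -> 2 -> 1 */

          rq_list_for_each(&list, rq)
                  printf("tag %d\n", rq->tag);

          while ((rq = rq_list_pop(&list)) != NULL)
                  ;                               /* drains in LIFO order */

          printf("empty: %d\n", rq_list_empty(list));
          return 0;
  }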
Suggested-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq.c | 19 +++++-------------- include/linux/blkdev.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index bd241fd7ee498..74505b545dd33 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -404,17 +404,11 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data, tag = tag_offset + i; tags &= ~(1UL << i); rq = blk_mq_rq_ctx_init(data, tag, alloc_time_ns); - rq->rq_next = *data->cached_rq; - *data->cached_rq = rq; + rq_list_add(data->cached_rq, rq); } data->nr_tags -= nr; - if (!data->cached_rq) - return NULL; - - rq = *data->cached_rq; - *data->cached_rq = rq->rq_next; - return rq; + return rq_list_pop(data->cached_rq); } static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data) @@ -622,11 +616,9 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request); void blk_mq_free_plug_rqs(struct blk_plug *plug) { - while (plug->cached_rq) { - struct request *rq; + struct request *rq; - rq = plug->cached_rq; - plug->cached_rq = rq->rq_next; + while ((rq = rq_list_pop(&plug->cached_rq)) != NULL) { percpu_ref_get(&rq->q->q_usage_counter); blk_mq_free_request(rq); } @@ -2418,8 +2410,7 @@ void blk_mq_submit_bio(struct bio *bio) plug = blk_mq_plug(q, bio); if (plug && plug->cached_rq) { - rq = plug->cached_rq; - plug->cached_rq = rq->rq_next; + rq = rq_list_pop(&plug->cached_rq); INIT_LIST_HEAD(&rq->queuelist); } else { struct blk_mq_alloc_data data = { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d5b21fc8f49e5..b0a3221729655 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1298,4 +1298,33 @@ int fsync_bdev(struct block_device *bdev); int freeze_bdev(struct block_device *bdev); int thaw_bdev(struct block_device *bdev); +#define rq_list_add(listptr, rq) do { \ + (rq)->rq_next = *(listptr); \ + *(listptr) = rq; \ +} while (0) + +#define rq_list_pop(listptr) \ +({ \ + struct request *__req = NULL; \ + if ((listptr) && *(listptr)) { \ + __req = *(listptr); \ + *(listptr) = __req->rq_next; \ + } \ + __req; \ +}) + +#define rq_list_peek(listptr) \ +({ \ + struct request *__req = NULL; \ + if ((listptr) && *(listptr)) \ + __req = *(listptr); \ + __req; \ +}) + +#define rq_list_for_each(listptr, pos) \ + for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) \ + +#define rq_list_next(rq) (rq)->rq_next +#define rq_list_empty(list) ((list) == (struct request *) NULL) + #endif /* _LINUX_BLKDEV_H */ From 5a72e899ceb465d731c413d57c6c12cdbf88303c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 12 Oct 2021 09:24:29 -0600 Subject: [PATCH 102/205] block: add a struct io_comp_batch argument to fops->iopoll() struct io_comp_batch contains a list head and a completion handler, which will allow completions to more effciently completed batches of IO. For now, no functional changes in this patch, we just define the io_comp_batch structure and add the argument to the file_operations iopoll handler. 
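Before the diff, a brief orientation may help: the caller owns a small batch object carrying a list head plus a completion callback, hands it down through ->iopoll(), and the lower layer can either complete work inline or park it on the batch for the caller to flush in one go. The stand-alone C sketch below illustrates only that plumbing shape; struct comp_batch, poll_hw and the item type are invented for the example.

  #include <stddef.h>
  #include <stdio.h>

  struct item {
          struct item *next;
          int id;
  };

  /* Caller-owned batch: a list head plus a completion handler. */
  struct comp_batch {
          struct item *list;
          void (*complete)(struct comp_batch *);
  };

  static void flush_batch(struct comp_batch *b)
  {
          struct item *it;

          while ((it = b->list) != NULL) {
                  b->list = it->next;
                  printf("batched completion of item %d\n", it->id);
          }
  }

  /* Lower-layer poll hook: parks finished items if a batch was passed in. */
  static int poll_hw(struct comp_batch *b, struct item *done, int nr)
  {
          int i;

          for (i = 0; i < nr; i++) {
                  if (b) {
                          done[i].next = b->list;
                          b->list = &done[i];
                          b->complete = flush_batch;
                  } else {
                          printf("inline completion of item %d\n", done[i].id);
                  }
          }
          return nr;
  }

  int main(void)
  {
          struct item done[3] = { { NULL, 10 }, { NULL, 11 }, { NULL, 12 } };
          struct comp_batch iob = { NULL, NULL };

          poll_hw(&iob, done, 3);
          if (iob.list)
                  iob.complete(&iob);     /* caller flushes the whole batch */
          return 0;
  }

Passing NULL for the batch keeps the inline behaviour, which is why this patch has no functional change on its own.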
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-core.c | 9 +++++---- block/blk-exec.c | 2 +- block/blk-mq.c | 9 +++++---- block/blk-mq.h | 3 ++- block/fops.c | 4 ++-- drivers/block/rnbd/rnbd-clt.c | 2 +- drivers/nvme/host/pci.c | 4 ++-- drivers/nvme/host/rdma.c | 2 +- drivers/nvme/host/tcp.c | 2 +- drivers/scsi/scsi_lib.c | 2 +- fs/io_uring.c | 2 +- fs/iomap/direct-io.c | 2 +- include/linux/blk-mq.h | 2 +- include/linux/blkdev.h | 13 +++++++++++-- include/linux/fs.h | 4 +++- mm/page_io.c | 2 +- 16 files changed, 39 insertions(+), 25 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index 20b6cc06461a6..d0c2e11411d03 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1078,7 +1078,7 @@ EXPORT_SYMBOL(submit_bio); * Note: the caller must either be the context that submitted @bio, or * be in a RCU critical section to prevent freeing of @bio. */ -int bio_poll(struct bio *bio, unsigned int flags) +int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) { struct request_queue *q = bio->bi_bdev->bd_disk->queue; blk_qc_t cookie = READ_ONCE(bio->bi_cookie); @@ -1096,7 +1096,7 @@ int bio_poll(struct bio *bio, unsigned int flags) if (WARN_ON_ONCE(!queue_is_mq(q))) ret = 0; /* not yet implemented, should not happen */ else - ret = blk_mq_poll(q, cookie, flags); + ret = blk_mq_poll(q, cookie, iob, flags); blk_queue_exit(q); return ret; } @@ -1106,7 +1106,8 @@ EXPORT_SYMBOL_GPL(bio_poll); * Helper to implement file_operations.iopoll. Requires the bio to be stored * in iocb->private, and cleared before freeing the bio. */ -int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags) +int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob, + unsigned int flags) { struct bio *bio; int ret = 0; @@ -1134,7 +1135,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags) rcu_read_lock(); bio = READ_ONCE(kiocb->private); if (bio && bio->bi_bdev) - ret = bio_poll(bio, flags); + ret = bio_poll(bio, iob, flags); rcu_read_unlock(); return ret; diff --git a/block/blk-exec.c b/block/blk-exec.c index 55f0cd34b37b9..1b8b47f6e79bb 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -77,7 +77,7 @@ static bool blk_rq_is_poll(struct request *rq) static void blk_rq_poll_completion(struct request *rq, struct completion *wait) { do { - bio_poll(rq->bio, 0); + bio_poll(rq->bio, NULL, 0); cond_resched(); } while (!completion_done(wait)); } diff --git a/block/blk-mq.c b/block/blk-mq.c index 74505b545dd33..79c25b64e8b09 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4174,14 +4174,14 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc) } static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, - unsigned int flags) + struct io_comp_batch *iob, unsigned int flags) { struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie); long state = get_current_state(); int ret; do { - ret = q->mq_ops->poll(hctx); + ret = q->mq_ops->poll(hctx, iob); if (ret > 0) { __set_current_state(TASK_RUNNING); return ret; @@ -4201,14 +4201,15 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie, return 0; } -int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags) +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, + unsigned int flags) { if (!(flags & BLK_POLL_NOSLEEP) && q->poll_nsec != BLK_MQ_POLL_CLASSIC) { if (blk_mq_poll_hybrid(q, cookie)) return 1; } - return blk_mq_poll_classic(q, cookie, flags); + return blk_mq_poll_classic(q, cookie, iob, flags); } 
unsigned int blk_mq_rq_cpu(struct request *rq) diff --git a/block/blk-mq.h b/block/blk-mq.h index 1b91a3fdaa018..ebf67f4d4f2ed 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -31,7 +31,8 @@ struct blk_mq_ctx { } ____cacheline_aligned_in_smp; void blk_mq_submit_bio(struct bio *bio); -int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags); +int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob, + unsigned int flags); void blk_mq_exit_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); diff --git a/block/fops.c b/block/fops.c index 1d4f862950bbb..2c43e493e37c6 100644 --- a/block/fops.c +++ b/block/fops.c @@ -105,7 +105,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb, set_current_state(TASK_UNINTERRUPTIBLE); if (!READ_ONCE(bio.bi_private)) break; - if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0)) + if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); @@ -291,7 +291,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, if (!READ_ONCE(dio->waiter)) break; - if (!do_poll || !bio_poll(bio, 0)) + if (!do_poll || !bio_poll(bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index bd4a41afbbfc9..0ec0191d41969 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1176,7 +1176,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx, return ret; } -static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx) +static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct rnbd_queue *q = hctx->driver_data; struct rnbd_clt_dev *dev = q->dev; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 896328271471b..bb0482dfab3c0 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1092,7 +1092,7 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } -static int nvme_poll(struct blk_mq_hw_ctx *hctx) +static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_queue *nvmeq = hctx->driver_data; bool found; @@ -1274,7 +1274,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) * Did we miss an interrupt? 
*/ if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) - nvme_poll(req->mq_hctx); + nvme_poll(req->mq_hctx, NULL); else nvme_poll_irqdisable(nvmeq); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 40317e1b91835..1624da3702d43 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -2106,7 +2106,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, return ret; } -static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx) +static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_rdma_queue *queue = hctx->driver_data; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 3c1c29dd30207..9ce3458ee1dd1 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2429,7 +2429,7 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set) return 0; } -static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx) +static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 33fd9a01330ce..30f7d0b4eb732 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1784,7 +1784,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, } -static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx) +static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct Scsi_Host *shost = hctx->driver_data; diff --git a/fs/io_uring.c b/fs/io_uring.c index c5066146b8de3..cd77a137f2d86 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2483,7 +2483,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) break; - ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags); + ret = kiocb->ki_filp->f_op->iopoll(kiocb, NULL, poll_flags); if (unlikely(ret < 0)) return ret; else if (ret) diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 8efab177011dd..83ecfba53abe1 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -630,7 +630,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, break; if (!dio->submit.poll_bio || - !bio_poll(dio->submit.poll_bio, 0)) + !bio_poll(dio->submit.poll_bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 9fb8618fb957d..4c79439af2f29 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -532,7 +532,7 @@ struct blk_mq_ops { /** * @poll: Called to poll for completion of a specific tag. */ - int (*poll)(struct blk_mq_hw_ctx *); + int (*poll)(struct blk_mq_hw_ctx *, struct io_comp_batch *); /** * @complete: Mark the request as complete. 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b0a3221729655..fd9771a1da096 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -569,8 +569,9 @@ blk_status_t errno_to_blk_status(int errno); #define BLK_POLL_ONESHOT (1 << 0) /* do not sleep to wait for the expected completion time */ #define BLK_POLL_NOSLEEP (1 << 1) -int bio_poll(struct bio *bio, unsigned int flags); -int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags); +int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags); +int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob, + unsigned int flags); static inline struct request_queue *bdev_get_queue(struct block_device *bdev) { @@ -1298,6 +1299,14 @@ int fsync_bdev(struct block_device *bdev); int freeze_bdev(struct block_device *bdev); int thaw_bdev(struct block_device *bdev); +struct io_comp_batch { + struct request *req_list; + bool need_ts; + void (*complete)(struct io_comp_batch *); +}; + +#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { } + #define rq_list_add(listptr, rq) do { \ (rq)->rq_next = *(listptr); \ *(listptr) = rq; \ diff --git a/include/linux/fs.h b/include/linux/fs.h index f595f4097cb75..31029a91f440e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -48,6 +48,7 @@ struct backing_dev_info; struct bdi_writeback; struct bio; +struct io_comp_batch; struct export_operations; struct fiemap_extent_info; struct hd_geometry; @@ -2071,7 +2072,8 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); - int (*iopoll)(struct kiocb *kiocb, unsigned int flags); + int (*iopoll)(struct kiocb *kiocb, struct io_comp_batch *, + unsigned int flags); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); diff --git a/mm/page_io.c b/mm/page_io.c index a68faab5b3105..6010fb07f2319 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -424,7 +424,7 @@ int swap_readpage(struct page *page, bool synchronous) if (!READ_ONCE(bio->bi_private)) break; - if (!bio_poll(bio, 0)) + if (!bio_poll(bio, NULL, 0)) blk_io_schedule(); } __set_current_state(TASK_RUNNING); From 1aec5e4a2962f7e0b3fb3e7308dd726be2472c26 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 8 Oct 2021 05:44:23 -0600 Subject: [PATCH 103/205] sbitmap: add helper to clear a batch of tags sbitmap currently only supports clearing tags one-by-one, add a helper that allows the caller to pass in an array of tags to clear. Signed-off-by: Jens Axboe --- include/linux/sbitmap.h | 11 +++++++++++ lib/sbitmap.c | 44 ++++++++++++++++++++++++++++++++++++++--- 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index e30b56023eadf..4a6ff274335a2 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -528,6 +528,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq, void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu); +/** + * sbitmap_queue_clear_batch() - Free a batch of allocated bits + * &struct sbitmap_queue. + * @sbq: Bitmap to free from. 
+ * @offset: offset for each tag in array + * @tags: array of tags + * @nr_tags: number of tags in array + */ +void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, + int *tags, int nr_tags); + static inline int sbq_index_inc(int index) { return (index + 1) & (SBQ_WAIT_QUEUES - 1); diff --git a/lib/sbitmap.c b/lib/sbitmap.c index f398e0ae548e1..c6e2f1f2c4d23 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -628,6 +628,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq) } EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up); +static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag) +{ + if (likely(!sb->round_robin && tag < sb->depth)) + *per_cpu_ptr(sb->alloc_hint, cpu) = tag; +} + +void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset, + int *tags, int nr_tags) +{ + struct sbitmap *sb = &sbq->sb; + unsigned long *addr = NULL; + unsigned long mask = 0; + int i; + + smp_mb__before_atomic(); + for (i = 0; i < nr_tags; i++) { + const int tag = tags[i] - offset; + unsigned long *this_addr; + + /* since we're clearing a batch, skip the deferred map */ + this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word; + if (!addr) { + addr = this_addr; + } else if (addr != this_addr) { + atomic_long_andnot(mask, (atomic_long_t *) addr); + mask = 0; + addr = this_addr; + } + mask |= (1UL << SB_NR_TO_BIT(sb, tag)); + } + + if (mask) + atomic_long_andnot(mask, (atomic_long_t *) addr); + + smp_mb__after_atomic(); + sbitmap_queue_wake_up(sbq); + sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(), + tags[nr_tags - 1] - offset); +} + void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, unsigned int cpu) { @@ -652,9 +692,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, */ smp_mb__after_atomic(); sbitmap_queue_wake_up(sbq); - - if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth)) - *per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr; + sbitmap_update_cpu_hint(&sbq->sb, cpu, nr); } EXPORT_SYMBOL_GPL(sbitmap_queue_clear); From f794f3351f2672d782b8df0fa59f3cef38cffa59 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 8 Oct 2021 05:50:46 -0600 Subject: [PATCH 104/205] block: add support for blk_mq_end_request_batch() Instead of calling blk_mq_end_request() on a single request, add a helper that takes the new struct io_comp_batch and completes any request stored in there. 
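The shape of that helper is a single walk over the batch list that does the per-request bookkeeping and hands tags back in chunks, flushing whenever the owning hardware queue changes or a small on-stack array fills up. A simplified stand-alone C sketch of the accumulate-and-flush loop follows; struct hwq, flush_tag_batch and the list layout are stand-ins for the example, not kernel code.

  #include <stdio.h>

  #define TAG_COMP_BATCH 4

  struct hwq { int id; };

  struct req {
          struct req *next;
          struct hwq *hctx;
          int tag;
  };

  /* Stand-in for returning a chunk of tags to one hardware queue. */
  static void flush_tag_batch(struct hwq *hctx, int *tags, int nr)
  {
          printf("hwq %d: freeing %d tag(s)\n", hctx->id, nr);
  }

  static void end_request_batch(struct req *list)
  {
          int tags[TAG_COMP_BATCH], nr_tags = 0;
          struct hwq *last_hctx = NULL;
          struct req *rq;

          for (rq = list; rq; rq = rq->next) {
                  /* per-request completion and accounting would go here */
                  if (nr_tags == TAG_COMP_BATCH ||
                      (last_hctx && last_hctx != rq->hctx)) {
                          flush_tag_batch(last_hctx, tags, nr_tags);
                          nr_tags = 0;
                  }
                  tags[nr_tags++] = rq->tag;
                  last_hctx = rq->hctx;
          }
          if (nr_tags)
                  flush_tag_batch(last_hctx, tags, nr_tags);
  }

  int main(void)
  {
          struct hwq q0 = { 0 }, q1 = { 1 };
          struct req r2 = { NULL, &q1, 7 };
          struct req r1 = { &r2,  &q0, 3 };
          struct req r0 = { &r1,  &q0, 1 };

          end_request_batch(&r0);     /* frees {1,3} on hwq 0, {7} on hwq 1 */
          return 0;
  }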
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- block/blk-mq-tag.c | 6 ++++ block/blk-mq-tag.h | 1 + block/blk-mq.c | 82 ++++++++++++++++++++++++++++++++---------- include/linux/blk-mq.h | 29 +++++++++++++++ 4 files changed, 99 insertions(+), 19 deletions(-) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index c43b972011617..b94c3e8ef3924 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -207,6 +207,12 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, } } +void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags) +{ + sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags, + tag_array, nr_tags); +} + struct bt_iter_data { struct blk_mq_hw_ctx *hctx; busy_iter_fn *fn; diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 71c2f7d8e9b74..78ae2fb8e2a4b 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -42,6 +42,7 @@ unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags, unsigned int *offset); extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, unsigned int tag); +void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags); extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags **tags, unsigned int depth, bool can_grow); diff --git a/block/blk-mq.c b/block/blk-mq.c index 79c25b64e8b09..9248edd8a7d3b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -300,15 +300,6 @@ void blk_mq_wake_waiters(struct request_queue *q) blk_mq_tag_wakeup_all(hctx->tags, true); } -/* - * Only need start/end time stamping if we have iostat or - * blk stats enabled, or using an IO scheduler. - */ -static inline bool blk_mq_need_time_stamp(struct request *rq) -{ - return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); -} - static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, unsigned int tag, u64 alloc_time_ns) { @@ -768,19 +759,21 @@ bool blk_update_request(struct request *req, blk_status_t error, } EXPORT_SYMBOL_GPL(blk_update_request); -inline void __blk_mq_end_request(struct request *rq, blk_status_t error) +static inline void __blk_mq_end_request_acct(struct request *rq, u64 now) { - if (blk_mq_need_time_stamp(rq)) { - u64 now = ktime_get_ns(); + if (rq->rq_flags & RQF_STATS) { + blk_mq_poll_stats_start(rq->q); + blk_stat_add(rq, now); + } - if (rq->rq_flags & RQF_STATS) { - blk_mq_poll_stats_start(rq->q); - blk_stat_add(rq, now); - } + blk_mq_sched_completed_request(rq, now); + blk_account_io_done(rq, now); +} - blk_mq_sched_completed_request(rq, now); - blk_account_io_done(rq, now); - } +inline void __blk_mq_end_request(struct request *rq, blk_status_t error) +{ + if (blk_mq_need_time_stamp(rq)) + __blk_mq_end_request_acct(rq, ktime_get_ns()); if (rq->end_io) { rq_qos_done(rq->q, rq); @@ -799,6 +792,57 @@ void blk_mq_end_request(struct request *rq, blk_status_t error) } EXPORT_SYMBOL(blk_mq_end_request); +#define TAG_COMP_BATCH 32 + +static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx, + int *tag_array, int nr_tags) +{ + struct request_queue *q = hctx->queue; + + blk_mq_put_tags(hctx->tags, tag_array, nr_tags); + percpu_ref_put_many(&q->q_usage_counter, nr_tags); +} + +void blk_mq_end_request_batch(struct io_comp_batch *iob) +{ + int tags[TAG_COMP_BATCH], nr_tags = 0; + struct blk_mq_hw_ctx *last_hctx = NULL; + struct request *rq; + u64 now = 0; + + if (iob->need_ts) + now = ktime_get_ns(); + + while ((rq = rq_list_pop(&iob->req_list)) != NULL) { + prefetch(rq->bio); + 
prefetch(rq->rq_next); + + blk_update_request(rq, BLK_STS_OK, blk_rq_bytes(rq)); + if (iob->need_ts) + __blk_mq_end_request_acct(rq, now); + + WRITE_ONCE(rq->state, MQ_RQ_IDLE); + if (!refcount_dec_and_test(&rq->ref)) + continue; + + blk_crypto_free_request(rq); + blk_pm_mark_last_busy(rq); + rq_qos_done(rq->q, rq); + + if (nr_tags == TAG_COMP_BATCH || + (last_hctx && last_hctx != rq->mq_hctx)) { + blk_mq_flush_tag_batch(last_hctx, tags, nr_tags); + nr_tags = 0; + } + tags[nr_tags++] = rq->tag; + last_hctx = rq->mq_hctx; + } + + if (nr_tags) + blk_mq_flush_tag_batch(last_hctx, tags, nr_tags); +} +EXPORT_SYMBOL_GPL(blk_mq_end_request_batch); + static void blk_complete_reqs(struct llist_head *list) { struct llist_node *entry = llist_reverse_order(llist_del_all(list)); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 4c79439af2f29..656fe34bdb6cd 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -728,6 +728,35 @@ static inline void blk_mq_set_request_complete(struct request *rq) void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, blk_status_t error); void __blk_mq_end_request(struct request *rq, blk_status_t error); +void blk_mq_end_request_batch(struct io_comp_batch *ib); + +/* + * Only need start/end time stamping if we have iostat or + * blk stats enabled, or using an IO scheduler. + */ +static inline bool blk_mq_need_time_stamp(struct request *rq) +{ + return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV)); +} + +/* + * Batched completions only work when there is no I/O error and no special + * ->end_io handler. + */ +static inline bool blk_mq_add_to_batch(struct request *req, + struct io_comp_batch *iob, int ioerror, + void (*complete)(struct io_comp_batch *)) +{ + if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror) + return false; + if (!iob->complete) + iob->complete = complete; + else if (iob->complete != complete) + return false; + iob->need_ts |= blk_mq_need_time_stamp(req); + rq_list_add(&iob->req_list, req); + return true; +} void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); From c234a65392062504acf04afe0ae404cca61a8e1a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 8 Oct 2021 05:59:37 -0600 Subject: [PATCH 105/205] nvme: add support for batched completion of polled IO Take advantage of struct io_comp_batch, if passed in to the nvme poll handler. If it's set, rather than complete each request individually inline, store them in the io_comp_batch list. We only do so for requests that will complete successfully, anything else will be completed inline as before. 
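The filter that decides inline versus batched completion is the interesting part: only requests that finished without error and have no special per-request end_io handler may be deferred, everything else falls back to the existing path. A minimal stand-alone C sketch of that branch follows; the names (add_to_batch, complete_inline) and fields are invented for illustration.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  struct req {
          struct req *next;
          int id;
          int error;                      /* nonzero: must not be batched */
          void (*end_io)(struct req *);   /* private handler: must not be batched */
  };

  struct batch { struct req *list; };

  /* Defer only requests that completed cleanly and need no special handling. */
  static bool add_to_batch(struct req *rq, struct batch *b)
  {
          if (!b || rq->error || rq->end_io)
                  return false;
          rq->next = b->list;
          b->list = rq;
          return true;
  }

  static void complete_inline(struct req *rq)
  {
          printf("req %d completed inline (error=%d)\n", rq->id, rq->error);
  }

  int main(void)
  {
          struct batch iob = { NULL };
          struct req ok  = { NULL, 1,  0, NULL };
          struct req bad = { NULL, 2, -5, NULL };
          struct req *rq;

          if (!add_to_batch(&ok, &iob))
                  complete_inline(&ok);
          if (!add_to_batch(&bad, &iob))
                  complete_inline(&bad);

          for (rq = iob.list; rq; rq = rq->next)
                  printf("req %d completed from the batch\n", rq->id);
          return 0;
  }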
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/core.c | 17 ++++++++++++++--- drivers/nvme/host/nvme.h | 14 ++++++++++++++ drivers/nvme/host/pci.c | 32 +++++++++++++++++++++++--------- 3 files changed, 51 insertions(+), 12 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index ae15cb7145963..3109bdf137e4b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -346,15 +346,19 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) return RETRY; } -static inline void nvme_end_req(struct request *req) +static inline void nvme_end_req_zoned(struct request *req) { - blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && req_op(req) == REQ_OP_ZONE_APPEND) req->__sector = nvme_lba_to_sect(req->q->queuedata, le64_to_cpu(nvme_req(req)->result.u64)); +} + +static inline void nvme_end_req(struct request *req) +{ + blk_status_t status = nvme_error_status(nvme_req(req)->status); + nvme_end_req_zoned(req); nvme_trace_bio_complete(req); blk_mq_end_request(req, status); } @@ -381,6 +385,13 @@ void nvme_complete_rq(struct request *req) } EXPORT_SYMBOL_GPL(nvme_complete_rq); +void nvme_complete_batch_req(struct request *req) +{ + nvme_cleanup_cmd(req); + nvme_end_req_zoned(req); +} +EXPORT_SYMBOL_GPL(nvme_complete_batch_req); + /* * Called to unwind from ->queue_rq on a failed command submission so that the * multipathing code gets called to potentially failover to another path. diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ed79a6c7e8043..ef2467b93adbc 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -638,6 +638,20 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id) } void nvme_complete_rq(struct request *req); +void nvme_complete_batch_req(struct request *req); + +static __always_inline void nvme_complete_batch(struct io_comp_batch *iob, + void (*fn)(struct request *rq)) +{ + struct request *req; + + rq_list_for_each(&iob->req_list, req) { + fn(req); + nvme_complete_batch_req(req); + } + blk_mq_end_request_batch(iob); +} + blk_status_t nvme_host_path_error(struct request *req); bool nvme_cancel_request(struct request *req, void *data, bool reserved); void nvme_cancel_tagset(struct nvme_ctrl *ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bb0482dfab3c0..83d3503d5b884 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -959,7 +959,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, return ret; } -static void nvme_pci_complete_rq(struct request *req) +static __always_inline void nvme_pci_unmap_rq(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct nvme_dev *dev = iod->nvmeq->dev; @@ -969,9 +969,19 @@ static void nvme_pci_complete_rq(struct request *req) rq_integrity_vec(req)->bv_len, rq_data_dir(req)); if (blk_rq_nr_phys_segments(req)) nvme_unmap_data(dev, req); +} + +static void nvme_pci_complete_rq(struct request *req) +{ + nvme_pci_unmap_rq(req); nvme_complete_rq(req); } +static void nvme_pci_complete_batch(struct io_comp_batch *iob) +{ + nvme_complete_batch(iob, nvme_pci_unmap_rq); +} + /* We read the CQE phase first to check if the rest of the entry is valid */ static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) { @@ -996,7 +1006,8 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; } -static inline void 
nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) +static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, + struct io_comp_batch *iob, u16 idx) { struct nvme_completion *cqe = &nvmeq->cqes[idx]; __u16 command_id = READ_ONCE(cqe->command_id); @@ -1023,7 +1034,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) } trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); - if (!nvme_try_complete_req(req, cqe->status, cqe->result)) + if (!nvme_try_complete_req(req, cqe->status, cqe->result) && + !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + nvme_pci_complete_batch)) nvme_pci_complete_rq(req); } @@ -1039,7 +1052,8 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) } } -static inline int nvme_process_cq(struct nvme_queue *nvmeq) +static inline int nvme_poll_cq(struct nvme_queue *nvmeq, + struct io_comp_batch *iob) { int found = 0; @@ -1050,7 +1064,7 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq) * the cqe requires a full read memory barrier */ dma_rmb(); - nvme_handle_cqe(nvmeq, nvmeq->cq_head); + nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); nvme_update_cq_head(nvmeq); } @@ -1063,7 +1077,7 @@ static irqreturn_t nvme_irq(int irq, void *data) { struct nvme_queue *nvmeq = data; - if (nvme_process_cq(nvmeq)) + if (nvme_poll_cq(nvmeq, NULL)) return IRQ_HANDLED; return IRQ_NONE; } @@ -1088,7 +1102,7 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); - nvme_process_cq(nvmeq); + nvme_poll_cq(nvmeq, NULL); enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } @@ -1101,7 +1115,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) return 0; spin_lock(&nvmeq->cq_poll_lock); - found = nvme_process_cq(nvmeq); + found = nvme_poll_cq(nvmeq, iob); spin_unlock(&nvmeq->cq_poll_lock); return found; @@ -1434,7 +1448,7 @@ static void nvme_reap_pending_cqes(struct nvme_dev *dev) for (i = dev->ctrl.queue_count - 1; i > 0; i--) { spin_lock(&dev->queues[i].cq_poll_lock); - nvme_process_cq(&dev->queues[i]); + nvme_poll_cq(&dev->queues[i], NULL); spin_unlock(&dev->queues[i].cq_poll_lock); } } From b688f11e86c9a22169a0e522530982735d2db19b Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 12 Oct 2021 09:28:46 -0600 Subject: [PATCH 106/205] io_uring: utilize the io batching infrastructure for more efficient polled IO Wire up using an io_comp_batch for f_op->iopoll(). If the lower stack supports it, we can handle high rates of polled IO more efficiently. This raises the single core efficiency on my system from ~6.1M IOPS to ~6.6M IOPS running a random read workload at depth 128 on two gen2 Optane drives. 
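As a rough illustration of the caller side, the sketch below hands one batch object to every poll invocation and flushes it a single time after the loop. The types and the fake_iopoll() helper are hypothetical, chosen only to show the control flow.

#include <stdio.h>
#include <stddef.h>

struct batch {
        int nr;                         /* number of deferred completions */
        void (*complete)(struct batch *);
};

static void flush_batch(struct batch *b)
{
        printf("completing %d requests in one pass\n", b->nr);
        b->nr = 0;
}

/* Stand-in for a driver poll hook: reaps 'found' completions into the
 * batch and reports how many it saw. */
static int fake_iopoll(int found, struct batch *b)
{
        b->complete = flush_batch;
        b->nr += found;
        return found;
}

int main(void)
{
        struct batch iob = { 0, NULL };
        int pending[] = { 2, 0, 3 };    /* completions found per poll call */
        int total = 0;

        for (size_t i = 0; i < sizeof(pending) / sizeof(pending[0]); i++)
                total += fake_iopoll(pending[i], &iob);

        if (iob.nr)
                iob.complete(&iob);     /* single flush after the whole loop */
        printf("polled %d completions\n", total);
        return 0;
}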
Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index cd77a137f2d86..d4631a55a6920 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2458,6 +2458,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, { struct io_kiocb *req, *tmp; unsigned int poll_flags = BLK_POLL_NOSLEEP; + DEFINE_IO_COMP_BATCH(iob); LIST_HEAD(done); /* @@ -2483,17 +2484,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (!list_empty(&done)) break; - ret = kiocb->ki_filp->f_op->iopoll(kiocb, NULL, poll_flags); + ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags); if (unlikely(ret < 0)) return ret; else if (ret) poll_flags |= BLK_POLL_ONESHOT; /* iopoll may have completed current req */ - if (READ_ONCE(req->iopoll_completed)) + if (!rq_list_empty(iob.req_list) || + READ_ONCE(req->iopoll_completed)) list_move_tail(&req->inflight_entry, &done); } + if (!rq_list_empty(iob.req_list)) + iob.complete(&iob); if (!list_empty(&done)) io_iopoll_complete(ctx, nr_events, &done); From 4f5022453acd0f7b28012e20b7d048470f129894 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 08:45:39 -0600 Subject: [PATCH 107/205] nvme: wire up completion batching for the IRQ path Trivial to do now, just need our own io_comp_batch on the stack and pass that in to the usual command completion handling. I pondered making this dependent on how many entries we had to process, but even for a single entry there's no discernable difference in performance or latency. Running a sync workload over io_uring: t/io_uring -b512 -d1 -s1 -c1 -p0 -F1 -B1 -n2 /dev/nvme1n1 /dev/nvme2n1 yields the below performance before the patch: IOPS=254820, BW=124MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=251174, BW=122MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=250806, BW=122MiB/s, IOS/call=1/1, inflight=(1 1) and the following after: IOPS=255972, BW=124MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=251920, BW=123MiB/s, IOS/call=1/1, inflight=(1 1) IOPS=251794, BW=122MiB/s, IOS/call=1/1, inflight=(1 1) which definitely isn't slower, about the same if you factor in a bit of variance. For peak performance workloads, benchmarking shows a 2% improvement. Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe --- drivers/nvme/host/pci.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 83d3503d5b884..ed684874842ff 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1076,9 +1076,13 @@ static inline int nvme_poll_cq(struct nvme_queue *nvmeq, static irqreturn_t nvme_irq(int irq, void *data) { struct nvme_queue *nvmeq = data; + DEFINE_IO_COMP_BATCH(iob); - if (nvme_poll_cq(nvmeq, NULL)) + if (nvme_poll_cq(nvmeq, &iob)) { + if (!rq_list_empty(iob.req_list)) + nvme_pci_complete_batch(&iob); return IRQ_HANDLED; + } return IRQ_NONE; } From e0d78afeb8d190164a823d5ef5821b0b3802af33 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Oct 2021 20:54:39 -0600 Subject: [PATCH 108/205] block: fix too broad elevator check in blk_mq_free_request() We added RQF_ELV to tell whether there's an IO scheduler attached, and RQF_ELVPRIV tells us whether there's an IO scheduler with private data attached. Don't check RQF_ELV in blk_mq_free_request(), what we care about here is just if we have scheduler private data attached. 
This fixes a boot crash Fixes: 2ff0682da6e0 ("block: store elevator state in request") Reported-by: Yi Zhang Reported-by: syzbot+eb8104072aeab6cc1195@syzkaller.appspotmail.com Signed-off-by: Jens Axboe --- block/blk-mq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 9248edd8a7d3b..bbc61394eef39 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -580,7 +580,7 @@ void blk_mq_free_request(struct request *rq) struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx = rq->mq_hctx; - if (rq->rq_flags & (RQF_ELVPRIV | RQF_ELV)) { + if (rq->rq_flags & RQF_ELVPRIV) { struct elevator_queue *e = q->elevator; if (e->type->ops.finish_request) From a87acfde949140946456859eafa5f15175d0f960 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 11 Sep 2021 16:04:50 -0600 Subject: [PATCH 109/205] io_uring: dump sqe contents if issue fails I recently had to look at a production problem where a request ended up getting the dreaded -EINVAL error on submit. The most used and hence useless of error codes, as it just tells you that something was wrong with your request, but not more than that. Let's dump the full sqe contents if we run into an issue failure, that'll allow easier diagnosing of a wide variety of issues. Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 ++ include/trace/events/io_uring.h | 61 +++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index d4631a55a6920..5daee36c1f33d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7117,6 +7117,8 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ret = io_init_req(ctx, req, sqe); if (unlikely(ret)) { fail_req: + trace_io_uring_req_failed(sqe, ret); + /* fail even hard links since we don't submit */ if (link->head) { /* diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 0dd30de00e5b4..7346f0164cf49 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -6,6 +6,7 @@ #define _TRACE_IO_URING_H #include +#include struct io_wq_work; @@ -497,6 +498,66 @@ TRACE_EVENT(io_uring_task_run, (unsigned long long) __entry->user_data) ); +/* + * io_uring_req_failed - called when an sqe is errored during submission + * + * @sqe: pointer to the io_uring_sqe that failed + * @error: error it failed with + * + * Allows easier diagnosing of malformed requests in production systems. 
+ */ +TRACE_EVENT(io_uring_req_failed, + + TP_PROTO(const struct io_uring_sqe *sqe, int error), + + TP_ARGS(sqe, error), + + TP_STRUCT__entry ( + __field( u8, opcode ) + __field( u8, flags ) + __field( u8, ioprio ) + __field( u64, off ) + __field( u64, addr ) + __field( u32, len ) + __field( u32, op_flags ) + __field( u64, user_data ) + __field( u16, buf_index ) + __field( u16, personality ) + __field( u32, file_index ) + __field( u64, pad1 ) + __field( u64, pad2 ) + __field( int, error ) + ), + + TP_fast_assign( + __entry->opcode = sqe->opcode; + __entry->flags = sqe->flags; + __entry->ioprio = sqe->ioprio; + __entry->off = sqe->off; + __entry->addr = sqe->addr; + __entry->len = sqe->len; + __entry->op_flags = sqe->rw_flags; + __entry->user_data = sqe->user_data; + __entry->buf_index = sqe->buf_index; + __entry->personality = sqe->personality; + __entry->file_index = sqe->file_index; + __entry->pad1 = sqe->__pad2[0]; + __entry->pad2 = sqe->__pad2[1]; + __entry->error = error; + ), + + TP_printk("op %d, flags=0x%x, prio=%d, off=%llu, addr=%llu, " + "len=%u, rw_flags=0x%x, user_data=0x%llx, buf_index=%d, " + "personality=%d, file_index=%d, pad=0x%llx/%llx, error=%d", + __entry->opcode, __entry->flags, __entry->ioprio, + (unsigned long long)__entry->off, + (unsigned long long) __entry->addr, __entry->len, + __entry->op_flags, (unsigned long long) __entry->user_data, + __entry->buf_index, __entry->personality, __entry->file_index, + (unsigned long long) __entry->pad1, + (unsigned long long) __entry->pad2, __entry->error) +); + #endif /* _TRACE_IO_URING_H */ /* This part must be outside protection */ From 71e1cef2d794338cc7b979d4c6144e1dc12718b5 Mon Sep 17 00:00:00 2001 From: Bixuan Cui Date: Sat, 11 Sep 2021 16:58:47 +0800 Subject: [PATCH 110/205] io-wq: Remove duplicate code in io_workqueue_create() While task_work_add() in io_workqueue_create() is true, then duplicate code is executed: -> clear_bit_unlock(0, &worker->create_state); -> io_worker_release(worker); -> atomic_dec(&acct->nr_running); -> io_worker_ref_put(wq); -> return false; -> clear_bit_unlock(0, &worker->create_state); // back to io_workqueue_create() -> io_worker_release(worker); -> kfree(worker); The io_worker_release() and clear_bit_unlock() are executed twice. 
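The underlying refactor pattern, with the helper owning its cleanup on every path so callers cannot repeat it, can be sketched in plain C as below; the names and the boolean stand-in for task_work_add() are illustrative only.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
        bool create_state;      /* stand-in for the locked bit */
        int refs;
};

static void worker_release(struct worker *w)
{
        if (--w->refs == 0)
                printf("last reference dropped\n");
}

/* On failure the helper clears the state bit and drops its reference
 * itself; on success the queued callback is expected to do so later.
 * Either way the caller never repeats that cleanup. */
static bool queue_worker_create(struct worker *w, bool task_work_ok)
{
        if (task_work_ok) {
                w->create_state = false;        /* unlock, callback owns the rest */
                return true;
        }
        w->create_state = false;                /* unlock */
        worker_release(w);                      /* drop the reference on failure */
        return false;
}

int main(void)
{
        struct worker *w = calloc(1, sizeof(*w));

        w->create_state = true;
        w->refs = 1;
        if (!queue_worker_create(w, false))
                free(w);        /* caller only frees, no duplicated cleanup */
        return 0;
}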
Fixes: 3146cba99aa2 ("io-wq: make worker creation resilient against signals") Signed-off-by: Bixuan Cui Link: https://lore.kernel.org/r/20210911085847.34849-1-cuibixuan@huawei.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io-wq.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index 5bf8aa81715e1..811299ac96848 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -323,8 +323,10 @@ static bool io_queue_worker_create(struct io_worker *worker, init_task_work(&worker->create_work, func); worker->create_index = acct->index; - if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) + if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) { + clear_bit_unlock(0, &worker->create_state); return true; + } clear_bit_unlock(0, &worker->create_state); fail_release: io_worker_release(worker); @@ -716,11 +718,8 @@ static void io_workqueue_create(struct work_struct *work) struct io_worker *worker = container_of(work, struct io_worker, work); struct io_wqe_acct *acct = io_wqe_get_acct(worker); - if (!io_queue_worker_create(worker, acct, create_worker_cont)) { - clear_bit_unlock(0, &worker->create_state); - io_worker_release(worker); + if (!io_queue_worker_create(worker, acct, create_worker_cont)) kfree(worker); - } } static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index) From 4b628aeb69cc49adf56b9c1fde43558143e810f5 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:49 +0100 Subject: [PATCH 111/205] io_uring: kill off ios_left ->ios_left is only used to decide whether to plug or not, kill it to avoid this extra accounting, just use the initial submission number. There is not much difference in terms of enabling plugging, where this one does it in a few more cases, but all major ones should be covered well. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f13993bcf5b477f9a7d52881fc49f9457ea9870a.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5daee36c1f33d..6917f96f0e825 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -315,6 +315,7 @@ struct io_submit_state { unsigned int free_reqs; bool plug_started; + bool need_plug; /* * Batch completion logic @@ -323,8 +324,6 @@ struct io_submit_state { unsigned int compl_nr; /* inline/task_work completion list, under ->uring_lock */ struct list_head free_list; - - unsigned int ios_left; }; struct io_ring_ctx { @@ -7090,10 +7089,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, * Plug now if we have more than 1 IO left after this, and the target * is potentially a read/write to block based storage. 
*/ - if (!state->plug_started && state->ios_left > 1 && - io_op_defs[req->opcode].plug) { + if (state->need_plug && io_op_defs[req->opcode].plug) { blk_start_plug(&state->plug); state->plug_started = true; + state->need_plug = false; } if (io_op_defs[req->opcode].needs_file) { @@ -7102,8 +7101,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if (unlikely(!req->file)) ret = -EBADF; } - - state->ios_left--; return ret; } @@ -7212,7 +7209,7 @@ static void io_submit_state_start(struct io_submit_state *state, unsigned int max_ios) { state->plug_started = false; - state->ios_left = max_ios; + state->need_plug = max_ios > 2; /* set only head, no need to init link_last in advance */ state->link.head = NULL; } From 6b639522f63f82437350038a4925633f769e4ec8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:50 +0100 Subject: [PATCH 112/205] io_uring: inline io_dismantle_req io_dismantle_req() is hot, and not _too_ huge. Inline it, there are 3 call sites, which hopefully will turn into 2 in the future. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/bdd2dc30716cac270c2403e99bccd6286e4ae201.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 6917f96f0e825..43ed9f6821fbe 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1960,7 +1960,7 @@ static inline void io_put_file(struct file *file) fput(file); } -static void io_dismantle_req(struct io_kiocb *req) +static inline void io_dismantle_req(struct io_kiocb *req) { unsigned int flags = req->flags; From d81499bfcd471e6b920683f8fcf06ce65f84286b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:51 +0100 Subject: [PATCH 113/205] io_uring: inline linked part of io_req_find_next Inline part of __io_req_find_next() that returns a request but doesn't need io_disarm_next(). It's just two places, but makes links a bit faster. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4126d13f23d0e91b39b3558e16bd86cafa7fcef2.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 43ed9f6821fbe..10dd3589f8bef 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2074,40 +2074,39 @@ static bool io_disarm_next(struct io_kiocb *req) return posted; } -static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) +static void __io_req_find_next_prep(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + bool posted; + + spin_lock(&ctx->completion_lock); + posted = io_disarm_next(req); + if (posted) + io_commit_cqring(req->ctx); + spin_unlock(&ctx->completion_lock); + if (posted) + io_cqring_ev_posted(ctx); +} + +static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) { struct io_kiocb *nxt; + if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) + return NULL; /* * If LINK is set, we have dependent requests in this chain. If we * didn't fail this request, queue the first one up, moving any other * dependencies to the next request. In case of failure, fail the rest * of the chain. 
*/ - if (req->flags & IO_DISARM_MASK) { - struct io_ring_ctx *ctx = req->ctx; - bool posted; - - spin_lock(&ctx->completion_lock); - posted = io_disarm_next(req); - if (posted) - io_commit_cqring(req->ctx); - spin_unlock(&ctx->completion_lock); - if (posted) - io_cqring_ev_posted(ctx); - } + if (unlikely(req->flags & IO_DISARM_MASK)) + __io_req_find_next_prep(req); nxt = req->link; req->link = NULL; return nxt; } -static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) -{ - if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) - return NULL; - return __io_req_find_next(req); -} - static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) { if (!ctx) From c450178d9be9dc44327f3b526f82c36e4c5d362b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:52 +0100 Subject: [PATCH 114/205] io_uring: dedup CQE flushing non-empty checks We don't do io_submit_flush_completions() when there is no requests enqueued, and every single caller checks for it. Hide that check into the function not forgetting about inlining. That will make it much easier for changing the empty check condition in the future. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/d7ff8cef5da1b38e8ea648f5aad9a315ddfc7b57.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 10dd3589f8bef..c4ec3672da77b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1092,7 +1092,7 @@ static void __io_queue_sqe(struct io_kiocb *req); static void io_rsrc_put_work(struct work_struct *work); static void io_req_task_queue(struct io_kiocb *req); -static void io_submit_flush_completions(struct io_ring_ctx *ctx); +static void __io_submit_flush_completions(struct io_ring_ctx *ctx); static int io_req_prep_async(struct io_kiocb *req); static int io_install_fixed_file(struct io_kiocb *req, struct file *file, @@ -1164,6 +1164,12 @@ static inline void req_ref_get(struct io_kiocb *req) atomic_inc(&req->refs); } +static inline void io_submit_flush_completions(struct io_ring_ctx *ctx) +{ + if (ctx->submit_state.compl_nr) + __io_submit_flush_completions(ctx); +} + static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) { if (!(req->flags & REQ_F_REFCOUNT)) { @@ -1252,8 +1258,7 @@ static void io_fallback_req_func(struct work_struct *work) req->io_task_work.func(req, &locked); if (locked) { - if (ctx->submit_state.compl_nr) - io_submit_flush_completions(ctx); + io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); } percpu_ref_put(&ctx->refs); @@ -2112,8 +2117,7 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked) if (!ctx) return; if (*locked) { - if (ctx->submit_state.compl_nr) - io_submit_flush_completions(ctx); + io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); *locked = false; } @@ -2130,7 +2134,7 @@ static void tctx_task_work(struct callback_head *cb) while (1) { struct io_wq_work_node *node; - if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr) + if (!tctx->task_list.first && locked) io_submit_flush_completions(ctx); spin_lock_irq(&tctx->task_lock); @@ -2315,7 +2319,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, list_add(&req->inflight_entry, &state->free_list); } -static void io_submit_flush_completions(struct io_ring_ctx *ctx) +static void __io_submit_flush_completions(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct 
io_submit_state *state = &ctx->submit_state; @@ -7195,8 +7199,7 @@ static void io_submit_state_end(struct io_submit_state *state, { if (state->link.head) io_queue_sqe(state->link.head); - if (state->compl_nr) - io_submit_flush_completions(ctx); + io_submit_flush_completions(ctx); if (state->plug_started) blk_finish_plug(&state->plug); } From d97ec6239ad8684e4d31b12b39cefca6782c09a3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 8 Sep 2021 16:40:53 +0100 Subject: [PATCH 115/205] io_uring: kill extra wake_up_process in tw add TWA_SIGNAL already wakes the thread, no need in wake_up_process() after it. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/7e90cf643f633e857443e0c9e72471b221735c50.1631115443.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c4ec3672da77b..ead85f4d34272 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2197,8 +2197,9 @@ static void io_req_task_work_add(struct io_kiocb *req) * will do the job. */ notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; - if (!task_work_add(tsk, &tctx->task_work, notify)) { - wake_up_process(tsk); + if (likely(!task_work_add(tsk, &tctx->task_work, notify))) { + if (notify == TWA_NONE) + wake_up_process(tsk); return; } From 83f84356bc8f2dda9d0c9c7edb94decf71a36d26 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Mon, 13 Sep 2021 21:08:54 +0800 Subject: [PATCH 116/205] io_uring: add more uring info to fdinfo for debug Developers may need some uring info to help themselves debug and address issues in production. This includes sqring/cqring head/tail and the detailed sqe/cqe info, which is very useful when an application is hung on a ring. Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20210913130854.38542-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 4 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index ead85f4d34272..323ef0bb4bc1a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -10056,8 +10056,48 @@ static int io_uring_show_cred(struct seq_file *m, unsigned int id, static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) { struct io_sq_data *sq = NULL; + struct io_overflow_cqe *ocqe; + struct io_rings *r = ctx->rings; + unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1; + unsigned int cached_sq_head = ctx->cached_sq_head; + unsigned int cached_cq_tail = ctx->cached_cq_tail; + unsigned int sq_head = READ_ONCE(r->sq.head); + unsigned int sq_tail = READ_ONCE(r->sq.tail); + unsigned int cq_head = READ_ONCE(r->cq.head); + unsigned int cq_tail = READ_ONCE(r->cq.tail); bool has_lock; - int i; + unsigned int i; + + /* + * we may get imprecise sqe and cqe info if uring is actively running + * since we get cached_sq_head and cached_cq_tail without uring_lock + * and sq_tail and cq_head are changed by userspace. But it's ok since + * we usually use these info when it is stuck. 
+ */ + seq_printf(m, "SqHead:\t%u\n", sq_head & sq_mask); + seq_printf(m, "SqTail:\t%u\n", sq_tail & sq_mask); + seq_printf(m, "CachedSqHead:\t%u\n", cached_sq_head & sq_mask); + seq_printf(m, "CqHead:\t%u\n", cq_head & cq_mask); + seq_printf(m, "CqTail:\t%u\n", cq_tail & cq_mask); + seq_printf(m, "CachedCqTail:\t%u\n", cached_cq_tail & cq_mask); + seq_printf(m, "SQEs:\t%u\n", sq_tail - cached_sq_head); + for (i = cached_sq_head; i < sq_tail; i++) { + unsigned int sq_idx = READ_ONCE(ctx->sq_array[i & sq_mask]); + + if (likely(sq_idx <= sq_mask)) { + struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx]; + + seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n", + sq_idx, sqe->opcode, sqe->fd, sqe->flags, sqe->user_data); + } + } + seq_printf(m, "CQEs:\t%u\n", cached_cq_tail - cq_head); + for (i = cq_head; i < cached_cq_tail; i++) { + struct io_uring_cqe *cqe = &r->cqes[i & cq_mask]; + + seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n", + i & cq_mask, cqe->user_data, cqe->res, cqe->flags); + } /* * Avoid ABBA deadlock between the seq lock and the io_uring mutex, @@ -10099,7 +10139,10 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) xa_for_each(&ctx->personalities, index, cred) io_uring_show_cred(m, index, cred); } - seq_printf(m, "PollList:\n"); + if (has_lock) + mutex_unlock(&ctx->uring_lock); + + seq_puts(m, "PollList:\n"); spin_lock(&ctx->completion_lock); for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { struct hlist_head *list = &ctx->cancel_hash[i]; @@ -10109,9 +10152,17 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) seq_printf(m, " op=%d, task_works=%d\n", req->opcode, req->task->task_works != NULL); } + + seq_puts(m, "CqOverflowList:\n"); + list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) { + struct io_uring_cqe *cqe = &ocqe->cqe; + + seq_printf(m, " user_data=%llu, res=%d, flags=%x\n", + cqe->user_data, cqe->res, cqe->flags); + + } + spin_unlock(&ctx->completion_lock); - if (has_lock) - mutex_unlock(&ctx->uring_lock); } static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) From a3f349071eb0ae2aa18ac2da55060865d8190cf0 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Sep 2021 12:04:20 +0100 Subject: [PATCH 117/205] io_uring: remove ctx referencing from complete_post Now completions are done from task context, that means that it's either the task itself, task_work or io-wq worker. In all those cases the ctx will be staying alive by mutexing, explicit referencing or req references by iowq. Remove extra ctx pinning from io_req_complete_post(). 
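The lifetime argument can be illustrated with an ordinary reference count: when the caller already holds a reference across the call, the callee needs no get/put pair of its own. The sketch below uses plain C11 atomics, not the kernel's percpu_ref.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
        atomic_int refs;
};

static void ctx_put(struct ctx *c)
{
        if (atomic_fetch_sub(&c->refs, 1) == 1) {
                printf("ctx freed\n");
                free(c);
        }
}

/* The callee may use 'c' freely: the caller's reference pins it for the
 * whole call, so no extra get/put is needed here. */
static void complete_post(struct ctx *c)
{
        printf("completing against ctx with %d refs\n",
               atomic_load(&c->refs));
}

int main(void)
{
        struct ctx *c = malloc(sizeof(*c));

        atomic_init(&c->refs, 1);       /* reference owned by the caller */
        complete_post(c);
        ctx_put(c);                     /* dropped only after the call */
        return 0;
}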
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/60a0e96434c16ab4fe587651448290d61ec9a113.1631703756.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 323ef0bb4bc1a..027ca7a6df446 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1809,17 +1809,11 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_put_task(req->task, 1); list_add(&req->inflight_entry, &ctx->locked_free_list); ctx->locked_free_nr++; - } else { - if (!percpu_ref_tryget(&ctx->refs)) - req = NULL; + percpu_ref_put(&ctx->refs); } io_commit_cqring(ctx); spin_unlock(&ctx->completion_lock); - - if (req) { - io_cqring_ev_posted(ctx); - percpu_ref_put(&ctx->refs); - } + io_cqring_ev_posted(ctx); } static inline bool io_req_needs_clean(struct io_kiocb *req) From 68fe256aadc0db70cb27132a6f5583819794d867 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 15 Sep 2021 12:03:38 +0100 Subject: [PATCH 118/205] io_uring: optimise io_req_init() sqe flags checks IOSQE_IO_DRAIN is quite marginal and we don't care too much about IOSQE_BUFFER_SELECT. Save to ifs and hide both of them under SQE_VALID_FLAGS check. Now we first check whether it uses a "safe" subset, i.e. without DRAIN and BUFFER_SELECT, and only if it's not true we test the rest of the flags. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/dccfb9ab2ab0969a2d8dc59af88fa0ce44eeb1d5.1631703764.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 027ca7a6df446..3801f2e5ea1bc 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -103,9 +103,11 @@ #define IORING_MAX_REG_BUFFERS (1U << 14) -#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK| \ - IOSQE_IO_HARDLINK | IOSQE_ASYNC | \ - IOSQE_BUFFER_SELECT) +#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \ + IOSQE_IO_HARDLINK | IOSQE_ASYNC) + +#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS|IOSQE_BUFFER_SELECT|IOSQE_IO_DRAIN) + #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS) @@ -7059,20 +7061,21 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->fixed_rsrc_refs = NULL; req->task = current; - /* enforce forwards compatibility on users */ - if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) - return -EINVAL; if (unlikely(req->opcode >= IORING_OP_LAST)) return -EINVAL; + if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { + /* enforce forwards compatibility on users */ + if (sqe_flags & ~SQE_VALID_FLAGS) + return -EINVAL; + if ((sqe_flags & IOSQE_BUFFER_SELECT) && + !io_op_defs[req->opcode].buffer_select) + return -EOPNOTSUPP; + if (sqe_flags & IOSQE_IO_DRAIN) + ctx->drain_active = true; + } if (!io_check_restriction(ctx, req, sqe_flags)) return -EACCES; - if ((sqe_flags & IOSQE_BUFFER_SELECT) && - !io_op_defs[req->opcode].buffer_select) - return -EOPNOTSUPP; - if (unlikely(sqe_flags & IOSQE_IO_DRAIN)) - ctx->drain_active = true; - personality = READ_ONCE(sqe->personality); if (personality) { req->creds = xa_load(&ctx->personalities, personality); @@ -11017,6 +11020,8 @@ static int __init io_uring_init(void) /* should fit into one byte */ BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8)); + BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8)); + BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS); 
BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST); BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int)); From 8d4af6857c6fb5b1922218e93052bee29eb540f4 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Wed, 22 Sep 2021 18:15:22 +0800 Subject: [PATCH 119/205] io_uring: return boolean value for io_alloc_async_data boolean value is good enough for io_alloc_async_data. Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20210922101522.9179-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 3801f2e5ea1bc..3ced6a7bdf2b5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3301,7 +3301,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, } } -static inline int io_alloc_async_data(struct io_kiocb *req) +static inline bool io_alloc_async_data(struct io_kiocb *req) { WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); From 6878b40e7b28bd780dedb7d505c13dbf82b73290 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:41 +0100 Subject: [PATCH 120/205] io_uring: mark having different creds unlikely Hint the compiler that it's not as likely to have creds different from current attached to a request. The current code generation is far from ideal, hopefully it can help to some compilers to remove duplicated jump tables and so. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/e7815251ac4bf5a4a23d298c752f029ae19f3837.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 3ced6a7bdf2b5..bda302bedeeff 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6638,7 +6638,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) const struct cred *creds = NULL; int ret; - if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) + if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) creds = override_creds(req->creds); switch (req->opcode) { From 87a115fb715bf1f6765e122babbcba566ada286e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:42 +0100 Subject: [PATCH 121/205] io_uring: force_nonspin We don't really need to pass the number of requests to complete into io_do_iopoll(), a flag whether to enforce non-spin mode is enough. Should be straightforward, maybe except io_iopoll_check(). We pass !min there, because we do never enter with the number of already reaped requests is larger than the specified @min, apart from the first iteration, where nr_events is 0 and so the final check should be identical. 
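A condensed sketch of the resulting interface, assuming made-up flag names: the poll helper takes a single force_nonspin flag, and callers fold their old count comparison into the argument.

#include <stdbool.h>
#include <stdio.h>

#define POLL_ONESHOT    (1u << 0)       /* illustrative flag value */

static int do_iopoll(bool multi_queue, bool force_nonspin)
{
        unsigned int poll_flags = 0;

        /* Only spin for completions when it is both safe and wanted. */
        if (multi_queue || force_nonspin)
                poll_flags |= POLL_ONESHOT;

        printf("polling with flags 0x%x\n", poll_flags);
        return 0;
}

int main(void)
{
        long min = 0;

        do_iopoll(false, !min); /* replaces the old "*nr_events >= min" check */
        do_iopoll(false, true); /* reaping paths never spin */
        return 0;
}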
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/782b39d1d8ec584eae15bca0a1feb6f0571fe5b8.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index bda302bedeeff..d605954879767 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2453,7 +2453,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, } static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, - long min) + bool force_nonspin) { struct io_kiocb *req, *tmp; unsigned int poll_flags = BLK_POLL_NOSLEEP; @@ -2462,9 +2462,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, /* * Only spin for completions if we don't have multiple devices hanging - * off our complete list, and we're under the requested amount. + * off our complete list. */ - if (ctx->poll_multi_queue || *nr_events >= min) + if (ctx->poll_multi_queue || force_nonspin) poll_flags |= BLK_POLL_ONESHOT; list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { @@ -2516,7 +2516,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) while (!list_empty(&ctx->iopoll_list)) { unsigned int nr_events = 0; - io_do_iopoll(ctx, &nr_events, 0); + io_do_iopoll(ctx, &nr_events, true); /* let it sleep and repeat later if can't complete a request */ if (nr_events == 0) @@ -2578,7 +2578,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) list_empty(&ctx->iopoll_list)) break; } - ret = io_do_iopoll(ctx, &nr_events, min); + ret = io_do_iopoll(ctx, &nr_events, !min); } while (!ret && nr_events < min && !need_resched()); out: mutex_unlock(&ctx->uring_lock); @@ -7347,7 +7347,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) mutex_lock(&ctx->uring_lock); if (!list_empty(&ctx->iopoll_list)) - io_do_iopoll(ctx, &nr_events, 0); + io_do_iopoll(ctx, &nr_events, true); /* * Don't submit if refs are dying, good for io_uring_register(), From 5ba3c874eb8acb09fd8aea78ded67e023931894a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:43 +0100 Subject: [PATCH 122/205] io_uring: make io_do_iopoll return number of reqs Don't pass nr_events pointer around but return directly, it's less expensive than pointer increments. 
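In outline, the calling convention becomes: return the number of reaped requests (or a negative error) and let the caller accumulate. A simplified userspace sketch, with an invented do_iopoll() stand-in, follows.

#include <stdio.h>

static int do_iopoll(int available)
{
        if (available < 0)
                return -1;      /* error propagated by value */
        return available;       /* number of reaped requests */
}

static int iopoll_check(long min)
{
        int nr_events = 0;

        do {
                int ret = do_iopoll(2); /* pretend each pass reaps two */

                if (ret < 0)
                        return ret;
                nr_events += ret;
        } while (nr_events < min);

        printf("reaped %d events\n", nr_events);
        return 0;
}

int main(void)
{
        return iopoll_check(5);
}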
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f771a8153a86f16f12ff4272524e9e549c5de40b.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d605954879767..27ca4fb733f96 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2425,8 +2425,7 @@ static inline bool io_run_task_work(void) /* * Find and free completed poll iocbs */ -static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, - struct list_head *done) +static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) { struct req_batch rb; struct io_kiocb *req; @@ -2441,7 +2440,6 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, __io_cqring_fill_event(ctx, req->user_data, req->result, io_put_rw_kbuf(req)); - (*nr_events)++; if (req_ref_put_and_test(req)) io_req_free_batch(&rb, req, &ctx->submit_state); @@ -2452,12 +2450,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, io_req_free_batch_finish(ctx, &rb); } -static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, - bool force_nonspin) +static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { struct io_kiocb *req, *tmp; unsigned int poll_flags = BLK_POLL_NOSLEEP; DEFINE_IO_COMP_BATCH(iob); + int nr_events = 0; LIST_HEAD(done); /* @@ -2478,6 +2476,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, */ if (READ_ONCE(req->iopoll_completed)) { list_move_tail(&req->inflight_entry, &done); + nr_events++; continue; } if (!list_empty(&done)) @@ -2491,16 +2490,18 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, /* iopoll may have completed current req */ if (!rq_list_empty(iob.req_list) || - READ_ONCE(req->iopoll_completed)) + READ_ONCE(req->iopoll_completed)) { list_move_tail(&req->inflight_entry, &done); + nr_events++; + } } if (!rq_list_empty(iob.req_list)) iob.complete(&iob); if (!list_empty(&done)) - io_iopoll_complete(ctx, nr_events, &done); + io_iopoll_complete(ctx, &done); - return 0; + return nr_events; } /* @@ -2514,12 +2515,8 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) mutex_lock(&ctx->uring_lock); while (!list_empty(&ctx->iopoll_list)) { - unsigned int nr_events = 0; - - io_do_iopoll(ctx, &nr_events, true); - /* let it sleep and repeat later if can't complete a request */ - if (nr_events == 0) + if (io_do_iopoll(ctx, true) == 0) break; /* * Ensure we allow local-to-the-cpu processing to take place, @@ -2578,8 +2575,12 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) list_empty(&ctx->iopoll_list)) break; } - ret = io_do_iopoll(ctx, &nr_events, !min); - } while (!ret && nr_events < min && !need_resched()); + ret = io_do_iopoll(ctx, !min); + if (ret < 0) + break; + nr_events += ret; + ret = 0; + } while (nr_events < min && !need_resched()); out: mutex_unlock(&ctx->uring_lock); return ret; @@ -7339,7 +7340,6 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE; if (!list_empty(&ctx->iopoll_list) || to_submit) { - unsigned nr_events = 0; const struct cred *creds = NULL; if (ctx->sq_creds != current_cred()) @@ -7347,7 +7347,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) mutex_lock(&ctx->uring_lock); if (!list_empty(&ctx->iopoll_list)) - io_do_iopoll(ctx, &nr_events, true); + io_do_iopoll(ctx, 
true); /* * Don't submit if refs are dying, good for io_uring_register(), From 6f33b0bc4ea43f5c5ce7b7c9ab66051f80837862 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:44 +0100 Subject: [PATCH 123/205] io_uring: use slist for completion batching Currently we collect requests for completion batching in an array. Replace them with a singly linked list. It's as fast as arrays but doesn't take some much space in ctx, and will be used in future patches. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a666826f2854d17e9fb9417fb302edfeb750f425.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 52 +++++++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 27ca4fb733f96..0ccfd70af3e0d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -322,8 +322,8 @@ struct io_submit_state { /* * Batch completion logic */ - struct io_kiocb *compl_reqs[IO_COMPL_BATCH]; - unsigned int compl_nr; + struct io_wq_work_list compl_reqs; + /* inline/task_work completion list, under ->uring_lock */ struct list_head free_list; }; @@ -882,6 +882,8 @@ struct io_kiocb { struct io_wq_work work; const struct cred *creds; + struct io_wq_work_node comp_list; + /* store used ubuf, so we can prevent reloading */ struct io_mapped_ubuf *imu; }; @@ -1168,7 +1170,7 @@ static inline void req_ref_get(struct io_kiocb *req) static inline void io_submit_flush_completions(struct io_ring_ctx *ctx) { - if (ctx->submit_state.compl_nr) + if (!wq_list_empty(&ctx->submit_state.compl_reqs)) __io_submit_flush_completions(ctx); } @@ -1325,6 +1327,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->submit_state.free_list); INIT_LIST_HEAD(&ctx->locked_free_list); INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); + INIT_WQ_LIST(&ctx->submit_state.compl_reqs); return ctx; err: kfree(ctx->dummy_ubuf); @@ -1826,11 +1829,16 @@ static inline bool io_req_needs_clean(struct io_kiocb *req) static void io_req_complete_state(struct io_kiocb *req, long res, unsigned int cflags) { + struct io_submit_state *state; + if (io_req_needs_clean(req)) io_clean_op(req); req->result = res; req->compl.cflags = cflags; req->flags |= REQ_F_COMPLETE_INLINE; + + state = &req->ctx->submit_state; + wq_list_add_tail(&req->comp_list, &state->compl_reqs); } static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, @@ -2319,13 +2327,14 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, static void __io_submit_flush_completions(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { + struct io_wq_work_node *node, *prev; struct io_submit_state *state = &ctx->submit_state; - int i, nr = state->compl_nr; struct req_batch rb; spin_lock(&ctx->completion_lock); - for (i = 0; i < nr; i++) { - struct io_kiocb *req = state->compl_reqs[i]; + wq_list_for_each(node, prev, &state->compl_reqs) { + struct io_kiocb *req = container_of(node, struct io_kiocb, + comp_list); __io_cqring_fill_event(ctx, req->user_data, req->result, req->compl.cflags); @@ -2335,15 +2344,18 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) io_cqring_ev_posted(ctx); io_init_req_batch(&rb); - for (i = 0; i < nr; i++) { - struct io_kiocb *req = state->compl_reqs[i]; + node = state->compl_reqs.first; + do { + struct io_kiocb *req = container_of(node, struct io_kiocb, + comp_list); + node = req->comp_list.next; if (req_ref_put_and_test(req)) 
io_req_free_batch(&rb, req, &ctx->submit_state); - } + } while (node); io_req_free_batch_finish(ctx, &rb); - state->compl_nr = 0; + INIT_WQ_LIST(&state->compl_reqs); } /* @@ -2668,17 +2680,10 @@ static void io_req_task_complete(struct io_kiocb *req, bool *locked) unsigned int cflags = io_put_rw_kbuf(req); long res = req->result; - if (*locked) { - struct io_ring_ctx *ctx = req->ctx; - struct io_submit_state *state = &ctx->submit_state; - + if (*locked) io_req_complete_state(req, res, cflags); - state->compl_reqs[state->compl_nr++] = req; - if (state->compl_nr == ARRAY_SIZE(state->compl_reqs)) - io_submit_flush_completions(ctx); - } else { + else io_req_complete_post(req, res, cflags); - } } static void __io_complete_rw(struct io_kiocb *req, long res, long res2, @@ -6962,15 +6967,8 @@ static void __io_queue_sqe(struct io_kiocb *req) * doesn't support non-blocking read/write attempts */ if (likely(!ret)) { - if (req->flags & REQ_F_COMPLETE_INLINE) { - struct io_ring_ctx *ctx = req->ctx; - struct io_submit_state *state = &ctx->submit_state; - - state->compl_reqs[state->compl_nr++] = req; - if (state->compl_nr == ARRAY_SIZE(state->compl_reqs)) - io_submit_flush_completions(ctx); + if (req->flags & REQ_F_COMPLETE_INLINE) return; - } linked_timeout = io_prep_linked_timeout(req); if (linked_timeout) From 3ab665b74e59ad467b3eab5b2b95d378cea78b22 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:45 +0100 Subject: [PATCH 124/205] io_uring: remove allocation cache array We have several of request allocation layers, remove the last one, which is the submit->reqs array, and always use submit->free_reqs instead. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8547095c35f7a87bab14f6447ecd30a273ed7500.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 60 +++++++++++++++------------------------------------ 1 file changed, 17 insertions(+), 43 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 0ccfd70af3e0d..e3bfbd3f47873 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -310,12 +310,6 @@ struct io_submit_state { struct blk_plug plug; struct io_submit_link link; - /* - * io_kiocb alloc cache - */ - void *reqs[IO_REQ_CACHE_SIZE]; - unsigned int free_reqs; - bool plug_started; bool need_plug; @@ -1898,7 +1892,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; - int nr; /* * If we have more than a batch's worth of requests in our IRQ side @@ -1907,20 +1900,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) */ if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) io_flush_cached_locked_reqs(ctx, state); - - nr = state->free_reqs; - while (!list_empty(&state->free_list)) { - struct io_kiocb *req = list_first_entry(&state->free_list, - struct io_kiocb, inflight_entry); - - list_del(&req->inflight_entry); - state->reqs[nr++] = req; - if (nr == ARRAY_SIZE(state->reqs)) - break; - } - - state->free_reqs = nr; - return nr != 0; + return !list_empty(&state->free_list); } /* @@ -1934,33 +1914,36 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; + void *reqs[IO_REQ_ALLOC_BATCH]; + struct io_kiocb *req; int ret, i; - BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH); - - if (likely(state->free_reqs || io_flush_cached_reqs(ctx))) + if (likely(!list_empty(&state->free_list) || 
io_flush_cached_reqs(ctx))) goto got_req; - ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH, - state->reqs); + ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); /* * Bulk alloc is all-or-nothing. If we fail to get a batch, * retry single alloc to be on the safe side. */ if (unlikely(ret <= 0)) { - state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); - if (!state->reqs[0]) + reqs[0] = kmem_cache_alloc(req_cachep, gfp); + if (!reqs[0]) return NULL; ret = 1; } - for (i = 0; i < ret; i++) - io_preinit_req(state->reqs[i], ctx); - state->free_reqs = ret; + for (i = 0; i < ret; i++) { + req = reqs[i]; + + io_preinit_req(req, ctx); + list_add(&req->inflight_entry, &state->free_list); + } got_req: - state->free_reqs--; - return state->reqs[state->free_reqs]; + req = list_first_entry(&state->free_list, struct io_kiocb, inflight_entry); + list_del(&req->inflight_entry); + return req; } static inline void io_put_file(struct file *file) @@ -2318,10 +2301,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, rb->task_refs++; rb->ctx_refs++; - if (state->free_reqs != ARRAY_SIZE(state->reqs)) - state->reqs[state->free_reqs++] = req; - else - list_add(&req->inflight_entry, &state->free_list); + list_add(&req->inflight_entry, &state->free_list); } static void __io_submit_flush_completions(struct io_ring_ctx *ctx) @@ -9228,12 +9208,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx) struct io_submit_state *state = &ctx->submit_state; mutex_lock(&ctx->uring_lock); - - if (state->free_reqs) { - kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs); - state->free_reqs = 0; - } - io_flush_cached_locked_reqs(ctx, state); io_req_cache_free(&state->free_list); mutex_unlock(&ctx->uring_lock); From 0d9521b9b526a264a46c42fcde5d3d9a7cb490ea Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:46 +0100 Subject: [PATCH 125/205] io-wq: add io_wq_work_node based stack Apart from just using lists (i.e. io_wq_work_list), we also want to have stacks, which are a bit faster, and have some interoperability between them. Add a stack implementation based on io_wq_work_node and some helpers. 
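A userspace rendition of such a stack may help: nodes embedded in a containing struct, constant-time push and extract, and container_of() to get back to the owner. Everything below is a simplified sketch, not the kernel helpers themselves.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_node {
        struct work_node *next;
};

struct request {
        int id;
        struct work_node comp_list;     /* embedded stack linkage */
};

static void stack_add_head(struct work_node *node, struct work_node *stack)
{
        node->next = stack->next;
        stack->next = node;
}

static struct work_node *stack_extract(struct work_node *stack)
{
        struct work_node *node = stack->next;

        stack->next = node->next;       /* caller must know the stack is non-empty */
        return node;
}

int main(void)
{
        struct work_node stack = { NULL };
        struct request reqs[3] = { { 1 }, { 2 }, { 3 } };

        for (int i = 0; i < 3; i++)
                stack_add_head(&reqs[i].comp_list, &stack);

        while (stack.next) {
                struct request *req = container_of(stack_extract(&stack),
                                                   struct request, comp_list);
                printf("popped request %d\n", req->id); /* LIFO: 3, 2, 1 */
        }
        return 0;
}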
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/5d3a412a5ac0d47e0f0499d70d2207d70a68925e.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io-wq.h | 57 +++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 7 deletions(-) diff --git a/fs/io-wq.h b/fs/io-wq.h index bf5c4c5337605..c870062105d1b 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -29,6 +29,15 @@ struct io_wq_work_list { struct io_wq_work_node *last; }; +#define wq_list_for_each(pos, prv, head) \ + for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next) + +#define wq_list_empty(list) (READ_ONCE((list)->first) == NULL) +#define INIT_WQ_LIST(list) do { \ + (list)->first = NULL; \ + (list)->last = NULL; \ +} while (0) + static inline void wq_list_add_after(struct io_wq_work_node *node, struct io_wq_work_node *pos, struct io_wq_work_list *list) @@ -54,6 +63,15 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node, } } +static inline void wq_list_add_head(struct io_wq_work_node *node, + struct io_wq_work_list *list) +{ + node->next = list->first; + if (!node->next) + list->last = node; + WRITE_ONCE(list->first, node); +} + static inline void wq_list_cut(struct io_wq_work_list *list, struct io_wq_work_node *last, struct io_wq_work_node *prev) @@ -69,6 +87,31 @@ static inline void wq_list_cut(struct io_wq_work_list *list, last->next = NULL; } +static inline void __wq_list_splice(struct io_wq_work_list *list, + struct io_wq_work_node *to) +{ + list->last->next = to->next; + to->next = list->first; + INIT_WQ_LIST(list); +} + +static inline bool wq_list_splice(struct io_wq_work_list *list, + struct io_wq_work_node *to) +{ + if (!wq_list_empty(list)) { + __wq_list_splice(list, to); + return true; + } + return false; +} + +static inline void wq_stack_add_head(struct io_wq_work_node *node, + struct io_wq_work_node *stack) +{ + node->next = stack->next; + stack->next = node; +} + static inline void wq_list_del(struct io_wq_work_list *list, struct io_wq_work_node *node, struct io_wq_work_node *prev) @@ -76,14 +119,14 @@ static inline void wq_list_del(struct io_wq_work_list *list, wq_list_cut(list, node, prev); } -#define wq_list_for_each(pos, prv, head) \ - for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next) +static inline +struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack) +{ + struct io_wq_work_node *node = stack->next; -#define wq_list_empty(list) (READ_ONCE((list)->first) == NULL) -#define INIT_WQ_LIST(list) do { \ - (list)->first = NULL; \ - (list)->last = NULL; \ -} while (0) + stack->next = node->next; + return node; +} struct io_wq_work { struct io_wq_work_node list; From c2b6c6bc4e0d3452ac4e208c198f6062e0f1d9df Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:47 +0100 Subject: [PATCH 126/205] io_uring: replace list with stack for req caches Replace struct list_head free_list serving for caching requests with singly linked stack, which is faster. 
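Building on the same idea, a request cache then becomes pop-on-alloc and push-on-free, falling back to the allocator only when the stack is empty. The sketch below is again illustrative, with invented names.

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

struct req_cache {
        struct node free_list;  /* stack head, ->next == NULL when empty */
};

struct request {
        struct node comp_list;  /* must stay the first member for the cast */
        int id;
};

static struct request *cache_alloc(struct req_cache *c)
{
        struct node *n = c->free_list.next;

        if (!n)
                return malloc(sizeof(struct request));
        c->free_list.next = n->next;
        return (struct request *)n;
}

static void cache_free(struct req_cache *c, struct request *req)
{
        req->comp_list.next = c->free_list.next;
        c->free_list.next = &req->comp_list;
}

int main(void)
{
        struct req_cache cache = { { NULL } };
        struct request *a = cache_alloc(&cache);        /* falls back to malloc */

        a->id = 1;
        cache_free(&cache, a);                          /* back onto the stack */
        printf("recycled: %s\n", cache_alloc(&cache) == a ? "yes" : "no");
        free(a);
        return 0;
}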
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/1bc942b82422fb2624b8353bd93aca183a022846.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 51 ++++++++++++++++++++++++--------------------------- 1 file changed, 24 insertions(+), 27 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index e3bfbd3f47873..90ad9cd254c5d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -319,7 +319,7 @@ struct io_submit_state { struct io_wq_work_list compl_reqs; /* inline/task_work completion list, under ->uring_lock */ - struct list_head free_list; + struct io_wq_work_node free_list; }; struct io_ring_ctx { @@ -379,7 +379,7 @@ struct io_ring_ctx { } ____cacheline_aligned_in_smp; /* IRQ completion list, under ->completion_lock */ - struct list_head locked_free_list; + struct io_wq_work_list locked_free_list; unsigned int locked_free_nr; const struct cred *sq_creds; /* cred used for __io_sq_thread() */ @@ -1318,8 +1318,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work); init_llist_head(&ctx->rsrc_put_llist); INIT_LIST_HEAD(&ctx->tctx_list); - INIT_LIST_HEAD(&ctx->submit_state.free_list); - INIT_LIST_HEAD(&ctx->locked_free_list); + ctx->submit_state.free_list.next = NULL; + INIT_WQ_LIST(&ctx->locked_free_list); INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func); INIT_WQ_LIST(&ctx->submit_state.compl_reqs); return ctx; @@ -1806,7 +1806,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res, } io_dismantle_req(req); io_put_task(req->task, 1); - list_add(&req->inflight_entry, &ctx->locked_free_list); + wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; percpu_ref_put(&ctx->refs); } @@ -1883,7 +1883,7 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, struct io_submit_state *state) { spin_lock(&ctx->completion_lock); - list_splice_init(&ctx->locked_free_list, &state->free_list); + wq_list_splice(&ctx->locked_free_list, &state->free_list); ctx->locked_free_nr = 0; spin_unlock(&ctx->completion_lock); } @@ -1900,7 +1900,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) */ if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH) io_flush_cached_locked_reqs(ctx, state); - return !list_empty(&state->free_list); + return !!state->free_list.next; } /* @@ -1915,10 +1915,11 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) struct io_submit_state *state = &ctx->submit_state; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; void *reqs[IO_REQ_ALLOC_BATCH]; + struct io_wq_work_node *node; struct io_kiocb *req; int ret, i; - if (likely(!list_empty(&state->free_list) || io_flush_cached_reqs(ctx))) + if (likely(state->free_list.next || io_flush_cached_reqs(ctx))) goto got_req; ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); @@ -1938,12 +1939,11 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) req = reqs[i]; io_preinit_req(req, ctx); - list_add(&req->inflight_entry, &state->free_list); + wq_stack_add_head(&req->comp_list, &state->free_list); } got_req: - req = list_first_entry(&state->free_list, struct io_kiocb, inflight_entry); - list_del(&req->inflight_entry); - return req; + node = wq_stack_extract(&state->free_list); + return container_of(node, struct io_kiocb, comp_list); } static inline void io_put_file(struct file *file) @@ -1976,7 +1976,7 @@ static void __io_free_req(struct io_kiocb *req) io_put_task(req->task, 1); spin_lock(&ctx->completion_lock); - 
list_add(&req->inflight_entry, &ctx->locked_free_list); + wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; spin_unlock(&ctx->completion_lock); @@ -2300,8 +2300,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, } rb->task_refs++; rb->ctx_refs++; - - list_add(&req->inflight_entry, &state->free_list); + wq_stack_add_head(&req->comp_list, &state->free_list); } static void __io_submit_flush_completions(struct io_ring_ctx *ctx) @@ -7261,7 +7260,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) } sqe = io_get_sqe(ctx); if (unlikely(!sqe)) { - list_add(&req->inflight_entry, &ctx->submit_state.free_list); + wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); break; } /* will complete beyond this point, count as submitted */ @@ -9193,23 +9192,21 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) } } -static void io_req_cache_free(struct list_head *list) -{ - struct io_kiocb *req, *nxt; - - list_for_each_entry_safe(req, nxt, list, inflight_entry) { - list_del(&req->inflight_entry); - kmem_cache_free(req_cachep, req); - } -} - static void io_req_caches_free(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; mutex_lock(&ctx->uring_lock); io_flush_cached_locked_reqs(ctx, state); - io_req_cache_free(&state->free_list); + + while (state->free_list.next) { + struct io_wq_work_node *node; + struct io_kiocb *req; + + node = wq_stack_extract(&state->free_list); + req = container_of(node, struct io_kiocb, comp_list); + kmem_cache_free(req_cachep, req); + } mutex_unlock(&ctx->uring_lock); } From e3f721e6f6d5d916edf71e5f26ac0547a4b28e1e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:48 +0100 Subject: [PATCH 127/205] io_uring: split iopoll loop The main loop of io_do_iopoll() iterates and does ->iopoll() until it meets a first completed request, then it continues from that position and splices requests to pass them through io_iopoll_complete(). Split the loop in two for clearness, iopolling and reaping completed requests from the list. 
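The two-phase structure can be pictured with a toy model: one loop polls until it meets the first already-completed entry, a second loop reaps the completed prefix. The array of flags below merely stands in for the request list.

#include <stdbool.h>
#include <stdio.h>

#define NR_REQS 5

int main(void)
{
        bool completed[NR_REQS] = { false, false, true, true, false };
        int i, reaped = 0;

        /* Phase 1: poll, stopping at the first already-completed request. */
        for (i = 0; i < NR_REQS; i++) {
                if (completed[i])
                        break;
                printf("polling request %d\n", i);
                completed[i] = true;    /* pretend the poll completed it */
        }

        /* Phase 2: reap the completed prefix of the list. */
        for (i = 0; i < NR_REQS && completed[i]; i++)
                reaped++;

        printf("reaped %d of %d requests\n", reaped, NR_REQS);
        return 0;
}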
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a7f6fd27a94845e5dc925a47a4a9765a92e514fb.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 90ad9cd254c5d..0e46ed6f92ea9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2441,6 +2441,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) io_req_free_batch_finish(ctx, &rb); } +/* same as "continue" but starts from the pos, not next to it */ +#define list_for_each_entry_safe_resume(pos, n, head, member) \ + for (n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { struct io_kiocb *req, *tmp; @@ -2456,7 +2462,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (ctx->poll_multi_queue || force_nonspin) poll_flags |= BLK_POLL_ONESHOT; - list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { + list_for_each_entry(req, &ctx->iopoll_list, inflight_entry) { struct kiocb *kiocb = &req->rw.kiocb; int ret; @@ -2465,12 +2471,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) * If we find a request that requires polling, break out * and complete those lists first, if we have entries there. */ - if (READ_ONCE(req->iopoll_completed)) { - list_move_tail(&req->inflight_entry, &done); - nr_events++; - continue; - } - if (!list_empty(&done)) + if (READ_ONCE(req->iopoll_completed)) break; ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags); @@ -2481,17 +2482,22 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) /* iopoll may have completed current req */ if (!rq_list_empty(iob.req_list) || - READ_ONCE(req->iopoll_completed)) { - list_move_tail(&req->inflight_entry, &done); - nr_events++; - } + READ_ONCE(req->iopoll_completed)) + break; } if (!rq_list_empty(iob.req_list)) iob.complete(&iob); - if (!list_empty(&done)) - io_iopoll_complete(ctx, &done); + list_for_each_entry_safe_resume(req, tmp, &ctx->iopoll_list, + inflight_entry) { + if (!READ_ONCE(req->iopoll_completed)) + break; + list_move_tail(&req->inflight_entry, &done); + nr_events++; + } + if (nr_events) + io_iopoll_complete(ctx, &done); return nr_events; } From 5eef4e87eb0b2b8482f6a77ebcad067636a867b3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:49 +0100 Subject: [PATCH 128/205] io_uring: use single linked list for iopoll Use single linked lists for keeping iopoll requests, takes less space, may be faster, but mostly will be of benefit for further patches. 
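One property this buys is that a completed prefix can be detached from the singly linked list in constant time once the previous node is known. The sketch below shows that cut operation with simplified types; it is not the kernel's wq_list_cut().

#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

struct list {
        struct node *first;
        struct node *last;
};

/* Detach everything from the head up to and including 'last_done'. */
static void list_cut_prefix(struct list *l, struct node *last_done)
{
        l->first = last_done->next;
        if (!l->first)
                l->last = NULL;
        last_done->next = NULL;
}

int main(void)
{
        struct node n[3] = { { 1, &n[1] }, { 2, &n[2] }, { 3, NULL } };
        struct list iopoll = { &n[0], &n[2] };

        /* Suppose n[0] and n[1] completed: cut them off in O(1). */
        list_cut_prefix(&iopoll, &n[1]);

        for (struct node *p = iopoll.first; p; p = p->next)
                printf("still pending: %d\n", p->id);   /* prints 3 */
        return 0;
}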
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/314033676b100cd485518c3bc55e1b95a0dcd71f.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io-wq.h | 3 +++ fs/io_uring.c | 54 +++++++++++++++++++++++++-------------------------- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/fs/io-wq.h b/fs/io-wq.h index c870062105d1b..87ba6a7336307 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -32,6 +32,9 @@ struct io_wq_work_list { #define wq_list_for_each(pos, prv, head) \ for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next) +#define wq_list_for_each_resume(pos, prv) \ + for (; pos; prv = pos, pos = (pos)->next) + #define wq_list_empty(list) (READ_ONCE((list)->first) == NULL) #define INIT_WQ_LIST(list) do { \ (list)->first = NULL; \ diff --git a/fs/io_uring.c b/fs/io_uring.c index 0e46ed6f92ea9..fac1d0310d665 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -412,7 +412,7 @@ struct io_ring_ctx { * For SQPOLL, only the single threaded io_sq_thread() will * manipulate the list, hence no extra locking is needed there. */ - struct list_head iopoll_list; + struct io_wq_work_list iopoll_list; struct hlist_head *cancel_hash; unsigned cancel_hash_bits; bool poll_multi_queue; @@ -1309,7 +1309,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) init_waitqueue_head(&ctx->cq_wait); spin_lock_init(&ctx->completion_lock); spin_lock_init(&ctx->timeout_lock); - INIT_LIST_HEAD(&ctx->iopoll_list); + INIT_WQ_LIST(&ctx->iopoll_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); INIT_LIST_HEAD(&ctx->ltimeout_list); @@ -2441,15 +2441,9 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) io_req_free_batch_finish(ctx, &rb); } -/* same as "continue" but starts from the pos, not next to it */ -#define list_for_each_entry_safe_resume(pos, n, head, member) \ - for (n = list_next_entry(pos, member); \ - !list_entry_is_head(pos, head, member); \ - pos = n, n = list_next_entry(n, member)) - static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { - struct io_kiocb *req, *tmp; + struct io_wq_work_node *pos, *start, *prev; unsigned int poll_flags = BLK_POLL_NOSLEEP; DEFINE_IO_COMP_BATCH(iob); int nr_events = 0; @@ -2462,7 +2456,8 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (ctx->poll_multi_queue || force_nonspin) poll_flags |= BLK_POLL_ONESHOT; - list_for_each_entry(req, &ctx->iopoll_list, inflight_entry) { + wq_list_for_each(pos, start, &ctx->iopoll_list) { + struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); struct kiocb *kiocb = &req->rw.kiocb; int ret; @@ -2488,14 +2483,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (!rq_list_empty(iob.req_list)) iob.complete(&iob); - list_for_each_entry_safe_resume(req, tmp, &ctx->iopoll_list, - inflight_entry) { + else if (!pos) + return 0; + + prev = start; + wq_list_for_each_resume(pos, prev) { + struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); + if (!READ_ONCE(req->iopoll_completed)) break; - list_move_tail(&req->inflight_entry, &done); + list_add_tail(&req->inflight_entry, &done); nr_events++; } + wq_list_cut(&ctx->iopoll_list, prev, start); if (nr_events) io_iopoll_complete(ctx, &done); return nr_events; @@ -2511,7 +2512,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) return; mutex_lock(&ctx->uring_lock); - while (!list_empty(&ctx->iopoll_list)) { + while (!wq_list_empty(&ctx->iopoll_list)) { /* let it sleep and 
repeat later if can't complete a request */ if (io_do_iopoll(ctx, true) == 0) break; @@ -2560,7 +2561,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) * forever, while the workqueue is stuck trying to acquire the * very same mutex. */ - if (list_empty(&ctx->iopoll_list)) { + if (wq_list_empty(&ctx->iopoll_list)) { u32 tail = ctx->cached_cq_tail; mutex_unlock(&ctx->uring_lock); @@ -2569,7 +2570,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) /* some requests don't go through iopoll_list */ if (tail != ctx->cached_cq_tail || - list_empty(&ctx->iopoll_list)) + wq_list_empty(&ctx->iopoll_list)) break; } ret = io_do_iopoll(ctx, !min); @@ -2729,14 +2730,13 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * how we do polling eventually, not spinning if we're on potentially * different devices. */ - if (list_empty(&ctx->iopoll_list)) { + if (wq_list_empty(&ctx->iopoll_list)) { ctx->poll_multi_queue = false; } else if (!ctx->poll_multi_queue) { struct io_kiocb *list_req; - list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, - inflight_entry); - + list_req = container_of(ctx->iopoll_list.first, struct io_kiocb, + comp_list); if (list_req->file != req->file) ctx->poll_multi_queue = true; } @@ -2746,9 +2746,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * it to the front so we find it first. */ if (READ_ONCE(req->iopoll_completed)) - list_add(&req->inflight_entry, &ctx->iopoll_list); + wq_list_add_head(&req->comp_list, &ctx->iopoll_list); else - list_add_tail(&req->inflight_entry, &ctx->iopoll_list); + wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); if (unlikely(in_async)) { /* @@ -7322,14 +7322,14 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE) to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE; - if (!list_empty(&ctx->iopoll_list) || to_submit) { + if (!wq_list_empty(&ctx->iopoll_list) || to_submit) { const struct cred *creds = NULL; if (ctx->sq_creds != current_cred()) creds = override_creds(ctx->sq_creds); mutex_lock(&ctx->uring_lock); - if (!list_empty(&ctx->iopoll_list)) + if (!wq_list_empty(&ctx->iopoll_list)) io_do_iopoll(ctx, true); /* @@ -7407,7 +7407,7 @@ static int io_sq_thread(void *data) list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { int ret = __io_sq_thread(ctx, cap_entries); - if (!sqt_spin && (ret > 0 || !list_empty(&ctx->iopoll_list))) + if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list))) sqt_spin = true; } if (io_run_task_work()) @@ -7428,7 +7428,7 @@ static int io_sq_thread(void *data) io_ring_set_wakeup_flag(ctx); if ((ctx->flags & IORING_SETUP_IOPOLL) && - !list_empty_careful(&ctx->iopoll_list)) { + !wq_list_empty(&ctx->iopoll_list)) { needs_sched = false; break; } @@ -9583,7 +9583,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, /* SQPOLL thread does its own polling */ if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || (ctx->sq_data && ctx->sq_data->thread == current)) { - while (!list_empty_careful(&ctx->iopoll_list)) { + while (!wq_list_empty(&ctx->iopoll_list)) { io_iopoll_try_reap_events(ctx); ret = true; } From 3aa83bfb6e5c3a89d0ec67e598ee04bfc1425b13 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:50 +0100 Subject: [PATCH 129/205] io_uring: add a helper for batch free Add a helper io_free_batch_list(), which takes a single linked list and puts/frees all requests from it in an efficient manner. Will be reused later. 
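A sketch of the traversal-while-freeing pattern a helper like this relies on: the next pointer is read before the current node goes back to the allocator, since the link is embedded in the object being freed. Types are simplified; the real helper also batches reference puts.

#include <stdlib.h>

struct node { struct node *next; };

static void free_batch_list(struct node *node)
{
	while (node) {
		struct node *next = node->next;	/* grab link before freeing */

		free(node);
		node = next;
	}
}

int main(void)
{
	struct node *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->next = b;
	b->next = NULL;
	free_batch_list(a);
	return 0;
}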
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4fc8306b542c6b1dd1d08e8021ef3bdb0ad15010.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index fac1d0310d665..df122cdfc85d6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2303,12 +2303,31 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, wq_stack_add_head(&req->comp_list, &state->free_list); } +static void io_free_batch_list(struct io_ring_ctx *ctx, + struct io_wq_work_list *list) + __must_hold(&ctx->uring_lock) +{ + struct io_wq_work_node *node; + struct req_batch rb; + + io_init_req_batch(&rb); + node = list->first; + do { + struct io_kiocb *req = container_of(node, struct io_kiocb, + comp_list); + + node = req->comp_list.next; + if (req_ref_put_and_test(req)) + io_req_free_batch(&rb, req, &ctx->submit_state); + } while (node); + io_req_free_batch_finish(ctx, &rb); +} + static void __io_submit_flush_completions(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_wq_work_node *node, *prev; struct io_submit_state *state = &ctx->submit_state; - struct req_batch rb; spin_lock(&ctx->completion_lock); wq_list_for_each(node, prev, &state->compl_reqs) { @@ -2322,18 +2341,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) spin_unlock(&ctx->completion_lock); io_cqring_ev_posted(ctx); - io_init_req_batch(&rb); - node = state->compl_reqs.first; - do { - struct io_kiocb *req = container_of(node, struct io_kiocb, - comp_list); - - node = req->comp_list.next; - if (req_ref_put_and_test(req)) - io_req_free_batch(&rb, req, &ctx->submit_state); - } while (node); - - io_req_free_batch_finish(ctx, &rb); + io_free_batch_list(ctx, &state->compl_reqs); INIT_WQ_LIST(&state->compl_reqs); } From b3fa03fd1b17f557e2c43736440feed66fb86741 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:51 +0100 Subject: [PATCH 130/205] io_uring: convert iopoll_completed to store_release Convert explicit barrier around iopoll_completed to smp_load_acquire() and smp_store_release(). Similar on the callback side, but replaces a single smp_rmb() with per-request smp_load_acquire(), neither imply any extra CPU ordering for x86. Use READ_ONCE as usual where it doesn't matter. Use it to move filling CQEs by iopoll earlier, that will be necessary to avoid traversing the list one extra time in the future. 
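A userspace analogue of the ordering this converts to: the completion side publishes the result with a store-release on the completion flag, and the polling side pairs it with a load-acquire, replacing an explicit smp_wmb()/smp_rmb() pair. C11 atomics stand in for the kernel helpers; the types are simplified.

#include <stdatomic.h>
#include <stdio.h>

struct req {
	long result;
	atomic_int iopoll_completed;
};

static void complete_rw_iopoll(struct req *req, long res)
{
	req->result = res;
	/* pairs with the acquire below: ->result is visible before the flag */
	atomic_store_explicit(&req->iopoll_completed, 1, memory_order_release);
}

static int try_reap(struct req *req, long *res)
{
	if (!atomic_load_explicit(&req->iopoll_completed, memory_order_acquire))
		return 0;
	*res = req->result;	/* safe: ordered after the store-release */
	return 1;
}

int main(void)
{
	struct req r = { 0, 0 };
	long res;

	complete_rw_iopoll(&r, 4096);
	if (try_reap(&r, &res))
		printf("result %ld\n", res);
	return 0;
}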
Suggested-by: Bart Van Assche Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8bd663cb15efdc72d6247c38ee810964e744a450.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index df122cdfc85d6..fb122c2b69f23 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2429,17 +2429,11 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) struct req_batch rb; struct io_kiocb *req; - /* order with ->result store in io_complete_rw_iopoll() */ - smp_rmb(); - io_init_req_batch(&rb); while (!list_empty(done)) { req = list_first_entry(done, struct io_kiocb, inflight_entry); list_del(&req->inflight_entry); - __io_cqring_fill_event(ctx, req->user_data, req->result, - io_put_rw_kbuf(req)); - if (req_ref_put_and_test(req)) io_req_free_batch(&rb, req, &ctx->submit_state); } @@ -2498,8 +2492,12 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) wq_list_for_each_resume(pos, prev) { struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); - if (!READ_ONCE(req->iopoll_completed)) + /* order with io_complete_rw_iopoll(), e.g. ->result updates */ + if (!smp_load_acquire(&req->iopoll_completed)) break; + __io_cqring_fill_event(ctx, req->user_data, req->result, + io_put_rw_kbuf(req)); + list_add_tail(&req->inflight_entry, &done); nr_events++; } @@ -2712,10 +2710,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) } } - WRITE_ONCE(req->result, res); - /* order with io_iopoll_complete() checking ->result */ - smp_wmb(); - WRITE_ONCE(req->iopoll_completed, 1); + req->result = res; + /* order with io_iopoll_complete() checking ->iopoll_completed */ + smp_store_release(&req->iopoll_completed, 1); } /* From f5ed3bcd5b117ffe73b9dc2394bbbad26a68c086 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:52 +0100 Subject: [PATCH 131/205] io_uring: optimise batch completion First, convert rest of iopoll bits to single linked lists, and also replace per-request list_add_tail() with splicing a part of slist. With that, use io_free_batch_list() to put/free requests. The main advantage of it is that it's now the only user of struct req_batch and friends, and so they can be inlined. The main overhead there was per-request call to not-inlined io_req_free_batch(), which is expensive enough. 
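A sketch of the O(1) splice that replaces per-request list moves here: the completed prefix of a singly linked list is detached in one step, given its last node, and can then be handed whole to a batch-free routine. cut_prefix() is an illustrative helper, not the kernel's wq_list_cut().

#include <stdio.h>

struct node { int id; struct node *next; };
struct list { struct node *first, *last; };

/* detach [first .. prefix_last] from the list and return its head */
static struct node *cut_prefix(struct list *list, struct node *prefix_last)
{
	struct node *head = list->first;

	list->first = prefix_last->next;
	if (!list->first)
		list->last = NULL;
	prefix_last->next = NULL;	/* terminate the detached sub-list */
	return head;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct list l = { &a, &c };

	for (struct node *n = cut_prefix(&l, &b); n; n = n->next)
		printf("reaped %d\n", n->id);		/* 1, 2 */
	printf("remaining head: %d\n", l.first->id);	/* 3 */
	return 0;
}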
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b37fc6d5954b241e025eead7ab92c6f44a42f229.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 38 ++++++++++---------------------------- 1 file changed, 10 insertions(+), 28 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index fb122c2b69f23..91077b56a5643 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2421,35 +2421,13 @@ static inline bool io_run_task_work(void) return false; } -/* - * Find and free completed poll iocbs - */ -static void io_iopoll_complete(struct io_ring_ctx *ctx, struct list_head *done) -{ - struct req_batch rb; - struct io_kiocb *req; - - io_init_req_batch(&rb); - while (!list_empty(done)) { - req = list_first_entry(done, struct io_kiocb, inflight_entry); - list_del(&req->inflight_entry); - - if (req_ref_put_and_test(req)) - io_req_free_batch(&rb, req, &ctx->submit_state); - } - - io_commit_cqring(ctx); - io_cqring_ev_posted_iopoll(ctx); - io_req_free_batch_finish(ctx, &rb); -} - static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { struct io_wq_work_node *pos, *start, *prev; unsigned int poll_flags = BLK_POLL_NOSLEEP; + struct io_wq_work_list list; DEFINE_IO_COMP_BATCH(iob); int nr_events = 0; - LIST_HEAD(done); /* * Only spin for completions if we don't have multiple devices hanging @@ -2496,15 +2474,19 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) if (!smp_load_acquire(&req->iopoll_completed)) break; __io_cqring_fill_event(ctx, req->user_data, req->result, - io_put_rw_kbuf(req)); - - list_add_tail(&req->inflight_entry, &done); + io_put_rw_kbuf(req)); nr_events++; } + if (unlikely(!nr_events)) + return 0; + + io_commit_cqring(ctx); + io_cqring_ev_posted_iopoll(ctx); + list.first = start ? start->next : ctx->iopoll_list.first; + list.last = prev; wq_list_cut(&ctx->iopoll_list, prev, start); - if (nr_events) - io_iopoll_complete(ctx, &done); + io_free_batch_list(ctx, &list); return nr_events; } From d4b7a5ef2b9c06def90d12db9b99bd12d75758fb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:53 +0100 Subject: [PATCH 132/205] io_uring: inline completion batching helpers We now have a single function for batched put of requests, just inline struct req_batch and all related helpers into it. 
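A sketch of the reference batching that gets open-coded into the single helper: instead of dropping one reference per request, puts against the same owner are accumulated in locals and released in one call when the owner changes or the walk ends. The owner type and put_refs() are simplified stand-ins.

#include <stdio.h>

struct owner { int refs; };

static void put_refs(struct owner *o, int nr)
{
	o->refs -= nr;	/* one batched put instead of nr individual ones */
}

static void free_batch(struct owner *req_owner[], int nr_reqs)
{
	struct owner *cur = NULL;
	int batched = 0;

	for (int i = 0; i < nr_reqs; i++) {
		if (req_owner[i] != cur) {
			if (cur)
				put_refs(cur, batched);	/* flush previous owner */
			cur = req_owner[i];
			batched = 0;
		}
		batched++;
	}
	if (cur)
		put_refs(cur, batched);
}

int main(void)
{
	struct owner a = { .refs = 3 }, b = { .refs = 1 };
	struct owner *owners[] = { &a, &a, &a, &b };

	free_batch(owners, 4);
	printf("a=%d b=%d\n", a.refs, b.refs);	/* a=0 b=0 */
	return 0;
}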
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/595a2917f80dd94288cd7203052c7934f5446580.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 66 +++++++++++++++++---------------------------------- 1 file changed, 22 insertions(+), 44 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 91077b56a5643..7229bf74c9e17 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2264,63 +2264,41 @@ static void io_free_req_work(struct io_kiocb *req, bool *locked) io_free_req(req); } -struct req_batch { - struct task_struct *task; - int task_refs; - int ctx_refs; -}; - -static inline void io_init_req_batch(struct req_batch *rb) -{ - rb->task_refs = 0; - rb->ctx_refs = 0; - rb->task = NULL; -} - -static void io_req_free_batch_finish(struct io_ring_ctx *ctx, - struct req_batch *rb) -{ - if (rb->ctx_refs) - percpu_ref_put_many(&ctx->refs, rb->ctx_refs); - if (rb->task) - io_put_task(rb->task, rb->task_refs); -} - -static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, - struct io_submit_state *state) -{ - io_queue_next(req); - io_dismantle_req(req); - - if (req->task != rb->task) { - if (rb->task) - io_put_task(rb->task, rb->task_refs); - rb->task = req->task; - rb->task_refs = 0; - } - rb->task_refs++; - rb->ctx_refs++; - wq_stack_add_head(&req->comp_list, &state->free_list); -} - static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_list *list) __must_hold(&ctx->uring_lock) { struct io_wq_work_node *node; - struct req_batch rb; + struct task_struct *task = NULL; + int task_refs = 0, ctx_refs = 0; - io_init_req_batch(&rb); node = list->first; do { struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); node = req->comp_list.next; - if (req_ref_put_and_test(req)) - io_req_free_batch(&rb, req, &ctx->submit_state); + if (!req_ref_put_and_test(req)) + continue; + + io_queue_next(req); + io_dismantle_req(req); + + if (req->task != task) { + if (task) + io_put_task(task, task_refs); + task = req->task; + task_refs = 0; + } + task_refs++; + ctx_refs++; + wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); } while (node); - io_req_free_batch_finish(ctx, &rb); + + if (ctx_refs) + percpu_ref_put_many(&ctx->refs, ctx_refs); + if (task) + io_put_task(task, task_refs); } static void __io_submit_flush_completions(struct io_ring_ctx *ctx) From 1cce17aca621c38c657410dc278a48cda982dd2e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:54 +0100 Subject: [PATCH 133/205] io_uring: don't pass tail into io_free_batch_list io_free_batch_list() iterates all requests in the passed in list, so we don't really need to know the tail but can keep iterating until meet NULL. Just pass the first node into it and it will be enough. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4a12c84b6d887d980e05f417ba4172d04c64acae.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 7229bf74c9e17..693ad31082aeb 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2265,14 +2265,12 @@ static void io_free_req_work(struct io_kiocb *req, bool *locked) } static void io_free_batch_list(struct io_ring_ctx *ctx, - struct io_wq_work_list *list) + struct io_wq_work_node *node) __must_hold(&ctx->uring_lock) { - struct io_wq_work_node *node; struct task_struct *task = NULL; int task_refs = 0, ctx_refs = 0; - node = list->first; do { struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); @@ -2319,7 +2317,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) spin_unlock(&ctx->completion_lock); io_cqring_ev_posted(ctx); - io_free_batch_list(ctx, &state->compl_reqs); + io_free_batch_list(ctx, state->compl_reqs.first); INIT_WQ_LIST(&state->compl_reqs); } @@ -2403,7 +2401,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) { struct io_wq_work_node *pos, *start, *prev; unsigned int poll_flags = BLK_POLL_NOSLEEP; - struct io_wq_work_list list; DEFINE_IO_COMP_BATCH(iob); int nr_events = 0; @@ -2461,10 +2458,9 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) io_commit_cqring(ctx); io_cqring_ev_posted_iopoll(ctx); - list.first = start ? start->next : ctx->iopoll_list.first; - list.last = prev; + pos = start ? start->next : ctx->iopoll_list.first; wq_list_cut(&ctx->iopoll_list, prev, start); - io_free_batch_list(ctx, &list); + io_free_batch_list(ctx, pos); return nr_events; } From 553deffd0920fc52e8c91e7f8a42d1186a75760a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:55 +0100 Subject: [PATCH 134/205] io_uring: don't pass state to io_submit_state_end Submission state and ctx and coupled together, no need to passs Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/e22d77a5786ef77e0c49b933ad74bae55cfb6ca6.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 693ad31082aeb..5088b47d1bc49 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7137,11 +7137,13 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, /* * Batched submission is done, ensure local IO is flushed out. 
*/ -static void io_submit_state_end(struct io_submit_state *state, - struct io_ring_ctx *ctx) +static void io_submit_state_end(struct io_ring_ctx *ctx) { + struct io_submit_state *state = &ctx->submit_state; + if (state->link.head) io_queue_sqe(state->link.head); + /* flush only after queuing links as they can generate completions */ io_submit_flush_completions(ctx); if (state->plug_started) blk_finish_plug(&state->plug); @@ -7244,7 +7246,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) percpu_ref_put_many(&ctx->refs, unused); } - io_submit_state_end(&ctx->submit_state, ctx); + io_submit_state_end(ctx); /* Commit SQ ring head once we've consumed and submitted all SQEs */ io_commit_sqring(ctx); From f15a3431775a598ed89028acee334200160cc2d6 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:56 +0100 Subject: [PATCH 135/205] io_uring: deduplicate io_queue_sqe() call sites There are two call sites of io_queue_sqe() in io_submit_sqe(), combine them into one, because io_queue_sqe() is inline and we don't want to bloat binary, and will become even bigger text data bss dec hex filename 92126 13986 8 106120 19e88 ./fs/io_uring.o 91966 13986 8 105960 19de8 ./fs/io_uring.o Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/506124b8e767f0a4576f7a459f6aea3d13fb4dda.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5088b47d1bc49..9b99ec46da255 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7117,20 +7117,18 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, link->last->link = req; link->last = req; + if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) + return 0; /* last request of a link, enqueue the link */ - if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { - link->head = NULL; - io_queue_sqe(head); - } - } else { - if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { - link->head = req; - link->last = req; - } else { - io_queue_sqe(req); - } + link->head = NULL; + req = head; + } else if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { + link->head = req; + link->last = req; + return 0; } + io_queue_sqe(req); return 0; } From 2a56a9bd64dbdff4fe5cbe00b20014da07694a78 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:57 +0100 Subject: [PATCH 136/205] io_uring: remove drain_active check from hot path req->ctx->active_drain is a bit too expensive, partially because of two dereferences. Do a trick, if we see it set in io_init_req(), set REQ_F_FORCE_ASYNC and it automatically goes through a slower path where we can catch it. It's nearly free to do in io_init_req() because there is already ->restricted check and it's in the same byte of a bitmask. 
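A sketch of the trick described above: the rare drain condition is folded into a per-request "force slow path" flag at init time, so the hot submission path keeps testing only the flags word it already loads and never dereferences the context for the drain state. The flag names below are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define F_FORCE_ASYNC	(1u << 0)
#define F_FAIL		(1u << 1)

struct ctx { bool drain_active; };
struct req { unsigned int flags; };

static void init_req(struct ctx *ctx, struct req *req, bool wants_drain)
{
	req->flags = 0;
	if (wants_drain) {
		ctx->drain_active = true;
		req->flags |= F_FORCE_ASYNC;	/* rare case marked once, up front */
	}
}

static void queue_req(struct req *req)
{
	/* hot path: a single flags test, no extra ctx dereference */
	if (!(req->flags & (F_FORCE_ASYNC | F_FAIL)))
		printf("fast path\n");
	else
		printf("slow path (drain handled there)\n");
}

int main(void)
{
	struct ctx ctx = { false };
	struct req a, b;

	init_req(&ctx, &a, false);
	init_req(&ctx, &b, true);
	queue_req(&a);
	queue_req(&b);
	return 0;
}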
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/d7e7ddc63c15e8a300833132abb3eb8fd3918aef.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 53 ++++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 9b99ec46da255..f0fb458e81f39 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6438,23 +6438,15 @@ static bool io_drain_req(struct io_kiocb *req) int ret; u32 seq; - if (req->flags & REQ_F_FAIL) { - io_req_complete_fail_submit(req); - return true; - } - - /* - * If we need to drain a request in the middle of a link, drain the - * head request and the next request/link after the current link. - * Considering sequential execution of links, IOSQE_IO_DRAIN will be - * maintained for every request of our link. - */ - if (ctx->drain_next) { - req->flags |= REQ_F_IO_DRAIN; - ctx->drain_next = false; - } /* not interested in head, start from the first linked */ io_for_each_link(pos, req->link) { + /* + * If we need to drain a request in the middle of a link, drain + * the head request and the next request/link after the current + * link. Considering sequential execution of links, + * IOSQE_IO_DRAIN will be maintained for every request of our + * link. + */ if (pos->flags & REQ_F_IO_DRAIN) { ctx->drain_next = true; req->flags |= REQ_F_IO_DRAIN; @@ -6946,13 +6938,12 @@ static void __io_queue_sqe(struct io_kiocb *req) static inline void io_queue_sqe(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { - if (unlikely(req->ctx->drain_active) && io_drain_req(req)) - return; - if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { __io_queue_sqe(req); } else if (req->flags & REQ_F_FAIL) { io_req_complete_fail_submit(req); + } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) { + return; } else { int ret = io_req_prep_async(req); @@ -6972,9 +6963,6 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, struct io_kiocb *req, unsigned int sqe_flags) { - if (likely(!ctx->restricted)) - return true; - if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) return false; @@ -7015,11 +7003,28 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if ((sqe_flags & IOSQE_BUFFER_SELECT) && !io_op_defs[req->opcode].buffer_select) return -EOPNOTSUPP; - if (sqe_flags & IOSQE_IO_DRAIN) + if (sqe_flags & IOSQE_IO_DRAIN) { + struct io_submit_link *link = &ctx->submit_state.link; + ctx->drain_active = true; + req->flags |= REQ_F_FORCE_ASYNC; + if (link->head) + link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; + } + } + if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { + if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) + return -EACCES; + /* knock it to the slow queue path, will be drained there */ + if (ctx->drain_active) + req->flags |= REQ_F_FORCE_ASYNC; + /* if there is no link, we're at "next" request and need to drain */ + if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { + ctx->drain_next = false; + ctx->drain_active = true; + req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN; + } } - if (!io_check_restriction(ctx, req, sqe_flags)) - return -EACCES; personality = READ_ONCE(sqe->personality); if (personality) { From 4652fe3f10e57abc3fc6ac11c431b2ef39f78c03 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:58 +0100 Subject: [PATCH 137/205] io_uring: split slow path from io_queue_sqe We don't want the slow path of io_queue_sqe to be 
inlined, so extract a function from it. text data bss dec hex filename 91950 13986 8 105944 19dd8 ./fs/io_uring.o 91758 13986 8 105752 19d18 ./fs/io_uring.o Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/fb01253911f8fb374268f65b1ba939b54ca6583f.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index f0fb458e81f39..e5a414c208bd9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6935,12 +6935,10 @@ static void __io_queue_sqe(struct io_kiocb *req) } } -static inline void io_queue_sqe(struct io_kiocb *req) +static void io_queue_sqe_fallback(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { - if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { - __io_queue_sqe(req); - } else if (req->flags & REQ_F_FAIL) { + if (req->flags & REQ_F_FAIL) { io_req_complete_fail_submit(req); } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) { return; @@ -6954,6 +6952,15 @@ static inline void io_queue_sqe(struct io_kiocb *req) } } +static inline void io_queue_sqe(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) +{ + if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) + __io_queue_sqe(req); + else + io_queue_sqe_fallback(req); +} + /* * Check SQE restrictions (opcode and flags). * From d475a9a6226c86b7febe3863b900b820a0e6b71c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 21:59:59 +0100 Subject: [PATCH 138/205] io_uring: inline hot path of __io_queue_sqe() Extract slow paths from __io_queue_sqe() into a function and inline the hot path. With that we have everything completely inlined on the submission path up until io_issue_sqe(). -> io_submit_sqes() -> io_submit_sqe() (inlined) -> io_queue_sqe() (inlined) -> __io_queue_sqe() (inlined) -> io_issue_sqe() Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f1606864d95d7f26dc28c7eec3dc6ed6ec32618a.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 48 ++++++++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index e5a414c208bd9..d4f8abb451c44 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6891,13 +6891,38 @@ static void io_queue_linked_timeout(struct io_kiocb *req) io_put_req(req); } -static void __io_queue_sqe(struct io_kiocb *req) +static void io_queue_sqe_arm_apoll(struct io_kiocb *req) + __must_hold(&req->ctx->uring_lock) +{ + struct io_kiocb *linked_timeout = io_prep_linked_timeout(req); + + switch (io_arm_poll_handler(req)) { + case IO_APOLL_READY: + if (linked_timeout) { + io_unprep_linked_timeout(req); + linked_timeout = NULL; + } + io_req_task_queue(req); + break; + case IO_APOLL_ABORTED: + /* + * Queued up for async execution, worker will release + * submit reference when the iocb is actually submitted. 
+ */ + io_queue_async_work(req, NULL); + break; + } + + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); +} + +static inline void __io_queue_sqe(struct io_kiocb *req) __must_hold(&req->ctx->uring_lock) { struct io_kiocb *linked_timeout; int ret; -issue_sqe: ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); /* @@ -6912,24 +6937,7 @@ static void __io_queue_sqe(struct io_kiocb *req) if (linked_timeout) io_queue_linked_timeout(linked_timeout); } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { - linked_timeout = io_prep_linked_timeout(req); - - switch (io_arm_poll_handler(req)) { - case IO_APOLL_READY: - if (linked_timeout) - io_unprep_linked_timeout(req); - goto issue_sqe; - case IO_APOLL_ABORTED: - /* - * Queued up for async execution, worker will release - * submit reference when the iocb is actually submitted. - */ - io_queue_async_work(req, NULL); - break; - } - - if (linked_timeout) - io_queue_linked_timeout(linked_timeout); + io_queue_sqe_arm_apoll(req); } else { io_req_complete_failed(req, ret); } From d9f9d2842c9156470b3f1d3dafe5684a3c036366 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 22:00:00 +0100 Subject: [PATCH 139/205] io_uring: reshuffle queue_sqe completion handling If a request completed inline the result should only be zero, it's a grave error otherwise. So, when we see REQ_F_COMPLETE_INLINE it's not even necessary to check the return code, and the flag check can be moved earlier. It's one "if" less for inline completions, and same two checks for it normally completing (ret == 0). Those are two cases we care about the most. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ebd4e397a9c26d96c99b24447acc309741041a83.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d4f8abb451c44..bbb2a262c2728 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6925,14 +6925,13 @@ static inline void __io_queue_sqe(struct io_kiocb *req) ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); + if (req->flags & REQ_F_COMPLETE_INLINE) + return; /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ if (likely(!ret)) { - if (req->flags & REQ_F_COMPLETE_INLINE) - return; - linked_timeout = io_prep_linked_timeout(req); if (linked_timeout) io_queue_linked_timeout(linked_timeout); From 6962980947e2b967ab26bfd34004b6b573597513 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 22:00:01 +0100 Subject: [PATCH 140/205] io_uring: restructure submit sqes to_submit checks Put an explicit check for number of requests to submit. First, we can turn while into do-while and it generates better code, and second that if can be cheaper, e.g. by using CPU flags after sub in io_sqring_entries(). 
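A sketch of the restructuring above: bail out early when there is nothing to submit, which lets the main loop become a do-while whose body is known to run at least once. submit_one() is a stand-in for the per-request submission step.

#include <stdio.h>

static int submit_one(unsigned int i)
{
	printf("submit %u\n", i);
	return 0;
}

static int submit_sqes(unsigned int entries, unsigned int nr)
{
	unsigned int submitted = 0;

	if (!entries)
		return 0;		/* explicit early check */
	if (nr > entries)
		nr = entries;

	do {				/* no leading bounds test needed */
		if (submit_one(submitted))
			break;
		submitted++;
	} while (submitted < nr);

	return submitted;
}

int main(void)
{
	printf("submitted %d\n", submit_sqes(4, 2));
	return 0;
}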
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/5926baadd20c28feab7a5e1725fedf32e4553ff7.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index bbb2a262c2728..b8f60cf361560 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -7225,16 +7225,19 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx) static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) __must_hold(&ctx->uring_lock) { + unsigned int entries = io_sqring_entries(ctx); int submitted = 0; + if (!entries) + return 0; /* make sure SQ entry isn't read before tail */ - nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx)); + nr = min3(nr, ctx->sq_entries, entries); if (!percpu_ref_tryget_many(&ctx->refs, nr)) return -EAGAIN; io_get_task_refs(nr); io_submit_state_start(&ctx->submit_state, nr); - while (submitted < nr) { + do { const struct io_uring_sqe *sqe; struct io_kiocb *req; @@ -7253,7 +7256,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) submitted++; if (io_submit_sqe(ctx, req, sqe)) break; - } + } while (submitted < nr); if (unlikely(submitted != nr)) { int ref_used = (submitted == -EAGAIN) ? 0 : submitted; From ef05d9ebcc927260f700a94436e7c9347657bbef Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 22:00:02 +0100 Subject: [PATCH 141/205] io_uring: kill off ->inflight_entry field ->inflight_entry is not used anymore after converting everything to single linked lists, remove it. Also adjust io_kiocb layout, so all hot bits are in first 3 cachelines. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/fd8d68087ede26c4e1707ce6b175aa1eb2381f2b.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index b8f60cf361560..eff24d8eb399a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -868,18 +868,15 @@ struct io_kiocb { struct percpu_ref *fixed_rsrc_refs; /* used with ctx->iopoll_list with reads/writes */ - struct list_head inflight_entry; + struct io_wq_work_node comp_list; struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; struct async_poll *apoll; - struct io_wq_work work; - const struct cred *creds; - - struct io_wq_work_node comp_list; - /* store used ubuf, so we can prevent reloading */ struct io_mapped_ubuf *imu; + struct io_wq_work work; + const struct cred *creds; }; struct io_tctx_node { From a1cdbb4cb5f7190c429b1127892f756b7cd32db4 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 22:00:03 +0100 Subject: [PATCH 142/205] io_uring: comment why inline complete calls io_clean_op() io_req_complete_state() calls io_clean_op() and it may be not entirely obvious, leave a comment. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/21806f862151e223fdf439e5e8ed7178a8d66979.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/io_uring.c b/fs/io_uring.c index eff24d8eb399a..5a06063d5d819 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1822,6 +1822,7 @@ static void io_req_complete_state(struct io_kiocb *req, long res, { struct io_submit_state *state; + /* clean per-opcode space, because req->compl is aliased with it */ if (io_req_needs_clean(req)) io_clean_op(req); req->result = res; From 5e371265ea1d3e0cd02236b1a6d79fe322523ae8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 24 Sep 2021 22:00:04 +0100 Subject: [PATCH 143/205] io_uring: disable draining earlier Clear ->drain_active in two more cases where we check for a need of draining. It's not a bug, but still may lead to some extra requests being punted to io-wq, and that may be not desirable. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/d20b265f77bb4e8860b15b9987252c7c711dfcba.1632516769.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5a06063d5d819..fd3dc6432a343 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6461,8 +6461,10 @@ static bool io_drain_req(struct io_kiocb *req) seq = io_get_sequence(req); /* Still a chance to pass the sequence check */ - if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) + if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) { + ctx->drain_active = false; return false; + } ret = io_req_prep_async(req); if (ret) @@ -6481,6 +6483,7 @@ static bool io_drain_req(struct io_kiocb *req) spin_unlock(&ctx->completion_lock); kfree(de); io_queue_async_work(req, NULL); + ctx->drain_active = false; return true; } From 22b2ca310afcea319c72e051df0371f668192b10 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 1 Oct 2021 18:07:00 +0100 Subject: [PATCH 144/205] io_uring: extra a helper for drain init Add a helper io_init_req_drain for initialising requests with IOSQE_DRAIN set. Also move bits from preambule of io_drain_req() in there, because we already modify all the bits needed inside the helper. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/dcb412825b35b1cb8891245a387d7d69f8d14cef.1633107393.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 48 ++++++++++++++++++++++-------------------------- 1 file changed, 22 insertions(+), 26 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index fd3dc6432a343..fb073915fa5c0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6430,28 +6430,11 @@ static u32 io_get_sequence(struct io_kiocb *req) static bool io_drain_req(struct io_kiocb *req) { - struct io_kiocb *pos; struct io_ring_ctx *ctx = req->ctx; struct io_defer_entry *de; int ret; u32 seq; - /* not interested in head, start from the first linked */ - io_for_each_link(pos, req->link) { - /* - * If we need to drain a request in the middle of a link, drain - * the head request and the next request/link after the current - * link. Considering sequential execution of links, - * IOSQE_IO_DRAIN will be maintained for every request of our - * link. - */ - if (pos->flags & REQ_F_IO_DRAIN) { - ctx->drain_next = true; - req->flags |= REQ_F_IO_DRAIN; - break; - } - } - /* Still need defer if there is pending req in defer list. 
*/ if (likely(list_empty_careful(&ctx->defer_list) && !(req->flags & REQ_F_IO_DRAIN))) { @@ -6992,6 +6975,25 @@ static inline bool io_check_restriction(struct io_ring_ctx *ctx, return true; } +static void io_init_req_drain(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_kiocb *head = ctx->submit_state.link.head; + + ctx->drain_active = true; + if (head) { + /* + * If we need to drain a request in the middle of a link, drain + * the head request and the next request/link after the current + * link. Considering sequential execution of links, + * IOSQE_IO_DRAIN will be maintained for every request of our + * link. + */ + head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; + ctx->drain_next = true; + } +} + static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) __must_hold(&ctx->uring_lock) @@ -7018,14 +7020,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if ((sqe_flags & IOSQE_BUFFER_SELECT) && !io_op_defs[req->opcode].buffer_select) return -EOPNOTSUPP; - if (sqe_flags & IOSQE_IO_DRAIN) { - struct io_submit_link *link = &ctx->submit_state.link; - - ctx->drain_active = true; - req->flags |= REQ_F_FORCE_ASYNC; - if (link->head) - link->head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; - } + if (sqe_flags & IOSQE_IO_DRAIN) + io_init_req_drain(req); } if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) { if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags)) @@ -7037,7 +7033,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) { ctx->drain_next = false; ctx->drain_active = true; - req->flags |= REQ_F_FORCE_ASYNC | IOSQE_IO_DRAIN; + req->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC; } } From e0eb71dcfc4b862261d99f7f90169142867beb0a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 1 Oct 2021 18:07:01 +0100 Subject: [PATCH 145/205] io_uring: don't return from io_drain_req() Never return from io_drain_req() but punt to tw if we've got there but it's a false positive and we shouldn't actually drain. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/93583cee51b8783706b76c73196c155b28d9e762.1633107393.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index fb073915fa5c0..b0b4c5b00f372 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6428,46 +6428,39 @@ static u32 io_get_sequence(struct io_kiocb *req) return seq; } -static bool io_drain_req(struct io_kiocb *req) +static void io_drain_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; struct io_defer_entry *de; int ret; - u32 seq; + u32 seq = io_get_sequence(req); /* Still need defer if there is pending req in defer list. 
*/ - if (likely(list_empty_careful(&ctx->defer_list) && - !(req->flags & REQ_F_IO_DRAIN))) { - ctx->drain_active = false; - return false; - } - - seq = io_get_sequence(req); - /* Still a chance to pass the sequence check */ if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) { +queue: ctx->drain_active = false; - return false; + io_req_task_queue(req); + return; } ret = io_req_prep_async(req); - if (ret) - goto fail; + if (ret) { +fail: + io_req_complete_failed(req, ret); + return; + } io_prep_async_link(req); de = kmalloc(sizeof(*de), GFP_KERNEL); if (!de) { ret = -ENOMEM; -fail: - io_req_complete_failed(req, ret); - return true; + goto fail; } spin_lock(&ctx->completion_lock); if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { spin_unlock(&ctx->completion_lock); kfree(de); - io_queue_async_work(req, NULL); - ctx->drain_active = false; - return true; + goto queue; } trace_io_uring_defer(ctx, req, req->user_data); @@ -6475,7 +6468,6 @@ static bool io_drain_req(struct io_kiocb *req) de->seq = seq; list_add_tail(&de->list, &ctx->defer_list); spin_unlock(&ctx->completion_lock); - return true; } static void io_clean_op(struct io_kiocb *req) @@ -6931,8 +6923,8 @@ static void io_queue_sqe_fallback(struct io_kiocb *req) { if (req->flags & REQ_F_FAIL) { io_req_complete_fail_submit(req); - } else if (unlikely(req->ctx->drain_active) && io_drain_req(req)) { - return; + } else if (unlikely(req->ctx->drain_active)) { + io_drain_req(req); } else { int ret = io_req_prep_async(req); From fc0ae0244bbbedf309a835afb278e39a3fc9bb94 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 1 Oct 2021 18:07:02 +0100 Subject: [PATCH 146/205] io_uring: init opcode in io_init_req() Move io_req_prep() call inside of io_init_req(), it simplifies a bit error handling for callers. 
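A sketch of the simplification above: when the prep step is called from inside init, the caller has a single error return to handle instead of two separate failure paths. init_req()/prep_req() are simplified stand-ins.

#include <stdio.h>

struct req { int opcode; };

static int prep_req(struct req *req)
{
	return req->opcode >= 0 ? 0 : -1;
}

static int init_req(struct req *req, int opcode)
{
	req->opcode = opcode;
	/* fold the prep call into init: one combined error path for callers */
	return prep_req(req);
}

int main(void)
{
	struct req r;

	if (init_req(&r, 1))	/* single check covers init and prep */
		return 1;
	printf("ready: opcode %d\n", r.opcode);
	return 0;
}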
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a0f59291fd52da4672c323542fd56fd899e23f8f.1633107393.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index b0b4c5b00f372..81292041378f7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6992,7 +6992,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, { struct io_submit_state *state; unsigned int sqe_flags; - int personality, ret = 0; + int personality; /* req is partially pre-initialised, see io_preinit_req() */ req->opcode = READ_ONCE(sqe->opcode); @@ -7053,9 +7053,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), (sqe_flags & IOSQE_FIXED_FILE)); if (unlikely(!req->file)) - ret = -EBADF; + return -EBADF; } - return ret; + + return io_req_prep(req, sqe); } static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, @@ -7067,7 +7068,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ret = io_init_req(ctx, req, sqe); if (unlikely(ret)) { -fail_req: trace_io_uring_req_failed(sqe, ret); /* fail even hard links since we don't submit */ @@ -7092,10 +7092,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, return ret; } req_fail_link_node(req, ret); - } else { - ret = io_req_prep(req, sqe); - if (unlikely(ret)) - goto fail_req; } /* don't need @sqe from now on */ From 30d51dd4ad2040d4c90497287b69635af7c67502 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 1 Oct 2021 18:07:03 +0100 Subject: [PATCH 147/205] io_uring: clean up buffer select Hiding a pointer to a struct io_buffer in rw.addr is error prone. We have some place in io_kiocb, so keep kbuf's in a separate field without aliasing and risks of it being misused. 
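A sketch of the aliasing problem being removed: stashing a pointer in a u64 address field needs casts on every access and silently breaks if the field is reused, while a dedicated typed member lets the compiler check it. The structs below are cut-down stand-ins, not io_kiocb/io_buffer.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct buffer { uint64_t addr; unsigned int len; };

struct req_old {
	uint64_t addr;		/* doubles as user address *and* buffer pointer */
};

struct req_new {
	uint64_t addr;		/* always the user address */
	struct buffer *kbuf;	/* selected buffer: typed, no casts, no aliasing */
};

int main(void)
{
	struct buffer buf = { 0x1000, 512 };

	struct req_old o = { .addr = (uint64_t)(uintptr_t)&buf };	/* cast soup */
	struct req_new n = { .addr = 0x2000, .kbuf = &buf };

	printf("old: %u\n", ((struct buffer *)(uintptr_t)o.addr)->len);
	printf("new: %u, user addr %" PRIx64 "\n", n.kbuf->len, n.addr);
	return 0;
}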
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/3e63a6a953b04cad81d9ea827b12344dd57b37b4.1633107393.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 46 ++++++++++++---------------------------------- 1 file changed, 12 insertions(+), 34 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 81292041378f7..5b3b52815b88b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -573,7 +573,6 @@ struct io_sr_msg { int msg_flags; int bgid; size_t len; - struct io_buffer *kbuf; }; struct io_open { @@ -877,6 +876,7 @@ struct io_kiocb { struct io_mapped_ubuf *imu; struct io_wq_work work; const struct cred *creds; + struct io_buffer *kbuf; }; struct io_tctx_node { @@ -2376,12 +2376,9 @@ static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) { - struct io_buffer *kbuf; - if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) return 0; - kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; - return io_put_kbuf(req, kbuf); + return io_put_kbuf(req, req->kbuf); } static inline bool io_run_task_work(void) @@ -3000,9 +2997,9 @@ static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock) } static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, - int bgid, struct io_buffer *kbuf, - bool needs_lock) + int bgid, bool needs_lock) { + struct io_buffer *kbuf = req->kbuf; struct io_buffer *head; if (req->flags & REQ_F_BUFFER_SELECTED) @@ -3024,12 +3021,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, } if (*len > kbuf->len) *len = kbuf->len; + req->flags |= REQ_F_BUFFER_SELECTED; + req->kbuf = kbuf; } else { kbuf = ERR_PTR(-ENOBUFS); } io_ring_submit_unlock(req->ctx, needs_lock); - return kbuf; } @@ -3039,13 +3037,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, struct io_buffer *kbuf; u16 bgid; - kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; bgid = req->buf_index; - kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); + kbuf = io_buffer_select(req, len, bgid, needs_lock); if (IS_ERR(kbuf)) return kbuf; - req->rw.addr = (u64) (unsigned long) kbuf; - req->flags |= REQ_F_BUFFER_SELECTED; return u64_to_user_ptr(kbuf->addr); } @@ -3101,9 +3096,8 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, bool needs_lock) { if (req->flags & REQ_F_BUFFER_SELECTED) { - struct io_buffer *kbuf; + struct io_buffer *kbuf = req->kbuf; - kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; iov[0].iov_base = u64_to_user_ptr(kbuf->addr); iov[0].iov_len = kbuf->len; return 0; @@ -4869,20 +4863,13 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, bool needs_lock) { struct io_sr_msg *sr = &req->sr_msg; - struct io_buffer *kbuf; - - kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); - if (IS_ERR(kbuf)) - return kbuf; - sr->kbuf = kbuf; - req->flags |= REQ_F_BUFFER_SELECTED; - return kbuf; + return io_buffer_select(req, &sr->len, sr->bgid, needs_lock); } static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) { - return io_put_kbuf(req, req->sr_msg.kbuf); + return io_put_kbuf(req, req->kbuf); } static int io_recvmsg_prep_async(struct io_kiocb *req) @@ -6473,17 +6460,8 @@ static void io_drain_req(struct io_kiocb *req) static void io_clean_op(struct io_kiocb *req) { if (req->flags & REQ_F_BUFFER_SELECTED) { - switch (req->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_READ: - 
kfree((void *)(unsigned long)req->rw.addr); - break; - case IORING_OP_RECVMSG: - case IORING_OP_RECV: - kfree(req->sr_msg.kbuf); - break; - } + kfree(req->kbuf); + req->kbuf = NULL; } if (req->flags & REQ_F_NEED_CLEANUP) { From 6224590d242fc8c6b26941328e02a40b4384949b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 2 Oct 2021 19:36:14 +0100 Subject: [PATCH 148/205] io_uring: add flag to not fail link after timeout For some reason non-off IORING_OP_TIMEOUT always fails links, it's pretty inconvenient and unnecessary limits chaining after it to hard linking, which is far from ideal, e.g. doesn't pair well with timeout cancellation. Add a flag forcing it to not fail links on -ETIME. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/17c7ec0fb7a6113cc6be8cdaedcada0ba836ac0e.1633199723.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 ++++++-- include/uapi/linux/io_uring.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5b3b52815b88b..5b3b4f8f96597 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5857,7 +5857,10 @@ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags) static void io_req_task_timeout(struct io_kiocb *req, bool *locked) { - req_set_fail(req); + struct io_timeout_data *data = req->async_data; + + if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS)) + req_set_fail(req); io_req_complete_post(req, -ETIME, 0); } @@ -6063,7 +6066,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (off && is_timeout_link) return -EINVAL; flags = READ_ONCE(sqe->timeout_flags); - if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK)) + if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK | + IORING_TIMEOUT_ETIME_SUCCESS)) return -EINVAL; /* more than one clock specified is invalid, obviously */ if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index b270a07b285ec..c45b5e9a93879 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -158,6 +158,7 @@ enum { #define IORING_TIMEOUT_BOOTTIME (1U << 2) #define IORING_TIMEOUT_REALTIME (1U << 3) #define IORING_LINK_TIMEOUT_UPDATE (1U << 4) +#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5) #define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME) #define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE) /* From 7e3709d57651feab9c77d7a5a8024041f73f69f7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:46 +0100 Subject: [PATCH 149/205] io_uring: optimise kiocb layout We want ->comp_list in the second cacheline, which is hotter comparing to the 3rd. Swap the field with ->link, which is not as hot and controlled by flags and so not accessed unless there is a link. By the way add a couple of comments for io_kiocb fields. 
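A sketch of the layout reasoning above: fields touched on every completion are kept in the earlier cachelines, and offsetof()/static_assert can verify where a member actually lands. The struct is a cut-down stand-in for io_kiocb, not its real layout.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };

struct req {
	/* hot per-I/O data first (payload shortened here) */
	char cmd[128];
	void *file;
	unsigned int flags;
	int result;

	/* keep the completion-batching link in the hotter, earlier part */
	struct node comp_list;

	/* colder: only used when a link is set up */
	struct req *link;
};

static_assert(offsetof(struct req, comp_list) < 3 * 64,
	      "comp_list should stay within the first three cachelines");

int main(void)
{
	printf("comp_list at %zu, link at %zu\n",
	       offsetof(struct req, comp_list), offsetof(struct req, link));
	return 0;
}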
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/9d9dde31f8f62279a5f48c575bbc27b8290edc0c.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5b3b4f8f96597..ab07a0813f209 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -863,19 +863,22 @@ struct io_kiocb { struct task_struct *task; u64 user_data; - struct io_kiocb *link; struct percpu_ref *fixed_rsrc_refs; - /* used with ctx->iopoll_list with reads/writes */ + /* used by request caches, completion batching and iopoll */ struct io_wq_work_node comp_list; + struct io_kiocb *link; struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; + /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; /* store used ubuf, so we can prevent reloading */ struct io_mapped_ubuf *imu; struct io_wq_work work; + /* custom credentials, valid IFF REQ_F_CREDS is set */ const struct cred *creds; + /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ struct io_buffer *kbuf; }; From 51d48dab62ed9e44e0ccc723411b0d95286a821c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:47 +0100 Subject: [PATCH 150/205] io_uring: add more likely/unlikely() annotations Add two extra unlikely() in io_submit_sqes() and one around io_req_needs_clean() to help the compiler to avoid extra jumps in hot paths. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/88e087afe657e7660194353aada9b00f11d480f9.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index ab07a0813f209..bc87ed85f2065 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1957,7 +1957,7 @@ static inline void io_dismantle_req(struct io_kiocb *req) { unsigned int flags = req->flags; - if (io_req_needs_clean(req)) + if (unlikely(io_req_needs_clean(req))) io_clean_op(req); if (!(flags & REQ_F_FIXED_FILE)) io_put_file(req->file); @@ -7198,11 +7198,11 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) unsigned int entries = io_sqring_entries(ctx); int submitted = 0; - if (!entries) + if (unlikely(!entries)) return 0; /* make sure SQ entry isn't read before tail */ nr = min3(nr, ctx->sq_entries, entries); - if (!percpu_ref_tryget_many(&ctx->refs, nr)) + if (unlikely(!percpu_ref_tryget_many(&ctx->refs, nr))) return -EAGAIN; io_get_task_refs(nr); From fff4e40e3094972b119e38c8f0de4e1ca9fec654 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:48 +0100 Subject: [PATCH 151/205] io_uring: delay req queueing into compl-batch list io_req_complete_state() is inlined and used in lots of places, so we want to keep it concise. Move adding a request into a completion batch list from io_req_complete_state() into the consumer, i.e. __io_queue_sqe(). 
before vs after text data bss dec hex filename 91894 14002 8 105904 19db0 ./fs/io_uring.o 91046 14002 8 105056 19a60 ./fs/io_uring.o Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4afca4e11abfd4cc8e99777fdcaf4d34cf4d022d.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index bc87ed85f2065..045474b7b9c25 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1446,6 +1446,13 @@ static void io_prep_async_link(struct io_kiocb *req) } } +static inline void io_req_add_compl_list(struct io_kiocb *req) +{ + struct io_submit_state *state = &req->ctx->submit_state; + + wq_list_add_tail(&req->comp_list, &state->compl_reqs); +} + static void io_queue_async_work(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; @@ -1820,20 +1827,15 @@ static inline bool io_req_needs_clean(struct io_kiocb *req) return req->flags & IO_REQ_CLEAN_FLAGS; } -static void io_req_complete_state(struct io_kiocb *req, long res, - unsigned int cflags) +static inline void io_req_complete_state(struct io_kiocb *req, long res, + unsigned int cflags) { - struct io_submit_state *state; - /* clean per-opcode space, because req->compl is aliased with it */ if (io_req_needs_clean(req)) io_clean_op(req); req->result = res; req->compl.cflags = cflags; req->flags |= REQ_F_COMPLETE_INLINE; - - state = &req->ctx->submit_state; - wq_list_add_tail(&req->comp_list, &state->compl_reqs); } static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, @@ -2626,10 +2628,12 @@ static void io_req_task_complete(struct io_kiocb *req, bool *locked) unsigned int cflags = io_put_rw_kbuf(req); long res = req->result; - if (*locked) + if (*locked) { io_req_complete_state(req, res, cflags); - else + io_req_add_compl_list(req); + } else { io_req_complete_post(req, res, cflags); + } } static void __io_complete_rw(struct io_kiocb *req, long res, long res2, @@ -6886,8 +6890,10 @@ static inline void __io_queue_sqe(struct io_kiocb *req) ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); - if (req->flags & REQ_F_COMPLETE_INLINE) + if (req->flags & REQ_F_COMPLETE_INLINE) { + io_req_add_compl_list(req); return; + } /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts From a33ae9ce16a8ca62c5dffbe8909d185c6c5b4d77 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:49 +0100 Subject: [PATCH 152/205] io_uring: optimise request allocation Even after fully inlining io_alloc_req() my compiler does a NULL check in the path of successful allocation, no hacks like an empty dereference help it. Restructure io_alloc_req() by splitting out refilling part, so the compiler generate a slightly better binary. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/eda17571bdc7248d8e617b23e7132a5416e4680b.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 045474b7b9c25..614c982c2697a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1912,18 +1912,17 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) * Because of that, io_alloc_req() should be called only under ->uring_lock * and with extra caution to not get a request that is still worked on. 
*/ -static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) +static bool __io_alloc_req_refill(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_submit_state *state = &ctx->submit_state; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; void *reqs[IO_REQ_ALLOC_BATCH]; - struct io_wq_work_node *node; struct io_kiocb *req; int ret, i; if (likely(state->free_list.next || io_flush_cached_reqs(ctx))) - goto got_req; + return true; ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); @@ -1934,7 +1933,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) if (unlikely(ret <= 0)) { reqs[0] = kmem_cache_alloc(req_cachep, gfp); if (!reqs[0]) - return NULL; + return false; ret = 1; } @@ -1944,8 +1943,21 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) io_preinit_req(req, ctx); wq_stack_add_head(&req->comp_list, &state->free_list); } -got_req: - node = wq_stack_extract(&state->free_list); + return true; +} + +static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx) +{ + if (unlikely(!ctx->submit_state.free_list.next)) + return __io_alloc_req_refill(ctx); + return true; +} + +static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx) +{ + struct io_wq_work_node *node; + + node = wq_stack_extract(&ctx->submit_state.free_list); return container_of(node, struct io_kiocb, comp_list); } @@ -7217,12 +7229,12 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) const struct io_uring_sqe *sqe; struct io_kiocb *req; - req = io_alloc_req(ctx); - if (unlikely(!req)) { + if (unlikely(!io_alloc_req_refill(ctx))) { if (!submitted) submitted = -EAGAIN; break; } + req = io_alloc_req(ctx); sqe = io_get_sqe(ctx); if (unlikely(!sqe)) { wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); From 765ff496c781a982ed0a882a7784c93ce10d1155 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:50 +0100 Subject: [PATCH 153/205] io_uring: optimise INIT_WQ_LIST The invariant of io_wq_work_list is that it's empty IFF ->first is NULL, so no need to initially set ->last. With now having more users of the list it may play a role, i.e. used in each tw iteration and on every completion flushing. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/c464ab5cab6e46a858c6d39c107e92b3b5291f13.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io-wq.h | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/io-wq.h b/fs/io-wq.h index 87ba6a7336307..41bf37674a49d 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -38,7 +38,6 @@ struct io_wq_work_list { #define wq_list_empty(list) (READ_ONCE((list)->first) == NULL) #define INIT_WQ_LIST(list) do { \ (list)->first = NULL; \ - (list)->last = NULL; \ } while (0) static inline void wq_list_add_after(struct io_wq_work_node *node, From aede728aae355d4c1d38dd02747d415af011eea7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:51 +0100 Subject: [PATCH 154/205] io_uring: don't wake sqpoll in io_cqring_ev_posted io_cqring_ev_posted() doesn't need to wake SQPOLL, it's either done by userspace or with task_work, but no action is required on request completion. Rip off bits waking it up in io_cqring_ev_posted(). 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b49dab27b64cf11f4c50f2f90dcaac123430e05d.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 614c982c2697a..af2cbe633345f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1619,8 +1619,6 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx) */ if (wq_has_sleeper(&ctx->cq_wait)) wake_up_all(&ctx->cq_wait); - if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait)) - wake_up(&ctx->sq_data->wait); if (io_should_trigger_evfd(ctx)) eventfd_signal(ctx->cq_ev_fd, 1); if (waitqueue_active(&ctx->poll_wait)) From d60aa65ba221f038404b98d8484f562f72bb807b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:52 +0100 Subject: [PATCH 155/205] io_uring: merge CQ and poll waitqueues ->cq_wait and ->poll_wait and waken up in the same manner, use a single waitqueue for both of them. CQ waiters are queued exclusively, so wake up should first go over all pollers and that's what we need. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/00fe603e50000365774cf8435ef5fe03f049c1c9.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index af2cbe633345f..c9e6f4b687184 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -394,7 +394,6 @@ struct io_ring_ctx { unsigned cached_cq_tail; unsigned cq_entries; struct eventfd_ctx *cq_ev_fd; - struct wait_queue_head poll_wait; struct wait_queue_head cq_wait; unsigned cq_extra; atomic_t cq_timeouts; @@ -1300,7 +1299,6 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) ctx->flags = p->flags; init_waitqueue_head(&ctx->sqo_sq_wait); INIT_LIST_HEAD(&ctx->sqd_list); - init_waitqueue_head(&ctx->poll_wait); INIT_LIST_HEAD(&ctx->cq_overflow_list); init_completion(&ctx->ref_comp); xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1); @@ -1621,8 +1619,6 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx) wake_up_all(&ctx->cq_wait); if (io_should_trigger_evfd(ctx)) eventfd_signal(ctx->cq_ev_fd, 1); - if (waitqueue_active(&ctx->poll_wait)) - wake_up_interruptible(&ctx->poll_wait); } static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) @@ -1636,8 +1632,6 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) } if (io_should_trigger_evfd(ctx)) eventfd_signal(ctx->cq_ev_fd, 1); - if (waitqueue_active(&ctx->poll_wait)) - wake_up_interruptible(&ctx->poll_wait); } /* Returns true if there are no backlogged entries after the flush */ @@ -9253,7 +9247,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait) struct io_ring_ctx *ctx = file->private_data; __poll_t mask = 0; - poll_wait(file, &ctx->poll_wait, wait); + poll_wait(file, &ctx->cq_wait, wait); /* * synchronizes with barrier from wq_has_sleeper call in * io_commit_cqring From 37f0e767e177af328fcaf74d3772495235e361a8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:53 +0100 Subject: [PATCH 156/205] io_uring: optimise ctx referencing by requests Currenlty, we allocate one ctx reference per request at submission time and put them at free. It's batched and not so expensive but it still bloats the kernel, adds 2 function calls for rcu and adds some overhead for request counting in io_free_batch_list(). Always keep one reference with a request, even when it's freed and in io_uring request caches. 
There is extra work at ring exit / quiesce paths, which now need to put all cached requests. io_ring_exit_work() is already looping, so it's not a problem. Add hybrid-busy waiting to io_ctx_quiesce() as well for now. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/99613fbe396e80777228cde39bbda1aa8938554e.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c9e6f4b687184..9ec86e35668f5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1807,7 +1807,6 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_put_task(req->task, 1); wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; - percpu_ref_put(&ctx->refs); } io_commit_cqring(ctx); spin_unlock(&ctx->completion_lock); @@ -1929,6 +1928,7 @@ static bool __io_alloc_req_refill(struct io_ring_ctx *ctx) ret = 1; } + percpu_ref_get_many(&ctx->refs, ret); for (i = 0; i < ret; i++) { req = reqs[i]; @@ -1986,8 +1986,6 @@ static void __io_free_req(struct io_kiocb *req) wq_list_add_head(&req->comp_list, &ctx->locked_free_list); ctx->locked_free_nr++; spin_unlock(&ctx->completion_lock); - - percpu_ref_put(&ctx->refs); } static inline void io_remove_next_linked(struct io_kiocb *req) @@ -2276,7 +2274,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, __must_hold(&ctx->uring_lock) { struct task_struct *task = NULL; - int task_refs = 0, ctx_refs = 0; + int task_refs = 0; do { struct io_kiocb *req = container_of(node, struct io_kiocb, @@ -2296,12 +2294,9 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, task_refs = 0; } task_refs++; - ctx_refs++; wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); } while (node); - if (ctx_refs) - percpu_ref_put_many(&ctx->refs, ctx_refs); if (task) io_put_task(task, task_refs); } @@ -7212,8 +7207,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) return 0; /* make sure SQ entry isn't read before tail */ nr = min3(nr, ctx->sq_entries, entries); - if (unlikely(!percpu_ref_tryget_many(&ctx->refs, nr))) - return -EAGAIN; io_get_task_refs(nr); io_submit_state_start(&ctx->submit_state, nr); @@ -7243,7 +7236,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) int unused = nr - ref_used; current->io_uring->cached_refs += unused; - percpu_ref_put_many(&ctx->refs, unused); } io_submit_state_end(ctx); @@ -9164,6 +9156,7 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) static void io_req_caches_free(struct io_ring_ctx *ctx) { struct io_submit_state *state = &ctx->submit_state; + int nr = 0; mutex_lock(&ctx->uring_lock); io_flush_cached_locked_reqs(ctx, state); @@ -9175,7 +9168,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx) node = wq_stack_extract(&state->free_list); req = container_of(node, struct io_kiocb, comp_list); kmem_cache_free(req_cachep, req); + nr++; } + if (nr) + percpu_ref_put_many(&ctx->refs, nr); mutex_unlock(&ctx->uring_lock); } @@ -9345,6 +9341,8 @@ static void io_ring_exit_work(struct work_struct *work) io_sq_thread_unpark(sqd); } + io_req_caches_free(ctx); + if (WARN_ON_ONCE(time_after(jiffies, timeout))) { /* there is little hope left, don't run it too often */ interval = HZ * 60; @@ -10724,10 +10722,14 @@ static int io_ctx_quiesce(struct io_ring_ctx *ctx) */ mutex_unlock(&ctx->uring_lock); do { - ret = wait_for_completion_interruptible(&ctx->ref_comp); - if (!ret) + ret = 
wait_for_completion_interruptible_timeout(&ctx->ref_comp, HZ); + if (ret) { + ret = min(0L, ret); break; + } + ret = io_run_task_work_sig(); + io_req_caches_free(ctx); } while (ret >= 0); mutex_lock(&ctx->uring_lock); From c072481ded14bceb9a4163ac81ce73d907a43855 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:54 +0100 Subject: [PATCH 157/205] io_uring: mark cold functions Attribute cold functions so compilers can optimise them for size. It shrinks the binary by 2.5-3% text data bss dec hex filename 90670 14002 8 104680 198e8 ./fs/io_uring.o 88053 14002 8 102063 18eaf ./fs/io_uring.o Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b53d385f91dca45170b67d7f11c7abd787e821f6.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 123 ++++++++++++++++++++++++++------------------------ 1 file changed, 64 insertions(+), 59 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 9ec86e35668f5..e404c98e846ad 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1230,7 +1230,7 @@ static inline void req_fail_link_node(struct io_kiocb *req, int res) req->result = res; } -static void io_ring_ctx_ref_free(struct percpu_ref *ref) +static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref) { struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); @@ -1242,7 +1242,7 @@ static inline bool io_is_timeout_noseq(struct io_kiocb *req) return !req->timeout.off; } -static void io_fallback_req_func(struct work_struct *work) +static __cold void io_fallback_req_func(struct work_struct *work) { struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, fallback_work.work); @@ -1262,7 +1262,7 @@ static void io_fallback_req_func(struct work_struct *work) } -static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) +static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; int hash_bits; @@ -1361,7 +1361,7 @@ static inline bool io_req_ffs_set(struct io_kiocb *req) return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); } -static void io_req_track_inflight(struct io_kiocb *req) +static inline void io_req_track_inflight(struct io_kiocb *req) { if (!(req->flags & REQ_F_INFLIGHT)) { req->flags |= REQ_F_INFLIGHT; @@ -1500,7 +1500,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status) } } -static void io_queue_deferred(struct io_ring_ctx *ctx) +static __cold void io_queue_deferred(struct io_ring_ctx *ctx) { while (!list_empty(&ctx->defer_list)) { struct io_defer_entry *de = list_first_entry(&ctx->defer_list, @@ -1514,7 +1514,7 @@ static void io_queue_deferred(struct io_ring_ctx *ctx) } } -static void io_flush_timeouts(struct io_ring_ctx *ctx) +static __cold void io_flush_timeouts(struct io_ring_ctx *ctx) __must_hold(&ctx->completion_lock) { u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts); @@ -1547,7 +1547,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) spin_unlock_irq(&ctx->timeout_lock); } -static void __io_commit_cqring_flush(struct io_ring_ctx *ctx) +static __cold void __io_commit_cqring_flush(struct io_ring_ctx *ctx) { if (ctx->off_timeout_used) io_flush_timeouts(ctx); @@ -1903,7 +1903,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx) * Because of that, io_alloc_req() should be called only under ->uring_lock * and with extra caution to not get a request that is still worked on. 
*/ -static bool __io_alloc_req_refill(struct io_ring_ctx *ctx) +static __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) __must_hold(&ctx->uring_lock) { struct io_submit_state *state = &ctx->submit_state; @@ -1975,7 +1975,7 @@ static inline void io_dismantle_req(struct io_kiocb *req) } } -static void __io_free_req(struct io_kiocb *req) +static __cold void __io_free_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; @@ -2467,7 +2467,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) * We can't just wait for polled events to come to us, we have to actively * find and complete them. */ -static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) +static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) { if (!(ctx->flags & IORING_SETUP_IOPOLL)) return; @@ -5632,8 +5632,8 @@ static bool io_poll_remove_one(struct io_kiocb *req) /* * Returns true if we found and killed one or more poll requests */ -static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, - bool cancel_all) +static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, + struct task_struct *tsk, bool cancel_all) { struct hlist_node *tmp; struct io_kiocb *req; @@ -6425,7 +6425,7 @@ static u32 io_get_sequence(struct io_kiocb *req) return seq; } -static void io_drain_req(struct io_kiocb *req) +static __cold void io_drain_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; struct io_defer_entry *de; @@ -7305,7 +7305,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) return ret; } -static void io_sqd_update_thread_idle(struct io_sq_data *sqd) +static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) { struct io_ring_ctx *ctx; unsigned sq_thread_idle = 0; @@ -7559,7 +7559,7 @@ static void io_free_page_table(void **table, size_t size) kfree(table); } -static void **io_alloc_page_table(size_t size) +static __cold void **io_alloc_page_table(size_t size) { unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE); size_t init_size = size; @@ -7588,7 +7588,7 @@ static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node) kfree(ref_node); } -static void io_rsrc_node_ref_zero(struct percpu_ref *ref) +static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref) { struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs); struct io_ring_ctx *ctx = node->rsrc_data->ctx; @@ -7665,7 +7665,8 @@ static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx) return ctx->rsrc_backup_node ? 
0 : -ENOMEM; } -static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ctx) +static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data, + struct io_ring_ctx *ctx) { int ret; @@ -7721,9 +7722,9 @@ static void io_rsrc_data_free(struct io_rsrc_data *data) kfree(data); } -static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put, - u64 __user *utags, unsigned nr, - struct io_rsrc_data **pdata) +static __cold int io_rsrc_data_alloc(struct io_ring_ctx *ctx, rsrc_put_fn *do_put, + u64 __user *utags, unsigned nr, + struct io_rsrc_data **pdata) { struct io_rsrc_data *data; int ret = -ENOMEM; @@ -8493,8 +8494,8 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, return io_wq_create(concurrency, &data); } -static int io_uring_alloc_task_context(struct task_struct *task, - struct io_ring_ctx *ctx) +static __cold int io_uring_alloc_task_context(struct task_struct *task, + struct io_ring_ctx *ctx) { struct io_uring_task *tctx; int ret; @@ -8541,8 +8542,8 @@ void __io_uring_free(struct task_struct *tsk) tsk->io_uring = NULL; } -static int io_sq_offload_create(struct io_ring_ctx *ctx, - struct io_uring_params *p) +static __cold int io_sq_offload_create(struct io_ring_ctx *ctx, + struct io_uring_params *p) { int ret; @@ -9181,7 +9182,7 @@ static void io_wait_rsrc_data(struct io_rsrc_data *data) wait_for_completion(&data->done); } -static void io_ring_ctx_free(struct io_ring_ctx *ctx) +static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) { io_sq_thread_finish(ctx); @@ -9290,7 +9291,7 @@ struct io_tctx_exit { struct io_ring_ctx *ctx; }; -static void io_tctx_exit_cb(struct callback_head *cb) +static __cold void io_tctx_exit_cb(struct callback_head *cb) { struct io_uring_task *tctx = current->io_uring; struct io_tctx_exit *work; @@ -9305,14 +9306,14 @@ static void io_tctx_exit_cb(struct callback_head *cb) complete(&work->completion); } -static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) +static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); return req->ctx == data; } -static void io_ring_exit_work(struct work_struct *work) +static __cold void io_ring_exit_work(struct work_struct *work) { struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); unsigned long timeout = jiffies + HZ * 60 * 5; @@ -9383,8 +9384,8 @@ static void io_ring_exit_work(struct work_struct *work) } /* Returns true if we found and killed one or more timeouts */ -static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, - bool cancel_all) +static __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, + struct task_struct *tsk, bool cancel_all) { struct io_kiocb *req, *tmp; int canceled = 0; @@ -9406,7 +9407,7 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk, return canceled != 0; } -static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) +static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) { unsigned long index; struct creds *creds; @@ -9468,8 +9469,9 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data) return ret; } -static bool io_cancel_defer_files(struct io_ring_ctx *ctx, - struct task_struct *task, bool cancel_all) +static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all) { struct io_defer_entry *de; LIST_HEAD(list); @@ -9494,7 +9496,7 @@ static bool io_cancel_defer_files(struct io_ring_ctx *ctx, 
return true; } -static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) +static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) { struct io_tctx_node *node; enum io_wq_cancel cret; @@ -9518,9 +9520,9 @@ static bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) return ret; } -static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, - struct task_struct *task, - bool cancel_all) +static __cold void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, + struct task_struct *task, + bool cancel_all) { struct io_task_cancel cancel = { .task = task, .all = cancel_all, }; struct io_uring_task *tctx = task ? task->io_uring : NULL; @@ -9610,7 +9612,7 @@ static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx) /* * Remove this io_uring_file -> task mapping. */ -static void io_uring_del_tctx_node(unsigned long index) +static __cold void io_uring_del_tctx_node(unsigned long index) { struct io_uring_task *tctx = current->io_uring; struct io_tctx_node *node; @@ -9633,7 +9635,7 @@ static void io_uring_del_tctx_node(unsigned long index) kfree(node); } -static void io_uring_clean_tctx(struct io_uring_task *tctx) +static __cold void io_uring_clean_tctx(struct io_uring_task *tctx) { struct io_wq *wq = tctx->io_wq; struct io_tctx_node *node; @@ -9660,7 +9662,7 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) return percpu_counter_sum(&tctx->inflight); } -static void io_uring_drop_tctx_refs(struct task_struct *task) +static __cold void io_uring_drop_tctx_refs(struct task_struct *task) { struct io_uring_task *tctx = task->io_uring; unsigned int refs = tctx->cached_refs; @@ -9676,7 +9678,8 @@ static void io_uring_drop_tctx_refs(struct task_struct *task) * Find any io_uring ctx that this task has registered or done IO on, and cancel * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation. 
*/ -static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) +static __cold void io_uring_cancel_generic(bool cancel_all, + struct io_sq_data *sqd) { struct io_uring_task *tctx = current->io_uring; struct io_ring_ctx *ctx; @@ -9769,7 +9772,7 @@ static void *io_uring_validate_mmap_request(struct file *file, #ifdef CONFIG_MMU -static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) +static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) { size_t sz = vma->vm_end - vma->vm_start; unsigned long pfn; @@ -9954,7 +9957,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, } #ifdef CONFIG_PROC_FS -static int io_uring_show_cred(struct seq_file *m, unsigned int id, +static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id, const struct cred *cred) { struct user_namespace *uns = seq_user_ns(m); @@ -9986,7 +9989,8 @@ static int io_uring_show_cred(struct seq_file *m, unsigned int id, return 0; } -static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) +static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, + struct seq_file *m) { struct io_sq_data *sq = NULL; struct io_overflow_cqe *ocqe; @@ -10098,7 +10102,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) spin_unlock(&ctx->completion_lock); } -static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) +static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f) { struct io_ring_ctx *ctx = f->private_data; @@ -10122,8 +10126,8 @@ static const struct file_operations io_uring_fops = { #endif }; -static int io_allocate_scq_urings(struct io_ring_ctx *ctx, - struct io_uring_params *p) +static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, + struct io_uring_params *p) { struct io_rings *rings; size_t size, sq_array_offset; @@ -10212,8 +10216,8 @@ static struct file *io_uring_get_file(struct io_ring_ctx *ctx) return file; } -static int io_uring_create(unsigned entries, struct io_uring_params *p, - struct io_uring_params __user *params) +static __cold int io_uring_create(unsigned entries, struct io_uring_params *p, + struct io_uring_params __user *params) { struct io_ring_ctx *ctx; struct file *file; @@ -10371,7 +10375,8 @@ SYSCALL_DEFINE2(io_uring_setup, u32, entries, return io_uring_setup(entries, params); } -static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args) +static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg, + unsigned nr_args) { struct io_uring_probe *p; size_t size; @@ -10427,8 +10432,8 @@ static int io_register_personality(struct io_ring_ctx *ctx) return id; } -static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg, - unsigned int nr_args) +static __cold int io_register_restrictions(struct io_ring_ctx *ctx, + void __user *arg, unsigned int nr_args) { struct io_uring_restriction *res; size_t size; @@ -10562,7 +10567,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg, return __io_register_rsrc_update(ctx, type, &up, up.nr); } -static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, +static __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, unsigned int size, unsigned int type) { struct io_uring_rsrc_register rr; @@ -10588,8 +10593,8 @@ static int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg, return -EINVAL; } -static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg, - unsigned len) +static 
__cold int io_register_iowq_aff(struct io_ring_ctx *ctx, + void __user *arg, unsigned len) { struct io_uring_task *tctx = current->io_uring; cpumask_var_t new_mask; @@ -10615,7 +10620,7 @@ static int io_register_iowq_aff(struct io_ring_ctx *ctx, void __user *arg, return ret; } -static int io_unregister_iowq_aff(struct io_ring_ctx *ctx) +static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx) { struct io_uring_task *tctx = current->io_uring; @@ -10625,8 +10630,8 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx) return io_wq_cpu_affinity(tctx->io_wq, NULL); } -static int io_register_iowq_max_workers(struct io_ring_ctx *ctx, - void __user *arg) +static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, + void __user *arg) { struct io_uring_task *tctx = NULL; struct io_sq_data *sqd = NULL; @@ -10707,7 +10712,7 @@ static bool io_register_op_must_quiesce(int op) } } -static int io_ctx_quiesce(struct io_ring_ctx *ctx) +static __cold int io_ctx_quiesce(struct io_ring_ctx *ctx) { long ret; From c1e53a6988b9c83dd8bbc759414bc0f13ff1fe0c Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:55 +0100 Subject: [PATCH 158/205] io_uring: optimise io_free_batch_list() Delay reading the next node in io_free_batch_list(), allows the compiler to load the value a bit later improving register spilling in some cases. With gcc 11.1 it helped to move @task_refs variable from the stack to a register and optimises out a couple of per request instructions. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/cc9fdfb6f72a4e8bc9918a5e9f2d97869a263ae4.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index e404c98e846ad..1b007f77fe52a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2280,9 +2280,10 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); - node = req->comp_list.next; - if (!req_ref_put_and_test(req)) + if (!req_ref_put_and_test(req)) { + node = req->comp_list.next; continue; + } io_queue_next(req); io_dismantle_req(req); @@ -2294,6 +2295,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, task_refs = 0; } task_refs++; + node = req->comp_list.next; wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list); } while (node); From d886e185a128a4f350c4df6471c0f403c2982ae8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:56 +0100 Subject: [PATCH 159/205] io_uring: control ->async_data with a REQ_F flag ->async_data is a slow path, so it won't matter much if we do the clean up inside io_clean_op(). Moreover, in many cases it's allocated together with setting one or more of IO_REQ_CLEAN_FLAGS flags, so it'd go through io_clean_op() anyway. Control ->async_data allocation with a new flag REQ_F_ASYNC_DATA, so we can do all the maintainence under io_req_needs_clean() fast check. 
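For readers who want the shape of this pattern outside the kernel tree: keeping a per-request flag for every optional resource lets a single bitwise test on the hot path stand in for several pointer checks, with the actual freeing pushed into a cold helper. The sketch below is a stand-alone userspace rendition, not io_uring code; the struct, flag values and helper names are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* illustrative only: mirrors the pattern, not the io_uring definitions */
#define TOY_F_NEED_CLEANUP   (1u << 0)
#define TOY_F_BUFFER_PICKED  (1u << 1)
#define TOY_F_ASYNC_DATA     (1u << 2)   /* set iff ->async_data is allocated */
#define TOY_CLEAN_FLAGS      (TOY_F_NEED_CLEANUP | TOY_F_BUFFER_PICKED | TOY_F_ASYNC_DATA)

struct toy_req {
        uint32_t flags;
        void *async_data;        /* only valid when TOY_F_ASYNC_DATA is set */
};

static bool toy_has_async_data(const struct toy_req *req)
{
        /* one register test instead of loading and comparing the pointer */
        return req->flags & TOY_F_ASYNC_DATA;
}

static void toy_clean(struct toy_req *req)
{
        /* cold path: only entered when at least one clean flag is set */
        if (req->flags & TOY_F_ASYNC_DATA) {
                free(req->async_data);
                req->async_data = NULL;
        }
        req->flags &= ~TOY_CLEAN_FLAGS;
}

static void toy_free(struct toy_req *req)
{
        /* hot path does a single test covering every optional resource */
        if (req->flags & TOY_CLEAN_FLAGS)
                toy_clean(req);
}

int main(void)
{
        struct toy_req r = { .flags = TOY_F_ASYNC_DATA, .async_data = malloc(64) };

        (void)toy_has_async_data(&r);
        toy_free(&r);
        return 0;
}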
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6892cf5883c459f36bda26f30ceb16742b20b84b.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 72 ++++++++++++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 26 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 1b007f77fe52a..aee8ecc09168b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -109,7 +109,8 @@ #define SQE_VALID_FLAGS (SQE_COMMON_FLAGS|IOSQE_BUFFER_SELECT|IOSQE_IO_DRAIN) #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \ - REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS) + REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \ + REQ_F_ASYNC_DATA) #define IO_TCTX_REFS_CACHE_NR (1U << 10) @@ -732,6 +733,7 @@ enum { REQ_F_CREDS_BIT, REQ_F_REFCOUNT_BIT, REQ_F_ARM_LTIMEOUT_BIT, + REQ_F_ASYNC_DATA_BIT, /* keep async read/write and isreg together and in order */ REQ_F_NOWAIT_READ_BIT, REQ_F_NOWAIT_WRITE_BIT, @@ -787,6 +789,8 @@ enum { REQ_F_REFCOUNT = BIT(REQ_F_REFCOUNT_BIT), /* there is a linked timeout that has to be armed */ REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT), + /* ->async_data allocated */ + REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT), }; struct async_poll { @@ -847,8 +851,6 @@ struct io_kiocb { struct io_completion compl; }; - /* opcode allocated if it needs to store data for async defer */ - void *async_data; u8 opcode; /* polled IO has completed */ u8 iopoll_completed; @@ -863,6 +865,8 @@ struct io_kiocb { u64 user_data; struct percpu_ref *fixed_rsrc_refs; + /* store used ubuf, so we can prevent reloading */ + struct io_mapped_ubuf *imu; /* used by request caches, completion batching and iopoll */ struct io_wq_work_node comp_list; @@ -872,8 +876,9 @@ struct io_kiocb { struct hlist_node hash_node; /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; - /* store used ubuf, so we can prevent reloading */ - struct io_mapped_ubuf *imu; + + /* opcode allocated if it needs to store data for async defer */ + void *async_data; struct io_wq_work work; /* custom credentials, valid IFF REQ_F_CREDS is set */ const struct cred *creds; @@ -1219,6 +1224,11 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task, return false; } +static inline bool req_has_async_data(struct io_kiocb *req) +{ + return req->flags & REQ_F_ASYNC_DATA; +} + static inline void req_set_fail(struct io_kiocb *req) { req->flags |= REQ_F_FAIL; @@ -1969,10 +1979,6 @@ static inline void io_dismantle_req(struct io_kiocb *req) io_put_file(req->file); if (req->fixed_rsrc_refs) percpu_ref_put(req->fixed_rsrc_refs); - if (req->async_data) { - kfree(req->async_data); - req->async_data = NULL; - } } static __cold void __io_free_req(struct io_kiocb *req) @@ -2566,7 +2572,7 @@ static bool io_resubmit_prep(struct io_kiocb *req) { struct io_async_rw *rw = req->async_data; - if (!rw) + if (!req_has_async_data(req)) return !io_req_prep_async(req); iov_iter_restore(&rw->iter, &rw->iter_state); return true; @@ -2878,7 +2884,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_async_rw *io = req->async_data; /* add previously done IO, if any */ - if (io && io->bytes_done > 0) { + if (req_has_async_data(req) && io->bytes_done > 0) { if (ret < 0) ret = io->bytes_done; else @@ -3257,7 +3263,11 @@ static inline bool io_alloc_async_data(struct io_kiocb *req) { WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); - return req->async_data == NULL; + if 
(req->async_data) { + req->flags |= REQ_F_ASYNC_DATA; + return false; + } + return true; } static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, @@ -3266,7 +3276,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, { if (!force && !io_op_defs[req->opcode].needs_async_setup) return 0; - if (!req->async_data) { + if (!req_has_async_data(req)) { struct io_async_rw *iorw; if (io_alloc_async_data(req)) { @@ -3399,12 +3409,13 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; - struct io_async_rw *rw = req->async_data; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct iov_iter_state __state, *state; + struct io_async_rw *rw; ssize_t ret, ret2; - if (rw) { + if (req_has_async_data(req)) { + rw = req->async_data; iter = &rw->iter; state = &rw->iter_state; /* @@ -3534,12 +3545,13 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; struct iov_iter __iter, *iter = &__iter; - struct io_async_rw *rw = req->async_data; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct iov_iter_state __state, *state; + struct io_async_rw *rw; ssize_t ret, ret2; - if (rw) { + if (req_has_async_data(req)) { + rw = req->async_data; iter = &rw->iter; state = &rw->iter_state; iov_iter_restore(iter, state); @@ -4708,8 +4720,9 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(!sock)) return -ENOTSOCK; - kmsg = req->async_data; - if (!kmsg) { + if (req_has_async_data(req)) { + kmsg = req->async_data; + } else { ret = io_sendmsg_copy_hdr(req, &iomsg); if (ret) return ret; @@ -4925,8 +4938,9 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(!sock)) return -ENOTSOCK; - kmsg = req->async_data; - if (!kmsg) { + if (req_has_async_data(req)) { + kmsg = req->async_data; + } else { ret = io_recvmsg_copy_hdr(req, &iomsg); if (ret) return ret; @@ -5117,7 +5131,7 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags) int ret; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - if (req->async_data) { + if (req_has_async_data(req)) { io = req->async_data; } else { ret = move_addr_to_kernel(req->connect.addr, @@ -5133,7 +5147,7 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags) ret = __sys_connect_file(req->file, &io->address, req->connect.addr_len, file_flags); if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) { - if (req->async_data) + if (req_has_async_data(req)) return -EAGAIN; if (io_alloc_async_data(req)) { ret = -ENOMEM; @@ -5424,7 +5438,10 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); req_ref_get(req); poll->wait.private = req; + *poll_ptr = poll; + if (req->opcode == IORING_OP_POLL_ADD) + req->flags |= REQ_F_ASYNC_DATA; } pt->nr_entries++; @@ -6086,7 +6103,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (unlikely(off && !req->ctx->off_timeout_used)) req->ctx->off_timeout_used = true; - if (!req->async_data && io_alloc_async_data(req)) + if (!req_has_async_data(req) && io_alloc_async_data(req)) return -ENOMEM; data = req->async_data; @@ -6395,7 +6412,7 @@ static int io_req_prep_async(struct io_kiocb *req) { if 
(!io_op_defs[req->opcode].needs_async_setup) return 0; - if (WARN_ON_ONCE(req->async_data)) + if (WARN_ON_ONCE(req_has_async_data(req))) return -EFAULT; if (io_alloc_async_data(req)) return -EAGAIN; @@ -6538,7 +6555,10 @@ static void io_clean_op(struct io_kiocb *req) } if (req->flags & REQ_F_CREDS) put_cred(req->creds); - + if (req->flags & REQ_F_ASYNC_DATA) { + kfree(req->async_data); + req->async_data = NULL; + } req->flags &= ~IO_REQ_CLEAN_FLAGS; } From d17e56eb4907c72054e63c71a2123d32b04ebd67 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:57 +0100 Subject: [PATCH 160/205] io_uring: remove struct io_completion We keep struct io_completion only as a temporal storage of cflags, Place it in io_kiocb, it's cleaner, removes extra bits and even might be used for future optimisations. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/5299bd5c223204065464bd87a515d0e405316086.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index aee8ecc09168b..c7dea155f43af 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -684,11 +684,6 @@ struct io_hardlink { int flags; }; -struct io_completion { - struct file *file; - u32 cflags; -}; - struct io_async_connect { struct sockaddr_storage address; }; @@ -847,22 +842,20 @@ struct io_kiocb { struct io_mkdir mkdir; struct io_symlink symlink; struct io_hardlink hardlink; - /* use only after cleaning per-op data, see io_clean_op() */ - struct io_completion compl; }; u8 opcode; /* polled IO has completed */ u8 iopoll_completed; - u16 buf_index; + unsigned int flags; + + u64 user_data; u32 result; + u32 cflags; struct io_ring_ctx *ctx; - unsigned int flags; - atomic_t refs; struct task_struct *task; - u64 user_data; struct percpu_ref *fixed_rsrc_refs; /* store used ubuf, so we can prevent reloading */ @@ -870,13 +863,13 @@ struct io_kiocb { /* used by request caches, completion batching and iopoll */ struct io_wq_work_node comp_list; + atomic_t refs; struct io_kiocb *link; struct io_task_work io_task_work; /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ struct hlist_node hash_node; /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; - /* opcode allocated if it needs to store data for async defer */ void *async_data; struct io_wq_work work; @@ -1831,11 +1824,8 @@ static inline bool io_req_needs_clean(struct io_kiocb *req) static inline void io_req_complete_state(struct io_kiocb *req, long res, unsigned int cflags) { - /* clean per-opcode space, because req->compl is aliased with it */ - if (io_req_needs_clean(req)) - io_clean_op(req); req->result = res; - req->compl.cflags = cflags; + req->cflags = cflags; req->flags |= REQ_F_COMPLETE_INLINE; } @@ -2321,7 +2311,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) comp_list); __io_cqring_fill_event(ctx, req->user_data, req->result, - req->compl.cflags); + req->cflags); } io_commit_cqring(ctx); spin_unlock(&ctx->completion_lock); From 867f8fa5aeb7fa51290bf3567ce2bbc45580a469 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:58 +0100 Subject: [PATCH 161/205] io_uring: inline io_req_needs_clean() There is only a single user of io_req_needs_clean() inline it. 
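The member reshuffling in the last two patches follows a general rule of thumb: keep the fields that the hot submission and completion paths touch together at the front of the structure, ideally within the first cache line, and push slow-path state towards the end. A small stand-alone layout check, with invented field names rather than the real io_kiocb members, makes the idea concrete.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* invented example struct: hot fields first, cold bookkeeping last */
struct toy_kiocb {
        /* hot: read on every submission and completion */
        uint8_t  opcode;
        uint8_t  iopoll_completed;
        uint16_t buf_index;
        uint32_t flags;
        uint64_t user_data;
        int32_t  result;
        uint32_t cflags;
        void    *ctx;
        void    *file;
        /* cold: only touched on slow paths */
        void    *async_data;
        void    *creds;
        char     slow_path_scratch[128];
};

int main(void)
{
        /* the hot members above end well inside the first 64-byte line */
        printf("hot area ends at offset %zu, struct size %zu\n",
               offsetof(struct toy_kiocb, async_data), sizeof(struct toy_kiocb));
        return 0;
}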
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6111d0221ef4b439cad401e135dd6a5f990a0501.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c7dea155f43af..dc83effa8317b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1816,11 +1816,6 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_cqring_ev_posted(ctx); } -static inline bool io_req_needs_clean(struct io_kiocb *req) -{ - return req->flags & IO_REQ_CLEAN_FLAGS; -} - static inline void io_req_complete_state(struct io_kiocb *req, long res, unsigned int cflags) { @@ -1963,7 +1958,7 @@ static inline void io_dismantle_req(struct io_kiocb *req) { unsigned int flags = req->flags; - if (unlikely(io_req_needs_clean(req))) + if (unlikely(flags & IO_REQ_CLEAN_FLAGS)) io_clean_op(req); if (!(flags & REQ_F_FIXED_FILE)) io_put_file(req->file); From eb6e6f0690c846f7de46181bab3954c12c96e11e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:02:59 +0100 Subject: [PATCH 162/205] io_uring: inline io_poll_complete Inline io_poll_complete(), it's simple and doesn't have any particular purpose. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/933d7ee3e4450749a2d892235462c8f18d030293.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index dc83effa8317b..815a7ec68dbcf 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5292,16 +5292,6 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask) return !(flags & IORING_CQE_F_MORE); } -static inline bool io_poll_complete(struct io_kiocb *req, __poll_t mask) - __must_hold(&req->ctx->completion_lock) -{ - bool done; - - done = __io_poll_complete(req, mask); - io_commit_cqring(req->ctx); - return done; -} - static void io_poll_task_func(struct io_kiocb *req, bool *locked) { struct io_ring_ctx *ctx = req->ctx; @@ -5791,7 +5781,8 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) if (mask) { /* no async, we'd stolen it */ ipt.error = 0; - done = io_poll_complete(req, mask); + done = __io_poll_complete(req, mask); + io_commit_cqring(req->ctx); } spin_unlock(&ctx->completion_lock); From 54daa9b2d80ab35824464b35a99f716e1cdf2ccb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 4 Oct 2021 20:03:00 +0100 Subject: [PATCH 163/205] io_uring: correct fill events helpers types CQE result is a 32-bit integer, so the functions generating CQEs are better to accept not long but ints. Convert io_cqring_fill_event() and other helpers. 
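A brief illustration of why the narrower types help: the CQE carries a 32-bit result and 32-bit flags, so having the producers take s32/u32 documents where any narrowing happens instead of hiding it behind an implicit conversion from long. The toy below is a userspace sketch with made-up names, not the io_uring helpers.

#include <stdint.h>
#include <stdio.h>

/* invented mini-CQE: result and flags are 32-bit in the ring,
 * so the producer takes int32_t/uint32_t rather than long */
struct toy_cqe {
        uint64_t user_data;
        int32_t  res;
        uint32_t flags;
};

static void toy_fill_cqe(struct toy_cqe *cqe, uint64_t user_data,
                         int32_t res, uint32_t flags)
{
        cqe->user_data = user_data;
        cqe->res = res;        /* no silent long -> 32-bit narrowing here */
        cqe->flags = flags;
}

int main(void)
{
        struct toy_cqe cqe;

        toy_fill_cqe(&cqe, 0xdeadbeef, -11 /* e.g. -EAGAIN */, 0);
        printf("res=%d flags=%u\n", cqe.res, cqe.flags);
        return 0;
}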
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/7ca6f15255e9117eae28adcac272744cae29b113.1633373302.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 815a7ec68dbcf..44946711fe62a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1072,7 +1072,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx, static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd); static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags); + s32 res, u32 cflags); static void io_put_req(struct io_kiocb *req); static void io_put_req_deferred(struct io_kiocb *req); static void io_dismantle_req(struct io_kiocb *req); @@ -1730,7 +1730,7 @@ static inline void io_get_task_refs(int nr) } static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags) + s32 res, u32 cflags) { struct io_overflow_cqe *ocqe; @@ -1758,7 +1758,7 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, } static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags) + s32 res, u32 cflags) { struct io_uring_cqe *cqe; @@ -1781,13 +1781,13 @@ static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data /* not as hot to bloat with inlining */ static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data, - long res, unsigned int cflags) + s32 res, u32 cflags) { return __io_cqring_fill_event(ctx, user_data, res, cflags); } -static void io_req_complete_post(struct io_kiocb *req, long res, - unsigned int cflags) +static void io_req_complete_post(struct io_kiocb *req, s32 res, + u32 cflags) { struct io_ring_ctx *ctx = req->ctx; @@ -1816,8 +1816,8 @@ static void io_req_complete_post(struct io_kiocb *req, long res, io_cqring_ev_posted(ctx); } -static inline void io_req_complete_state(struct io_kiocb *req, long res, - unsigned int cflags) +static inline void io_req_complete_state(struct io_kiocb *req, s32 res, + u32 cflags) { req->result = res; req->cflags = cflags; @@ -1825,7 +1825,7 @@ static inline void io_req_complete_state(struct io_kiocb *req, long res, } static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, - long res, unsigned cflags) + s32 res, u32 cflags) { if (issue_flags & IO_URING_F_COMPLETE_DEFER) io_req_complete_state(req, res, cflags); @@ -1833,12 +1833,12 @@ static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, io_req_complete_post(req, res, cflags); } -static inline void io_req_complete(struct io_kiocb *req, long res) +static inline void io_req_complete(struct io_kiocb *req, s32 res) { __io_req_complete(req, 0, res, 0); } -static void io_req_complete_failed(struct io_kiocb *req, long res) +static void io_req_complete_failed(struct io_kiocb *req, s32 res) { req_set_fail(req); io_req_complete_post(req, res, 0); @@ -2618,7 +2618,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res) static void io_req_task_complete(struct io_kiocb *req, bool *locked) { unsigned int cflags = io_put_rw_kbuf(req); - long res = req->result; + int res = req->result; if (*locked) { io_req_complete_state(req, res, cflags); From 6d63416dc57eb27a3d35e7b7526e9915479d7eff Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:46 +0100 Subject: [PATCH 164/205] io_uring: optimise 
plugging Plugging is only needed with requests that also need a file, so hide plugging under a ->needs_file check. Also, place ->needs_file and ->plug bits into the same byte of io_op_defs, it may matter for compilers, e.g. only with the change a tested one decided to optimise two memory testb into a more with two register testb. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/1600d1287bb7d16451d4ef3343252787a5314927.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 44946711fe62a..7759fabf9ae01 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -894,12 +894,12 @@ struct io_defer_entry { struct io_op_def { /* needs req->file assigned */ unsigned needs_file : 1; + /* should block plug */ + unsigned plug : 1; /* hash wq insertion if file is a regular file */ unsigned hash_reg_file : 1; /* unbound wq insertion if file is a non-regular file */ unsigned unbound_nonreg_file : 1; - /* opcode is not supported by this kernel */ - unsigned not_supported : 1; /* set if opcode supports polled "wait" */ unsigned pollin : 1; unsigned pollout : 1; @@ -907,8 +907,8 @@ struct io_op_def { unsigned buffer_select : 1; /* do prep async if is going to be punted */ unsigned needs_async_setup : 1; - /* should block plug */ - unsigned plug : 1; + /* opcode is not supported by this kernel */ + unsigned not_supported : 1; /* size of async data needed, if any */ unsigned short async_size; }; @@ -6978,7 +6978,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) __must_hold(&ctx->uring_lock) { - struct io_submit_state *state; unsigned int sqe_flags; int personality; @@ -7025,19 +7024,20 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, get_cred(req->creds); req->flags |= REQ_F_CREDS; } - state = &ctx->submit_state; - - /* - * Plug now if we have more than 1 IO left after this, and the target - * is potentially a read/write to block based storage. - */ - if (state->need_plug && io_op_defs[req->opcode].plug) { - blk_start_plug(&state->plug); - state->plug_started = true; - state->need_plug = false; - } if (io_op_defs[req->opcode].needs_file) { + struct io_submit_state *state = &ctx->submit_state; + + /* + * Plug now if we have more than 2 IO left after this, and the + * target is potentially a read/write to block based storage. + */ + if (state->need_plug && io_op_defs[req->opcode].plug) { + state->plug_started = true; + state->need_plug = false; + blk_start_plug(&state->plug); + } + req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), (sqe_flags & IOSQE_FIXED_FILE)); if (unlikely(!req->file)) From 756ab7c0ec71e5e1e8e6ea11af53324b23d5efd2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:47 +0100 Subject: [PATCH 165/205] io_uring: safer fallback_work free Add extra wq flushing for fallback_work, that's not necessary but safer if invariants of io_fallback_req_func() change. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/24179419d6748516299600bc914f50b9e0b02275.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 7759fabf9ae01..90bef813556e3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1262,7 +1262,6 @@ static __cold void io_fallback_req_func(struct work_struct *work) mutex_unlock(&ctx->uring_lock); } percpu_ref_put(&ctx->refs); - } static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) @@ -9212,6 +9211,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) if (ctx->rsrc_backup_node) io_rsrc_node_destroy(ctx->rsrc_backup_node); flush_delayed_work(&ctx->rsrc_put_work); + flush_delayed_work(&ctx->fallback_work); WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist)); From 5a158c6b0d033893cc80c28b182e1207253768a5 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:48 +0100 Subject: [PATCH 166/205] io_uring: reshuffle io_submit_state bits struct io_submit_state's ->free_list and ->link are hotter and smaller than ->plug, place them first. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6ad3c15849f50b27ad012c042c73e6e069d22df7.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 90bef813556e3..04d6e35ea0df7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -308,19 +308,15 @@ struct io_submit_link { }; struct io_submit_state { - struct blk_plug plug; + /* inline/task_work completion list, under ->uring_lock */ + struct io_wq_work_node free_list; + /* batch completion logic */ + struct io_wq_work_list compl_reqs; struct io_submit_link link; bool plug_started; bool need_plug; - - /* - * Batch completion logic - */ - struct io_wq_work_list compl_reqs; - - /* inline/task_work completion list, under ->uring_lock */ - struct io_wq_work_node free_list; + struct blk_plug plug; }; struct io_ring_ctx { From 4a04d1d14831d31f2cd0e31eb1568cc9c1be0095 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:49 +0100 Subject: [PATCH 167/205] io_uring: optimise out req->opcode reloading Looking at the assembly, the compiler decided to reload req->opcode in io_op_defs[opcode].needs_file instead of one it had in a register, so store it in a temp variable so it can be optimised out. Also move the personality block later, it's better for spilling/etc. as it only depends on @sqe, which we're keeping anyway. By the way, zero req->opcode if it over IORING_OP_LAST, not a problem, at the moment but is safer. 
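The opcode caching above is an instance of a general pattern: after a store the optimiser cannot see through, every later read of a field through the pointer may be a fresh load, while a local copy is loaded once and can live in a register. A stand-alone sketch follows; the names are invented, and the byte store stands in for whatever write the compiler must treat as potentially aliasing the request.

#include <stdbool.h>
#include <stdint.h>

struct op_def { bool needs_file; bool plug; };
struct toy_req { uint8_t opcode; uint32_t flags; };

static const struct op_def op_defs[256];   /* zero-initialised for the demo */

/* reload-prone: the char store may alias req->opcode for all the
 * compiler knows, so the second op_defs index re-reads it from memory */
static bool check_reloading(struct toy_req *req, unsigned char *log)
{
        bool need = op_defs[req->opcode].needs_file;

        *log = 1;
        return need && op_defs[req->opcode].plug;
}

/* cached: a single load into a local that nothing can alias */
static bool check_cached(struct toy_req *req, unsigned char *log)
{
        const uint8_t opcode = req->opcode;
        bool need = op_defs[opcode].needs_file;

        *log = 1;
        return need && op_defs[opcode].plug;
}

int main(void)
{
        struct toy_req r = { .opcode = 7 };
        unsigned char log = 0;

        return check_reloading(&r, &log) + check_cached(&r, &log);
}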
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6ba869f5f8b7b0f991c87fdf089f0abf87cbe06b.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 04d6e35ea0df7..7918a320104d1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6975,9 +6975,10 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, { unsigned int sqe_flags; int personality; + u8 opcode; /* req is partially pre-initialised, see io_preinit_req() */ - req->opcode = READ_ONCE(sqe->opcode); + req->opcode = opcode = READ_ONCE(sqe->opcode); /* same numerical values with corresponding REQ_F_*, safe to copy */ req->flags = sqe_flags = READ_ONCE(sqe->flags); req->user_data = READ_ONCE(sqe->user_data); @@ -6985,14 +6986,16 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->fixed_rsrc_refs = NULL; req->task = current; - if (unlikely(req->opcode >= IORING_OP_LAST)) + if (unlikely(opcode >= IORING_OP_LAST)) { + req->opcode = 0; return -EINVAL; + } if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { /* enforce forwards compatibility on users */ if (sqe_flags & ~SQE_VALID_FLAGS) return -EINVAL; if ((sqe_flags & IOSQE_BUFFER_SELECT) && - !io_op_defs[req->opcode].buffer_select) + !io_op_defs[opcode].buffer_select) return -EOPNOTSUPP; if (sqe_flags & IOSQE_IO_DRAIN) io_init_req_drain(req); @@ -7011,23 +7014,14 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, } } - personality = READ_ONCE(sqe->personality); - if (personality) { - req->creds = xa_load(&ctx->personalities, personality); - if (!req->creds) - return -EINVAL; - get_cred(req->creds); - req->flags |= REQ_F_CREDS; - } - - if (io_op_defs[req->opcode].needs_file) { + if (io_op_defs[opcode].needs_file) { struct io_submit_state *state = &ctx->submit_state; /* * Plug now if we have more than 2 IO left after this, and the * target is potentially a read/write to block based storage. */ - if (state->need_plug && io_op_defs[req->opcode].plug) { + if (state->need_plug && io_op_defs[opcode].plug) { state->plug_started = true; state->need_plug = false; blk_start_plug(&state->plug); @@ -7039,6 +7033,15 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, return -EBADF; } + personality = READ_ONCE(sqe->personality); + if (personality) { + req->creds = xa_load(&ctx->personalities, personality); + if (!req->creds) + return -EINVAL; + get_cred(req->creds); + req->flags |= REQ_F_CREDS; + } + return io_req_prep(req, sqe); } From 0cd3e3ddb4f60062c401929b2005eb7ff6399a4d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 6 Oct 2021 16:06:50 +0100 Subject: [PATCH 168/205] io_uring: remove extra io_ring_exit_work wake up task_work_add() takes care of waking up the thread, remove useless wake_up_process(). 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/de9a71ee255112dcaed3b5d426be24934e74722c.1633532552.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 1 - 1 file changed, 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 7918a320104d1..383b8f61359d0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -9367,7 +9367,6 @@ static __cold void io_ring_exit_work(struct work_struct *work) ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); if (WARN_ON_ONCE(ret)) continue; - wake_up_process(node->task); mutex_unlock(&ctx->uring_lock); wait_for_completion(&exit.completion); From def77acf4396a850544b41623d7720ea3a2674a9 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 12 Oct 2021 15:02:14 +0100 Subject: [PATCH 169/205] io_uring: fix io_free_batch_list races [ 158.514382] WARNING: CPU: 5 PID: 15251 at fs/io_uring.c:1141 io_free_batch_list+0x269/0x360 [ 158.514426] RIP: 0010:io_free_batch_list+0x269/0x360 [ 158.514437] Call Trace: [ 158.514440] __io_submit_flush_completions+0xde/0x180 [ 158.514444] tctx_task_work+0x14a/0x220 [ 158.514447] task_work_run+0x64/0xa0 [ 158.514448] __do_sys_io_uring_enter+0x7c/0x970 [ 158.514450] __x64_sys_io_uring_enter+0x22/0x30 [ 158.514451] do_syscall_64+0x43/0x90 [ 158.514453] entry_SYSCALL_64_after_hwframe+0x44/0xae We should not touch request internals including req->comp_list.next after putting our ref if it's not final, e.g. we can start freeing requests from the free cache. Fixed: 62ca9cb93e7f8 ("io_uring: optimise io_free_batch_list()") Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b1f4df38fbb8f111f52911a02fd418d0283a4e6f.1634047298.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 383b8f61359d0..141bfe720ce2e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2266,9 +2266,10 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); - if (!req_ref_put_and_test(req)) { + if (unlikely(req->flags & REQ_F_REFCOUNT)) { node = req->comp_list.next; - continue; + if (!req_ref_put_and_test(req)) + continue; } io_queue_next(req); From a46be971edb69fe4b1dcc4359c3ddf9127629dab Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 23:14:40 +0100 Subject: [PATCH 170/205] io_uring: optimise io_req_set_rsrc_node() io_req_set_rsrc_node() reloads loads req->ctx, however it's already in registers in all use cases, so better to pass it as a parameter. 
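Returning to the io_free_batch_list() race fixed a couple of patches back, the underlying rule is worth spelling out: when walking an intrusive list while dropping references, read the next pointer before the put that may free the node. The sketch below shows that ordering with invented types rather than io_kiocb.

#include <stdbool.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int refs;
};

static bool put_ref(struct node *n)
{
        /* returns true when the caller held the last reference */
        return --n->refs == 0;
}

static void free_list(struct node *head)
{
        struct node *node = head;

        while (node) {
                struct node *next = node->next;  /* grab before the put */

                if (put_ref(node))
                        free(node);
                /* touching node->next here would be a potential use-after-free */
                node = next;
        }
}

int main(void)
{
        struct node *a = calloc(1, sizeof(*a));
        struct node *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        a->refs = b->refs = 1;
        a->next = b;
        free_list(a);
        return 0;
}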
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/67a25557b8a51e90bfd578447a6f1671911b05ae.1633817310.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 141bfe720ce2e..6486ebf318667 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1174,10 +1174,9 @@ static inline void io_req_set_refcount(struct io_kiocb *req) __io_req_set_refcount(req, 1); } -static inline void io_req_set_rsrc_node(struct io_kiocb *req) +static inline void io_req_set_rsrc_node(struct io_kiocb *req, + struct io_ring_ctx *ctx) { - struct io_ring_ctx *ctx = req->ctx; - if (!req->fixed_rsrc_refs) { req->fixed_rsrc_refs = &ctx->rsrc_node->refs; percpu_ref_get(req->fixed_rsrc_refs); @@ -2828,7 +2827,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (req->opcode == IORING_OP_READ_FIXED || req->opcode == IORING_OP_WRITE_FIXED) { req->imu = NULL; - io_req_set_rsrc_node(req); + io_req_set_rsrc_node(req, ctx); } req->rw.addr = READ_ONCE(sqe->addr); @@ -6757,7 +6756,7 @@ static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx, file_ptr &= ~FFS_MASK; /* mask in overlapping REQ_F and FFS bits */ req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); - io_req_set_rsrc_node(req); + io_req_set_rsrc_node(req, ctx); return file; } From ab409402478462b5da007bfc46d165587c3adfc3 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 9 Oct 2021 23:14:41 +0100 Subject: [PATCH 171/205] io_uring: optimise rsrc referencing Apparently, percpu_ref_put/get() are expensive enough if done per request, get them in a batch and cache on the submission side to avoid getting it over and over again. Also, if we're completing under uring_lock, return refs back into the cache instead of perfcpu_ref_put(). Pretty similar to how we do tctx->cached_refs accounting, but fall back to normal putting when we already changed a rsrc node by the time of free. 
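The caching scheme amounts to batched reference counting: take references from the shared counter a batch at a time, hand them out from a local cache, and return completed requests' references to the cache when the protecting lock is already held. The sketch below shows the arithmetic with C11 atomics in a single-threaded userspace toy; the batch size, names and the assumption that the cache itself is serialised by a lock are illustrative, not the kernel implementation.

#include <stdatomic.h>
#include <stdio.h>

#define REF_BATCH 100

struct ref_src {
        atomic_long refs;       /* the shared, contended counter */
};

struct ref_cache {
        struct ref_src *src;
        long cached;            /* refs taken but not yet handed out */
};

/* take one reference; touch the shared counter only once per REF_BATCH */
static void cache_get(struct ref_cache *c)
{
        if (c->cached == 0) {
                atomic_fetch_add(&c->src->refs, REF_BATCH);
                c->cached = REF_BATCH;
        }
        c->cached--;
}

/* return one reference into the local cache instead of the atomic */
static void cache_put_local(struct ref_cache *c)
{
        c->cached++;
}

/* flush whatever is still cached back to the shared counter */
static void cache_drop(struct ref_cache *c)
{
        if (c->cached) {
                atomic_fetch_sub(&c->src->refs, c->cached);
                c->cached = 0;
        }
}

int main(void)
{
        struct ref_src src = { .refs = 1 };
        struct ref_cache cache = { .src = &src };
        int i;

        for (i = 0; i < 1000; i++)      /* 1000 gets -> only 10 atomic adds */
                cache_get(&cache);
        for (i = 0; i < 1000; i++)      /* completions refill the cache */
                cache_put_local(&cache);
        cache_drop(&cache);             /* one atomic sub flushes the rest */
        printf("refs back to %ld\n", atomic_load(&src.refs));
        return 0;
}

The drop step models what has to happen whenever the node the cached references belong to is switched out: the cache is flushed back before the switch, and anything that misses the cache falls back to a plain put.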
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b40d8c5bc77d3c9550df8a319117a374ac85f8f4.1633817310.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 6486ebf318667..6859dae218f90 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -360,6 +360,7 @@ struct io_ring_ctx { * uring_lock, and updated through io_uring_register(2) */ struct io_rsrc_node *rsrc_node; + int rsrc_cached_refs; struct io_file_table file_table; unsigned nr_user_files; unsigned nr_user_bufs; @@ -1174,12 +1175,52 @@ static inline void io_req_set_refcount(struct io_kiocb *req) __io_req_set_refcount(req, 1); } +#define IO_RSRC_REF_BATCH 100 + +static inline void io_req_put_rsrc_locked(struct io_kiocb *req, + struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + struct percpu_ref *ref = req->fixed_rsrc_refs; + + if (ref) { + if (ref == &ctx->rsrc_node->refs) + ctx->rsrc_cached_refs++; + else + percpu_ref_put(ref); + } +} + +static inline void io_req_put_rsrc(struct io_kiocb *req, struct io_ring_ctx *ctx) +{ + if (req->fixed_rsrc_refs) + percpu_ref_put(req->fixed_rsrc_refs); +} + +static __cold void io_rsrc_refs_drop(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + if (ctx->rsrc_cached_refs) { + percpu_ref_put_many(&ctx->rsrc_node->refs, ctx->rsrc_cached_refs); + ctx->rsrc_cached_refs = 0; + } +} + +static void io_rsrc_refs_refill(struct io_ring_ctx *ctx) + __must_hold(&ctx->uring_lock) +{ + ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH; + percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH); +} + static inline void io_req_set_rsrc_node(struct io_kiocb *req, struct io_ring_ctx *ctx) { if (!req->fixed_rsrc_refs) { req->fixed_rsrc_refs = &ctx->rsrc_node->refs; - percpu_ref_get(req->fixed_rsrc_refs); + ctx->rsrc_cached_refs--; + if (unlikely(ctx->rsrc_cached_refs < 0)) + io_rsrc_refs_refill(ctx); } } @@ -1800,6 +1841,7 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res, req->link = NULL; } } + io_req_put_rsrc(req, ctx); io_dismantle_req(req); io_put_task(req->task, 1); wq_list_add_head(&req->comp_list, &ctx->locked_free_list); @@ -1956,14 +1998,13 @@ static inline void io_dismantle_req(struct io_kiocb *req) io_clean_op(req); if (!(flags & REQ_F_FIXED_FILE)) io_put_file(req->file); - if (req->fixed_rsrc_refs) - percpu_ref_put(req->fixed_rsrc_refs); } static __cold void __io_free_req(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; + io_req_put_rsrc(req, ctx); io_dismantle_req(req); io_put_task(req->task, 1); @@ -2271,6 +2312,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, continue; } + io_req_put_rsrc_locked(req, ctx); io_queue_next(req); io_dismantle_req(req); @@ -7630,10 +7672,13 @@ static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx) static void io_rsrc_node_switch(struct io_ring_ctx *ctx, struct io_rsrc_data *data_to_kill) + __must_hold(&ctx->uring_lock) { WARN_ON_ONCE(!ctx->rsrc_backup_node); WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node); + io_rsrc_refs_drop(ctx); + if (data_to_kill) { struct io_rsrc_node *rsrc_node = ctx->rsrc_node; @@ -9187,6 +9232,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) ctx->mm_account = NULL; } + io_rsrc_refs_drop(ctx); /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */ io_wait_rsrc_data(ctx->buf_data); io_wait_rsrc_data(ctx->file_data); From 04f34081c5de35749274cf3bea8d8eb1c79b6ad1 Mon Sep 
17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:12 +0100 Subject: [PATCH 172/205] io_uring: consistent typing for issue_flags Some of the functions keep issue_flags as int, change those to unsigned. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/04ad43797783bc9cc7567f287ab545518f8e8cf2.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 6859dae218f90..094cd5f49214a 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3786,7 +3786,7 @@ static int io_mkdirat_prep(struct io_kiocb *req, return 0; } -static int io_mkdirat(struct io_kiocb *req, int issue_flags) +static int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) { struct io_mkdir *mkd = &req->mkdir; int ret; @@ -3835,7 +3835,7 @@ static int io_symlinkat_prep(struct io_kiocb *req, return 0; } -static int io_symlinkat(struct io_kiocb *req, int issue_flags) +static int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags) { struct io_symlink *sl = &req->symlink; int ret; @@ -3885,7 +3885,7 @@ static int io_linkat_prep(struct io_kiocb *req, return 0; } -static int io_linkat(struct io_kiocb *req, int issue_flags) +static int io_linkat(struct io_kiocb *req, unsigned int issue_flags) { struct io_hardlink *lnk = &req->hardlink; int ret; From f80a50a632d6f5c64729563892b0deb95d563f6d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:13 +0100 Subject: [PATCH 173/205] io_uring: prioritise read success path over fails Rearrange io_read return handling so first we expect it completing successfully and only then checking for errors, which is a colder path. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/c91c7c2da11815ec8b04b5d872f60dc4cde662c5.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 094cd5f49214a..86ef803db27a0 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3487,7 +3487,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) ret = 0; } else if (ret == -EIOCBQUEUED) { goto out_free; - } else if (ret <= 0 || ret == req->result || !force_nonblock || + } else if (ret == req->result || ret <= 0 || !force_nonblock || (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { /* read all, failed, already did sync or don't want to retry */ goto done; From 258f3a7f84d11ce1d6cb1f60fe524bb37f7c1019 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:14 +0100 Subject: [PATCH 174/205] io_uring: optimise rw comletion handlers Don't override req->result in io_complete_rw_iopoll() when it's already of the same value, we have an if just above it, so move the assignment there. Also, add one simle unlikely() in __io_complete_rw_common(). 
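The annotation mentioned above, unlikely(), is the kernel's wrapper around GCC's __builtin_expect(): it tells the compiler which way a branch usually goes so the common case can run straight-line without taken jumps. A userspace approximation, assuming nothing beyond that builtin:

/* Branch-prediction hints, modelled on the kernel's likely()/unlikely(). */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int complete_rw(long res, long expected)
{
        if (unlikely(res != expected)) {
                /* cold path: error handling and reissue checks live here */
                return -1;
        }
        /* hot path: the common success case stays on the fall-through side */
        return 0;
}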
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8dfeb4f84026a20172bcf82c05010abe955874ae.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 86ef803db27a0..18a91d268fb6b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2640,7 +2640,7 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res) { if (req->rw.kiocb.ki_flags & IOCB_WRITE) kiocb_end_write(req); - if (res != req->result) { + if (unlikely(res != req->result)) { if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) { req->flags |= REQ_F_REISSUE; @@ -2695,9 +2695,9 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) req->flags |= REQ_F_REISSUE; return; } + req->result = res; } - req->result = res; /* order with io_iopoll_complete() checking ->iopoll_completed */ smp_store_release(&req->iopoll_completed, 1); } From 538941e2681c76ba6d6a7dd28ce691a9e02a9bdf Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:15 +0100 Subject: [PATCH 175/205] io_uring: encapsulate rw state Add a new struct io_rw_state storing all iov related bits: fast iov, iterator and iterator state. Not much changes here, simply convert struct io_async_rw to use it. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/e8245ffcb568b228a009ec1eb79c993c813679f1.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 18a91d268fb6b..47365610de5d1 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -694,11 +694,15 @@ struct io_async_msghdr { struct sockaddr_storage addr; }; -struct io_async_rw { +struct io_rw_state { struct iovec fast_iov[UIO_FASTIOV]; - const struct iovec *free_iovec; struct iov_iter iter; struct iov_iter_state iter_state; +}; + +struct io_async_rw { + struct io_rw_state s; + const struct iovec *free_iovec; size_t bytes_done; struct wait_page_queue wpq; }; @@ -2596,7 +2600,7 @@ static bool io_resubmit_prep(struct io_kiocb *req) if (!req_has_async_data(req)) return !io_req_prep_async(req); - iov_iter_restore(&rw->iter, &rw->iter_state); + iov_iter_restore(&rw->s.iter, &rw->s.iter_state); return true; } @@ -3259,7 +3263,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, { struct io_async_rw *rw = req->async_data; - memcpy(&rw->iter, iter, sizeof(*iter)); + memcpy(&rw->s.iter, iter, sizeof(*iter)); rw->free_iovec = iovec; rw->bytes_done = 0; /* can only be fixed buffers, no need to do anything */ @@ -3268,13 +3272,13 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, if (!iovec) { unsigned iov_off = 0; - rw->iter.iov = rw->fast_iov; + rw->s.iter.iov = rw->s.fast_iov; if (iter->iov != fast_iov) { iov_off = iter->iov - fast_iov; - rw->iter.iov += iov_off; + rw->s.iter.iov += iov_off; } - if (rw->fast_iov != fast_iov) - memcpy(rw->fast_iov + iov_off, fast_iov + iov_off, + if (rw->s.fast_iov != fast_iov) + memcpy(rw->s.fast_iov + iov_off, fast_iov + iov_off, sizeof(struct iovec) * iter->nr_segs); } else { req->flags |= REQ_F_NEED_CLEANUP; @@ -3309,7 +3313,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, io_req_map_rw(req, iovec, fast_iov, iter); iorw = req->async_data; /* we've copied and mapped the iter, ensure state is saved */ - iov_iter_save_state(&iorw->iter, 
&iorw->iter_state); + iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); } return 0; } @@ -3317,10 +3321,10 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, static inline int io_rw_prep_async(struct io_kiocb *req, int rw) { struct io_async_rw *iorw = req->async_data; - struct iovec *iov = iorw->fast_iov; + struct iovec *iov = iorw->s.fast_iov; int ret; - ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); + ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false); if (unlikely(ret < 0)) return ret; @@ -3328,7 +3332,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) iorw->free_iovec = iov; if (iov) req->flags |= REQ_F_NEED_CLEANUP; - iov_iter_save_state(&iorw->iter, &iorw->iter_state); + iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); return 0; } @@ -3438,8 +3442,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->iter; - state = &rw->iter_state; + iter = &rw->s.iter; + state = &rw->s.iter_state; /* * We come here from an earlier attempt, restore our state to * match in case it doesn't. It's cheap enough that we don't @@ -3510,9 +3514,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * Now use our persistent iterator and state, if we aren't already. * We've restored and mapped the iter to match. */ - if (iter != &rw->iter) { - iter = &rw->iter; - state = &rw->iter_state; + if (iter != &rw->s.iter) { + iter = &rw->s.iter; + state = &rw->s.iter_state; } do { @@ -3574,8 +3578,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->iter; - state = &rw->iter_state; + iter = &rw->s.iter; + state = &rw->s.iter_state; iov_iter_restore(iter, state); iovec = NULL; } else { From c88598a92a587109d915b4d97831bcea774c8b6f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:16 +0100 Subject: [PATCH 176/205] io_uring: optimise read/write iov state storing Currently io_read() and io_write() keep separate pointers to an iter and to struct iov_iter_state, which is not great for register spilling and requires more on-stack copies. They are both either on-stack or in req->async_data at the same time, so use struct io_rw_state and keep a pointer only to it, so having all the state with just one pointer. 
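The refactoring in the two patches above can be pictured with stand-in types: all of the per-request iterator state sits in one aggregate, so a function carries a single pointer to it (either an on-stack copy or the one embedded in the async data) rather than separate iterator and saved-state pointers. A simplified sketch, not the kernel's actual layout:

#include <stddef.h>
#include <sys/uio.h>

/* Simplified stand-ins for iov_iter and iov_iter_state. */
struct iter       { const struct iovec *iov; size_t count; };
struct iter_state { const struct iovec *iov; size_t count; };

struct rw_state {
        struct iter       iter;        /* live iterator */
        struct iter_state iter_state;  /* snapshot restored after a failed attempt */
        struct iovec      fast_iov[8]; /* small inline vector, avoids an allocation */
};

struct async_rw {
        struct rw_state s;             /* the same aggregate, kept across retries */
        size_t bytes_done;
};

/* One pointer reaches everything; callers no longer juggle two pointers. */
static void save_state(struct rw_state *s)
{
        s->iter_state.iov   = s->iter.iov;
        s->iter_state.count = s->iter.count;
}

static void restore_state(struct rw_state *s)
{
        s->iter.iov   = s->iter_state.iov;
        s->iter.count = s->iter_state.count;
}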
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/5c5e7ffd7dc25fc35075c70411ba99df72f237fa.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 79 ++++++++++++++++++++++++--------------------------- 1 file changed, 37 insertions(+), 42 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 47365610de5d1..bb5cd8715f618 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -695,9 +695,9 @@ struct io_async_msghdr { }; struct io_rw_state { - struct iovec fast_iov[UIO_FASTIOV]; struct iov_iter iter; struct iov_iter_state iter_state; + struct iovec fast_iov[UIO_FASTIOV]; }; struct io_async_rw { @@ -3297,8 +3297,7 @@ static inline bool io_alloc_async_data(struct io_kiocb *req) } static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, - const struct iovec *fast_iov, - struct iov_iter *iter, bool force) + struct io_rw_state *s, bool force) { if (!force && !io_op_defs[req->opcode].needs_async_setup) return 0; @@ -3310,7 +3309,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, return -ENOMEM; } - io_req_map_rw(req, iovec, fast_iov, iter); + io_req_map_rw(req, iovec, s->fast_iov, &s->iter); iorw = req->async_data; /* we've copied and mapped the iter, ensure state is saved */ iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); @@ -3432,33 +3431,33 @@ static bool need_read_all(struct io_kiocb *req) static int io_read(struct io_kiocb *req, unsigned int issue_flags) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + struct io_rw_state __s, *s; + struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; - struct iov_iter __iter, *iter = &__iter; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - struct iov_iter_state __state, *state; struct io_async_rw *rw; ssize_t ret, ret2; if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->s.iter; - state = &rw->s.iter_state; + s = &rw->s; /* * We come here from an earlier attempt, restore our state to * match in case it doesn't. It's cheap enough that we don't * need to make this conditional. */ - iov_iter_restore(iter, state); + iov_iter_restore(&s->iter, &s->iter_state); iovec = NULL; } else { - ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); + s = &__s; + iovec = s->fast_iov; + ret = io_import_iovec(READ, req, &iovec, &s->iter, !force_nonblock); if (ret < 0) return ret; - state = &__state; - iov_iter_save_state(iter, state); + + iov_iter_save_state(&s->iter, &s->iter_state); } - req->result = iov_iter_count(iter); + req->result = iov_iter_count(&s->iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3468,7 +3467,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) /* If the file doesn't support async, just async punt */ if (force_nonblock && !io_file_supports_nowait(req, READ)) { - ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + ret = io_setup_async_rw(req, iovec, s, true); return ret ?: -EAGAIN; } @@ -3478,7 +3477,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) return ret; } - ret = io_iter_do_read(req, iter); + ret = io_iter_do_read(req, &s->iter); if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { req->flags &= ~REQ_F_REISSUE; @@ -3502,22 +3501,19 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * untouched in case of error. Restore it and we'll advance it * manually if we need to. 
*/ - iov_iter_restore(iter, state); + iov_iter_restore(&s->iter, &s->iter_state); - ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + ret2 = io_setup_async_rw(req, iovec, s, true); if (ret2) return ret2; iovec = NULL; rw = req->async_data; + s = &rw->s; /* * Now use our persistent iterator and state, if we aren't already. * We've restored and mapped the iter to match. */ - if (iter != &rw->s.iter) { - iter = &rw->s.iter; - state = &rw->s.iter_state; - } do { /* @@ -3525,11 +3521,11 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * above or inside this loop. Advance the iter by the bytes * that were consumed. */ - iov_iter_advance(iter, ret); - if (!iov_iter_count(iter)) + iov_iter_advance(&s->iter, ret); + if (!iov_iter_count(&s->iter)) break; rw->bytes_done += ret; - iov_iter_save_state(iter, state); + iov_iter_save_state(&s->iter, &s->iter_state); /* if we can retry, do so with the callbacks armed */ if (!io_rw_should_retry(req)) { @@ -3543,12 +3539,12 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) * desired page gets unlocked. We can also get a partial read * here, and if we do, then just retry at the new offset. */ - ret = io_iter_do_read(req, iter); + ret = io_iter_do_read(req, &s->iter); if (ret == -EIOCBQUEUED) return 0; /* we got some bytes, but not all. retry. */ kiocb->ki_flags &= ~IOCB_WAITQ; - iov_iter_restore(iter, state); + iov_iter_restore(&s->iter, &s->iter_state); } while (ret > 0); done: kiocb_done(kiocb, ret, issue_flags); @@ -3568,28 +3564,27 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_write(struct io_kiocb *req, unsigned int issue_flags) { - struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + struct io_rw_state __s, *s; + struct io_async_rw *rw; + struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; - struct iov_iter __iter, *iter = &__iter; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - struct iov_iter_state __state, *state; - struct io_async_rw *rw; ssize_t ret, ret2; if (req_has_async_data(req)) { rw = req->async_data; - iter = &rw->s.iter; - state = &rw->s.iter_state; - iov_iter_restore(iter, state); + s = &rw->s; + iov_iter_restore(&s->iter, &s->iter_state); iovec = NULL; } else { - ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); + s = &__s; + iovec = s->fast_iov; + ret = io_import_iovec(WRITE, req, &iovec, &s->iter, !force_nonblock); if (ret < 0) return ret; - state = &__state; - iov_iter_save_state(iter, state); + iov_iter_save_state(&s->iter, &s->iter_state); } - req->result = iov_iter_count(iter); + req->result = iov_iter_count(&s->iter); /* Ensure we clear previously set non-block flag */ if (!force_nonblock) @@ -3625,9 +3620,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) kiocb->ki_flags |= IOCB_WRITE; if (req->file->f_op->write_iter) - ret2 = call_write_iter(req->file, kiocb, iter); + ret2 = call_write_iter(req->file, kiocb, &s->iter); else if (req->file->f_op->write) - ret2 = loop_rw_iter(WRITE, req, iter); + ret2 = loop_rw_iter(WRITE, req, &s->iter); else ret2 = -EINVAL; @@ -3653,8 +3648,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) kiocb_done(kiocb, ret2, issue_flags); } else { copy_iov: - iov_iter_restore(iter, state); - ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); + iov_iter_restore(&s->iter, &s->iter_state); + ret = io_setup_async_rw(req, iovec, s, false); return ret ?: -EAGAIN; } out_free: From 
51aac424aef980a6238d4907a5bb5a411fa926eb Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:17 +0100 Subject: [PATCH 177/205] io_uring: optimise io_import_iovec nonblock passing First, change IO_URING_F_NONBLOCK to take sign bit of the int, so checking for it can be turned into test + sign-based-jump, makes the binary smaller and may be faster. Then, instead of passing need_lock boolean into io_import_iovec() just give it issue_flags, which is already stored somewhere. Saves some space on stack, a couple of test + cmov operations and other conversions. note: we still leave force_nonblock = issue_flags & IO_URING_F_NONBLOCK variable, but it's optimised out by the compiler into testing issue_flags directly. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ee96547e692f6c975c229cd82fc721679571a734.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 47 +++++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index bb5cd8715f618..0a0f84b28eba3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -198,8 +198,9 @@ struct io_rings { }; enum io_uring_cmd_flags { - IO_URING_F_NONBLOCK = 1, - IO_URING_F_COMPLETE_DEFER = 2, + IO_URING_F_COMPLETE_DEFER = 1, + /* int's last bit, sign checks are usually faster than a bit test */ + IO_URING_F_NONBLOCK = INT_MIN, }; struct io_mapped_ubuf { @@ -3037,10 +3038,11 @@ static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock) } static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, - int bgid, bool needs_lock) + int bgid, unsigned int issue_flags) { struct io_buffer *kbuf = req->kbuf; struct io_buffer *head; + bool needs_lock = !(issue_flags & IO_URING_F_NONBLOCK); if (req->flags & REQ_F_BUFFER_SELECTED) return kbuf; @@ -3072,13 +3074,13 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, } static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, - bool needs_lock) + unsigned int issue_flags) { struct io_buffer *kbuf; u16 bgid; bgid = req->buf_index; - kbuf = io_buffer_select(req, len, bgid, needs_lock); + kbuf = io_buffer_select(req, len, bgid, issue_flags); if (IS_ERR(kbuf)) return kbuf; return u64_to_user_ptr(kbuf->addr); @@ -3086,7 +3088,7 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, #ifdef CONFIG_COMPAT static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, - bool needs_lock) + unsigned int issue_flags) { struct compat_iovec __user *uiov; compat_ssize_t clen; @@ -3102,7 +3104,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, return -EINVAL; len = clen; - buf = io_rw_buffer_select(req, &len, needs_lock); + buf = io_rw_buffer_select(req, &len, issue_flags); if (IS_ERR(buf)) return PTR_ERR(buf); iov[0].iov_base = buf; @@ -3112,7 +3114,7 @@ static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, #endif static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, - bool needs_lock) + unsigned int issue_flags) { struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); void __user *buf; @@ -3124,7 +3126,7 @@ static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, len = iov[0].iov_len; if (len < 0) return -EINVAL; - buf = io_rw_buffer_select(req, &len, needs_lock); + buf = io_rw_buffer_select(req, &len, issue_flags); if (IS_ERR(buf)) return PTR_ERR(buf); iov[0].iov_base = buf; @@ -3133,7 +3135,7 @@ 
static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, } static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, - bool needs_lock) + unsigned int issue_flags) { if (req->flags & REQ_F_BUFFER_SELECTED) { struct io_buffer *kbuf = req->kbuf; @@ -3147,14 +3149,14 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, #ifdef CONFIG_COMPAT if (req->ctx->compat) - return io_compat_import(req, iov, needs_lock); + return io_compat_import(req, iov, issue_flags); #endif - return __io_iov_buffer_select(req, iov, needs_lock); + return __io_iov_buffer_select(req, iov, issue_flags); } static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, - struct iov_iter *iter, bool needs_lock) + struct iov_iter *iter, unsigned int issue_flags) { void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; @@ -3172,7 +3174,7 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { if (req->flags & REQ_F_BUFFER_SELECT) { - buf = io_rw_buffer_select(req, &sqe_len, needs_lock); + buf = io_rw_buffer_select(req, &sqe_len, issue_flags); if (IS_ERR(buf)) return PTR_ERR(buf); req->rw.len = sqe_len; @@ -3184,7 +3186,7 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, } if (req->flags & REQ_F_BUFFER_SELECT) { - ret = io_iov_buffer_select(req, *iovec, needs_lock); + ret = io_iov_buffer_select(req, *iovec, issue_flags); if (!ret) iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len); *iovec = NULL; @@ -3323,7 +3325,8 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) struct iovec *iov = iorw->s.fast_iov; int ret; - ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, false); + /* submission path, ->uring_lock should already be taken */ + ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, IO_URING_F_NONBLOCK); if (unlikely(ret < 0)) return ret; @@ -3451,7 +3454,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) } else { s = &__s; iovec = s->fast_iov; - ret = io_import_iovec(READ, req, &iovec, &s->iter, !force_nonblock); + ret = io_import_iovec(READ, req, &iovec, &s->iter, issue_flags); if (ret < 0) return ret; @@ -3579,7 +3582,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) } else { s = &__s; iovec = s->fast_iov; - ret = io_import_iovec(WRITE, req, &iovec, &s->iter, !force_nonblock); + ret = io_import_iovec(WRITE, req, &iovec, &s->iter, issue_flags); if (ret < 0) return ret; iov_iter_save_state(&s->iter, &s->iter_state); @@ -4902,11 +4905,11 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req, } static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, - bool needs_lock) + unsigned int issue_flags) { struct io_sr_msg *sr = &req->sr_msg; - return io_buffer_select(req, &sr->len, sr->bgid, needs_lock); + return io_buffer_select(req, &sr->len, sr->bgid, issue_flags); } static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) @@ -4969,7 +4972,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) } if (req->flags & REQ_F_BUFFER_SELECT) { - kbuf = io_recv_buffer_select(req, !force_nonblock); + kbuf = io_recv_buffer_select(req, issue_flags); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr); @@ -5021,7 +5024,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags) return -ENOTSOCK; if (req->flags & REQ_F_BUFFER_SELECT) { - kbuf = 
io_recv_buffer_select(req, !force_nonblock); + kbuf = io_recv_buffer_select(req, issue_flags); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); buf = u64_to_user_ptr(kbuf->addr); From 5e49c973fc39ffce287ab0763cb6f1e607962689 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:18 +0100 Subject: [PATCH 178/205] io_uring: clean up io_import_iovec Make io_import_iovec taking struct io_rw_state instead of an iter pointer. First it takes care of initialising iovec pointer, which can be forgotten. Even more, we can not init it if not needed, e.g. in case of IORING_OP_READ_FIXED or IORING_OP_READ. Also hide saving iter_state inside of it by splitting out an inline function of it to avoid extra ifs. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/b1bbc213a95e5272d4da5867bb977d9acb6f2109.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 0a0f84b28eba3..cf208d713f494 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3155,9 +3155,10 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, issue_flags); } -static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, - struct iov_iter *iter, unsigned int issue_flags) +static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, + struct io_rw_state *s, unsigned int issue_flags) { + struct iov_iter *iter = &s->iter; void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; u8 opcode = req->opcode; @@ -3180,11 +3181,13 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, req->rw.len = sqe_len; } - ret = import_single_range(rw, buf, sqe_len, *iovec, iter); + ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter); *iovec = NULL; return ret; } + *iovec = s->fast_iov; + if (req->flags & REQ_F_BUFFER_SELECT) { ret = io_iov_buffer_select(req, *iovec, issue_flags); if (!ret) @@ -3197,6 +3200,19 @@ static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, req->ctx->compat); } +static inline int io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct io_rw_state *s, + unsigned int issue_flags) +{ + int ret; + + ret = __io_import_iovec(rw, req, iovec, s, issue_flags); + if (unlikely(ret < 0)) + return ret; + iov_iter_save_state(&s->iter, &s->iter_state); + return ret; +} + static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) { return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? 
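Two of the points above can be sketched together: a flag kept in the sign bit of an int, so that testing it reduces to a sign check, and a thin inline wrapper that snapshots the iterator state after every successful import so that no call site can forget that step. All names below are invented for the illustration:

#include <limits.h>
#include <stdbool.h>

enum cmd_flags {
        CMD_F_COMPLETE_DEFER = 1,
        /* the int's sign bit: testing this flag can become a plain sign check */
        CMD_F_NONBLOCK       = INT_MIN,
};

static bool nonblocking(unsigned int issue_flags)
{
        return issue_flags & CMD_F_NONBLOCK;
}

/* Stubs so the wrapper below is self-contained. */
struct rw_state { int state_saved; };
static int  import_vec_raw(struct rw_state *s)  { (void)s; return 0; }
static void save_iter_state(struct rw_state *s) { s->state_saved = 1; }

/* Thin inline wrapper: a successful import always takes the snapshot. */
static inline int import_vec(struct rw_state *s)
{
        int ret = import_vec_raw(s);

        if (ret < 0)
                return ret;
        save_iter_state(s);
        return 0;
}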
NULL : &kiocb->ki_pos; @@ -3322,11 +3338,11 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, static inline int io_rw_prep_async(struct io_kiocb *req, int rw) { struct io_async_rw *iorw = req->async_data; - struct iovec *iov = iorw->s.fast_iov; + struct iovec *iov; int ret; /* submission path, ->uring_lock should already be taken */ - ret = io_import_iovec(rw, req, &iov, &iorw->s.iter, IO_URING_F_NONBLOCK); + ret = io_import_iovec(rw, req, &iov, &iorw->s, IO_URING_F_NONBLOCK); if (unlikely(ret < 0)) return ret; @@ -3334,7 +3350,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) iorw->free_iovec = iov; if (iov) req->flags |= REQ_F_NEED_CLEANUP; - iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); return 0; } @@ -3453,12 +3468,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) iovec = NULL; } else { s = &__s; - iovec = s->fast_iov; - ret = io_import_iovec(READ, req, &iovec, &s->iter, issue_flags); - if (ret < 0) + ret = io_import_iovec(READ, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) return ret; - - iov_iter_save_state(&s->iter, &s->iter_state); } req->result = iov_iter_count(&s->iter); @@ -3581,11 +3593,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) iovec = NULL; } else { s = &__s; - iovec = s->fast_iov; - ret = io_import_iovec(WRITE, req, &iovec, &s->iter, issue_flags); - if (ret < 0) + ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) return ret; - iov_iter_save_state(&s->iter, &s->iter_state); } req->result = iov_iter_count(&s->iter); From 607b6fb8017a684f3f1567ccf776bdc0fe6d120e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Thu, 14 Oct 2021 16:10:19 +0100 Subject: [PATCH 179/205] io_uring: rearrange io_read()/write() Combine force_nonblock branches (which is already optimised by compiler), flip branches so the most hot/common path is the first, e.g. as with non on-stack iov setup, and add extra likely/unlikely attributions for errror paths. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/2c2536c5896d70994de76e387ea09a0402173a3f.1634144845.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 75 +++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 38 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index cf208d713f494..a2f989e845837 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3433,7 +3433,7 @@ static bool io_rw_should_retry(struct io_kiocb *req) static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) { - if (req->file->f_op->read_iter) + if (likely(req->file->f_op->read_iter)) return call_read_iter(req->file, &req->rw.kiocb, iter); else if (req->file->f_op->read) return loop_rw_iter(READ, req, iter); @@ -3449,14 +3449,18 @@ static bool need_read_all(struct io_kiocb *req) static int io_read(struct io_kiocb *req, unsigned int issue_flags) { - struct io_rw_state __s, *s; + struct io_rw_state __s, *s = &__s; struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct io_async_rw *rw; ssize_t ret, ret2; - if (req_has_async_data(req)) { + if (!req_has_async_data(req)) { + ret = io_import_iovec(READ, req, &iovec, s, issue_flags); + if (unlikely(ret < 0)) + return ret; + } else { rw = req->async_data; s = &rw->s; /* @@ -3466,24 +3470,19 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) */ iov_iter_restore(&s->iter, &s->iter_state); iovec = NULL; - } else { - s = &__s; - ret = io_import_iovec(READ, req, &iovec, s, issue_flags); - if (unlikely(ret < 0)) - return ret; } req->result = iov_iter_count(&s->iter); - /* Ensure we clear previously set non-block flag */ - if (!force_nonblock) - kiocb->ki_flags &= ~IOCB_NOWAIT; - else + if (force_nonblock) { + /* If the file doesn't support async, just async punt */ + if (unlikely(!io_file_supports_nowait(req, READ))) { + ret = io_setup_async_rw(req, iovec, s, true); + return ret ?: -EAGAIN; + } kiocb->ki_flags |= IOCB_NOWAIT; - - /* If the file doesn't support async, just async punt */ - if (force_nonblock && !io_file_supports_nowait(req, READ)) { - ret = io_setup_async_rw(req, iovec, s, true); - return ret ?: -EAGAIN; + } else { + /* Ensure we clear previously set non-block flag */ + kiocb->ki_flags &= ~IOCB_NOWAIT; } ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), req->result); @@ -3579,40 +3578,40 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_write(struct io_kiocb *req, unsigned int issue_flags) { - struct io_rw_state __s, *s; - struct io_async_rw *rw; + struct io_rw_state __s, *s = &__s; struct iovec *iovec; struct kiocb *kiocb = &req->rw.kiocb; bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; ssize_t ret, ret2; - if (req_has_async_data(req)) { - rw = req->async_data; - s = &rw->s; - iov_iter_restore(&s->iter, &s->iter_state); - iovec = NULL; - } else { - s = &__s; + if (!req_has_async_data(req)) { ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags); if (unlikely(ret < 0)) return ret; + } else { + struct io_async_rw *rw = req->async_data; + + s = &rw->s; + iov_iter_restore(&s->iter, &s->iter_state); + iovec = NULL; } req->result = iov_iter_count(&s->iter); - /* Ensure we clear previously set non-block flag */ - if (!force_nonblock) - kiocb->ki_flags &= ~IOCB_NOWAIT; - else - kiocb->ki_flags |= IOCB_NOWAIT; + if (force_nonblock) { + /* If the file doesn't support async, just async punt */ + if (unlikely(!io_file_supports_nowait(req, 
WRITE))) + goto copy_iov; - /* If the file doesn't support async, just async punt */ - if (force_nonblock && !io_file_supports_nowait(req, WRITE)) - goto copy_iov; + /* file path doesn't support NOWAIT for non-direct_IO */ + if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) && + (req->flags & REQ_F_ISREG)) + goto copy_iov; - /* file path doesn't support NOWAIT for non-direct_IO */ - if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) && - (req->flags & REQ_F_ISREG)) - goto copy_iov; + kiocb->ki_flags |= IOCB_NOWAIT; + } else { + /* Ensure we clear previously set non-block flag */ + kiocb->ki_flags &= ~IOCB_NOWAIT; + } ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), req->result); if (unlikely(ret)) From 9983028e7660a2cc5e58403d8ce29569dbf3162d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:11 +0100 Subject: [PATCH 180/205] io_uring: optimise req->ctx reloads Don't load req->ctx in advance, it takes an extra register and the field stays valid even after opcode handlers. It also optimises out req->ctx load in io_iopoll_req_issued() once it's inlined. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/1e45ff671c44be0eb904f2e448a211734893fa0b.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index a2f989e845837..5a4d13ad1277c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6588,7 +6588,6 @@ static void io_clean_op(struct io_kiocb *req) static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) { - struct io_ring_ctx *ctx = req->ctx; const struct cred *creds = NULL; int ret; @@ -6715,7 +6714,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) if (ret) return ret; /* If the op doesn't have a file, we're not polling for it */ - if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) + if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file) io_iopoll_req_issued(req); return 0; From 9882131cd9de525d484de117644e5008c6557ac7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:12 +0100 Subject: [PATCH 181/205] io_uring: kill io_wq_current_is_worker() in iopoll Don't decide about locking based on io_wq_current_is_worker(), it's not consistent with all other code and is expensive, use issue_flags. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/7546d5a58efa4360173541c6fe02ee6b8c7b4ea7.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 5a4d13ad1277c..348e587524cf3 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2713,13 +2713,13 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) * find it from a io_do_iopoll() thread before the issuer is done * accessing the kiocb cookie. 
*/ -static void io_iopoll_req_issued(struct io_kiocb *req) +static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; - const bool in_async = io_wq_current_is_worker(); + const bool need_lock = !(issue_flags & IO_URING_F_NONBLOCK); /* workqueue context doesn't hold uring_lock, grab it now */ - if (unlikely(in_async)) + if (unlikely(need_lock)) mutex_lock(&ctx->uring_lock); /* @@ -2747,7 +2747,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req) else wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); - if (unlikely(in_async)) { + if (unlikely(need_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle * in sq thread task context or in io worker task context. If @@ -6715,7 +6715,7 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) return ret; /* If the op doesn't have a file, we're not polling for it */ if ((req->ctx->flags & IORING_SETUP_IOPOLL) && req->file) - io_iopoll_req_issued(req); + io_iopoll_req_issued(req, issue_flags); return 0; } From d1d681b0846af8585904be562610bdfc70bf21aa Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:13 +0100 Subject: [PATCH 182/205] io_uring: optimise io_import_iovec fixed path Delay loading req->rw.{addr,len} in io_import_iovec until it's really needed, so removing extra loads for the fixed path, which doesn't use them. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/3cc48dd0c4f1a37c4ce9aab5784281a2d83ad8be.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 348e587524cf3..00267e799ac24 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3159,9 +3159,9 @@ static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, struct io_rw_state *s, unsigned int issue_flags) { struct iov_iter *iter = &s->iter; - void __user *buf = u64_to_user_ptr(req->rw.addr); - size_t sqe_len = req->rw.len; u8 opcode = req->opcode; + void __user *buf; + size_t sqe_len; ssize_t ret; if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { @@ -3170,9 +3170,12 @@ static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, } /* buffer index only valid with fixed read/write, or buffer select */ - if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) + if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))) return -EINVAL; + buf = u64_to_user_ptr(req->rw.addr); + sqe_len = req->rw.len; + if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, issue_flags); From caa8fe6e86fd32e26d05f724e07ec826807c6ad4 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:14 +0100 Subject: [PATCH 183/205] io_uring: return iovec from __io_import_iovec We pass iovec** into __io_import_iovec(), which should keep it, initialise and modify accordingly. It's expensive, return it directly from __io_import_iovec encoding errors with ERR_PTR if needed. io_import_iovec keeps the old interface, but it's inline and so everything is optimised nicely. 
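The conversion above leans on the kernel's convention of encoding small negative error codes in otherwise-invalid pointer values (ERR_PTR(), IS_ERR() and PTR_ERR() in include/linux/err.h), so one pointer return carries either a result or an error without an extra output parameter. A self-contained re-implementation of the idea, for illustration only:

#include <stdint.h>
#include <sys/uio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)          /* encode a -errno */
{
        return (void *)(uintptr_t)error;
}

static inline long ptr_err(const void *ptr)      /* decode it back */
{
        return (long)(intptr_t)ptr;
}

static inline int is_err(const void *ptr)        /* top 4095 values are errors */
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static struct iovec *import_vectors(int bad_input)
{
        static struct iovec iov[1];

        if (bad_input)
                return err_ptr(-22);   /* -EINVAL, spelled out for the sketch */
        return iov;
}

/* Caller side: a single return value covers both outcomes.
 *     struct iovec *v = import_vectors(0);
 *     if (is_err(v))
 *             return (int)ptr_err(v);
 */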
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6230e9769982f03a8f86fa58df24666088c44d3e.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 00267e799ac24..d97545e43a16c 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3155,23 +3155,25 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, issue_flags); } -static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, - struct io_rw_state *s, unsigned int issue_flags) +static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req, + struct io_rw_state *s, + unsigned int issue_flags) { struct iov_iter *iter = &s->iter; u8 opcode = req->opcode; + struct iovec *iovec; void __user *buf; size_t sqe_len; ssize_t ret; - if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { - *iovec = NULL; - return io_import_fixed(req, rw, iter); - } + BUILD_BUG_ON(ERR_PTR(0) != NULL); + + if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) + return ERR_PTR(io_import_fixed(req, rw, iter)); /* buffer index only valid with fixed read/write, or buffer select */ if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))) - return -EINVAL; + return ERR_PTR(-EINVAL); buf = u64_to_user_ptr(req->rw.addr); sqe_len = req->rw.len; @@ -3180,40 +3182,39 @@ static int __io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, issue_flags); if (IS_ERR(buf)) - return PTR_ERR(buf); + return ERR_PTR(PTR_ERR(buf)); req->rw.len = sqe_len; } ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter); - *iovec = NULL; - return ret; + return ERR_PTR(ret); } - *iovec = s->fast_iov; - + iovec = s->fast_iov; if (req->flags & REQ_F_BUFFER_SELECT) { - ret = io_iov_buffer_select(req, *iovec, issue_flags); + ret = io_iov_buffer_select(req, iovec, issue_flags); if (!ret) - iov_iter_init(iter, rw, *iovec, 1, (*iovec)->iov_len); - *iovec = NULL; - return ret; + iov_iter_init(iter, rw, iovec, 1, iovec->iov_len); + return ERR_PTR(ret); } - return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter, + ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter, req->ctx->compat); + if (unlikely(ret < 0)) + return ERR_PTR(ret); + return iovec; } static inline int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, struct io_rw_state *s, unsigned int issue_flags) { - int ret; + *iovec = __io_import_iovec(rw, req, s, issue_flags); + if (unlikely(IS_ERR(*iovec))) + return PTR_ERR(*iovec); - ret = __io_import_iovec(rw, req, iovec, s, issue_flags); - if (unlikely(ret < 0)) - return ret; iov_iter_save_state(&s->iter, &s->iter_state); - return ret; + return 0; } static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) From 578c0ee234e51d9746e526a26bdb2c1b7ad81dba Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:15 +0100 Subject: [PATCH 184/205] io_uring: optimise fixed rw rsrc node setting Move fixed rw io_req_set_rsrc_node() from rw prep into io_import_fixed(), if we're using fixed buffers it will always be called during submission as we save the state in advance, Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/68c06f66d5aa9661f1e4b88d08c52d23528297ec.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- 
fs/io_uring.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d97545e43a16c..014363abc2b20 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2871,12 +2871,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, kiocb->ki_complete = io_complete_rw; } - if (req->opcode == IORING_OP_READ_FIXED || - req->opcode == IORING_OP_WRITE_FIXED) { - req->imu = NULL; - io_req_set_rsrc_node(req, ctx); - } - + req->imu = NULL; req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); req->buf_index = READ_ONCE(sqe->buf_index); @@ -3005,13 +3000,15 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) { - struct io_ring_ctx *ctx = req->ctx; struct io_mapped_ubuf *imu = req->imu; u16 index, buf_index = req->buf_index; if (likely(!imu)) { + struct io_ring_ctx *ctx = req->ctx; + if (unlikely(buf_index >= ctx->nr_user_bufs)) return -EFAULT; + io_req_set_rsrc_node(req, ctx); index = array_index_nospec(buf_index, ctx->nr_user_bufs); imu = READ_ONCE(ctx->user_bufs[index]); req->imu = imu; From 5cb03d63420bcf533111d30690141a5b28c172fa Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 15 Oct 2021 17:09:16 +0100 Subject: [PATCH 185/205] io_uring: clean io_prep_rw() We already store req->file in a variable in io_prep_rw(), just use it instead of a couple of left references to kicob->ki_filp. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/2f5889fc7ab670daefd5ccaedd99416d8355f0ad.1634314022.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 014363abc2b20..d5d06e0349598 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2832,8 +2832,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, req->flags |= REQ_F_CUR_POS; kiocb->ki_pos = file->f_pos; } - kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); - kiocb->ki_flags = iocb_flags(kiocb->ki_filp); + kiocb->ki_hint = ki_hint_validate(file_write_hint(file)); + kiocb->ki_flags = iocb_flags(file); ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); if (unlikely(ret)) return ret; @@ -2858,8 +2858,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, kiocb->ki_ioprio = get_current_ioprio(); if (ctx->flags & IORING_SETUP_IOPOLL) { - if (!(kiocb->ki_flags & IOCB_DIRECT) || - !kiocb->ki_filp->f_op->iopoll) + if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) return -EOPNOTSUPP; kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE; From b10841c98c8980ee50cc1e90640b04c935a24285 Mon Sep 17 00:00:00 2001 From: Noah Goldstein Date: Sat, 16 Oct 2021 20:32:29 -0500 Subject: [PATCH 186/205] fs/io_uring: Prioritise checking faster conditions first in io_write This commit reorders the conditions in a branch in io_write. The reorder to check 'ret2 == -EAGAIN' first as checking '(req->ctx->flags & IORING_SETUP_IOPOLL)' will likely be more expensive due to 2x memory derefences. 
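The reorder above is plain short-circuit evaluation put to work: once the left operand of && decides the outcome, the right one is never evaluated, so the cheap register compare belongs in front of the two-level pointer dereference. In generic form, with made-up types:

#include <errno.h>

#define CFG_IOPOLL 0x1u

struct config  { unsigned int flags; };
struct request { struct config *cfg; };

static int should_copy_iov(int ret, const struct request *req)
{
        /* cheap compare first; the pointer chase only runs when it matters */
        return ret == -EAGAIN && (req->cfg->flags & CFG_IOPOLL);
}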
Signed-off-by: Noah Goldstein Link: https://lore.kernel.org/r/20211017013229.4124279-1-goldstein.w.n@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d5d06e0349598..11bd0dd1425a6 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3654,7 +3654,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) goto done; if (!force_nonblock || ret2 != -EAGAIN) { /* IOPOLL retry should happen for io-wq threads */ - if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) + if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) goto copy_iov; done: kiocb_done(kiocb, ret2, issue_flags); From e74ead135bc4459f7d40b1f8edab1333a28b54e8 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 Oct 2021 00:07:08 +0100 Subject: [PATCH 187/205] io_uring: arm poll for non-nowait files Don't check if we can do nowait before arming apoll, there are several reasons for that. First, we don't care much about files that don't support nowait. Second, it may be useful -- we don't want to be taking away extra workers from io-wq when it can go in some async. Even if it will go through io-wq eventually, it make difference in the numbers of workers actually used. And the last one, it's needed to clean nowait in future commits. [kernel test robot: fix unused-var] Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/9d06f3cb2c8b686d970269a87986f154edb83043.1634425438.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 11bd0dd1425a6..8a5b20eafccf2 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5588,7 +5588,6 @@ static int io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI; - int rw; if (!req->file || !file_can_poll(req->file)) return IO_APOLL_ABORTED; @@ -5598,7 +5597,6 @@ static int io_arm_poll_handler(struct io_kiocb *req) return IO_APOLL_ABORTED; if (def->pollin) { - rw = READ; mask |= POLLIN | POLLRDNORM; /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */ @@ -5606,14 +5604,9 @@ static int io_arm_poll_handler(struct io_kiocb *req) (req->sr_msg.msg_flags & MSG_ERRQUEUE)) mask &= ~POLLIN; } else { - rw = WRITE; mask |= POLLOUT | POLLWRNORM; } - /* if we can't nonblock try, then no point in arming a poll handler */ - if (!io_file_supports_nowait(req, rw)) - return IO_APOLL_ABORTED; - apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); if (unlikely(!apoll)) return IO_APOLL_ABORTED; From 35645ac3c1853fbb54d8acd50fd12184f7905d5f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 Oct 2021 00:07:09 +0100 Subject: [PATCH 188/205] io_uring: combine REQ_F_NOWAIT_{READ,WRITE} flags Merge REQ_F_NOWAIT_READ and REQ_F_NOWAIT_WRITE into one flag, i.e. REQ_F_SUPPORT_NOWAIT. First it gets rid of dependence on CONFIG_64BIT but also simplifies the code. One thing to consider is when we don't have ->{read,write}_iter and go through loop_rw_iter(). Just fail it with -EAGAIN if we expect nowait behaviour but not sure whether it supports it. 
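Related to the flag merge above: the fixed-file table keeps these per-file properties in the low bits of the stored file pointer (the FFS_NOWAIT and FFS_ISREG bits visible in the diff that follows, with FFS_MASK recovering the pointer), which works because the pointers are aligned and their low bits are known to be zero. A stand-alone sketch of that tagging scheme, with hypothetical names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_NOWAIT 0x1ul
#define TAG_ISREG  0x2ul
#define TAG_BITS   (TAG_NOWAIT | TAG_ISREG)

struct file_stub { int fd; } __attribute__((aligned(8)));

/* Pack flag bits into the known-zero low bits of an aligned pointer. */
static uintptr_t pack(struct file_stub *f, unsigned long flags)
{
        assert(((uintptr_t)f & TAG_BITS) == 0);
        return (uintptr_t)f | (flags & TAG_BITS);
}

static struct file_stub *unpack(uintptr_t slot, unsigned long *flags)
{
        *flags = slot & TAG_BITS;
        return (struct file_stub *)(slot & ~TAG_BITS);
}

int main(void)
{
        struct file_stub f = { .fd = 3 };
        unsigned long flags;
        uintptr_t slot = pack(&f, TAG_NOWAIT | TAG_ISREG);
        struct file_stub *back = unpack(slot, &flags);

        printf("fd=%d nowait=%d isreg=%d\n", back->fd,
               !!(flags & TAG_NOWAIT), !!(flags & TAG_ISREG));
        return 0;
}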
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/f832a20e5186c2e79c6519280c238f559a1d2bbc.1634425438.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 61 ++++++++++++++++++--------------------------------- 1 file changed, 21 insertions(+), 40 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 8a5b20eafccf2..d7bf2002ab0ce 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -732,8 +732,7 @@ enum { REQ_F_ARM_LTIMEOUT_BIT, REQ_F_ASYNC_DATA_BIT, /* keep async read/write and isreg together and in order */ - REQ_F_NOWAIT_READ_BIT, - REQ_F_NOWAIT_WRITE_BIT, + REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, /* not a real bit, just to check we're not overflowing the space */ @@ -774,10 +773,8 @@ enum { REQ_F_COMPLETE_INLINE = BIT(REQ_F_COMPLETE_INLINE_BIT), /* caller should reissue async */ REQ_F_REISSUE = BIT(REQ_F_REISSUE_BIT), - /* supports async reads */ - REQ_F_NOWAIT_READ = BIT(REQ_F_NOWAIT_READ_BIT), - /* supports async writes */ - REQ_F_NOWAIT_WRITE = BIT(REQ_F_NOWAIT_WRITE_BIT), + /* supports async reads/writes */ + REQ_F_SUPPORT_NOWAIT = BIT(REQ_F_SUPPORT_NOWAIT_BIT), /* regular file */ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), /* has creds assigned */ @@ -1390,18 +1387,13 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq) return false; } -#define FFS_ASYNC_READ 0x1UL -#define FFS_ASYNC_WRITE 0x2UL -#ifdef CONFIG_64BIT -#define FFS_ISREG 0x4UL -#else -#define FFS_ISREG 0x0UL -#endif -#define FFS_MASK ~(FFS_ASYNC_READ|FFS_ASYNC_WRITE|FFS_ISREG) +#define FFS_NOWAIT 0x1UL +#define FFS_ISREG 0x2UL +#define FFS_MASK ~(FFS_NOWAIT|FFS_ISREG) static inline bool io_req_ffs_set(struct io_kiocb *req) { - return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); + return req->flags & REQ_F_FIXED_FILE; } static inline void io_req_track_inflight(struct io_kiocb *req) @@ -2772,7 +2764,7 @@ static bool io_bdev_nowait(struct block_device *bdev) * any file. For now, just ensure that anything potentially problematic is done * inline. */ -static bool __io_file_supports_nowait(struct file *file, int rw) +static bool __io_file_supports_nowait(struct file *file) { umode_t mode = file_inode(file)->i_mode; @@ -2795,24 +2787,14 @@ static bool __io_file_supports_nowait(struct file *file, int rw) /* any ->read/write should understand O_NONBLOCK */ if (file->f_flags & O_NONBLOCK) return true; - - if (!(file->f_mode & FMODE_NOWAIT)) - return false; - - if (rw == READ) - return file->f_op->read_iter != NULL; - - return file->f_op->write_iter != NULL; + return file->f_mode & FMODE_NOWAIT; } -static bool io_file_supports_nowait(struct io_kiocb *req, int rw) +static inline bool io_file_supports_nowait(struct io_kiocb *req) { - if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) + if (likely(req->flags & REQ_F_SUPPORT_NOWAIT)) return true; - else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) - return true; - - return __io_file_supports_nowait(req->file, rw); + return __io_file_supports_nowait(req->file); } static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, @@ -2844,7 +2826,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, * reliably. If not, or it IOCB_NOWAIT is set, don't retry. 
*/ if ((kiocb->ki_flags & IOCB_NOWAIT) || - ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) + ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) req->flags |= REQ_F_NOWAIT; ioprio = READ_ONCE(sqe->ioprio); @@ -3235,7 +3217,8 @@ static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) */ if (kiocb->ki_flags & IOCB_HIPRI) return -EOPNOTSUPP; - if (kiocb->ki_flags & IOCB_NOWAIT) + if ((kiocb->ki_flags & IOCB_NOWAIT) && + !(kiocb->ki_filp->f_flags & O_NONBLOCK)) return -EAGAIN; while (iov_iter_count(iter)) { @@ -3475,7 +3458,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags) if (force_nonblock) { /* If the file doesn't support async, just async punt */ - if (unlikely(!io_file_supports_nowait(req, READ))) { + if (unlikely(!io_file_supports_nowait(req))) { ret = io_setup_async_rw(req, iovec, s, true); return ret ?: -EAGAIN; } @@ -3599,7 +3582,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) if (force_nonblock) { /* If the file doesn't support async, just async punt */ - if (unlikely(!io_file_supports_nowait(req, WRITE))) + if (unlikely(!io_file_supports_nowait(req))) goto copy_iov; /* file path doesn't support NOWAIT for non-direct_IO */ @@ -3631,7 +3614,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags) } kiocb->ki_flags |= IOCB_WRITE; - if (req->file->f_op->write_iter) + if (likely(req->file->f_op->write_iter)) ret2 = call_write_iter(req->file, kiocb, &s->iter); else if (req->file->f_op->write) ret2 = loop_rw_iter(WRITE, req, &s->iter); @@ -6778,10 +6761,8 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file { unsigned long file_ptr = (unsigned long) file; - if (__io_file_supports_nowait(file, READ)) - file_ptr |= FFS_ASYNC_READ; - if (__io_file_supports_nowait(file, WRITE)) - file_ptr |= FFS_ASYNC_WRITE; + if (__io_file_supports_nowait(file)) + file_ptr |= FFS_NOWAIT; if (S_ISREG(file_inode(file)->i_mode)) file_ptr |= FFS_ISREG; file_slot->file_ptr = file_ptr; @@ -6800,7 +6781,7 @@ static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx, file = (struct file *) (file_ptr & FFS_MASK); file_ptr &= ~FFS_MASK; /* mask in overlapping REQ_F and FFS bits */ - req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); + req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT); io_req_set_rsrc_node(req, ctx); return file; } From 88459b50b42a4bd58e528006663afabd0b8652f2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sun, 17 Oct 2021 00:07:10 +0100 Subject: [PATCH 189/205] io_uring: simplify io_file_supports_nowait() Make sure that REQ_F_SUPPORT_NOWAIT is always set io_prep_rw(), and so we can stop caring about setting it down the line simplifying io_file_supports_nowait(). Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/60c8f1f5e2cb45e00f4897b2cec10c5b3669da91.1634425438.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d7bf2002ab0ce..b834cf2778f65 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2764,10 +2764,8 @@ static bool io_bdev_nowait(struct block_device *bdev) * any file. For now, just ensure that anything potentially problematic is done * inline. 
*/ -static bool __io_file_supports_nowait(struct file *file) +static bool __io_file_supports_nowait(struct file *file, umode_t mode) { - umode_t mode = file_inode(file)->i_mode; - if (S_ISBLK(mode)) { if (IS_ENABLED(CONFIG_BLOCK) && io_bdev_nowait(I_BDEV(file->f_mapping->host))) @@ -2790,11 +2788,26 @@ static bool __io_file_supports_nowait(struct file *file) return file->f_mode & FMODE_NOWAIT; } +/* + * If we tracked the file through the SCM inflight mechanism, we could support + * any file. For now, just ensure that anything potentially problematic is done + * inline. + */ +static unsigned int io_file_get_flags(struct file *file) +{ + umode_t mode = file_inode(file)->i_mode; + unsigned int res = 0; + + if (S_ISREG(mode)) + res |= FFS_ISREG; + if (__io_file_supports_nowait(file, mode)) + res |= FFS_NOWAIT; + return res; +} + static inline bool io_file_supports_nowait(struct io_kiocb *req) { - if (likely(req->flags & REQ_F_SUPPORT_NOWAIT)) - return true; - return __io_file_supports_nowait(req->file); + return req->flags & REQ_F_SUPPORT_NOWAIT; } static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, @@ -2806,8 +2819,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, unsigned ioprio; int ret; - if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) - req->flags |= REQ_F_ISREG; + if (!io_req_ffs_set(req)) + req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT; kiocb->ki_pos = READ_ONCE(sqe->off); if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) { @@ -6761,10 +6774,7 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file { unsigned long file_ptr = (unsigned long) file; - if (__io_file_supports_nowait(file)) - file_ptr |= FFS_NOWAIT; - if (S_ISREG(file_inode(file)->i_mode)) - file_ptr |= FFS_ISREG; + file_ptr |= io_file_get_flags(file); file_slot->file_ptr = file_ptr; } From 5ca7a8b3f698b111729ed8e133b14e30f961de0f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 6 Oct 2021 11:01:42 -0600 Subject: [PATCH 190/205] io_uring: inform block layer of how many requests we are submitting The block layer can use this knowledge to make smarter decisions on how to handle the request, if it knows that N more may be coming. Switch to using blk_start_plug_nr_ios() to pass in that information. 
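As a rough illustration of the interface (a sketch, not part of this patch; the function and variable names here are made up), a submitter that knows its batch size passes it when starting the plug, which is what the hunk below does via state->submit_nr:

	#include <linux/blkdev.h>

	/* sketch: start a plug with an expected-submission-count hint */
	static void plug_batch_sketch(unsigned short nr_reqs)
	{
		struct blk_plug plug;

		blk_start_plug_nr_ios(&plug, nr_reqs);	/* hint: up to nr_reqs submissions follow */
		/* ... submit up to nr_reqs requests here ... */
		blk_finish_plug(&plug);			/* flush whatever got plugged */
	}

Plain blk_start_plug() stays available for callers that don't know their batch size.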
Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index b834cf2778f65..240222da2989e 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -317,6 +317,7 @@ struct io_submit_state { bool plug_started; bool need_plug; + unsigned short submit_nr; struct blk_plug plug; }; @@ -7060,7 +7061,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, if (state->need_plug && io_op_defs[opcode].plug) { state->plug_started = true; state->need_plug = false; - blk_start_plug(&state->plug); + blk_start_plug_nr_ios(&state->plug, state->submit_nr); } req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), @@ -7181,6 +7182,7 @@ static void io_submit_state_start(struct io_submit_state *state, { state->plug_started = false; state->need_plug = max_ios > 2; + state->submit_nr = max_ios; /* set only head, no need to init link_last in advance */ state->link.head = NULL; } From 00169246e6981752e53266c62d0ab0c827493634 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 19 Oct 2021 17:34:53 +0200 Subject: [PATCH 191/205] io_uring: warning about unused-but-set parameter When enabling -Wunused warnings by building with W=1, I get an instance of the -Wunused-but-set-parameter warning in the io_uring code: fs/io_uring.c: In function 'io_queue_async_work': fs/io_uring.c:1445:61: error: parameter 'locked' set but not used [-Werror=unused-but-set-parameter] 1445 | static void io_queue_async_work(struct io_kiocb *req, bool *locked) | ~~~~~~^~~~~~ There are very few warnings of this type, so it would be nice to enable this by default and fix all the existing instances. As the assignment serves no purpose by itself other than to prevent developers from using the variable, an easy workaround is to remove the assignment and just rename the argument to "dont_use". Fixes: f237c30a5610 ("io_uring: batch task work locking") Link: https://lore.kernel.org/lkml/20210920121352.93063-1-arnd@kernel.org/ Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20211019153507.348480-1-arnd@kernel.org Signed-off-by: Jens Axboe --- fs/io_uring.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 240222da2989e..df07ce2fa6bc9 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1487,15 +1487,12 @@ static inline void io_req_add_compl_list(struct io_kiocb *req) wq_list_add_tail(&req->comp_list, &state->compl_reqs); } -static void io_queue_async_work(struct io_kiocb *req, bool *locked) +static void io_queue_async_work(struct io_kiocb *req, bool *dont_use) { struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *link = io_prep_linked_timeout(req); struct io_uring_task *tctx = req->task->io_uring; - /* must not take the lock, NULL it as a precaution */ - locked = NULL; - BUG_ON(!tctx); BUG_ON(!tctx->io_wq); From 3b44b3712c5b19b0af11c25cd978abdc3680d5e7 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Mon, 18 Oct 2021 21:34:31 +0800 Subject: [PATCH 192/205] io_uring: split logic of force_nonblock Currently force_nonblock stands for three meanings: - nowait or not - in an io-worker or not(hold uring_lock or not) Let's split the logic to two flags, IO_URING_F_NONBLOCK and IO_URING_F_UNLOCKED for convenience of the next patch. 
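As an aside, here is a sketch of how an opcode handler consumes the two flags after the split (illustrative only, not taken from the patch; the handler name is invented):

	static int sketch_handler(struct io_kiocb *req, unsigned int issue_flags)
	{
		/* set when running from io-wq, i.e. ->uring_lock is not held */
		bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

		if (issue_flags & IO_URING_F_NONBLOCK)
			return -EAGAIN;	/* this sketch always punts nowait attempts */

		io_ring_submit_lock(req->ctx, needs_lock);
		/* ... work that needs ->uring_lock ... */
		io_ring_submit_unlock(req->ctx, needs_lock);
		return 0;
	}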
Suggested-by: Pavel Begunkov Signed-off-by: Hao Xu Reviewed-by: Pavel Begunkov Link: https://lore.kernel.org/r/20211018133431.103298-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 48 ++++++++++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index df07ce2fa6bc9..c87931d8b503b 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -199,6 +199,7 @@ struct io_rings { enum io_uring_cmd_flags { IO_URING_F_COMPLETE_DEFER = 1, + IO_URING_F_UNLOCKED = 2, /* int's last bit, sign checks are usually faster than a bit test */ IO_URING_F_NONBLOCK = INT_MIN, }; @@ -2706,10 +2707,10 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; - const bool need_lock = !(issue_flags & IO_URING_F_NONBLOCK); + const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; /* workqueue context doesn't hold uring_lock, grab it now */ - if (unlikely(need_lock)) + if (unlikely(needs_lock)) mutex_lock(&ctx->uring_lock); /* @@ -2737,7 +2738,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) else wq_list_add_tail(&req->comp_list, &ctx->iopoll_list); - if (unlikely(need_lock)) { + if (unlikely(needs_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle * in sq thread task context or in io worker task context. If @@ -2921,7 +2922,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_ring_ctx *ctx = req->ctx; req_set_fail(req); - if (!(issue_flags & IO_URING_F_NONBLOCK)) { + if (issue_flags & IO_URING_F_UNLOCKED) { mutex_lock(&ctx->uring_lock); __io_req_complete(req, issue_flags, ret, cflags); mutex_unlock(&ctx->uring_lock); @@ -3031,7 +3032,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, { struct io_buffer *kbuf = req->kbuf; struct io_buffer *head; - bool needs_lock = !(issue_flags & IO_URING_F_NONBLOCK); + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; if (req->flags & REQ_F_BUFFER_SELECTED) return kbuf; @@ -3336,7 +3337,7 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw) int ret; /* submission path, ->uring_lock should already be taken */ - ret = io_import_iovec(rw, req, &iov, &iorw->s, IO_URING_F_NONBLOCK); + ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); if (unlikely(ret < 0)) return ret; @@ -4309,9 +4310,9 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head; int ret = 0; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; - io_ring_submit_lock(ctx, !force_nonblock); + io_ring_submit_lock(ctx, needs_lock); lockdep_assert_held(&ctx->uring_lock); @@ -4324,7 +4325,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) /* complete before unlock, IOPOLL may need the lock */ __io_req_complete(req, issue_flags, ret, 0); - io_ring_submit_unlock(ctx, !force_nonblock); + io_ring_submit_unlock(ctx, needs_lock); return 0; } @@ -4396,9 +4397,9 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) struct io_ring_ctx *ctx = req->ctx; struct io_buffer *head, *list; int ret = 0; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; - io_ring_submit_lock(ctx, !force_nonblock); + io_ring_submit_lock(ctx, 
needs_lock); lockdep_assert_held(&ctx->uring_lock); @@ -4414,7 +4415,7 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) req_set_fail(req); /* complete before unlock, IOPOLL may need the lock */ __io_req_complete(req, issue_flags, ret, 0); - io_ring_submit_unlock(ctx, !force_nonblock); + io_ring_submit_unlock(ctx, needs_lock); return 0; } @@ -6271,6 +6272,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; u64 sqe_addr = req->cancel.addr; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; struct io_tctx_node *node; int ret; @@ -6279,7 +6281,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) goto done; /* slow path, try all io-wq's */ - io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_lock(ctx, needs_lock); ret = -ENOENT; list_for_each_entry(node, &ctx->tctx_list, ctx_node) { struct io_uring_task *tctx = node->task->io_uring; @@ -6288,7 +6290,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) if (ret != -ENOENT) break; } - io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_unlock(ctx, needs_lock); done: if (ret < 0) req_set_fail(req); @@ -6315,6 +6317,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req, static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *ctx = req->ctx; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; struct io_uring_rsrc_update2 up; int ret; @@ -6324,10 +6327,10 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) up.tags = 0; up.resv = 0; - io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_lock(ctx, needs_lock); ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, req->rsrc_update.nr_args); - io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_unlock(ctx, needs_lock); if (ret < 0) req_set_fail(req); @@ -6737,7 +6740,7 @@ static void io_wq_submit_work(struct io_wq_work *work) if (!ret) { do { - ret = io_issue_sqe(req, 0); + ret = io_issue_sqe(req, IO_URING_F_UNLOCKED); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't @@ -8326,12 +8329,12 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file, unsigned int issue_flags, u32 slot_index) { struct io_ring_ctx *ctx = req->ctx; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; bool needs_switch = false; struct io_fixed_file *file_slot; int ret = -EBADF; - io_ring_submit_lock(ctx, !force_nonblock); + io_ring_submit_lock(ctx, needs_lock); if (file->f_op == &io_uring_fops) goto err; ret = -ENXIO; @@ -8372,7 +8375,7 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file, err: if (needs_switch) io_rsrc_node_switch(ctx, ctx->file_data); - io_ring_submit_unlock(ctx, !force_nonblock); + io_ring_submit_unlock(ctx, needs_lock); if (ret) fput(file); return ret; @@ -8382,11 +8385,12 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) { unsigned int offset = req->close.file_slot - 1; struct io_ring_ctx *ctx = req->ctx; + bool needs_lock = issue_flags & IO_URING_F_UNLOCKED; struct io_fixed_file *file_slot; struct file *file; int ret, i; - io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_lock(ctx, needs_lock); ret = -ENXIO; if (unlikely(!ctx->file_data)) goto 
out; @@ -8412,7 +8416,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) io_rsrc_node_switch(ctx, ctx->file_data); ret = 0; out: - io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); + io_ring_submit_unlock(ctx, needs_lock); return ret; } From 898df2447b9ee8d759e85d33087505d3905bf2f0 Mon Sep 17 00:00:00 2001 From: Changcheng Deng Date: Wed, 20 Oct 2021 08:49:48 +0000 Subject: [PATCH 193/205] io_uring: Use ERR_CAST() instead of ERR_PTR(PTR_ERR()) Use ERR_CAST() instead of ERR_PTR(PTR_ERR()). This makes it more readable and also fixes this warning detected by err_cast.cocci: ./fs/io_uring.c: WARNING: 3208: 11-18: ERR_CAST can be used with buf Reported-by: Zeal Robot Signed-off-by: Changcheng Deng Link: https://lore.kernel.org/r/20211020084948.1038420-1-deng.changcheng@zte.com.cn Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c87931d8b503b..88c5ee4dc242f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3172,7 +3172,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req, if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, issue_flags); if (IS_ERR(buf)) - return ERR_PTR(PTR_ERR(buf)); + return ERR_CAST(buf); req->rw.len = sqe_len; } From 90fa02883f063b971ebfd9f5b2184b38b83b7ee3 Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Mon, 18 Oct 2021 21:34:45 +0800 Subject: [PATCH 194/205] io_uring: implement async hybrid mode for pollable requests The current logic for requests with IOSQE_ASYNC is to first queue them to an io-worker and then execute them in a synchronous way. For unbound work like pollable requests (e.g. reading/writing a socket fd), the io-worker may get stuck there waiting for events for a long time, and thus other work items wait in the list for a long time too. Let's introduce a new way for unbound work (currently pollable requests): a request is still queued to an io-worker first, but is then executed with a nonblock try rather than in a synchronous way. If that fails, the request arms its poll handler and the worker can begin to handle other work. The detailed flow for this kind of request is:
step1: original context: queue it to io-worker
step2: io-worker context: nonblock try (the old logic is a synchronous try here)
        |
        |--fail--> arm poll
                   |
                   |--(fail/ready)--> synchronous issue
                   |
                   |--(succeed)--> worker finishes its job, tw takes over the req
This works much better than the old IOSQE_ASYNC logic in cases where unbound max_worker is relatively small. In that case the number of io-workers easily climbs to max_worker, new workers cannot be created, and the running workers are stuck handling old work in IOSQE_ASYNC mode.
On my 64-core machine, with unbound max_worker set to 20, running an echo-server gives the following (arguments: register_file, connection number is 1000, message size is 12 bytes):
original IOSQE_ASYNC: 76664.151 tps
after this patch: 166934.985 tps
Suggested-by: Jens Axboe Signed-off-by: Hao Xu Link: https://lore.kernel.org/r/20211018133445.103438-1-haoxu@linux.alibaba.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 36 +++++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 88c5ee4dc242f..736d456e7913f 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6739,8 +6739,18 @@ static void io_wq_submit_work(struct io_wq_work *work) ret = -ECANCELED; if (!ret) { + bool needs_poll = false; + unsigned int issue_flags = IO_URING_F_UNLOCKED; + + if (req->flags & REQ_F_FORCE_ASYNC) { + needs_poll = req->file && file_can_poll(req->file); + if (needs_poll) + issue_flags |= IO_URING_F_NONBLOCK; + } + do { - ret = io_issue_sqe(req, IO_URING_F_UNLOCKED); +issue_sqe: + ret = io_issue_sqe(req, issue_flags); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't @@ -6748,6 +6758,30 @@ static void io_wq_submit_work(struct io_wq_work *work) */ if (ret != -EAGAIN) break; + if (needs_poll) { + bool armed = false; + + ret = 0; + needs_poll = false; + issue_flags &= ~IO_URING_F_NONBLOCK; + + switch (io_arm_poll_handler(req)) { + case IO_APOLL_READY: + goto issue_sqe; + case IO_APOLL_ABORTED: + /* + * somehow we failed to arm the poll infra, + * fallback it to a normal async worker try. + */ + break; + case IO_APOLL_OK: + armed = true; + break; + } + + if (armed) + break; + } cond_resched(); } while (1); } From c907e52c72dee710f3bf7c751822df8371924a84 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:55 +0100 Subject: [PATCH 195/205] io-wq: use helper for worker refcounting Use io_worker_release() instead of hand coding it in io_worker_exit(). Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6f95f09d2cdbafcbb2e22ad0d1a2bc4d3962bf65.1634987320.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io-wq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index 811299ac96848..0c283bb18fb24 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -178,8 +178,7 @@ static void io_worker_exit(struct io_worker *worker) { struct io_wqe *wqe = worker->wqe; - if (refcount_dec_and_test(&worker->ref)) - complete(&worker->ref_done); + io_worker_release(worker); wait_for_completion(&worker->ref_done); raw_spin_lock(&wqe->lock); From 255657d237042fd51673aef6f22463f662f9933f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:56 +0100 Subject: [PATCH 196/205] io_uring: clean io_wq_submit_work()'s main loop Do a bit of cleaning for the main loop of io_wq_submit_work(). Get rid of the switch and just replace it with a single if, since we're retrying in both of the other cases. Kill the issue_sqe label, get rid of the needs_poll nesting, and disambiguate the comment a bit.
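For reference while reading the hunk below: io_arm_poll_handler() reports one of three outcomes, and only IO_APOLL_OK lets the worker drop the request. Shown as a sketch (the names come from fs/io_uring.c; the member order and comments here are illustrative):

	enum {
		IO_APOLL_OK,		/* poll armed, io-wq is done with the req for now */
		IO_APOLL_ABORTED,	/* could not arm poll, fall back to a blocking issue */
		IO_APOLL_READY,		/* already ready, retry the issue */
	};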
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ed12ce0c64e051f9a6b8a37a24f8ea554d299c29.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 40 ++++++++++++---------------------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 736d456e7913f..7f92523c1282d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6749,40 +6749,24 @@ static void io_wq_submit_work(struct io_wq_work *work) } do { -issue_sqe: ret = io_issue_sqe(req, issue_flags); + if (ret != -EAGAIN) + break; /* - * We can get EAGAIN for polled IO even though we're + * We can get EAGAIN for iopolled IO even though we're * forcing a sync submission from here, since we can't * wait for request slots on the block side. */ - if (ret != -EAGAIN) - break; - if (needs_poll) { - bool armed = false; - - ret = 0; - needs_poll = false; - issue_flags &= ~IO_URING_F_NONBLOCK; - - switch (io_arm_poll_handler(req)) { - case IO_APOLL_READY: - goto issue_sqe; - case IO_APOLL_ABORTED: - /* - * somehow we failed to arm the poll infra, - * fallback it to a normal async worker try. - */ - break; - case IO_APOLL_OK: - armed = true; - break; - } - - if (armed) - break; + if (!needs_poll) { + cond_resched(); + continue; } - cond_resched(); + + if (io_arm_poll_handler(req) == IO_APOLL_OK) + return; + /* aborted or ready, in either case retry blocking */ + needs_poll = false; + issue_flags &= ~IO_URING_F_NONBLOCK; } while (1); } From d01905db14eb6223dd1c375001f4daa26cb15c1f Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:57 +0100 Subject: [PATCH 197/205] io_uring: clean iowq submit work cancellation If we've got IO_WQ_WORK_CANCEL in io_wq_submit_work(), handle the error on the same lines as the check instead of having a weird code flow. The main loop doesn't change but goes one indention left. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/ff4a09cf41f7a22bbb294b6f1faea721e21fe615.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 59 +++++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 7f92523c1282d..58cb3a14d58e8 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6721,6 +6721,8 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work) static void io_wq_submit_work(struct io_wq_work *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); + unsigned int issue_flags = IO_URING_F_UNLOCKED; + bool needs_poll = false; struct io_kiocb *timeout; int ret = 0; @@ -6735,40 +6737,37 @@ static void io_wq_submit_work(struct io_wq_work *work) io_queue_linked_timeout(timeout); /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ - if (work->flags & IO_WQ_WORK_CANCEL) - ret = -ECANCELED; + if (work->flags & IO_WQ_WORK_CANCEL) { + io_req_task_queue_fail(req, -ECANCELED); + return; + } - if (!ret) { - bool needs_poll = false; - unsigned int issue_flags = IO_URING_F_UNLOCKED; + if (req->flags & REQ_F_FORCE_ASYNC) { + needs_poll = req->file && file_can_poll(req->file); + if (needs_poll) + issue_flags |= IO_URING_F_NONBLOCK; + } - if (req->flags & REQ_F_FORCE_ASYNC) { - needs_poll = req->file && file_can_poll(req->file); - if (needs_poll) - issue_flags |= IO_URING_F_NONBLOCK; + do { + ret = io_issue_sqe(req, issue_flags); + if (ret != -EAGAIN) + break; + /* + * We can get EAGAIN for iopolled IO even though we're + * forcing a sync submission from here, since we can't + * wait for request slots on the block side. + */ + if (!needs_poll) { + cond_resched(); + continue; } - do { - ret = io_issue_sqe(req, issue_flags); - if (ret != -EAGAIN) - break; - /* - * We can get EAGAIN for iopolled IO even though we're - * forcing a sync submission from here, since we can't - * wait for request slots on the block side. - */ - if (!needs_poll) { - cond_resched(); - continue; - } - - if (io_arm_poll_handler(req) == IO_APOLL_OK) - return; - /* aborted or ready, in either case retry blocking */ - needs_poll = false; - issue_flags &= ~IO_URING_F_NONBLOCK; - } while (1); - } + if (io_arm_poll_handler(req) == IO_APOLL_OK) + return; + /* aborted or ready, in either case retry blocking */ + needs_poll = false; + issue_flags &= ~IO_URING_F_NONBLOCK; + } while (1); /* avoid locking problems by failing it from a clean context */ if (ret) From 658d0a401637ffbc733b0f03723e9e1255289429 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:58 +0100 Subject: [PATCH 198/205] io_uring: check if opcode needs poll first on arming ->pollout or ->pollin are set only for opcodes that need a file, so if io_arm_poll_handler() tests them first we can be sure that the request has file set and the ->file check can be removed. 
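To see why that ordering is safe, here is a condensed sketch of how io_op_defs[] entries are laid out (entries abridged and illustrative, not copied verbatim from the table): any opcode that sets .pollin or .pollout also needs a file, so once the pollin/pollout test passes, req->file is known to be set.

	static const struct io_op_def sketch_defs[] = {
		[IORING_OP_READV] = {
			.needs_file	= 1,
			.pollin		= 1,	/* file-based and pollable */
		},
		[IORING_OP_NOP] = {
			/* no file, no pollin/pollout: io_arm_poll_handler() bails out early */
		},
	};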
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/9adfe4f543d984875e516fce6da35348aab48668.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 58cb3a14d58e8..bff911f951ed5 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -5584,12 +5584,10 @@ static int io_arm_poll_handler(struct io_kiocb *req) struct io_poll_table ipt; __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI; - if (!req->file || !file_can_poll(req->file)) - return IO_APOLL_ABORTED; - if (req->flags & REQ_F_POLLED) - return IO_APOLL_ABORTED; if (!def->pollin && !def->pollout) return IO_APOLL_ABORTED; + if (!file_can_poll(req->file) || (req->flags & REQ_F_POLLED)) + return IO_APOLL_ABORTED; if (def->pollin) { mask |= POLLIN | POLLRDNORM; From afb7f56fc624fb6ade8fde70a67eda4d831b4ed0 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:13:59 +0100 Subject: [PATCH 199/205] io_uring: don't try io-wq polling if not supported If an opcode doesn't support polling, just let it be executed synchronously in iowq, otherwise it will do a nonblock attempt just to fail in io_arm_poll_handler() and return back to blocking execution. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/6401256db01b88f448f15fcd241439cb76f5b940.1634987320.git.asml.silence@gmail.com Reviewed-by: Hao Xu Signed-off-by: Jens Axboe --- fs/io_uring.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index bff911f951ed5..c6f32fcf387b4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6741,9 +6741,13 @@ static void io_wq_submit_work(struct io_wq_work *work) } if (req->flags & REQ_F_FORCE_ASYNC) { - needs_poll = req->file && file_can_poll(req->file); - if (needs_poll) + const struct io_op_def *def = &io_op_defs[req->opcode]; + bool opcode_poll = def->pollin || def->pollout; + + if (opcode_poll && file_can_poll(req->file)) { + needs_poll = true; issue_flags |= IO_URING_F_NONBLOCK; + } } do { From d6a644a795451d5fd063a5c08d6bb3a91d021887 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:14:00 +0100 Subject: [PATCH 200/205] io_uring: clean up timeout async_data allocation opcode prep functions are one of the first things that are called, we can't have ->async_data allocated at this point and it's certainly a bug. Reflect this assumption in io_timeout_prep() and add a WARN_ONCE just in case. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/75a28ca7dbcc5af8b6cd9092819e8384c24dedd4.1634987320.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index c6f32fcf387b4..e775529a36d89 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -6113,7 +6113,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (unlikely(off && !req->ctx->off_timeout_used)) req->ctx->off_timeout_used = true; - if (!req_has_async_data(req) && io_alloc_async_data(req)) + if (WARN_ON_ONCE(req_has_async_data(req))) + return -EFAULT; + if (io_alloc_async_data(req)) return -ENOMEM; data = req->async_data; From b9a6b8f92f6feebf40609e4f5f22a3c0404afb60 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:14:01 +0100 Subject: [PATCH 201/205] io_uring: kill unused param from io_file_supports_nowait io_file_supports_nowait() doesn't use rw argument anymore, remove it. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/4bd6709fc573d70c866ea656cb7a7dbe94be8026.1634987320.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index e775529a36d89..7042ed870b524 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2809,8 +2809,7 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req) return req->flags & REQ_F_SUPPORT_NOWAIT; } -static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, - int rw) +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; struct kiocb *kiocb = &req->rw.kiocb; @@ -3352,7 +3351,7 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_READ))) return -EBADF; - return io_prep_rw(req, sqe, READ); + return io_prep_rw(req, sqe); } /* @@ -3568,7 +3567,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - return io_prep_rw(req, sqe, WRITE); + return io_prep_rw(req, sqe); } static int io_write(struct io_kiocb *req, unsigned int issue_flags) From fb27274a90eac5a687fe73229775ad36df737d8b Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Sat, 23 Oct 2021 12:14:02 +0100 Subject: [PATCH 202/205] io_uring: clusterise ki_flags access in rw_prep ioprio setup doesn't depend on other fields that are modified in io_prep_rw() and we can move it down in the function without worrying about performance. It's useful as it makes iocb->ki_flags accesses/modifications closer together, so it's more likely the compiler will cache it in a register and avoid extra reloads. Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/8ee98779c06f1b59f6039b1e292db4332efd664b.1634987320.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io_uring.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 7042ed870b524..bba2f77ae7e79 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2840,16 +2840,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) req->flags |= REQ_F_NOWAIT; - ioprio = READ_ONCE(sqe->ioprio); - if (ioprio) { - ret = ioprio_check_cap(ioprio); - if (ret) - return ret; - - kiocb->ki_ioprio = ioprio; - } else - kiocb->ki_ioprio = get_current_ioprio(); - if (ctx->flags & IORING_SETUP_IOPOLL) { if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) return -EOPNOTSUPP; @@ -2863,6 +2853,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) kiocb->ki_complete = io_complete_rw; } + ioprio = READ_ONCE(sqe->ioprio); + if (ioprio) { + ret = ioprio_check_cap(ioprio); + if (ret) + return ret; + + kiocb->ki_ioprio = ioprio; + } else { + kiocb->ki_ioprio = get_current_ioprio(); + } + req->imu = NULL; req->rw.addr = READ_ONCE(sqe->addr); req->rw.len = READ_ONCE(sqe->len); From 3884b83dff245e41def99ceacca8ed2056baf0a8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 25 Oct 2021 13:45:12 -0600 Subject: [PATCH 203/205] io_uring: don't assign write hint in the read path Move this out of the generic read/write prep path, and place it in the write specific kiocb setup instead. 
Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index bba2f77ae7e79..d001cd7a6c518 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -2825,7 +2825,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) req->flags |= REQ_F_CUR_POS; kiocb->ki_pos = file->f_pos; } - kiocb->ki_hint = ki_hint_validate(file_write_hint(file)); kiocb->ki_flags = iocb_flags(file); ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); if (unlikely(ret)) @@ -3568,6 +3567,7 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; + req->rw.kiocb.ki_hint = ki_hint_validate(file_write_hint(req->file)); return io_prep_rw(req, sqe); } From f75d118349be055d47407b4ba4ceb98e6437e472 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 29 Oct 2021 06:36:45 -0600 Subject: [PATCH 204/205] io_uring: harder fdinfo sq/cq ring iterating The ring iteration is racy, which isn't necessarily a problem except it can cause us to iterate the whole thing. That isn't desired or ideal, and it can lead to excessive runtimes of reading fdinfo. Cap the iteration at tail - head OR the ring size. While in there, clean up the ring masking and just dump the raw values along with the masks. That provides more useful debug info. Fixes: 83f84356bc8f ("io_uring: add more uring info to fdinfo for debug") Reported-by: Eric Dumazet Signed-off-by: Jens Axboe --- fs/io_uring.c | 51 +++++++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index d001cd7a6c518..c887e4e19e9e4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -10056,12 +10056,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct io_overflow_cqe *ocqe; struct io_rings *r = ctx->rings; unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1; - unsigned int cached_sq_head = ctx->cached_sq_head; - unsigned int cached_cq_tail = ctx->cached_cq_tail; unsigned int sq_head = READ_ONCE(r->sq.head); unsigned int sq_tail = READ_ONCE(r->sq.tail); unsigned int cq_head = READ_ONCE(r->cq.head); unsigned int cq_tail = READ_ONCE(r->cq.tail); + unsigned int sq_entries, cq_entries; bool has_lock; unsigned int i; @@ -10071,29 +10070,37 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, * and sq_tail and cq_head are changed by userspace. But it's ok since * we usually use these info when it is stuck. 
*/ - seq_printf(m, "SqHead:\t%u\n", sq_head & sq_mask); - seq_printf(m, "SqTail:\t%u\n", sq_tail & sq_mask); - seq_printf(m, "CachedSqHead:\t%u\n", cached_sq_head & sq_mask); - seq_printf(m, "CqHead:\t%u\n", cq_head & cq_mask); - seq_printf(m, "CqTail:\t%u\n", cq_tail & cq_mask); - seq_printf(m, "CachedCqTail:\t%u\n", cached_cq_tail & cq_mask); - seq_printf(m, "SQEs:\t%u\n", sq_tail - cached_sq_head); - for (i = cached_sq_head; i < sq_tail; i++) { - unsigned int sq_idx = READ_ONCE(ctx->sq_array[i & sq_mask]); - - if (likely(sq_idx <= sq_mask)) { - struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx]; - - seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n", - sq_idx, sqe->opcode, sqe->fd, sqe->flags, sqe->user_data); - } + seq_printf(m, "SqMask:\t\t0x%x\n", sq_mask); + seq_printf(m, "SqHead:\t%u\n", sq_head); + seq_printf(m, "SqTail:\t%u\n", sq_tail); + seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head); + seq_printf(m, "CqMask:\t0x%x\n", cq_mask); + seq_printf(m, "CqHead:\t%u\n", cq_head); + seq_printf(m, "CqTail:\t%u\n", cq_tail); + seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail); + seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head); + sq_entries = min(sq_tail - sq_head, ctx->sq_entries); + for (i = 0; i < sq_entries; i++) { + unsigned int entry = i + sq_head; + unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]); + struct io_uring_sqe *sqe = &ctx->sq_sqes[sq_idx]; + + if (sq_idx > sq_mask) + continue; + sqe = &ctx->sq_sqes[sq_idx]; + seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n", + sq_idx, sqe->opcode, sqe->fd, sqe->flags, + sqe->user_data); } - seq_printf(m, "CQEs:\t%u\n", cached_cq_tail - cq_head); - for (i = cq_head; i < cached_cq_tail; i++) { - struct io_uring_cqe *cqe = &r->cqes[i & cq_mask]; + seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head); + cq_entries = min(cq_tail - cq_head, ctx->cq_entries); + for (i = 0; i < cq_entries; i++) { + unsigned int entry = i + cq_head; + struct io_uring_cqe *cqe = &r->cqes[entry & cq_mask]; seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n", - i & cq_mask, cqe->user_data, cqe->res, cqe->flags); + entry & cq_mask, cqe->user_data, cqe->res, + cqe->flags); } /* From 1d5f5ea7cb7d15b9fb1cc82673ebb054f02cd7d2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 29 Oct 2021 13:11:33 +0100 Subject: [PATCH 205/205] io-wq: remove worker to owner tw dependency INFO: task iou-wrk-6609:6612 blocked for more than 143 seconds. Not tainted 5.15.0-rc5-syzkaller #0 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. task:iou-wrk-6609 state:D stack:27944 pid: 6612 ppid: 6526 flags:0x00004006 Call Trace: context_switch kernel/sched/core.c:4940 [inline] __schedule+0xb44/0x5960 kernel/sched/core.c:6287 schedule+0xd3/0x270 kernel/sched/core.c:6366 schedule_timeout+0x1db/0x2a0 kernel/time/timer.c:1857 do_wait_for_common kernel/sched/completion.c:85 [inline] __wait_for_common kernel/sched/completion.c:106 [inline] wait_for_common kernel/sched/completion.c:117 [inline] wait_for_completion+0x176/0x280 kernel/sched/completion.c:138 io_worker_exit fs/io-wq.c:183 [inline] io_wqe_worker+0x66d/0xc40 fs/io-wq.c:597 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:295 io-wq worker may submit a task_work to the master task and upon io_worker_exit() wait for the tw to get executed. 
The problem appears when the master task is waiting in coredump.c:
468 freezer_do_not_count();
469 wait_for_completion(&core_state->startup);
470 freezer_count();
Apparently there is some dependency on the child threads that gets everything stuck. Work around it by cancelling the task_work callback that causes it before going into the io_worker_exit() wait. P.S. probably a better option is to not submit a tw elevating the refcount in the first place, but let's leave this exercise for the future. Cc: stable@vger.kernel.org Reported-and-tested-by: syzbot+27d62ee6f256b186883e@syzkaller.appspotmail.com Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/142a716f4ed936feae868959059154362bfa8c19.1635509451.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- fs/io-wq.c | 46 +++++++++++++++++++++++++++++++++++++--------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/fs/io-wq.c b/fs/io-wq.c index 0c283bb18fb24..fe6b2abcaa49a 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -140,6 +140,7 @@ static void io_wqe_dec_running(struct io_worker *worker); static bool io_acct_cancel_pending_work(struct io_wqe *wqe, struct io_wqe_acct *acct, struct io_cb_cancel_data *match); +static void create_worker_cb(struct callback_head *cb); static bool io_worker_get(struct io_worker *worker) { @@ -174,9 +175,44 @@ static void io_worker_ref_put(struct io_wq *wq) complete(&wq->worker_done); } +static void io_worker_cancel_cb(struct io_worker *worker) +{ + struct io_wqe_acct *acct = io_wqe_get_acct(worker); + struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + atomic_dec(&acct->nr_running); + raw_spin_lock(&worker->wqe->lock); + acct->nr_workers--; + raw_spin_unlock(&worker->wqe->lock); + io_worker_ref_put(wq); + clear_bit_unlock(0, &worker->create_state); + io_worker_release(worker); +} + +static bool io_task_worker_match(struct callback_head *cb, void *data) +{ + struct io_worker *worker; + + if (cb->func != create_worker_cb) + return false; + worker = container_of(cb, struct io_worker, create_work); + return worker == data; +} + static void io_worker_exit(struct io_worker *worker) { struct io_wqe *wqe = worker->wqe; + struct io_wq *wq = wqe->wq; + + while (1) { + struct callback_head *cb = task_work_cancel_match(wq->task, + io_task_worker_match, worker); + + if (!cb) + break; + io_worker_cancel_cb(worker); + } io_worker_release(worker); wait_for_completion(&worker->ref_done); @@ -1148,17 +1184,9 @@ static void io_wq_exit_workers(struct io_wq *wq) while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) { struct io_worker *worker; - struct io_wqe_acct *acct; worker = container_of(cb, struct io_worker, create_work); - acct = io_wqe_get_acct(worker); - atomic_dec(&acct->nr_running); - raw_spin_lock(&worker->wqe->lock); - acct->nr_workers--; - raw_spin_unlock(&worker->wqe->lock); - io_worker_ref_put(wq); - clear_bit_unlock(0, &worker->create_state); - io_worker_release(worker); + io_worker_cancel_cb(worker); } rcu_read_lock();