Merge branch 'for-next-next-v6.12-20241111' into for-next-20241111
kdave committed Nov 11, 2024
2 parents 2d5404c + 0dd5356, commit 5f1be03
Showing 67 changed files with 2,460 additions and 1,422 deletions.
26 changes: 26 additions & 0 deletions fs/btrfs/Kconfig
@@ -78,6 +78,32 @@ config BTRFS_ASSERT

If unsure, say N.

config BTRFS_EXPERIMENTAL
bool "Btrfs experimental features"
depends on BTRFS_FS
default n
help
Enable experimental features. These features may not be stable enough
for end users. This is meant for btrfs developers or users who wish
to test the functionality and report problems.

Current list:

- extent map shrinker - performance problems with too frequent shrinks

- send stream protocol v3 - fs-verity support

- checksum offload mode - sysfs knob to affect when checksums are
calculated (at IO time, or in a thread)

- raid-stripe-tree - additional mapping of extents to devices to
support RAID1* profiles on zoned devices,
RAID56 not yet supported

- extent tree v2 - complex rework of extent tracking

If unsure, say N.

config BTRFS_FS_REF_VERIFY
bool "Btrfs with the ref verify tool compiled in"
depends on BTRFS_FS
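For context on how the new symbol is consumed, here is a minimal sketch of compile-time gating on CONFIG_BTRFS_EXPERIMENTAL, mirroring the pattern the fs/btrfs/bio.c hunk below switches to; the helper name is hypothetical and not part of this commit:

/* Hypothetical helper gated on CONFIG_BTRFS_EXPERIMENTAL. */
static bool btrfs_csum_offload_knob_available(void)
{
#ifdef CONFIG_BTRFS_EXPERIMENTAL
        /* Experimental builds expose the sysfs knob for checksum offload. */
        return true;
#else
        /* Non-experimental builds keep the default behavior. */
        return false;
#endif
}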
3 changes: 2 additions & 1 deletion fs/btrfs/Makefile
@@ -43,4 +43,5 @@ btrfs-$(CONFIG_FS_VERITY) += verity.o
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
tests/extent-buffer-tests.o tests/btrfs-tests.o \
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
tests/free-space-tree-tests.o tests/extent-map-tests.o
tests/free-space-tree-tests.o tests/extent-map-tests.o \
tests/raid-stripe-tree-tests.o
3 changes: 2 additions & 1 deletion fs/btrfs/backref.c
@@ -1442,7 +1442,8 @@ static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
*/
delayed_refs = &ctx->trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,
ctx->bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
refcount_inc(&head->refs);
2 changes: 1 addition & 1 deletion fs/btrfs/bio.c
@@ -587,7 +587,7 @@ static bool should_async_write(struct btrfs_bio *bbio)
{
bool auto_csum_mode = true;

#ifdef CONFIG_BTRFS_DEBUG
#ifdef CONFIG_BTRFS_EXPERIMENTAL
struct btrfs_fs_devices *fs_devices = bbio->fs_info->fs_devices;
enum btrfs_offload_csum_mode csum_mode = READ_ONCE(fs_devices->offload_csum_mode);

2 changes: 1 addition & 1 deletion fs/btrfs/block-group.c
@@ -2797,7 +2797,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
* uncompressed data size, because the compression is only done
* when writeback triggered and we don't know how much space we
* are actually going to need, so we reserve the uncompressed
* size because the data may be uncompressible in the worst case.
* size because the data may be incompressible in the worst case.
*/
if (ret == 0) {
bool used;
15 changes: 10 additions & 5 deletions fs/btrfs/btrfs_inode.h
@@ -577,7 +577,6 @@ void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state
struct extent_state *other);
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
struct extent_state *orig, u64 split);
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
void btrfs_evict_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
@@ -613,11 +612,17 @@ int btrfs_writepage_cow_fixup(struct folio *folio);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr,
u64 disk_io_size,
struct page **pages);
u64 disk_bytenr, u64 disk_io_size,
struct page **pages, void *uring_ctx);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
struct btrfs_ioctl_encoded_io_args *encoded);
struct btrfs_ioctl_encoded_io_args *encoded,
struct extent_state **cached_state,
u64 *disk_bytenr, u64 *disk_io_size);
ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
u64 start, u64 lockend,
struct extent_state **cached_state,
u64 disk_bytenr, u64 disk_io_size,
size_t count, bool compressed, bool *unlocked);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
const struct btrfs_ioctl_encoded_io_args *encoded);

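These prototype changes split the encoded-read path into a setup stage (btrfs_encoded_read(), which now also returns the locked extent state and the resolved disk_bytenr/disk_io_size) and a data stage (btrfs_encoded_read_regular()). The following is a hedged sketch of how a caller might drive the two stages, inferred from the signatures alone; the locals and the -EIOCBQUEUED convention are assumptions, not taken from this diff:

/* Assumed two-stage caller; locals invented, error handling elided. */
struct extent_state *cached_state = NULL;
u64 disk_bytenr = 0, disk_io_size = 0;
bool unlocked = false;
ssize_t ret;

ret = btrfs_encoded_read(iocb, iter, &encoded, &cached_state,
                         &disk_bytenr, &disk_io_size);
/* Assumption: a special return value requests the regular read stage. */
if (ret == -EIOCBQUEUED)
        ret = btrfs_encoded_read_regular(iocb, iter, start, lockend,
                                         &cached_state, disk_bytenr,
                                         disk_io_size, count,
                                         encoded.compression !=
                                         BTRFS_ENCODED_IO_COMPRESSION_NONE,
                                         &unlocked);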
14 changes: 8 additions & 6 deletions fs/btrfs/compression.c
@@ -453,7 +453,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (pg_index > end_index)
break;

folio = __filemap_get_folio(mapping, pg_index, 0, 0);
folio = filemap_get_folio(mapping, pg_index);
if (!IS_ERR(folio)) {
u64 folio_sz = folio_size(folio);
u64 offset = offset_in_folio(folio, cur);
@@ -545,8 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
* subpage::readers and to unlock the page.
*/
if (fs_info->sectorsize < PAGE_SIZE)
btrfs_subpage_start_reader(fs_info, folio, cur,
add_size);
btrfs_folio_set_lock(fs_info, folio, cur, add_size);
folio_put(folio);
cur += add_size;
}
@@ -702,7 +701,7 @@ static void free_heuristic_ws(struct list_head *ws)
kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
static struct list_head *alloc_heuristic_ws(void)
{
struct heuristic_ws *ws;

@@ -744,9 +743,9 @@ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
static struct list_head *alloc_workspace(int type, unsigned int level)
{
switch (type) {
case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws();
case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace();
case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
default:
/*
@@ -1030,13 +1029,16 @@ int btrfs_compress_folios(unsigned int type_level, struct address_space *mapping
{
int type = btrfs_compress_type(type_level);
int level = btrfs_compress_level(type_level);
const unsigned long orig_len = *total_out;
struct list_head *workspace;
int ret;

level = btrfs_compress_set_level(type, level);
workspace = get_workspace(type, level);
ret = compression_compress_pages(type, workspace, mapping, start, folios,
out_folios, total_in, total_out);
/* The total read-in bytes should be no larger than the input. */
ASSERT(*total_in <= orig_len);
put_workspace(type, workspace);
return ret;
}
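The new assertion in btrfs_compress_folios() makes a contract explicit: callers pass the input length in via *total_out, and a compressor must never report consuming more input than that. A minimal sketch of a caller under that contract; the locals (range_len in particular) are invented for illustration:

/* Invented caller: on entry *total_out carries the input length. */
unsigned long total_in = 0;
unsigned long total_out = range_len;    /* bytes available to compress */
unsigned long nr_folios = 0;
int ret;

ret = btrfs_compress_folios(type_level, mapping, start, folios,
                            &nr_folios, &total_in, &total_out);
/* Post-condition enforced by the new ASSERT: total_in <= range_len. */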
2 changes: 1 addition & 1 deletion fs/btrfs/compression.h
@@ -175,7 +175,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
int lzo_decompress(struct list_head *ws, const u8 *data_in,
struct folio *dest_folio, unsigned long dest_pgoff, size_t srclen,
size_t destlen);
struct list_head *lzo_alloc_workspace(unsigned int level);
struct list_head *lzo_alloc_workspace(void);
void lzo_free_workspace(struct list_head *ws);

int zstd_compress_folios(struct list_head *ws, struct address_space *mapping,
132 changes: 83 additions & 49 deletions fs/btrfs/ctree.c
@@ -1508,26 +1508,26 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
*/
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
struct extent_buffer **eb_ret, int level, int slot,
struct extent_buffer **eb_ret, int slot,
const struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_tree_parent_check check = { 0 };
u64 blocknr;
u64 gen;
struct extent_buffer *tmp;
int ret;
struct extent_buffer *tmp = NULL;
int ret = 0;
int parent_level;
bool unlock_up;
int err;
bool read_tmp = false;
bool tmp_locked = false;
bool path_released = false;

unlock_up = ((level + 1 < BTRFS_MAX_LEVEL) && p->locks[level + 1]);
blocknr = btrfs_node_blockptr(*eb_ret, slot);
gen = btrfs_node_ptr_generation(*eb_ret, slot);
parent_level = btrfs_header_level(*eb_ret);
btrfs_node_key_to_cpu(*eb_ret, &check.first_key, slot);
check.has_first_key = true;
check.level = parent_level - 1;
check.transid = gen;
check.transid = btrfs_node_ptr_generation(*eb_ret, slot);
check.owner_root = btrfs_root_id(root);

/*
@@ -1540,79 +1540,115 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
tmp = find_extent_buffer(fs_info, blocknr);
if (tmp) {
if (p->reada == READA_FORWARD_ALWAYS)
reada_for_search(fs_info, p, level, slot, key->objectid);
reada_for_search(fs_info, p, parent_level, slot, key->objectid);

/* first we do an atomic uptodate check */
if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
if (btrfs_buffer_uptodate(tmp, check.transid, 1) > 0) {
/*
* Do extra check for first_key, eb can be stale due to
* being cached, read from scrub, or have multiple
* parents (shared tree blocks).
*/
if (btrfs_verify_level_key(tmp,
parent_level - 1, &check.first_key, gen)) {
free_extent_buffer(tmp);
return -EUCLEAN;
if (btrfs_verify_level_key(tmp, &check)) {
ret = -EUCLEAN;
goto out;
}
*eb_ret = tmp;
return 0;
tmp = NULL;
ret = 0;
goto out;
}

if (p->nowait) {
free_extent_buffer(tmp);
return -EAGAIN;
ret = -EAGAIN;
goto out;
}

if (unlock_up)
btrfs_unlock_up_safe(p, level + 1);

/* now we're allowed to do a blocking uptodate check */
ret = btrfs_read_extent_buffer(tmp, &check);
if (ret) {
free_extent_buffer(tmp);
if (!p->skip_locking) {
btrfs_unlock_up_safe(p, parent_level + 1);
tmp_locked = true;
btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
return ret;
ret = -EAGAIN;
path_released = true;
}

if (unlock_up)
ret = -EAGAIN;
/* Now we're allowed to do a blocking uptodate check. */
err = btrfs_read_extent_buffer(tmp, &check);
if (err) {
ret = err;
goto out;
}

if (ret == 0) {
ASSERT(!tmp_locked);
*eb_ret = tmp;
tmp = NULL;
}
goto out;
} else if (p->nowait) {
return -EAGAIN;
ret = -EAGAIN;
goto out;
}

if (unlock_up) {
btrfs_unlock_up_safe(p, level + 1);
if (!p->skip_locking) {
btrfs_unlock_up_safe(p, parent_level + 1);
ret = -EAGAIN;
} else {
ret = 0;
}

if (p->reada != READA_NONE)
reada_for_search(fs_info, p, level, slot, key->objectid);
reada_for_search(fs_info, p, parent_level, slot, key->objectid);

tmp = read_tree_block(fs_info, blocknr, &check);
tmp = btrfs_find_create_tree_block(fs_info, blocknr, check.owner_root, check.level);
if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
tmp = NULL;
goto out;
}
read_tmp = true;

if (!p->skip_locking) {
ASSERT(ret == -EAGAIN);
tmp_locked = true;
btrfs_tree_read_lock(tmp);
btrfs_release_path(p);
return PTR_ERR(tmp);
path_released = true;
}

/* Now we're allowed to do a blocking uptodate check. */
err = btrfs_read_extent_buffer(tmp, &check);
if (err) {
ret = err;
goto out;
}

/*
* If the read above didn't mark this buffer up to date,
* it will never end up being up to date. Set ret to EIO now
* and give up so that our caller doesn't loop forever
* on our EAGAINs.
*/
if (!extent_buffer_uptodate(tmp))
if (!extent_buffer_uptodate(tmp)) {
ret = -EIO;
goto out;
}

out:
if (ret == 0) {
ASSERT(!tmp_locked);
*eb_ret = tmp;
} else {
free_extent_buffer(tmp);
btrfs_release_path(p);
tmp = NULL;
}
out:
if (tmp) {
if (tmp_locked)
btrfs_tree_read_unlock(tmp);
if (read_tmp && ret && ret != -EAGAIN)
free_extent_buffer_stale(tmp);
else
free_extent_buffer(tmp);
}
if (ret && !path_released)
btrfs_release_path(p);

return ret;
}
@@ -2197,8 +2233,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
goto done;
}

err = read_block_for_search(root, p, &b, level, slot, key);
if (err == -EAGAIN)
err = read_block_for_search(root, p, &b, slot, key);
if (err == -EAGAIN && !p->nowait)
goto again;
if (err) {
ret = err;
@@ -2324,8 +2360,8 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
goto done;
}

err = read_block_for_search(root, p, &b, level, slot, key);
if (err == -EAGAIN)
err = read_block_for_search(root, p, &b, slot, key);
if (err == -EAGAIN && !p->nowait)
goto again;
if (err) {
ret = err;
@@ -2334,7 +2370,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,

level = btrfs_header_level(b);
btrfs_tree_read_lock(b);
b = btrfs_tree_mod_log_rewind(fs_info, p, b, time_seq);
b = btrfs_tree_mod_log_rewind(fs_info, b, time_seq);
if (!b) {
ret = -ENOMEM;
goto done;
@@ -4930,8 +4966,7 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
}

next = c;
ret = read_block_for_search(root, path, &next, level,
slot, &key);
ret = read_block_for_search(root, path, &next, slot, &key);
if (ret == -EAGAIN && !path->nowait)
goto again;

@@ -4974,8 +5009,7 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
if (!level)
break;

ret = read_block_for_search(root, path, &next, level,
0, &key);
ret = read_block_for_search(root, path, &next, 0, &key);
if (ret == -EAGAIN && !path->nowait)
goto again;

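The read_block_for_search() rework above replaces scattered early returns with a single exit path driven by state flags (tmp_locked, path_released), so each failure mode unlocks and releases exactly once. A distilled sketch of that pattern with generic, hypothetical names (this is not the btrfs code itself):

/* Flag-driven single-exit cleanup; all types and helpers hypothetical. */
static int read_and_lock_block(struct ctx *c, struct buffer **result)
{
        struct buffer *buf = NULL;
        bool buf_locked = false;
        bool path_released = false;
        int ret = 0;

        buf = read_buffer(c);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                buf = NULL;
                goto out;
        }
        if (!c->skip_locking) {
                /* Blocking lock: drop the parent locks first. */
                lock_buffer(buf);
                buf_locked = true;
                release_path(c);
                path_released = true;
                ret = -EAGAIN;  /* caller must restart the search */
        }
        if (ret == 0) {
                *result = buf;  /* success: hand off, skip the cleanup */
                buf = NULL;
        }
out:
        if (buf) {
                if (buf_locked)
                        unlock_buffer(buf);
                put_buffer(buf);
        }
        if (ret && !path_released)
                release_path(c);
        return ret;
}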
(The remaining changed files in this commit are not shown in this view.)
