Skip to content

Commit 98f97e2

Browse files
fdmanana authored and gregkh committed
btrfs: use refcount_t type for the extent buffer reference counter
[ Upstream commit b769777 ]

Instead of using a bare atomic, use the refcount_t type, which despite being
a structure that contains only an atomic, has an API that checks for
underflows and other hazards. This doesn't change the size of the
extent_buffer structure.

This removes the need to do things like this:

    WARN_ON(atomic_read(&eb->refs) == 0);
    if (atomic_dec_and_test(&eb->refs)) {
        (...)
    }

And do just:

    if (refcount_dec_and_test(&eb->refs)) {
        (...)
    }

Since refcount_dec_and_test() already triggers a warning when we decrement a
ref count that has a value of 0 (or below zero).

Reviewed-by: Boris Burkov <boris@bur.io>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Stable-dep-of: ad580df ("btrfs: fix subpage deadlock in try_release_subpage_extent_buffer()")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 71f50f3 commit 98f97e2

File tree

11 files changed

+42
-43
lines changed

11 files changed

+42
-43
lines changed

fs/btrfs/ctree.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
198198
* the inc_not_zero dance and if it doesn't work then
199199
* synchronize_rcu and try again.
200200
*/
201-
if (atomic_inc_not_zero(&eb->refs)) {
201+
if (refcount_inc_not_zero(&eb->refs)) {
202202
rcu_read_unlock();
203203
break;
204204
}
@@ -556,7 +556,7 @@ int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
556556
btrfs_abort_transaction(trans, ret);
557557
goto error_unlock_cow;
558558
}
559-
atomic_inc(&cow->refs);
559+
refcount_inc(&cow->refs);
560560
rcu_assign_pointer(root->node, cow);
561561

562562
ret = btrfs_free_tree_block(trans, btrfs_root_id(root), buf,
@@ -1088,7 +1088,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
10881088
/* update the path */
10891089
if (left) {
10901090
if (btrfs_header_nritems(left) > orig_slot) {
1091-
atomic_inc(&left->refs);
1091+
refcount_inc(&left->refs);
10921092
/* left was locked after cow */
10931093
path->nodes[level] = left;
10941094
path->slots[level + 1] -= 1;
@@ -1692,7 +1692,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
16921692

16931693
if (p->search_commit_root) {
16941694
b = root->commit_root;
1695-
atomic_inc(&b->refs);
1695+
refcount_inc(&b->refs);
16961696
level = btrfs_header_level(b);
16971697
/*
16981698
* Ensure that all callers have set skip_locking when
@@ -2893,7 +2893,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
28932893
free_extent_buffer(old);
28942894

28952895
add_root_to_dirty_list(root);
2896-
atomic_inc(&c->refs);
2896+
refcount_inc(&c->refs);
28972897
path->nodes[level] = c;
28982898
path->locks[level] = BTRFS_WRITE_LOCK;
28992899
path->slots[level] = 0;
@@ -4450,7 +4450,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
44504450

44514451
root_sub_used_bytes(root);
44524452

4453-
atomic_inc(&leaf->refs);
4453+
refcount_inc(&leaf->refs);
44544454
ret = btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
44554455
free_extent_buffer_stale(leaf);
44564456
if (ret < 0)
@@ -4535,7 +4535,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
45354535
* for possible call to btrfs_del_ptr below
45364536
*/
45374537
slot = path->slots[1];
4538-
atomic_inc(&leaf->refs);
4538+
refcount_inc(&leaf->refs);
45394539
/*
45404540
* We want to be able to at least push one item to the
45414541
* left neighbour leaf, and that's the first item.

fs/btrfs/extent-tree.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6342,7 +6342,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
63426342

63436343
btrfs_assert_tree_write_locked(parent);
63446344
parent_level = btrfs_header_level(parent);
6345-
atomic_inc(&parent->refs);
6345+
refcount_inc(&parent->refs);
63466346
path->nodes[parent_level] = parent;
63476347
path->slots[parent_level] = btrfs_header_nritems(parent);
63486348

fs/btrfs/extent_io.c

Lines changed: 22 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -77,7 +77,7 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
7777
struct extent_buffer, leak_list);
7878
pr_err(
7979
"BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
80-
eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
80+
eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
8181
btrfs_header_owner(eb));
8282
list_del(&eb->leak_list);
8383
WARN_ON_ONCE(1);
@@ -1961,7 +1961,7 @@ static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned l
19611961
if (!eb)
19621962
return NULL;
19631963

1964-
if (!atomic_inc_not_zero(&eb->refs)) {
1964+
if (!refcount_inc_not_zero(&eb->refs)) {
19651965
xas_reset(xas);
19661966
goto retry;
19671967
}
@@ -2012,7 +2012,7 @@ static struct extent_buffer *find_extent_buffer_nolock(
20122012

20132013
rcu_read_lock();
20142014
eb = xa_load(&fs_info->buffer_tree, index);
2015-
if (eb && !atomic_inc_not_zero(&eb->refs))
2015+
if (eb && !refcount_inc_not_zero(&eb->refs))
20162016
eb = NULL;
20172017
rcu_read_unlock();
20182018
return eb;
@@ -2842,7 +2842,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info
28422842
btrfs_leak_debug_add_eb(eb);
28432843

28442844
spin_lock_init(&eb->refs_lock);
2845-
atomic_set(&eb->refs, 1);
2845+
refcount_set(&eb->refs, 1);
28462846

28472847
ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
28482848

@@ -2975,13 +2975,13 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
29752975
* once io is initiated, TREE_REF can no longer be cleared, so that is
29762976
* the moment at which any such race is best fixed.
29772977
*/
2978-
refs = atomic_read(&eb->refs);
2978+
refs = refcount_read(&eb->refs);
29792979
if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
29802980
return;
29812981

29822982
spin_lock(&eb->refs_lock);
29832983
if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2984-
atomic_inc(&eb->refs);
2984+
refcount_inc(&eb->refs);
29852985
spin_unlock(&eb->refs_lock);
29862986
}
29872987

@@ -3047,7 +3047,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
30473047
return ERR_PTR(ret);
30483048
}
30493049
if (exists) {
3050-
if (!atomic_inc_not_zero(&exists->refs)) {
3050+
if (!refcount_inc_not_zero(&exists->refs)) {
30513051
/* The extent buffer is being freed, retry. */
30523052
xa_unlock_irq(&fs_info->buffer_tree);
30533053
goto again;
@@ -3092,7 +3092,7 @@ static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
30923092
* just overwrite folio private.
30933093
*/
30943094
exists = folio_get_private(folio);
3095-
if (atomic_inc_not_zero(&exists->refs))
3095+
if (refcount_inc_not_zero(&exists->refs))
30963096
return exists;
30973097

30983098
WARN_ON(folio_test_dirty(folio));
@@ -3362,7 +3362,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
33623362
goto out;
33633363
}
33643364
if (existing_eb) {
3365-
if (!atomic_inc_not_zero(&existing_eb->refs)) {
3365+
if (!refcount_inc_not_zero(&existing_eb->refs)) {
33663366
xa_unlock_irq(&fs_info->buffer_tree);
33673367
goto again;
33683368
}
@@ -3391,7 +3391,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
33913391
return eb;
33923392

33933393
out:
3394-
WARN_ON(!atomic_dec_and_test(&eb->refs));
3394+
WARN_ON(!refcount_dec_and_test(&eb->refs));
33953395

33963396
/*
33973397
* Any attached folios need to be detached before we unlock them. This
@@ -3437,8 +3437,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
34373437
{
34383438
lockdep_assert_held(&eb->refs_lock);
34393439

3440-
WARN_ON(atomic_read(&eb->refs) == 0);
3441-
if (atomic_dec_and_test(&eb->refs)) {
3440+
if (refcount_dec_and_test(&eb->refs)) {
34423441
struct btrfs_fs_info *fs_info = eb->fs_info;
34433442

34443443
spin_unlock(&eb->refs_lock);
@@ -3484,7 +3483,7 @@ void free_extent_buffer(struct extent_buffer *eb)
34843483
if (!eb)
34853484
return;
34863485

3487-
refs = atomic_read(&eb->refs);
3486+
refs = refcount_read(&eb->refs);
34883487
while (1) {
34893488
if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
34903489
if (refs == 1)
@@ -3494,16 +3493,16 @@ void free_extent_buffer(struct extent_buffer *eb)
34943493
}
34953494

34963495
/* Optimization to avoid locking eb->refs_lock. */
3497-
if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3496+
if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
34983497
return;
34993498
}
35003499

35013500
spin_lock(&eb->refs_lock);
3502-
if (atomic_read(&eb->refs) == 2 &&
3501+
if (refcount_read(&eb->refs) == 2 &&
35033502
test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
35043503
!extent_buffer_under_io(eb) &&
35053504
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3506-
atomic_dec(&eb->refs);
3505+
refcount_dec(&eb->refs);
35073506

35083507
/*
35093508
* I know this is terrible, but it's temporary until we stop tracking
@@ -3520,9 +3519,9 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
35203519
spin_lock(&eb->refs_lock);
35213520
set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
35223521

3523-
if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3522+
if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
35243523
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3525-
atomic_dec(&eb->refs);
3524+
refcount_dec(&eb->refs);
35263525
release_extent_buffer(eb);
35273526
}
35283527

@@ -3580,7 +3579,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
35803579
btree_clear_folio_dirty_tag(folio);
35813580
folio_unlock(folio);
35823581
}
3583-
WARN_ON(atomic_read(&eb->refs) == 0);
3582+
WARN_ON(refcount_read(&eb->refs) == 0);
35843583
}
35853584

35863585
void set_extent_buffer_dirty(struct extent_buffer *eb)
@@ -3591,7 +3590,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
35913590

35923591
was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
35933592

3594-
WARN_ON(atomic_read(&eb->refs) == 0);
3593+
WARN_ON(refcount_read(&eb->refs) == 0);
35953594
WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
35963595
WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
35973596

@@ -3717,7 +3716,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
37173716

37183717
eb->read_mirror = 0;
37193718
check_buffer_tree_ref(eb);
3720-
atomic_inc(&eb->refs);
3719+
refcount_inc(&eb->refs);
37213720

37223721
bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
37233722
REQ_OP_READ | REQ_META, eb->fs_info,
@@ -4312,7 +4311,7 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
43124311
* won't disappear out from under us.
43134312
*/
43144313
spin_lock(&eb->refs_lock);
4315-
if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4314+
if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
43164315
spin_unlock(&eb->refs_lock);
43174316
continue;
43184317
}
@@ -4378,7 +4377,7 @@ int try_release_extent_buffer(struct folio *folio)
43784377
* this page.
43794378
*/
43804379
spin_lock(&eb->refs_lock);
4381-
if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4380+
if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
43824381
spin_unlock(&eb->refs_lock);
43834382
spin_unlock(&folio->mapping->i_private_lock);
43844383
return 0;

fs/btrfs/extent_io.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ struct extent_buffer {
9898
void *addr;
9999

100100
spinlock_t refs_lock;
101-
atomic_t refs;
101+
refcount_t refs;
102102
int read_mirror;
103103
/* >= 0 if eb belongs to a log tree, -1 otherwise */
104104
s8 log_index;

fs/btrfs/fiemap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -320,7 +320,7 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p
320320
* the cost of allocating a new one.
321321
*/
322322
ASSERT(test_bit(EXTENT_BUFFER_UNMAPPED, &clone->bflags));
323-
atomic_inc(&clone->refs);
323+
refcount_inc(&clone->refs);
324324

325325
ret = btrfs_next_leaf(inode->root, path);
326326
if (ret != 0)

fs/btrfs/print-tree.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -223,7 +223,7 @@ static void print_eb_refs_lock(const struct extent_buffer *eb)
223223
{
224224
#ifdef CONFIG_BTRFS_DEBUG
225225
btrfs_info(eb->fs_info, "refs %u lock_owner %u current %u",
226-
atomic_read(&eb->refs), eb->lock_owner, current->pid);
226+
refcount_read(&eb->refs), eb->lock_owner, current->pid);
227227
#endif
228228
}
229229

fs/btrfs/qgroup.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2348,7 +2348,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
23482348
btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
23492349

23502350
/* For src_path */
2351-
atomic_inc(&src_eb->refs);
2351+
refcount_inc(&src_eb->refs);
23522352
src_path->nodes[root_level] = src_eb;
23532353
src_path->slots[root_level] = dst_path->slots[root_level];
23542354
src_path->locks[root_level] = 0;
@@ -2581,7 +2581,7 @@ static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
25812581
goto out;
25822582
}
25832583
/* For dst_path */
2584-
atomic_inc(&dst_eb->refs);
2584+
refcount_inc(&dst_eb->refs);
25852585
dst_path->nodes[level] = dst_eb;
25862586
dst_path->slots[level] = 0;
25872587
dst_path->locks[level] = 0;
@@ -2673,7 +2673,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
26732673
* walk back up the tree (adjusting slot pointers as we go)
26742674
* and restart the search process.
26752675
*/
2676-
atomic_inc(&root_eb->refs); /* For path */
2676+
refcount_inc(&root_eb->refs); /* For path */
26772677
path->nodes[root_level] = root_eb;
26782678
path->slots[root_level] = 0;
26792679
path->locks[root_level] = 0; /* so release_path doesn't try to unlock */

fs/btrfs/relocation.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1535,7 +1535,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
15351535

15361536
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
15371537
level = btrfs_root_level(root_item);
1538-
atomic_inc(&reloc_root->node->refs);
1538+
refcount_inc(&reloc_root->node->refs);
15391539
path->nodes[level] = reloc_root->node;
15401540
path->slots[level] = 0;
15411541
} else {
@@ -4358,7 +4358,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
43584358
}
43594359

43604360
btrfs_backref_drop_node_buffer(node);
4361-
atomic_inc(&cow->refs);
4361+
refcount_inc(&cow->refs);
43624362
node->eb = cow;
43634363
node->new_bytenr = cow->start;
43644364

fs/btrfs/tree-log.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2747,7 +2747,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
27472747
level = btrfs_header_level(log->node);
27482748
orig_level = level;
27492749
path->nodes[level] = log->node;
2750-
atomic_inc(&log->node->refs);
2750+
refcount_inc(&log->node->refs);
27512751
path->slots[level] = 0;
27522752

27532753
while (1) {
@@ -3711,7 +3711,7 @@ static int clone_leaf(struct btrfs_path *path, struct btrfs_log_ctx *ctx)
37113711
* Add extra ref to scratch eb so that it is not freed when callers
37123712
* release the path, so we can reuse it later if needed.
37133713
*/
3714-
atomic_inc(&ctx->scratch_eb->refs);
3714+
refcount_inc(&ctx->scratch_eb->refs);
37153715

37163716
return 0;
37173717
}

fs/btrfs/zoned.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2491,7 +2491,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
24912491

24922492
/* For the work */
24932493
btrfs_get_block_group(bg);
2494-
atomic_inc(&eb->refs);
2494+
refcount_inc(&eb->refs);
24952495
bg->last_eb = eb;
24962496
INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
24972497
queue_work(system_unbound_wq, &bg->zone_finish_work);

0 commit comments

Comments
 (0)