@@ -77,7 +77,7 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
                                       struct extent_buffer, leak_list);
                pr_err(
        "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
-                      eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
+                      eb->start, eb->len, refcount_read(&eb->refs), eb->bflags,
                       btrfs_header_owner(eb));
                list_del(&eb->leak_list);
                WARN_ON_ONCE(1);
@@ -1961,7 +1961,7 @@ static inline struct extent_buffer *find_get_eb(struct xa_state *xas, unsigned l
        if (!eb)
                return NULL;
 
-       if (!atomic_inc_not_zero(&eb->refs)) {
+       if (!refcount_inc_not_zero(&eb->refs)) {
                xas_reset(xas);
                goto retry;
        }
@@ -2012,7 +2012,7 @@ static struct extent_buffer *find_extent_buffer_nolock(
 
        rcu_read_lock();
        eb = xa_load(&fs_info->buffer_tree, index);
-       if (eb && !atomic_inc_not_zero(&eb->refs))
+       if (eb && !refcount_inc_not_zero(&eb->refs))
                eb = NULL;
        rcu_read_unlock();
        return eb;
@@ -2842,7 +2842,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct btrfs_fs_info *fs_info
        btrfs_leak_debug_add_eb(eb);
 
        spin_lock_init(&eb->refs_lock);
-       atomic_set(&eb->refs, 1);
+       refcount_set(&eb->refs, 1);
 
        ASSERT(eb->len <= BTRFS_MAX_METADATA_BLOCKSIZE);
 
@@ -2975,13 +2975,13 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
         * once io is initiated, TREE_REF can no longer be cleared, so that is
         * the moment at which any such race is best fixed.
         */
-       refs = atomic_read(&eb->refs);
+       refs = refcount_read(&eb->refs);
        if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
                return;
 
        spin_lock(&eb->refs_lock);
        if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-               atomic_inc(&eb->refs);
+               refcount_inc(&eb->refs);
        spin_unlock(&eb->refs_lock);
 }
 
@@ -3047,7 +3047,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
                return ERR_PTR(ret);
        }
        if (exists) {
-               if (!atomic_inc_not_zero(&exists->refs)) {
+               if (!refcount_inc_not_zero(&exists->refs)) {
                        /* The extent buffer is being freed, retry. */
                        xa_unlock_irq(&fs_info->buffer_tree);
                        goto again;
@@ -3092,7 +3092,7 @@ static struct extent_buffer *grab_extent_buffer(struct btrfs_fs_info *fs_info,
         * just overwrite folio private.
         */
        exists = folio_get_private(folio);
-       if (atomic_inc_not_zero(&exists->refs))
+       if (refcount_inc_not_zero(&exists->refs))
                return exists;
 
        WARN_ON(folio_test_dirty(folio));
@@ -3362,7 +3362,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                goto out;
        }
        if (existing_eb) {
-               if (!atomic_inc_not_zero(&existing_eb->refs)) {
+               if (!refcount_inc_not_zero(&existing_eb->refs)) {
                        xa_unlock_irq(&fs_info->buffer_tree);
                        goto again;
                }
@@ -3391,7 +3391,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        return eb;
 
 out:
-       WARN_ON(!atomic_dec_and_test(&eb->refs));
+       WARN_ON(!refcount_dec_and_test(&eb->refs));
 
        /*
         * Any attached folios need to be detached before we unlock them. This
@@ -3437,8 +3437,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
 {
        lockdep_assert_held(&eb->refs_lock);
 
-       WARN_ON(atomic_read(&eb->refs) == 0);
-       if (atomic_dec_and_test(&eb->refs)) {
+       if (refcount_dec_and_test(&eb->refs)) {
                struct btrfs_fs_info *fs_info = eb->fs_info;
 
                spin_unlock(&eb->refs_lock);
@@ -3484,7 +3483,7 @@ void free_extent_buffer(struct extent_buffer *eb)
        if (!eb)
                return;
 
-       refs = atomic_read(&eb->refs);
+       refs = refcount_read(&eb->refs);
        while (1) {
                if (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)) {
                        if (refs == 1)
@@ -3494,16 +3493,16 @@ void free_extent_buffer(struct extent_buffer *eb)
                }
 
                /* Optimization to avoid locking eb->refs_lock. */
-               if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
+               if (atomic_try_cmpxchg(&eb->refs.refs, &refs, refs - 1))
                        return;
        }
 
        spin_lock(&eb->refs_lock);
-       if (atomic_read(&eb->refs) == 2 &&
+       if (refcount_read(&eb->refs) == 2 &&
            test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
            !extent_buffer_under_io(eb) &&
            test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-               atomic_dec(&eb->refs);
+               refcount_dec(&eb->refs);
 
        /*
         * I know this is terrible, but it's temporary until we stop tracking
@@ -3520,9 +3519,9 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
        spin_lock(&eb->refs_lock);
        set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
 
-       if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
+       if (refcount_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
            test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
-               atomic_dec(&eb->refs);
+               refcount_dec(&eb->refs);
        release_extent_buffer(eb);
 }
 
@@ -3580,7 +3579,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
                btree_clear_folio_dirty_tag(folio);
                folio_unlock(folio);
        }
-       WARN_ON(atomic_read(&eb->refs) == 0);
+       WARN_ON(refcount_read(&eb->refs) == 0);
 }
 
 void set_extent_buffer_dirty(struct extent_buffer *eb)
@@ -3591,7 +3590,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
 
        was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 
-       WARN_ON(atomic_read(&eb->refs) == 0);
+       WARN_ON(refcount_read(&eb->refs) == 0);
        WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
        WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
 
@@ -3717,7 +3716,7 @@ int read_extent_buffer_pages_nowait(struct extent_buffer *eb, int mirror_num,
 
        eb->read_mirror = 0;
        check_buffer_tree_ref(eb);
-       atomic_inc(&eb->refs);
+       refcount_inc(&eb->refs);
 
        bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
                               REQ_OP_READ | REQ_META, eb->fs_info,
@@ -4312,7 +4311,7 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
                 * won't disappear out from under us.
                 */
                spin_lock(&eb->refs_lock);
-               if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+               if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
                        spin_unlock(&eb->refs_lock);
                        continue;
                }
@@ -4378,7 +4377,7 @@ int try_release_extent_buffer(struct folio *folio)
         * this page.
         */
        spin_lock(&eb->refs_lock);
-       if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
+       if (refcount_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
                spin_unlock(&eb->refs_lock);
                spin_unlock(&folio->mapping->i_private_lock);
                return 0;
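
For context (not part of the patch): the conversion relies on the standard `<linux/refcount.h>` API, which saturates instead of wrapping on overflow and warns on increment-from-zero and underflow. Below is a minimal sketch of the refcounting pattern these hunks switch to, using a hypothetical `struct demo_obj` in place of `struct extent_buffer`:

```c
#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object, standing in for struct extent_buffer. */
struct demo_obj {
	refcount_t refs;
};

static struct demo_obj *demo_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	/* Start at 1, as __alloc_extent_buffer() does with refcount_set(). */
	refcount_set(&obj->refs, 1);
	return obj;
}

/* Lookup path: only take a reference if the object is still live. */
static bool demo_try_get(struct demo_obj *obj)
{
	return refcount_inc_not_zero(&obj->refs);
}

static void demo_put(struct demo_obj *obj)
{
	/* The last reference frees the object; underflow would WARN. */
	if (refcount_dec_and_test(&obj->refs))
		kfree(obj);
}
```

The one place the patch still reaches into the underlying atomic_t (`&eb->refs.refs`) is the lockless fast path in free_extent_buffer(), since refcount_t does not expose a try_cmpxchg helper.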