concurrent small allocation defeats large allocation
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
ahrens authored and pcd1193182 committed May 30, 2019
1 parent 1e724f4 commit 8e54b09
Showing 1 changed file with 44 additions and 12 deletions.
module/zfs/metaslab.c
@@ -3560,12 +3560,36 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
 			continue;
 		}
 
-		if (metaslab_activate(msp, allocator, activation_weight) != 0) {
+		msp->ms_selected_txg = txg;
+
+		boolean_t activated;
+		int activation_error =
+		    metaslab_activate(msp, allocator, activation_weight);
+		if (activation_error == 0) {
+			activated = B_TRUE;
+		} else if (activation_error == EBUSY ||
+		    activation_error == EEXIST) {
+			/*
+			 * The activation failed because this metaslab was
+			 * concurrently activated by another thread (EBUSY)
+			 * or this allocator concurrently had another
+			 * metaslab activated as primary (EEXIST). However,
+			 * the metaslab was loaded, so we should continue
+			 * trying to allocate from this metaslab, rather than
+			 * going on to a worse metaslab.
+			 *
+			 * In this case, when we're done with this metaslab,
+			 * we can not passivate it, because it was not
+			 * activated (i.e. is not the active metaslab in the
+			 * mg_primaries array).
+			 */
+			activated = B_FALSE;
+		} else {
+			/* i/o error while loading */
 			mutex_exit(&msp->ms_lock);
 			continue;
 		}
-
-		msp->ms_selected_txg = txg;
+		ASSERT(msp->ms_loaded);
 
 		/*
 		 * Now that we have the lock, recheck to see if we should
@@ -3592,15 +3616,19 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
 		if (msp->ms_condensing) {
 			metaslab_trace_add(zal, mg, msp, asize, d,
 			    TRACE_CONDENSING, allocator);
-			metaslab_passivate(msp, msp->ms_weight &
-			    ~METASLAB_ACTIVE_MASK);
+			if (activated) {
+				metaslab_passivate(msp, msp->ms_weight &
+				    ~METASLAB_ACTIVE_MASK);
+			}
 			mutex_exit(&msp->ms_lock);
 			continue;
 		} else if (msp->ms_disabled > 0) {
 			metaslab_trace_add(zal, mg, msp, asize, d,
 			    TRACE_DISABLED, allocator);
-			metaslab_passivate(msp, msp->ms_weight &
-			    ~METASLAB_ACTIVE_MASK);
+			if (activated) {
+				metaslab_passivate(msp, msp->ms_weight &
+				    ~METASLAB_ACTIVE_MASK);
+			}
 			mutex_exit(&msp->ms_lock);
 			continue;
 		}
@@ -3610,7 +3638,8 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,

 		if (offset != -1ULL) {
 			/* Proactively passivate the metaslab, if needed */
-			metaslab_segment_may_passivate(msp);
+			if (activated)
+				metaslab_segment_may_passivate(msp);
 			break;
 		}
 next:
@@ -3637,14 +3666,17 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
 		 * currently available for allocation and is accurate
 		 * even within a sync pass.
 		 */
+		uint64_t weight;
 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
-			uint64_t weight = metaslab_block_maxsize(msp);
+			weight = metaslab_block_maxsize(msp);
 			WEIGHT_SET_SPACEBASED(weight);
-			metaslab_passivate(msp, weight);
 		} else {
-			metaslab_passivate(msp,
-			    metaslab_weight_from_range_tree(msp));
+			weight = metaslab_weight_from_range_tree(msp);
 		}
+		if (activated)
+			metaslab_passivate(msp, weight);
+		else
+			metaslab_group_sort(mg, msp, weight);
 
 		/*
 		 * We have just failed an allocation attempt, check
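Below is a minimal standalone sketch (plain C, deliberately not ZFS code) of the control-flow pattern this diff introduces: an activation attempt that fails with EBUSY or EEXIST leaves the metaslab loaded and usable, so the caller records whether it performed the activation itself and, on failure paths, passivates only if it did, otherwise merely re-sorting the metaslab within its group. Every identifier below (slab_t, try_activate, passivate, resort, alloc_from) is a hypothetical stand-in, not the metaslab.c API.

/*
 * Minimal standalone sketch of the pattern above; not ZFS code.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	int id;
	bool active;
} slab_t;

/* Fails with EBUSY when another thread already holds the activation. */
static int
try_activate(slab_t *s)
{
	if (s->active)
		return (EBUSY);
	s->active = true;
	return (0);
}

static void
passivate(slab_t *s)
{
	s->active = false;
}

static void
resort(slab_t *s)
{
	/* Update the slab's position in its group without deactivating it. */
	printf("slab %d: re-sorted, still active for its owner\n", s->id);
}

/* Pretend the allocation failed so the cleanup path below runs. */
static bool
alloc_from(slab_t *s)
{
	(void) s;
	return (false);
}

static void
alloc_attempt(slab_t *s)
{
	bool activated;
	int err = try_activate(s);

	if (err == 0) {
		activated = true;
	} else if (err == EBUSY || err == EEXIST) {
		/*
		 * Lost the activation race, but the slab is still loaded
		 * and usable; just remember that this thread must not
		 * passivate it on the way out.
		 */
		activated = false;
	} else {
		return;	/* real error (e.g. i/o): skip this slab */
	}

	if (!alloc_from(s)) {
		/* Only undo an activation this thread performed. */
		if (activated)
			passivate(s);
		else
			resort(s);
	}
}

int
main(void)
{
	/* Simulate a slab that a concurrent thread activated first. */
	slab_t s = { .id = 1, .active = true };

	alloc_attempt(&s);
	return (0);
}

As the in-diff comment explains, the old code treated any nonzero return from metaslab_activate() as a reason to move on to a worse metaslab, so a concurrent allocation that won the activation race (e.g. a small one) could push the losing thread (e.g. a large allocation) off a metaslab that was perfectly capable of serving it.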
