btrfs: refactor __btrfs_run_delayed_refs loop
Refactor the delayed refs loop by using the newly introduced
btrfs_run_delayed_refs_for_head function. This greatly simplifies
__btrfs_run_delayed_refs and makes it more obvious what is happening. We
now have one loop which iterates the existing delayed_heads, and each
selected ref head is then processed by the new helper. All existing
semantics of the code are preserved, so there are no functional changes.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 0110a4c434
parent e726138676
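For orientation before the hunks: condensed, the rewritten loop has the shape sketched below. This is a paraphrase of the diff that follows (comments and some error-path details, such as the cleanup_ref_head() retry, are elided), not the verbatim kernel code.

	do {
		if (!locked_ref) {
			locked_ref = btrfs_obtain_ref_head(trans);
			if (IS_ERR_OR_NULL(locked_ref)) {
				if (PTR_ERR(locked_ref) == -EAGAIN)
					continue;	/* -EAGAIN: try to get another head */
				break;			/* NULL: no ref heads left */
			}
			count++;
		}

		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);

		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, &actual_count);
		if (ret < 0 && ret != -EAGAIN)
			return ret;	/* helper already unlocked everything */
		else if (!ret)
			ret = cleanup_ref_head(trans, locked_ref);

		locked_ref = NULL;	/* success or -EAGAIN: select another head */
		cond_resched();
	} while ((nr != -1 && count < nr) || locked_ref);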
@@ -2548,6 +2548,9 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
 
 	delayed_refs = &trans->transaction->delayed_refs;
 
+	lockdep_assert_held(&locked_ref->mutex);
+	lockdep_assert_held(&locked_ref->lock);
+
 	while ((ref = select_delayed_ref(locked_ref))) {
 		if (ref->seq &&
 		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
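The two lockdep_assert_held() calls added here make the helper's locking contract explicit: by the time it iterates the head's refs, the caller must hold both the head's mutex (taken when the head is obtained) and its spinlock (taken at the call site, as the later hunks show). Below is a minimal kernel-style sketch of the same pattern with hypothetical names (struct item, process_item, touch_item are not from btrfs); lockdep_assert_held() only checks anything when lockdep (CONFIG_LOCKDEP) is enabled and compiles away otherwise.

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	/* Hypothetical example type, not from btrfs. */
	struct item {
		spinlock_t lock;
		int value;
	};

	/* The callee documents, and under lockdep enforces, that the
	 * caller holds item->lock for the whole call. */
	static void process_item(struct item *item)
	{
		lockdep_assert_held(&item->lock);
		item->value++;	/* safe: serialized by item->lock */
	}

	static void touch_item(struct item *item)
	{
		spin_lock(&item->lock);
		process_item(item);	/* assertion is satisfied here */
		spin_unlock(&item->lock);
	}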
@@ -2621,31 +2624,25 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_delayed_ref_root *delayed_refs;
-	struct btrfs_delayed_ref_node *ref;
 	struct btrfs_delayed_ref_head *locked_ref = NULL;
-	struct btrfs_delayed_extent_op *extent_op;
 	ktime_t start = ktime_get();
 	int ret;
 	unsigned long count = 0;
 	unsigned long actual_count = 0;
-	int must_insert_reserved = 0;
 
 	delayed_refs = &trans->transaction->delayed_refs;
-	while (1) {
+	do {
 		if (!locked_ref) {
-			if (count >= nr)
-				break;
-
 			locked_ref = btrfs_obtain_ref_head(trans);
-			if (!locked_ref)
-				break;
-			else if (PTR_ERR(locked_ref) == -EAGAIN) {
-				locked_ref = NULL;
-				count++;
-				continue;
+			if (IS_ERR_OR_NULL(locked_ref)) {
+				if (PTR_ERR(locked_ref) == -EAGAIN) {
+					continue;
+				} else {
+					break;
+				}
 			}
+			count++;
 		}
-
 		/*
 		 * We need to try and merge add/drops of the same ref since we
 		 * can run into issues with relocate dropping the implicit ref
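The acquisition path now leans on the kernel's ERR_PTR convention: btrfs_obtain_ref_head() apparently returns a valid locked head, NULL, or ERR_PTR(-EAGAIN), and a single IS_ERR_OR_NULL() test covers both failure flavors. One detail makes the branch above work: PTR_ERR(NULL) evaluates to 0, so the NULL case falls into the break branch without a separate NULL test. Here is a standalone userspace sketch, with simplified stand-ins for the <linux/err.h> macros (the real ones also encode error codes in the top 4095 pointer values):

	#include <stdio.h>
	#include <errno.h>

	/* Simplified userspace stand-ins for the <linux/err.h> helpers. */
	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR_OR_NULL(const void *ptr)
	{
		return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		int head_storage;
		void *cases[] = { &head_storage, NULL, ERR_PTR(-EAGAIN) };

		for (int i = 0; i < 3; i++) {
			void *head = cases[i];

			if (IS_ERR_OR_NULL(head)) {
				if (PTR_ERR(head) == -EAGAIN)
					puts("-EAGAIN: retry with another head");
				else	/* PTR_ERR(NULL) == 0, so NULL lands here */
					puts("NULL: no ref heads left, stop");
			} else {
				puts("valid pointer: process the locked head");
			}
		}
		return 0;
	}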
@@ -2661,23 +2658,19 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		spin_lock(&locked_ref->lock);
 		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
 
-		ref = select_delayed_ref(locked_ref);
-
-		if (ref && ref->seq &&
-		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
-			spin_unlock(&locked_ref->lock);
-			unselect_delayed_ref_head(delayed_refs, locked_ref);
-			locked_ref = NULL;
-			cond_resched();
-			count++;
-			continue;
-		}
-
-		/*
-		 * We're done processing refs in this ref_head, clean everything
-		 * up and move on to the next ref_head.
-		 */
-		if (!ref) {
+		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
+						      &actual_count);
+		if (ret < 0 && ret != -EAGAIN) {
+			/*
+			 * Error, btrfs_run_delayed_refs_for_head already
+			 * unlocked everything so just bail out
+			 */
+			return ret;
+		} else if (!ret) {
+			/*
+			 * Success, perform the usual cleanup of a processed
+			 * head
+			 */
 			ret = cleanup_ref_head(trans, locked_ref);
 			if (ret > 0 ) {
 				/* We dropped our lock, we need to loop. */
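Taken together with the old inline seq-check it replaces, the helper's return contract as handled above is three-way. Restated as a sketch (the meaning of -EAGAIN is inferred from the seq check in the first hunk and the "select another head" comment in the last one):

	ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, &actual_count);
	if (ret < 0 && ret != -EAGAIN) {
		/* hard error: the helper has already unlocked and
		 * unselected the head, so the caller simply returns */
		return ret;
	} else if (ret == 0) {
		/* every ref in this head was run: finish the head via
		 * cleanup_ref_head() */
	} else {
		/* ret == -EAGAIN: the head is blocked for now (e.g. by
		 * btrfs_check_delayed_seq()); drop it and select another */
	}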
@@ -2686,61 +2679,16 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			} else if (ret) {
 				return ret;
 			}
-			locked_ref = NULL;
-			count++;
-			continue;
 		}
 
-		actual_count++;
-		ref->in_tree = 0;
-		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
-		RB_CLEAR_NODE(&ref->ref_node);
-		if (!list_empty(&ref->add_list))
-			list_del(&ref->add_list);
-		/*
-		 * When we play the delayed ref, also correct the ref_mod on
-		 * head
-		 */
-		switch (ref->action) {
-		case BTRFS_ADD_DELAYED_REF:
-		case BTRFS_ADD_DELAYED_EXTENT:
-			locked_ref->ref_mod -= ref->ref_mod;
-			break;
-		case BTRFS_DROP_DELAYED_REF:
-			locked_ref->ref_mod += ref->ref_mod;
-			break;
-		default:
-			WARN_ON(1);
-		}
-		atomic_dec(&delayed_refs->num_entries);
-
 		/*
-		 * Record the must-insert_reserved flag before we drop the spin
-		 * lock.
+		 * Either success case or btrfs_run_delayed_refs_for_head
+		 * returned -EAGAIN, meaning we need to select another head
 		 */
-		must_insert_reserved = locked_ref->must_insert_reserved;
-		locked_ref->must_insert_reserved = 0;
 
-		extent_op = locked_ref->extent_op;
-		locked_ref->extent_op = NULL;
-		spin_unlock(&locked_ref->lock);
-
-		ret = run_one_delayed_ref(trans, ref, extent_op,
-					  must_insert_reserved);
-
-		btrfs_free_delayed_extent_op(extent_op);
-		if (ret) {
-			unselect_delayed_ref_head(delayed_refs, locked_ref);
-			btrfs_put_delayed_ref(ref);
-			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
-				    ret);
-			return ret;
-		}
-
-		btrfs_put_delayed_ref(ref);
-		count++;
+		locked_ref = NULL;
 		cond_resched();
-	}
+	} while ((nr != -1 && count < nr) || locked_ref);
 
 	/*
 	 * We don't want to include ref heads since we can have empty ref heads