author    Qu Wenruo <wqu@suse.com>  2019-05-17 15:37:54 +0800
committer Qu Wenruo <wqu@suse.com>  2019-05-17 15:37:54 +0800
commit    efd3e755b58682c373fa103f002f76bc72038f9f (patch)
tree      d209b5e645cd40a17ed36a8e32998c3550940cd8
parent    2185732a267ede1d63d1a67e8ca0e9d5ab662fab (diff)
- btrfs: move all ref head cleanup to the helper function (bsc#1134813).
- Refresh patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch.
- Refresh patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch.

suse-commit: b27bb39365ff35c8308957af35067359c060f3e8
-rw-r--r--  fs/btrfs/extent-tree.c  151
1 file changed, 66 insertions, 85 deletions
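As the commit message above says, this backport moves all ref-head cleanup into the cleanup_ref_head() helper, so the accounting fixups no longer ride through run_one_delayed_ref(). The short userspace C sketch below models just that consolidation under simplified assumptions; its structs, fields, and printf stand-ins are illustrative toys, not the real kernel types or calls.

/*
 * Minimal userspace sketch of the pattern this patch introduces: all
 * ref-head cleanup (pinned-bytes accounting, pending csums, reserved
 * extents) funneled through one helper instead of being duplicated in
 * run_one_delayed_ref() and the __btrfs_run_delayed_refs() loop.
 * Every type and function below is a toy stand-in, not the kernel API.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct ref_head {
	int64_t  total_ref_mod;	/* net ref-count change for the extent */
	uint64_t num_bytes;
	bool     is_data;
	bool     must_insert_reserved;
};

struct space_info {
	int64_t total_bytes_pinned;
	int64_t pending_csums;
};

/* Single cleanup path, mirroring what cleanup_ref_head() does in the diff. */
static int cleanup_ref_head(struct space_info *sinfo, struct ref_head *head)
{
	if (head->total_ref_mod < 0) {
		/* The extent is going away: back out its pinned bytes. */
		sinfo->total_bytes_pinned -= (int64_t)head->num_bytes;
		if (head->is_data)
			sinfo->pending_csums -= (int64_t)head->num_bytes;
	}
	if (head->must_insert_reserved) {
		/*
		 * The kernel would call btrfs_pin_extent() and, for data,
		 * btrfs_del_csums() here; this sketch just logs instead.
		 */
		printf("pin %llu bytes\n", (unsigned long long)head->num_bytes);
		if (head->is_data)
			printf("del csums for %llu bytes\n",
			       (unsigned long long)head->num_bytes);
	}
	return 0;
}

int main(void)
{
	struct space_info sinfo = { .total_bytes_pinned = 8192,
				    .pending_csums = 8192 };
	struct ref_head head = { .total_ref_mod = -1, .num_bytes = 4096,
				 .is_data = true, .must_insert_reserved = true };

	/* The run loop now calls the helper once a head has no refs left. */
	cleanup_ref_head(&sinfo, &head);
	printf("pinned=%lld pending_csums=%lld\n",
	       (long long)sinfo.total_bytes_pinned,
	       (long long)sinfo.pending_csums);
	return 0;
}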
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d0ccd6ae05f3..eeab5e03c343 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2416,48 +2416,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
return 0;
}
- if (btrfs_delayed_ref_is_head(node)) {
- struct btrfs_delayed_ref_head *head;
- /*
- * we've hit the end of the chain and we were supposed
- * to insert this extent into the tree. But, it got
- * deleted before we ever needed to insert it, so all
- * we have to do is clean up the accounting
- */
- BUG_ON(extent_op);
- head = btrfs_delayed_node_to_head(node);
- trace_run_delayed_ref_head(root->fs_info, node, head,
- node->action);
-
- if (head->total_ref_mod < 0) {
- struct btrfs_space_info *space_info;
- u64 flags;
-
- if (head->is_data)
- flags = BTRFS_BLOCK_GROUP_DATA;
- else if (head->is_system)
- flags = BTRFS_BLOCK_GROUP_SYSTEM;
- else
- flags = BTRFS_BLOCK_GROUP_METADATA;
- space_info = __find_space_info(root->fs_info, flags);
- ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned,
- -node->num_bytes);
- }
-
- if (insert_reserved) {
- btrfs_pin_extent(root, node->bytenr,
- node->num_bytes, 1);
- if (head->is_data) {
- ret = btrfs_del_csums(trans, root,
- node->bytenr,
- node->num_bytes);
- }
- }
-
- return ret;
- }
-
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
ret = run_delayed_tree_ref(trans, root, node, extent_op,
@@ -2560,6 +2518,45 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
delayed_refs->num_heads--;
rb_erase(&head->href_node, &delayed_refs->href_root);
spin_unlock(&delayed_refs->lock);
+ spin_unlock(&head->lock);
+ atomic_dec(&delayed_refs->num_entries);
+
+ trace_run_delayed_ref_head(fs_info, &head->node, head,
+ head->node.action);
+
+ if (head->total_ref_mod < 0) {
+ struct btrfs_space_info *space_info;
+ u64 flags;
+
+ if (head->is_data)
+ flags = BTRFS_BLOCK_GROUP_DATA;
+ else if (head->is_system)
+ flags = BTRFS_BLOCK_GROUP_SYSTEM;
+ else
+ flags = BTRFS_BLOCK_GROUP_METADATA;
+ space_info = __find_space_info(fs_info, flags);
+ ASSERT(space_info);
+ percpu_counter_add(&space_info->total_bytes_pinned,
+ -head->node.num_bytes);
+ if (head->is_data) {
+ spin_lock(&delayed_refs->lock);
+ delayed_refs->pending_csums -= head->node.num_bytes;
+ spin_unlock(&delayed_refs->lock);
+ }
+ }
+
+ if (head->must_insert_reserved) {
+ btrfs_pin_extent(fs_info->extent_root, head->node.bytenr,
+ head->node.num_bytes, 1);
+ if (head->is_data) {
+ ret = btrfs_del_csums(trans, fs_info->csum_root,
+ head->node.bytenr,
+ head->node.num_bytes);
+ }
+ }
+
+ btrfs_delayed_ref_unlock(head);
+ btrfs_put_delayed_ref(&head->node);
return 0;
}
@@ -2644,6 +2641,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
continue;
}
+ /*
+ * We're done processing refs in this ref_head, clean everything
+ * up and move on to the next ref_head.
+ */
if (!ref) {
ret = cleanup_ref_head(trans, fs_info, locked_ref);
if (ret > 0 ) {
@@ -2653,34 +2654,30 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
} else if (ret) {
return ret;
}
+ locked_ref = NULL;
+ count++;
+ continue;
+ }
- /*
- * All delayed refs have been processed, Go ahead and
- * send the head node to run_one_delayed_ref, so that
- * any accounting fixes can happen
- */
- ref = &locked_ref->node;
- } else {
- actual_count++;
- ref->in_tree = 0;
- list_del(&ref->list);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
- /*
- * when we play the delayed ref, also correct the
- * ref_mod on head
- */
- switch (ref->action) {
- case BTRFS_ADD_DELAYED_REF:
- case BTRFS_ADD_DELAYED_EXTENT:
- locked_ref->node.ref_mod -= ref->ref_mod;
- break;
- case BTRFS_DROP_DELAYED_REF:
- locked_ref->node.ref_mod += ref->ref_mod;
- break;
- default:
- WARN_ON(1);
- }
+ actual_count++;
+ ref->in_tree = 0;
+ list_del(&ref->list);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ /*
+ * When we play the delayed ref, also correct the ref_mod on
+ * head
+ */
+ switch (ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ locked_ref->node.ref_mod -= ref->ref_mod;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ locked_ref->node.ref_mod += ref->ref_mod;
+ break;
+ default:
+ WARN_ON(1);
}
atomic_dec(&delayed_refs->num_entries);
@@ -2707,22 +2704,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
return ret;
}
- /*
- * If this node is a head, that means all the refs in this head
- * have been dealt with, and we will pick the next head to deal
- * with, so we must unlock the head and drop it from the cluster
- * list before we release it.
- */
- if (btrfs_delayed_ref_is_head(ref)) {
- if (locked_ref->is_data &&
- locked_ref->total_ref_mod < 0) {
- spin_lock(&delayed_refs->lock);
- delayed_refs->pending_csums -= ref->num_bytes;
- spin_unlock(&delayed_refs->lock);
- }
- btrfs_delayed_ref_unlock(locked_ref);
- locked_ref = NULL;
- }
btrfs_put_delayed_ref(ref);
count++;
cond_resched();
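Net effect, as the hunks above show: when __btrfs_run_delayed_refs() finds a head with no refs left, it now calls cleanup_ref_head(), clears locked_ref, and continues, so the total_bytes_pinned, pending_csums, and reserved-extent handling that used to be split between run_one_delayed_ref() and the loop tail lives in exactly one place. That consolidated path is presumably also why the refreshed patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch had to be rebased on top of this change.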