Home Home > GIT Browse > SLE15-AZURE
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTakashi Iwai <tiwai@suse.de>2019-05-14 15:17:04 +0200
committerTakashi Iwai <tiwai@suse.de>2019-05-14 15:17:04 +0200
commit090f833e35727e6fe426a67792a0c12bcbe9a374 (patch)
treef61ced130bd736801e8c2896f5ea0cd53822c8a6
parent3b2528e70f15aa88596725731caf3e8aa6737e0f (diff)
parenta52dcc3ad071e26cd7f845946242c577b9e54af3 (diff)
Merge branch 'users/wqu/SLE15/for-next' into SLE15
Pull btrfs fixes from Qu Wenruo
-rw-r--r--patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch21
-rw-r--r--patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch85
-rw-r--r--patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch404
-rw-r--r--patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch75
-rw-r--r--patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch73
-rw-r--r--patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch128
-rw-r--r--patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch125
-rw-r--r--patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch102
-rw-r--r--patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch102
-rw-r--r--patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch159
-rw-r--r--patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch10
-rw-r--r--patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch16
-rw-r--r--patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch88
-rw-r--r--series.conf9
14 files changed, 1331 insertions, 66 deletions
diff --git a/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch b/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
index ef91535590..ca0ac6e584 100644
--- a/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
+++ b/patches.fixes/0001-btrfs-qgroup-Move-reserved-data-accounting-from-btrf.patch
@@ -102,17 +102,17 @@ Fixes: f64d5ca86821 ("btrfs: delayed_ref: Add new function to record reserved sp
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
---
- fs/btrfs/delayed-ref.c | 12 ++++--------
+ fs/btrfs/delayed-ref.c | 14 ++++----------
fs/btrfs/delayed-ref.h | 11 -----------
fs/btrfs/extent-tree.c | 3 ---
fs/btrfs/qgroup.c | 19 +++++++++++++++----
fs/btrfs/qgroup.h | 18 +++++++++++-------
include/trace/events/btrfs.h | 29 -----------------------------
- 6 files changed, 30 insertions(+), 62 deletions(-)
+ 6 files changed, 30 insertions(+), 64 deletions(-)
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -601,16 +601,14 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -590,15 +590,13 @@ static void init_delayed_ref_head(struct
RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
@@ -121,7 +121,6 @@ Signed-off-by: David Sterba <dsterba@suse.com>
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
- /* Record qgroup extent info if provided */
if (qrecord) {
if (ref_root && reserved) {
- head_ref->qgroup_ref_root = ref_root;
@@ -131,16 +130,18 @@ Signed-off-by: David Sterba <dsterba@suse.com>
}
qrecord->bytenr = bytenr;
-@@ -629,8 +627,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -641,10 +639,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
-- WARN_ON(ref_root && reserved && existing->qgroup_ref_root
+- WARN_ON(qrecord && head_ref->qgroup_ref_root
+- && head_ref->qgroup_reserved
+- && existing->qgroup_ref_root
- && existing->qgroup_reserved);
update_existing_head_ref(delayed_refs, existing, head_ref,
old_ref_mod);
/*
-@@ -797,7 +793,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -764,7 +758,7 @@ int btrfs_add_delayed_tree_ref(struct bt
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
@@ -149,7 +150,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!record)
goto free_head_ref;
}
-@@ -860,7 +856,7 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -848,7 +842,7 @@ int btrfs_add_delayed_data_ref(struct bt
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
is_fstree(ref_root)) {
@@ -160,7 +161,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
kmem_cache_free(btrfs_delayed_ref_head_cachep,
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
-@@ -116,17 +116,6 @@ struct btrfs_delayed_ref_head {
+@@ -115,17 +115,6 @@ struct btrfs_delayed_ref_head {
int ref_mod;
/*
@@ -180,7 +181,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
* until the delayed ref is processed. must_insert_reserved is
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
-@@ -2564,9 +2564,6 @@ static int cleanup_ref_head(struct btrfs
+@@ -2563,9 +2563,6 @@ static int cleanup_ref_head(struct btrfs
}
}
diff --git a/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch b/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
new file mode 100644
index 0000000000..7683027899
--- /dev/null
+++ b/patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
@@ -0,0 +1,85 @@
+From cb49a87b2a4edb469e4d295eca4b1d106f64083e Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:17 +0300
+Git-commit: cb49a87b2a4edb469e4d295eca4b1d106f64083e
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 1/8] btrfs: Factor out common delayed refs init code
+
+THe majority of the init code for struct btrfs_delayed_ref_node is
+duplicated in add_delayed_data_ref and add_delayed_tree_ref. Factor out
+the common bits in init_delayed_ref_common. This function is going to be
+used in future patches to clean that up. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 51 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 51 insertions(+)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 4fb041e14742..a0dc255792c7 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -644,6 +644,57 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
+ return head_ref;
+ }
+
++/*
++ * init_delayed_ref_common - Initialize the structure which represents a
++ * modification to a an extent.
++ *
++ * @fs_info: Internal to the mounted filesystem mount structure.
++ *
++ * @ref: The structure which is going to be initialized.
++ *
++ * @bytenr: The logical address of the extent for which a modification is
++ * going to be recorded.
++ *
++ * @num_bytes: Size of the extent whose modification is being recorded.
++ *
++ * @ref_root: The id of the root where this modification has originated, this
++ * can be either one of the well-known metadata trees or the
++ * subvolume id which references this extent.
++ *
++ * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
++ * BTRFS_ADD_DELAYED_EXTENT
++ *
++ * @ref_type: Holds the type of the extent which is being recorded, can be
++ * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
++ * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
++ * BTRFS_EXTENT_DATA_REF_KEY when recording data extent
++ */
++static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
++ struct btrfs_delayed_ref_node *ref,
++ u64 bytenr, u64 num_bytes, u64 ref_root,
++ int action, u8 ref_type)
++{
++ u64 seq = 0;
++
++ if (action == BTRFS_ADD_DELAYED_EXTENT)
++ action = BTRFS_ADD_DELAYED_REF;
++
++ if (is_fstree(ref_root))
++ seq = atomic64_read(&fs_info->tree_mod_seq);
++
++ refcount_set(&ref->refs, 1);
++ ref->bytenr = bytenr;
++ ref->num_bytes = num_bytes;
++ ref->ref_mod = 1;
++ ref->action = action;
++ ref->is_head = 0;
++ ref->in_tree = 1;
++ ref->seq = seq;
++ ref->type = ref_type;
++ RB_CLEAR_NODE(&ref->ref_node);
++ INIT_LIST_HEAD(&ref->add_list);
++}
++
+ /*
+ * helper to insert a delayed tree ref into the rbtree.
+ */
+--
+2.21.0
+
diff --git a/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
new file mode 100644
index 0000000000..ff33d5ddc5
--- /dev/null
+++ b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
@@ -0,0 +1,404 @@
+From 0e0adbcfdc908684317c99a9bf5e13383f03b7ec Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Git-commit: 0e0adbcfdc908684317c99a9bf5e13383f03b7ec
+Patch-mainline: v4.15
+References: bsc#1134813
+Date: Thu, 19 Oct 2017 14:16:00 -0400
+Subject: [PATCH] btrfs: track refs in a rb_tree instead of a list
+
+If we get a significant amount of delayed refs for a single block (think
+modifying multiple snapshots) we can end up spending an ungodly amount
+of time looping through all of the entries trying to see if they can be
+merged. This is because we only add them to a list, so we have O(2n)
+for every ref head. This doesn't make any sense as we likely have refs
+for different roots, and so they cannot be merged. Tracking in a tree
+will allow us to break as soon as we hit an entry that doesn't match,
+making our worst case O(n).
+
+With this we can also merge entries more easily. Before we had to hope
+that matching refs were on the ends of our list, but with the tree we
+can search down to exact matches and merge them at insert time.
+
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/backref.c | 5 +-
+ fs/btrfs/delayed-ref.c | 108 +++++++++++++++++++++--------------------
+ fs/btrfs/delayed-ref.h | 5 +-
+ fs/btrfs/disk-io.c | 10 ++--
+ fs/btrfs/extent-tree.c | 21 +++++---
+ 5 files changed, 82 insertions(+), 67 deletions(-)
+
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 523d2dba7745..7d0dc100a09a 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -773,6 +773,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ struct btrfs_key key;
+ struct btrfs_key tmp_op_key;
+ struct btrfs_key *op_key = NULL;
++ struct rb_node *n;
+ int count;
+ int ret = 0;
+
+@@ -782,7 +783,9 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ }
+
+ spin_lock(&head->lock);
+- list_for_each_entry(node, &head->ref_list, list) {
++ for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
++ node = rb_entry(n, struct btrfs_delayed_ref_node,
++ ref_node);
+ if (node->seq > seq)
+ continue;
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 8c7d7db01f7a..83be8f9fd906 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -143,6 +143,34 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
+ return NULL;
+ }
+
++static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
++ struct btrfs_delayed_ref_node *ins)
++{
++ struct rb_node **p = &root->rb_node;
++ struct rb_node *node = &ins->ref_node;
++ struct rb_node *parent_node = NULL;
++ struct btrfs_delayed_ref_node *entry;
++
++ while (*p) {
++ int comp;
++
++ parent_node = *p;
++ entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
++ ref_node);
++ comp = comp_refs(ins, entry, true);
++ if (comp < 0)
++ p = &(*p)->rb_left;
++ else if (comp > 0)
++ p = &(*p)->rb_right;
++ else
++ return entry;
++ }
++
++ rb_link_node(node, parent_node, p);
++ rb_insert_color(node, root);
++ return NULL;
++}
++
+ /*
+ * find an head entry based on bytenr. This returns the delayed ref
+ * head if it was able to find one, or NULL if nothing was in that spot.
+@@ -212,7 +240,8 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_node *ref)
+ {
+ assert_spin_locked(&head->lock);
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &head->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ ref->in_tree = 0;
+@@ -229,24 +258,18 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
+ u64 seq)
+ {
+ struct btrfs_delayed_ref_node *next;
++ struct rb_node *node = rb_next(&ref->ref_node);
+ bool done = false;
+
+- next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
+- while (!done && &next->list != &head->ref_list) {
++ while (!done && node) {
+ int mod;
+- struct btrfs_delayed_ref_node *next2;
+-
+- next2 = list_next_entry(next, list);
+-
+- if (next == ref)
+- goto next;
+
++ next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
++ node = rb_next(node);
+ if (seq && next->seq >= seq)
+- goto next;
+-
++ break;
+ if (comp_refs(ref, next, false))
+- goto next;
++ break;
+
+ if (ref->action == next->action) {
+ mod = next->ref_mod;
+@@ -270,8 +293,6 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
+ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
+ }
+-next:
+- next = next2;
+ }
+
+ return done;
+@@ -283,11 +304,12 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head)
+ {
+ struct btrfs_delayed_ref_node *ref;
++ struct rb_node *node;
+ u64 seq = 0;
+
+ assert_spin_locked(&head->lock);
+
+- if (list_empty(&head->ref_list))
++ if (RB_EMPTY_ROOT(&head->ref_tree))
+ return;
+
+ /* We don't have too many refs to merge for data. */
+@@ -304,22 +326,13 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
+ }
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+
+- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
+- while (&ref->list != &head->ref_list) {
++again:
++ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
++ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ if (seq && ref->seq >= seq)
+- goto next;
+-
+- if (merge_ref(trans, delayed_refs, head, ref, seq)) {
+- if (list_empty(&head->ref_list))
+- break;
+- ref = list_first_entry(&head->ref_list,
+- struct btrfs_delayed_ref_node,
+- list);
+ continue;
+- }
+-next:
+- ref = list_next_entry(ref, list);
++ if (merge_ref(trans, delayed_refs, head, ref, seq))
++ goto again;
+ }
+ }
+
+@@ -402,25 +415,19 @@ btrfs_select_ref_head(struct btrfs_trans_handle *trans)
+ * Return 0 for insert.
+ * Return >0 for merge.
+ */
+-static int
+-add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_root *root,
+- struct btrfs_delayed_ref_head *href,
+- struct btrfs_delayed_ref_node *ref)
++static int insert_delayed_ref(struct btrfs_trans_handle *trans,
++ struct btrfs_delayed_ref_root *root,
++ struct btrfs_delayed_ref_head *href,
++ struct btrfs_delayed_ref_node *ref)
+ {
+ struct btrfs_delayed_ref_node *exist;
+ int mod;
+ int ret = 0;
+
+ spin_lock(&href->lock);
+- /* Check whether we can merge the tail node with ref */
+- if (list_empty(&href->ref_list))
+- goto add_tail;
+- exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
+- list);
+- /* No need to compare bytenr nor is_head */
+- if (comp_refs(exist, ref, true))
+- goto add_tail;
++ exist = tree_insert(&href->ref_tree, ref);
++ if (!exist)
++ goto inserted;
+
+ /* Now we are sure we can merge */
+ ret = 1;
+@@ -451,9 +458,7 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+ drop_delayed_ref(trans, root, href, exist);
+ spin_unlock(&href->lock);
+ return ret;
+-
+-add_tail:
+- list_add_tail(&ref->list, &href->ref_list);
++inserted:
+ if (ref->action == BTRFS_ADD_DELAYED_REF)
+ list_add_tail(&ref->add_list, &href->ref_add_list);
+ atomic_inc(&root->num_entries);
+@@ -593,7 +598,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ head_ref->ref_mod = count_mod;
+ head_ref->must_insert_reserved = must_insert_reserved;
+ head_ref->is_data = is_data;
+- INIT_LIST_HEAD(&head_ref->ref_list);
++ head_ref->ref_tree = RB_ROOT;
+ INIT_LIST_HEAD(&head_ref->ref_add_list);
+ RB_CLEAR_NODE(&head_ref->href_node);
+ head_ref->processing = 0;
+@@ -685,7 +690,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ ref->is_head = 0;
+ ref->in_tree = 1;
+ ref->seq = seq;
+- INIT_LIST_HEAD(&ref->list);
++ RB_CLEAR_NODE(&ref->ref_node);
+ INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+@@ -699,7 +704,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+
+ trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
+
+- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+
+ /*
+ * XXX: memory should be freed at the same level allocated.
+@@ -742,7 +747,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ ref->is_head = 0;
+ ref->in_tree = 1;
+ ref->seq = seq;
+- INIT_LIST_HEAD(&ref->list);
++ RB_CLEAR_NODE(&ref->ref_node);
+ INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+@@ -758,8 +763,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+
+ trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
+
+- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+-
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
+ }
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 1ce11858d727..a43af432f859 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -27,8 +27,7 @@
+ #define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
+
+ struct btrfs_delayed_ref_node {
+- /*data/tree ref use list, stored in ref_head->ref_list. */
+- struct list_head list;
++ struct rb_node ref_node;
+ /*
+ * If action is BTRFS_ADD_DELAYED_REF, also link this node to
+ * ref_head->ref_add_list, then we do not need to iterate the
+@@ -92,7 +91,7 @@ struct btrfs_delayed_ref_head {
+ struct mutex mutex;
+
+ spinlock_t lock;
+- struct list_head ref_list;
++ struct rb_root ref_tree;
+ /* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
+ struct list_head ref_add_list;
+
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index d1f396f72979..efce9a2fa9be 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4113,7 +4113,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+
+ while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+ struct btrfs_delayed_ref_head *head;
+- struct btrfs_delayed_ref_node *tmp;
++ struct rb_node *n;
+ bool pin_bytes = false;
+
+ head = rb_entry(node, struct btrfs_delayed_ref_head,
+@@ -4129,10 +4129,12 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
+ continue;
+ }
+ spin_lock(&head->lock);
+- list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
+- list) {
++ while ((n = rb_first(&head->ref_tree)) != NULL) {
++ ref = rb_entry(n, struct btrfs_delayed_ref_node,
++ ref_node);
+ ref->in_tree = 0;
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &head->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ atomic_dec(&delayed_refs->num_entries);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index fc9720e28005..673ac4e01dd0 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2519,7 +2519,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
+ {
+ struct btrfs_delayed_ref_node *ref;
+
+- if (list_empty(&head->ref_list))
++ if (RB_EMPTY_ROOT(&head->ref_tree))
+ return NULL;
+
+ /*
+@@ -2532,8 +2532,8 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
+ return list_first_entry(&head->ref_add_list,
+ struct btrfs_delayed_ref_node, add_list);
+
+- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
++ ref = rb_entry(rb_first(&head->ref_tree),
++ struct btrfs_delayed_ref_node, ref_node);
+ ASSERT(list_empty(&ref->add_list));
+ return ref;
+ }
+@@ -2593,7 +2593,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ spin_unlock(&head->lock);
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+- if (!list_empty(&head->ref_list) || head->extent_op) {
++ if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ return 1;
+@@ -2740,7 +2740,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
+
+ actual_count++;
+ ref->in_tree = 0;
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &locked_ref->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ /*
+@@ -3138,6 +3139,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ struct btrfs_delayed_data_ref *data_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_transaction *cur_trans;
++ struct rb_node *node;
+ int ret = 0;
+
+ cur_trans = root->fs_info->running_transaction;
+@@ -3170,7 +3172,12 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
+ spin_unlock(&delayed_refs->lock);
+
+ spin_lock(&head->lock);
+- list_for_each_entry(ref, &head->ref_list, list) {
++ /*
++ * XXX: We should replace this with a proper search function in the
++ * future.
++ */
++ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
++ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ /* If it's a shared ref we know a cross reference exists */
+ if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
+ ret = 1;
+@@ -7141,7 +7148,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
+ goto out_delayed_unlock;
+
+ spin_lock(&head->lock);
+- if (!list_empty(&head->ref_list))
++ if (!RB_EMPTY_ROOT(&head->ref_tree))
+ goto out;
+
+ if (head->extent_op) {
+--
+2.21.0
+
diff --git a/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch b/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
new file mode 100644
index 0000000000..c03208f122
--- /dev/null
+++ b/patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
@@ -0,0 +1,75 @@
+From 646f4dd76fb3ac0d1e8677890522d4c044ee2f06 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:18 +0300
+Git-commit: 646f4dd76fb3ac0d1e8677890522d4c044ee2f06
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 2/8] btrfs: Use init_delayed_ref_common in
+ add_delayed_tree_ref
+
+Use the newly introduced common helper. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 35 +++++++++++------------------------
+ 1 file changed, 11 insertions(+), 24 deletions(-)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index a0dc255792c7..1c27d3322198 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -708,38 +708,25 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ {
+ struct btrfs_delayed_tree_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- u64 seq = 0;
++ u8 ref_type;
+ int ret;
+
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- action = BTRFS_ADD_DELAYED_REF;
+-
+- if (is_fstree(ref_root))
+- seq = atomic64_read(&fs_info->tree_mod_seq);
+ delayed_refs = &trans->transaction->delayed_refs;
+-
+- /* first set the basic ref node struct up */
+- refcount_set(&ref->refs, 1);
+- ref->bytenr = bytenr;
+- ref->num_bytes = num_bytes;
+- ref->ref_mod = 1;
+- ref->action = action;
+- ref->is_head = 0;
+- ref->in_tree = 1;
+- ref->seq = seq;
+- RB_CLEAR_NODE(&ref->ref_node);
+- INIT_LIST_HEAD(&ref->add_list);
+-
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+- full_ref->parent = parent;
+- full_ref->root = ref_root;
+ if (parent)
+- ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
++ ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
+ else
+- ref->type = BTRFS_TREE_BLOCK_REF_KEY;
++ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
++
++ init_delayed_ref_common(fs_info, ref, bytenr, num_bytes, ref_root,
++ action, ref_type);
++ full_ref->root = ref_root;
++ full_ref->parent = parent;
+ full_ref->level = level;
+
+- trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
++ trace_add_delayed_tree_ref(fs_info, ref, full_ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+
+--
+2.21.0
+
diff --git a/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch b/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
new file mode 100644
index 0000000000..8396e83151
--- /dev/null
+++ b/patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
@@ -0,0 +1,73 @@
+From c812c8a857a00acae78341d5d4702eb8d7d02661 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:19 +0300
+Git-commit: c812c8a857a00acae78341d5d4702eb8d7d02661
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 3/8] btrfs: Use init_delayed_ref_common in
+ add_delayed_data_ref
+
+Use the newly introduced helper and remove the duplicate code. No
+functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 33 ++++++++++-----------------------
+ 1 file changed, 10 insertions(+), 23 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -768,41 +768,28 @@ add_delayed_data_ref(struct btrfs_fs_inf
+ {
+ struct btrfs_delayed_data_ref *full_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- u64 seq = 0;
++ u8 ref_type;
+ int ret;
+
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- action = BTRFS_ADD_DELAYED_REF;
+-
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- if (is_fstree(ref_root))
+- seq = atomic64_read(&fs_info->tree_mod_seq);
+-
+- /* first set the basic ref node struct up */
+- refcount_set(&ref->refs, 1);
+- ref->bytenr = bytenr;
+- ref->num_bytes = num_bytes;
+- ref->ref_mod = 1;
+- ref->action = action;
+- ref->is_head = 0;
+- ref->in_tree = 1;
+- ref->seq = seq;
+- RB_CLEAR_NODE(&ref->ref_node);
+- INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+- full_ref->parent = parent;
+- full_ref->root = ref_root;
+ if (parent)
+- ref->type = BTRFS_SHARED_DATA_REF_KEY;
++ ref_type = BTRFS_SHARED_DATA_REF_KEY;
+ else
+- ref->type = BTRFS_EXTENT_DATA_REF_KEY;
++ ref_type = BTRFS_EXTENT_DATA_REF_KEY;
+
++ init_delayed_ref_common(fs_info, ref, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ full_ref->root = ref_root;
++ full_ref->parent = parent;
+ full_ref->objectid = owner;
+ full_ref->offset = offset;
+
+- trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
++ trace_add_delayed_data_ref(fs_info, ref, full_ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+ if (ret > 0)
diff --git a/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch b/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
new file mode 100644
index 0000000000..b154cc6d0a
--- /dev/null
+++ b/patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
@@ -0,0 +1,128 @@
+From 70d640004ab5c2597084f6463dd39b36f4f026f8 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:20 +0300
+Git-commit: 70d640004ab5c2597084f6463dd39b36f4f026f8
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 4/8] btrfs: Open-code add_delayed_tree_ref
+
+Now that the initialization part and the critical section code have been
+split it's a lot easier to open code add_delayed_tree_ref. Do so in the
+following manner:
+
+1. The comming init code is put immediately after memory-to-be-initialized
+ is allocated, followed by the ref-specific member initialization.
+
+2. The only piece of code that remains in the critical section is
+ insert_delayed_ref call.
+
+3. Tracing and memory freeing code is put outside of the critical
+ section as well.
+
+The only real change here is an overall shorter critical section when
+dealing with delayed tree refs. From functional point of view - the code
+is unchanged.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 65 +++++++++++++++----------------------------------
+ 1 file changed, 20 insertions(+), 45 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -713,49 +713,6 @@ static void init_delayed_ref_common(stru
+ }
+
+ /*
+- * helper to insert a delayed tree ref into the rbtree.
+- */
+-static noinline void
+-add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+- struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_head *head_ref,
+- struct btrfs_delayed_ref_node *ref, u64 bytenr,
+- u64 num_bytes, u64 parent, u64 ref_root, int level,
+- int action)
+-{
+- struct btrfs_delayed_tree_ref *full_ref;
+- struct btrfs_delayed_ref_root *delayed_refs;
+- u8 ref_type;
+- int ret;
+-
+- delayed_refs = &trans->transaction->delayed_refs;
+- full_ref = btrfs_delayed_node_to_tree_ref(ref);
+- if (parent)
+- ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
+- else
+- ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+-
+- init_delayed_ref_common(fs_info, ref, bytenr, num_bytes, ref_root,
+- action, ref_type);
+- full_ref->root = ref_root;
+- full_ref->parent = parent;
+- full_ref->level = level;
+-
+- trace_add_delayed_tree_ref(fs_info, ref, full_ref,
+- action == BTRFS_ADD_DELAYED_EXTENT ?
+- BTRFS_ADD_DELAYED_REF : action);
+-
+- ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+-
+- /*
+- * XXX: memory should be freed at the same level allocated.
+- * But bad practice is anywhere... Follow it now. Need cleanup.
+- */
+- if (ret > 0)
+- kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
+-}
+-
+-/*
+ * helper to insert a delayed data ref into the rbtree.
+ */
+ static noinline void
+@@ -814,12 +771,24 @@ int btrfs_add_delayed_tree_ref(struct bt
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
+ int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
++ int ret;
++ u8 ref_type;
+
+ BUG_ON(extent_op && extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
++ if (parent)
++ ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
++ else
++ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
++ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ ref->root = ref_root;
++ ref->parent = parent;
++ ref->level = level;
++
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref)
+ goto free_ref;
+@@ -845,10 +814,16 @@ int btrfs_add_delayed_tree_ref(struct bt
+ is_system, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+- add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, level, action);
++
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+
++ trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
++ if (ret > 0)
++ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
++
+ if (qrecord_inserted)
+ return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return 0;
diff --git a/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch b/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
new file mode 100644
index 0000000000..4bb5260040
--- /dev/null
+++ b/patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
@@ -0,0 +1,125 @@
+From cd7f9699b113434467434580ebb8d9b328152fb8 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:21 +0300
+Git-commit: cd7f9699b113434467434580ebb8d9b328152fb8
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 5/8] btrfs: Open-code add_delayed_data_ref
+
+Now that the initialization part and the critical section code have been
+split it's a lot easier to open code add_delayed_data_ref. Do so in the
+following manner:
+
+1. The common init function is put immediately after memory-to-be-initialized
+ is allocated, followed by the specific data ref initialization.
+
+2. The only piece of code that remains in the critical section is
+ insert_delayed_ref call.
+
+3. Tracing and memory freeing code is moved outside of the critical
+ section.
+
+No functional changes, just an overall shorter critical section.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 65 +++++++++++++++----------------------------------
+ 1 file changed, 21 insertions(+), 44 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -713,47 +713,6 @@ static void init_delayed_ref_common(stru
+ }
+
+ /*
+- * helper to insert a delayed data ref into the rbtree.
+- */
+-static noinline void
+-add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+- struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_head *head_ref,
+- struct btrfs_delayed_ref_node *ref, u64 bytenr,
+- u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
+- u64 offset, int action)
+-{
+- struct btrfs_delayed_data_ref *full_ref;
+- struct btrfs_delayed_ref_root *delayed_refs;
+- u8 ref_type;
+- int ret;
+-
+- delayed_refs = &trans->transaction->delayed_refs;
+-
+-
+- full_ref = btrfs_delayed_node_to_data_ref(ref);
+- if (parent)
+- ref_type = BTRFS_SHARED_DATA_REF_KEY;
+- else
+- ref_type = BTRFS_EXTENT_DATA_REF_KEY;
+-
+- init_delayed_ref_common(fs_info, ref, bytenr, num_bytes,
+- ref_root, action, ref_type);
+- full_ref->root = ref_root;
+- full_ref->parent = parent;
+- full_ref->objectid = owner;
+- full_ref->offset = offset;
+-
+- trace_add_delayed_data_ref(fs_info, ref, full_ref,
+- action == BTRFS_ADD_DELAYED_EXTENT ?
+- BTRFS_ADD_DELAYED_REF : action);
+-
+- ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+- if (ret > 0)
+- kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
+-}
+-
+-/*
+ * add a delayed tree ref. This does all of the accounting required
+ * to make sure the delayed ref is eventually processed before this
+ * transaction commits.
+@@ -851,11 +810,25 @@ int btrfs_add_delayed_data_ref(struct bt
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
++ int ret;
++ u8 ref_type;
+
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+
++ if (parent)
++ ref_type = BTRFS_SHARED_DATA_REF_KEY;
++ else
++ ref_type = BTRFS_EXTENT_DATA_REF_KEY;
++ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
++ ref_root, action, ref_type);
++ ref->root = ref_root;
++ ref->parent = parent;
++ ref->objectid = owner;
++ ref->offset = offset;
++
++
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
+ if (!head_ref) {
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+@@ -887,11 +860,15 @@ int btrfs_add_delayed_data_ref(struct bt
+ action, 1, 0, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+- add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
+- num_bytes, parent, ref_root, owner, offset,
+- action);
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+ spin_unlock(&delayed_refs->lock);
+
++ trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
++ action == BTRFS_ADD_DELAYED_EXTENT ?
++ BTRFS_ADD_DELAYED_REF : action);
++ if (ret > 0)
++ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
++
+ if (qrecord_inserted)
+ return btrfs_qgroup_trace_extent_post(fs_info, record);
+ return 0;
diff --git a/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch b/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
new file mode 100644
index 0000000000..1593956fc0
--- /dev/null
+++ b/patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
@@ -0,0 +1,102 @@
+From a2e569b3f2b138f2c25b4598cf4b18af8af39abd Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:22 +0300
+Git-commit: a2e569b3f2b138f2c25b4598cf4b18af8af39abd
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 6/8] btrfs: Introduce init_delayed_ref_head
+
+add_delayed_ref_head implements the logic to both initialize a head_ref
+structure as well as perform the necessary operations to add it to the
+delayed ref machinery. This has resulted in a very cumbersome interface
+with loads of parameters and code, which at first glance, looks very
+unwieldy. Begin untangling it by first extracting the initialization
+only code in its own function. It's more or less verbatim copy of the
+first part of add_delayed_ref_head.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 65 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 65 insertions(+)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 3fa8ea5cbbc6..227094efd050 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -526,6 +526,71 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+ spin_unlock(&existing->lock);
+ }
+
++static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
++ struct btrfs_qgroup_extent_record *qrecord,
++ u64 bytenr, u64 num_bytes, u64 ref_root,
++ u64 reserved, int action, bool is_data,
++ bool is_system)
++{
++ int count_mod = 1;
++ int must_insert_reserved = 0;
++
++ /* If reserved is provided, it must be a data extent. */
++ BUG_ON(!is_data && reserved);
++
++ /*
++ * The head node stores the sum of all the mods, so dropping a ref
++ * should drop the sum in the head node by one.
++ */
++ if (action == BTRFS_UPDATE_DELAYED_HEAD)
++ count_mod = 0;
++ else if (action == BTRFS_DROP_DELAYED_REF)
++ count_mod = -1;
++
++ /*
++ * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
++ * accounting when the extent is finally added, or if a later
++ * modification deletes the delayed ref without ever inserting the
++ * extent into the extent allocation tree. ref->must_insert_reserved
++ * is the flag used to record that accounting mods are required.
++ *
++ * Once we record must_insert_reserved, switch the action to
++ * BTRFS_ADD_DELAYED_REF because other special casing is not required.
++ */
++ if (action == BTRFS_ADD_DELAYED_EXTENT)
++ must_insert_reserved = 1;
++ else
++ must_insert_reserved = 0;
++
++ refcount_set(&head_ref->refs, 1);
++ head_ref->bytenr = bytenr;
++ head_ref->num_bytes = num_bytes;
++ head_ref->ref_mod = count_mod;
++ head_ref->must_insert_reserved = must_insert_reserved;
++ head_ref->is_data = is_data;
++ head_ref->is_system = is_system;
++ head_ref->ref_tree = RB_ROOT;
++ INIT_LIST_HEAD(&head_ref->ref_add_list);
++ RB_CLEAR_NODE(&head_ref->href_node);
++ head_ref->processing = 0;
++ head_ref->total_ref_mod = count_mod;
++ head_ref->qgroup_reserved = 0;
++ head_ref->qgroup_ref_root = 0;
++ spin_lock_init(&head_ref->lock);
++ mutex_init(&head_ref->mutex);
++
++ if (qrecord) {
++ if (ref_root && reserved) {
++ head_ref->qgroup_ref_root = ref_root;
++ head_ref->qgroup_reserved = reserved;
++ }
++
++ qrecord->bytenr = bytenr;
++ qrecord->num_bytes = num_bytes;
++ qrecord->old_roots = NULL;
++ }
++}
++
+ /*
+ * helper function to actually insert a head node into the rbtree.
+ * this does all the dirty work in terms of maintaining the correct
+--
+2.21.0
+
diff --git a/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch b/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
new file mode 100644
index 0000000000..d41cf73dd5
--- /dev/null
+++ b/patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
@@ -0,0 +1,102 @@
+From eb86ec73b968b2895ffede893b33bf49bbc9bf5c Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:23 +0300
+Git-commit: eb86ec73b968b2895ffede893b33bf49bbc9bf5c
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 7/8] btrfs: Use init_delayed_ref_head in add_delayed_ref_head
+
+Use the newly introduced function when initialising the head_ref in
+add_delayed_ref_head. No functional changes.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 61 +++----------------------------------------------
+ 1 file changed, 4 insertions(+), 57 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -625,69 +625,16 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ {
+ struct btrfs_delayed_ref_head *existing;
+ struct btrfs_delayed_ref_root *delayed_refs;
+- int count_mod = 1;
+- int must_insert_reserved = 0;
+ int qrecord_inserted = 0;
+
+- /* If reserved is provided, it must be a data extent. */
+- BUG_ON(!is_data && reserved);
+-
+- /*
+- * the head node stores the sum of all the mods, so dropping a ref
+- * should drop the sum in the head node by one.
+- */
+- if (action == BTRFS_UPDATE_DELAYED_HEAD)
+- count_mod = 0;
+- else if (action == BTRFS_DROP_DELAYED_REF)
+- count_mod = -1;
+-
+- /*
+- * BTRFS_ADD_DELAYED_EXTENT means that we need to update
+- * the reserved accounting when the extent is finally added, or
+- * if a later modification deletes the delayed ref without ever
+- * inserting the extent into the extent allocation tree.
+- * ref->must_insert_reserved is the flag used to record
+- * that accounting mods are required.
+- *
+- * Once we record must_insert_reserved, switch the action to
+- * BTRFS_ADD_DELAYED_REF because other special casing is not required.
+- */
+- if (action == BTRFS_ADD_DELAYED_EXTENT)
+- must_insert_reserved = 1;
+- else
+- must_insert_reserved = 0;
+-
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- refcount_set(&head_ref->refs, 1);
+- head_ref->bytenr = bytenr;
+- head_ref->num_bytes = num_bytes;
+- head_ref->ref_mod = count_mod;
+- head_ref->must_insert_reserved = must_insert_reserved;
+- head_ref->is_data = is_data;
+- head_ref->is_system = is_system;
+- head_ref->ref_tree = RB_ROOT;
+- INIT_LIST_HEAD(&head_ref->ref_add_list);
+- RB_CLEAR_NODE(&head_ref->href_node);
+- head_ref->processing = 0;
+- head_ref->total_ref_mod = count_mod;
+- head_ref->qgroup_reserved = 0;
+- head_ref->qgroup_ref_root = 0;
+- spin_lock_init(&head_ref->lock);
+- mutex_init(&head_ref->mutex);
++ init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
++ reserved, action, is_data, is_system);
+
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+- if (ref_root && reserved) {
+- head_ref->qgroup_ref_root = ref_root;
+- head_ref->qgroup_reserved = reserved;
+- }
+-
+- qrecord->bytenr = bytenr;
+- qrecord->num_bytes = num_bytes;
+- qrecord->old_roots = NULL;
+-
+- if(btrfs_qgroup_trace_extent_nolock(fs_info,
++ if (btrfs_qgroup_trace_extent_nolock(fs_info,
+ delayed_refs, qrecord))
+ kfree(qrecord);
+ else
+@@ -712,7 +659,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ } else {
+ if (old_ref_mod)
+ *old_ref_mod = 0;
+- if (is_data && count_mod < 0)
++ if (is_data && head_ref->ref_mod < 0)
+ delayed_refs->pending_csums += num_bytes;
+ delayed_refs->num_heads++;
+ delayed_refs->num_heads_ready++;
diff --git a/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch b/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
new file mode 100644
index 0000000000..7620fc154c
--- /dev/null
+++ b/patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
@@ -0,0 +1,159 @@
+From 2335efafa63f0c675ebb4f8908fff9e972fb8a58 Mon Sep 17 00:00:00 2001
+From: Nikolay Borisov <nborisov@suse.com>
+Date: Tue, 24 Apr 2018 17:18:24 +0300
+Git-commit: 2335efafa63f0c675ebb4f8908fff9e972fb8a58
+Patch-mainline: v4.18
+References: bsc#1134813
+Subject: [PATCH 8/8] btrfs: split delayed ref head initialization and addition
+
+add_delayed_ref_head really performed 2 independent operations -
+initialising the ref head and adding it to a list. Now that the init
+part is in a separate function let's complete the separation between
+both operations. This results in a lot simpler interface for
+add_delayed_ref_head since the function now deals solely with either
+adding the newly initialised delayed ref head or merging it into an
+existing delayed ref head. This results in vastly simplified function
+signature since 5 arguments are dropped. The only other thing worth
+mentioning is that due to this split the WARN_ON catching reinit of an
+existing head had to be adjusted. In this patch the condition is extended such that:
+
+ qrecord && head_ref->qgroup_ref_root && head_ref->qgroup_reserved
+
+is added. This is done because the two qgroup_* prefixed members are
+set only if both ref_root and reserved are passed. So functionally
+it's equivalent to the old WARN_ON and allows to remove the two args
+from add_delayed_ref_head.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 42 ++++++++++++++++++++----------------------
+ 1 file changed, 20 insertions(+), 22 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -617,9 +617,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+- u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
+- int action, int is_data, int is_system,
+- int *qrecord_inserted_ret,
++ int action, int *qrecord_inserted_ret,
+ int *old_ref_mod, int *new_ref_mod)
+
+ {
+@@ -629,9 +627,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+- init_delayed_ref_head(head_ref, qrecord, bytenr, num_bytes, ref_root,
+- reserved, action, is_data, is_system);
+-
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+ if (btrfs_qgroup_trace_extent_nolock(fs_info,
+@@ -646,7 +641,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ existing = htree_insert(&delayed_refs->href_root,
+ &head_ref->href_node);
+ if (existing) {
+- WARN_ON(ref_root && reserved && existing->qgroup_ref_root
++ WARN_ON(qrecord && head_ref->qgroup_ref_root
++ && head_ref->qgroup_reserved
++ && existing->qgroup_ref_root
+ && existing->qgroup_reserved);
+ update_existing_head_ref(delayed_refs, existing, head_ref,
+ old_ref_mod);
+@@ -659,8 +656,8 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ } else {
+ if (old_ref_mod)
+ *old_ref_mod = 0;
+- if (is_data && head_ref->ref_mod < 0)
+- delayed_refs->pending_csums += num_bytes;
++ if (head_ref->is_data && head_ref->ref_mod < 0)
++ delayed_refs->pending_csums += head_ref->num_bytes;
+ delayed_refs->num_heads++;
+ delayed_refs->num_heads_ready++;
+ atomic_inc(&delayed_refs->num_entries);
+@@ -670,6 +667,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ *qrecord_inserted_ret = qrecord_inserted;
+ if (new_ref_mod)
+ *new_ref_mod = head_ref->total_ref_mod;
++
+ return head_ref;
+ }
+
+@@ -741,7 +739,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
+- int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
++ bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ int ret;
+ u8 ref_type;
+
+@@ -771,6 +769,8 @@ int btrfs_add_delayed_tree_ref(struct bt
+ goto free_head_ref;
+ }
+
++ init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
++ ref_root, 0, action, false, is_system);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+@@ -781,8 +781,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+- bytenr, num_bytes, 0, 0, action, 0,
+- is_system, &qrecord_inserted,
++ action, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+
+@@ -858,6 +857,8 @@ int btrfs_add_delayed_data_ref(struct bt
+ }
+ }
+
++ init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
++ reserved, action, true, false);
+ head_ref->extent_op = NULL;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+@@ -868,8 +869,7 @@ int btrfs_add_delayed_data_ref(struct bt
+ * the spin lock
+ */
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+- bytenr, num_bytes, ref_root, reserved,
+- action, 1, 0, &qrecord_inserted,
++ action, &qrecord_inserted,
+ old_ref_mod, new_ref_mod);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+@@ -898,19 +898,17 @@ int btrfs_add_delayed_extent_op(struct b
+ if (!head_ref)
+ return -ENOMEM;
+
++ init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
++ BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
++ false);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+
+- /*
+- * extent_ops just modify the flags of an extent and they don't result
+- * in ref count changes, hence it's safe to pass false/0 for is_system
+- * argument
+- */
+- add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
+- num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
+- extent_op->is_data, 0, NULL, NULL, NULL);
++ add_delayed_ref_head(fs_info, trans, head_ref, NULL,
++ BTRFS_UPDATE_DELAYED_HEAD,
++ NULL, NULL, NULL);
+
+ spin_unlock(&delayed_refs->lock);
+ return 0;
diff --git a/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch b/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch
index 16d43c06b9..d676081c42 100644
--- a/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch
+++ b/patches.suse/btrfs-Take-trans-lock-before-access-running-trans-in.patch
@@ -32,8 +32,8 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
-@@ -3072,7 +3072,11 @@ static noinline int check_delayed_ref(st
- struct btrfs_transaction *cur_trans;
+@@ -3073,7 +3073,11 @@ static noinline int check_delayed_ref(st
+ struct rb_node *node;
int ret = 0;
+ spin_lock(&root->fs_info->trans_lock);
@@ -44,7 +44,7 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
if (!cur_trans)
return 0;
-@@ -3081,6 +3085,7 @@ static noinline int check_delayed_ref(st
+@@ -3082,6 +3086,7 @@ static noinline int check_delayed_ref(st
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
@@ -52,7 +52,7 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
return 0;
}
-@@ -3097,6 +3102,7 @@ static noinline int check_delayed_ref(st
+@@ -3098,6 +3103,7 @@ static noinline int check_delayed_ref(st
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
@@ -60,7 +60,7 @@ Acked-by: Nikolay Borisov <nborisov@suse.com>
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
-@@ -3124,6 +3130,7 @@ static noinline int check_delayed_ref(st
+@@ -3130,6 +3136,7 @@ static noinline int check_delayed_ref(st
}
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
diff --git a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
index aac087d985..f7f380eaa6 100644
--- a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
+++ b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
@@ -55,7 +55,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -548,8 +548,10 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -553,8 +553,10 @@ add_delayed_ref_head(struct btrfs_fs_inf
struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
@@ -67,15 +67,15 @@ Signed-off-by: David Sterba <dsterba@suse.com>
{
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
-@@ -593,6 +595,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -598,6 +600,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
head_ref->ref_mod = count_mod;
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
+ head_ref->is_system = is_system;
- INIT_LIST_HEAD(&head_ref->ref_list);
+ head_ref->ref_tree = RB_ROOT;
INIT_LIST_HEAD(&head_ref->ref_add_list);
RB_CLEAR_NODE(&head_ref->href_node);
-@@ -781,6 +784,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -785,6 +788,7 @@ int btrfs_add_delayed_tree_ref(struct bt
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
int qrecord_inserted;
@@ -83,7 +83,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
BUG_ON(extent_op && extent_op->is_data);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
-@@ -809,8 +813,8 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -813,8 +817,8 @@ int btrfs_add_delayed_tree_ref(struct bt
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, 0, 0, action, 0,
@@ -94,7 +94,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action);
-@@ -876,7 +880,7 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -880,7 +884,7 @@ int btrfs_add_delayed_data_ref(struct bt
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
bytenr, num_bytes, ref_root, reserved,
@@ -103,7 +103,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
old_ref_mod, new_ref_mod);
add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
-@@ -906,9 +910,14 @@ int btrfs_add_delayed_extent_op(struct b
+@@ -910,9 +914,14 @@ int btrfs_add_delayed_extent_op(struct b
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
@@ -121,7 +121,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
return 0;
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
-@@ -140,6 +140,7 @@ struct btrfs_delayed_ref_head {
+@@ -139,6 +139,7 @@ struct btrfs_delayed_ref_head {
*/
unsigned int must_insert_reserved:1;
unsigned int is_data:1;
diff --git a/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch b/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
index f1c1f33fc2..1ff8b31a3b 100644
--- a/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
+++ b/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
@@ -40,31 +40,30 @@ deadlocks when resolving references.
Acked-by: Jeff Mahoney <jeffm@suse.com>
---
---
- fs/btrfs/delayed-ref.c | 20 +++-----------------
+ fs/btrfs/delayed-ref.c | 22 ++++------------------
fs/btrfs/qgroup.c | 30 +++---------------------------
fs/btrfs/qgroup.h | 33 +++------------------------------
- 3 files changed, 9 insertions(+), 74 deletions(-)
+ 3 files changed, 10 insertions(+), 75 deletions(-)
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -549,7 +549,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -615,13 +615,11 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
- u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
- int action, int is_data, int is_system,
-- int *qrecord_inserted_ret,
- int *old_ref_mod, int *new_ref_mod)
+- int action, int *qrecord_inserted_ret,
+- int *old_ref_mod, int *new_ref_mod)
++ int action, int *old_ref_mod, int *new_ref_mod)
{
-@@ -557,7 +556,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
- int count_mod = 1;
- int must_insert_reserved = 0;
- int qrecord_inserted = 0;
- /* If reserved is provided, it must be a data extent. */
- BUG_ON(!is_data && reserved);
-@@ -618,8 +616,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
- if(btrfs_qgroup_trace_extent_nolock(fs_info,
+ delayed_refs = &trans->transaction->delayed_refs;
+
+@@ -630,8 +628,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ if (btrfs_qgroup_trace_extent_nolock(fs_info,
delayed_refs, qrecord))
kfree(qrecord);
- else
@@ -72,7 +71,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
}
trace_add_delayed_ref_head(fs_info, head_ref, action);
-@@ -645,8 +641,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -657,8 +653,6 @@ add_delayed_ref_head(struct btrfs_fs_inf
atomic_inc(&delayed_refs->num_entries);
trans->delayed_ref_updates++;
}
@@ -80,64 +79,67 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
- *qrecord_inserted_ret = qrecord_inserted;
if (new_ref_mod)
*new_ref_mod = head_ref->total_ref_mod;
- return head_ref;
-@@ -779,7 +773,6 @@ int btrfs_add_delayed_tree_ref(struct bt
+
+@@ -732,7 +726,6 @@ int btrfs_add_delayed_tree_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
- int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
-
- BUG_ON(extent_op && extent_op->is_data);
-@@ -809,15 +802,12 @@ int btrfs_add_delayed_tree_ref(struct bt
+ bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ int ret;
+ u8 ref_type;
+@@ -775,8 +768,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ * the spin lock
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
- bytenr, num_bytes, 0, 0, action, 0,
-- is_system, &qrecord_inserted,
+- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
-+ is_system, old_ref_mod, new_ref_mod);
++ action, old_ref_mod, new_ref_mod);
- add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
- num_bytes, parent, ref_root, level, action);
- spin_unlock(&delayed_refs->lock);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
+@@ -788,8 +780,6 @@ int btrfs_add_delayed_tree_ref(struct bt
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
- if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(fs_info, record);
return 0;
free_head_ref:
-@@ -842,7 +832,6 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -814,7 +804,6 @@ int btrfs_add_delayed_data_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
+ int ret;
+ u8 ref_type;
- ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
- if (!ref)
-@@ -876,16 +865,13 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -863,8 +852,7 @@ int btrfs_add_delayed_data_ref(struct bt
+ * the spin lock
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
- bytenr, num_bytes, ref_root, reserved,
-- action, 1, 0, &qrecord_inserted,
+- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
-+ action, 1, 0, old_ref_mod, new_ref_mod);
++ action, old_ref_mod, new_ref_mod);
- add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
- num_bytes, parent, ref_root, owner, offset,
- action);
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
+@@ -875,8 +863,6 @@ int btrfs_add_delayed_data_ref(struct bt
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
- if (qrecord_inserted)
- return btrfs_qgroup_trace_extent_post(fs_info, record);
return 0;
}
-@@ -913,7 +899,7 @@ int btrfs_add_delayed_extent_op(struct b
- */
- add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
- num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
-- extent_op->is_data, 0, NULL, NULL, NULL);
-+ extent_op->is_data, 0, NULL, NULL);
+@@ -902,7 +888,7 @@ int btrfs_add_delayed_extent_op(struct b
+
+ add_delayed_ref_head(fs_info, trans, head_ref, NULL,
+ BTRFS_UPDATE_DELAYED_HEAD,
+- NULL, NULL, NULL);
++ NULL, NULL);
spin_unlock(&delayed_refs->lock);
return 0;
diff --git a/series.conf b/series.conf
index 0b4b1f3b32..cfacde8fb2 100644
--- a/series.conf
+++ b/series.conf
@@ -8307,6 +8307,7 @@
patches.suse/0019-btrfs-make-the-delalloc-block-rsv-per-inode.patch
patches.suse/0021-btrfs-switch-args-for-comp_-_refs.patch
patches.suse/0022-btrfs-add-a-comp_refs-helper.patch
+ patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
patches.suse/btrfs-move-btrfs_truncate_block-out-of-trans-handle.patch
patches.suse/btrfs-Fix-bug-for-misused-dev_t-when-lookup-in-dev-s.patch
patches.fixes/jfs-remove-increment-of-i_version-counter.patch
@@ -16398,6 +16399,14 @@
patches.drivers/hwmon-asus_atk0110-Replace-deprecated-device-registe
patches.drivers/spi-bcm63xx-hspi-Enable-the-clock-before-calling-clk
patches.drivers/spi-pxa2xx-check-clk_prepare_enable-return-value
+ patches.suse/0001-btrfs-Factor-out-common-delayed-refs-init-code.patch
+ patches.suse/0002-btrfs-Use-init_delayed_ref_common-in-add_delayed_tre.patch
+ patches.suse/0003-btrfs-Use-init_delayed_ref_common-in-add_delayed_dat.patch
+ patches.suse/0004-btrfs-Open-code-add_delayed_tree_ref.patch
+ patches.suse/0005-btrfs-Open-code-add_delayed_data_ref.patch
+ patches.suse/0006-btrfs-Introduce-init_delayed_ref_head.patch
+ patches.suse/0007-btrfs-Use-init_delayed_ref_head-in-add_delayed_ref_h.patch
+ patches.suse/0008-btrfs-split-delayed-ref-head-initialization-and-addi.patch
patches.suse/0001-btrfs-qgroup-Search-commit-root-for-rescan-to-avoid-.patch
patches.suse/0002-btrfs-qgroup-Finish-rescan-when-hit-the-last-leaf-of.patch
patches.suse/btrfs-update-stale-comments-referencing-vmtruncate.patch