author    Qu Wenruo <wqu@suse.com>  2019-05-17 15:58:18 +0800
committer Qu Wenruo <wqu@suse.com>  2019-05-17 15:58:18 +0800
commit    786c79d308b3571a21a3f8a31b51aa5a37aa8483
tree      47e40e4c9448b6706b36bc425b9a45149851b0f1
parent    b27bb39365ff35c8308957af35067359c060f3e8
- btrfs: track refs in a rb_tree instead of a list (bsc#1134813).
- Refresh patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch.
- Refresh patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch.
-rw-r--r--  patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch           |  10
-rw-r--r--  patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch                 | 391
-rw-r--r--  patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch |  16
-rw-r--r--  series.conf                                                                             |   1

4 files changed, 405 insertions, 13 deletions
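
Before the per-file diffs, a quick illustration of the pattern this backport introduces. The upstream description quoted in the new patch below explains that delayed refs move from a per-head list, merged by repeated full scans, to an ordered rb_tree whose insert routine returns an already-existing equal entry so the caller can merge on the spot. What follows is a minimal, hypothetical userspace sketch of that contract, not the kernel code: a hand-rolled binary search tree stands in for the kernel rb_tree API, refs are keyed by root id alone (the real comp_refs() compares more fields), and the helper names tree_insert()/add_ref() merely echo the patch.

/*
 * Illustrative sketch only -- NOT the kernel implementation.  A plain
 * binary search tree stands in for <linux/rbtree.h>, refs are keyed
 * by root id alone, and the real code additionally drops a ref whose
 * ref_mod reaches zero, which this sketch omits.
 */
#include <stdio.h>
#include <stdlib.h>

struct ref {
	unsigned long long root;	/* subvolume the ref belongs to */
	int ref_mod;			/* accumulated reference delta */
	struct ref *left, *right;
};

/* Total order over refs; plays the role of comp_refs(). */
static int comp_refs(const struct ref *a, const struct ref *b)
{
	if (a->root < b->root)
		return -1;
	if (a->root > b->root)
		return 1;
	return 0;
}

/*
 * Insert @ins into the tree at *@rootp.  If an equal entry already
 * exists, return it so the caller merges instead of inserting -- the
 * same contract as tree_insert() in the patch.
 */
static struct ref *tree_insert(struct ref **rootp, struct ref *ins)
{
	while (*rootp) {
		int comp = comp_refs(ins, *rootp);

		if (comp < 0)
			rootp = &(*rootp)->left;
		else if (comp > 0)
			rootp = &(*rootp)->right;
		else
			return *rootp;	/* mergeable duplicate found */
	}
	*rootp = ins;
	return NULL;
}

/* Rough analogue of insert_delayed_ref(): merge at insert time. */
static struct ref *add_ref(struct ref **rootp, unsigned long long root,
			   int ref_mod)
{
	struct ref *ins = calloc(1, sizeof(*ins));
	struct ref *exist;

	ins->root = root;
	ins->ref_mod = ref_mod;
	exist = tree_insert(rootp, ins);
	if (!exist)
		return ins;		/* freshly inserted */
	exist->ref_mod += ref_mod;	/* merged with existing entry */
	free(ins);
	return exist;
}

int main(void)
{
	struct ref *tree = NULL;
	struct ref *r;

	add_ref(&tree, 257, +1);	/* ref from snapshot 257 */
	add_ref(&tree, 258, +1);	/* different root: kept apart */
	r = add_ref(&tree, 257, -1);	/* merges with the first entry */

	printf("root 257 ref_mod after merge: %d\n", r->ref_mod); /* 0 */
	return 0;
}

The design point is the same one visible in the real insert_delayed_ref() hunk below: because the insert routine reports an equal entry instead of appending a duplicate, merging happens during a single descent of a balanced tree, and later iteration can stop at the first non-matching neighbor instead of walking an entire list.
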
diff --git a/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch b/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch
index 71fb8e858e..cbbaaef660 100644
--- a/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch
+++ b/patches.fixes/0001-btrfs-Take-trans-lock-before-access-running-trans-in.patch
@@ -31,8 +31,8 @@ Signed-off-by: David Sterba <dsterba@suse.com>
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
-@@ -3086,7 +3086,11 @@ static noinline int check_delayed_ref(st
- struct btrfs_transaction *cur_trans;
+@@ -3080,7 +3080,11 @@ static noinline int check_delayed_ref(st
+ struct rb_node *node;
int ret = 0;
+ spin_lock(&root->fs_info->trans_lock);
@@ -43,7 +43,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
if (!cur_trans)
return 0;
-@@ -3095,6 +3099,7 @@ static noinline int check_delayed_ref(st
+@@ -3089,6 +3093,7 @@ static noinline int check_delayed_ref(st
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
@@ -51,7 +51,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
return 0;
}
-@@ -3111,6 +3116,7 @@ static noinline int check_delayed_ref(st
+@@ -3105,6 +3110,7 @@ static noinline int check_delayed_ref(st
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
@@ -59,7 +59,7 @@ Signed-off-by: David Sterba <dsterba@suse.com>
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
-@@ -3138,6 +3144,7 @@ static noinline int check_delayed_ref(st
+@@ -3137,6 +3143,7 @@ static noinline int check_delayed_ref(st
}
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
diff --git a/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
new file mode 100644
index 0000000000..50e17fddf4
--- /dev/null
+++ b/patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
@@ -0,0 +1,391 @@
+From 0e0adbcfdc908684317c99a9bf5e13383f03b7ec Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Thu, 19 Oct 2017 14:16:00 -0400
+Git-commit: 0e0adbcfdc908684317c99a9bf5e13383f03b7ec
+Patch-mainline: v4.15
+References: bsc#1134813
+Subject: [PATCH] btrfs: track refs in a rb_tree instead of a list
+
+If we get a significant amount of delayed refs for a single block (think
+modifying multiple snapshots) we can end up spending an ungodly amount
+of time looping through all of the entries trying to see if they can be
+merged. This is because we only add them to a list, so we have O(2n)
+for every ref head. This doesn't make any sense as we likely have refs
+for different roots, and so they cannot be merged. Tracking in a tree
+will allow us to break as soon as we hit an entry that doesn't match,
+making our worst case O(n).
+
+With this we can also merge entries more easily. Before we had to hope
+that matching refs were on the ends of our list, but with the tree we
+can search down to exact matches and merge them at insert time.
+
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/backref.c | 5 +-
+ fs/btrfs/delayed-ref.c | 108 +++++++++++++++++++++++++------------------------
+ fs/btrfs/delayed-ref.h | 5 --
+ fs/btrfs/disk-io.c | 10 ++--
+ fs/btrfs/extent-tree.c | 21 ++++++---
+ 5 files changed, 82 insertions(+), 67 deletions(-)
+
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -758,6 +758,7 @@ static int add_delayed_refs(const struct
+ struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+ struct btrfs_key key;
+ struct btrfs_key op_key = {0};
++ struct rb_node *n;
+ int count;
+ int ret = 0;
+
+@@ -765,7 +766,9 @@ static int add_delayed_refs(const struct
+ btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
+
+ spin_lock(&head->lock);
+- list_for_each_entry(node, &head->ref_list, list) {
++ for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
++ node = rb_entry(n, struct btrfs_delayed_ref_node,
++ ref_node);
+ if (node->seq > seq)
+ continue;
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -143,6 +143,34 @@ static struct btrfs_delayed_ref_head *ht
+ return NULL;
+ }
+
++static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
++ struct btrfs_delayed_ref_node *ins)
++{
++ struct rb_node **p = &root->rb_node;
++ struct rb_node *node = &ins->ref_node;
++ struct rb_node *parent_node = NULL;
++ struct btrfs_delayed_ref_node *entry;
++
++ while (*p) {
++ int comp;
++
++ parent_node = *p;
++ entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
++ ref_node);
++ comp = comp_refs(ins, entry, true);
++ if (comp < 0)
++ p = &(*p)->rb_left;
++ else if (comp > 0)
++ p = &(*p)->rb_right;
++ else
++ return entry;
++ }
++
++ rb_link_node(node, parent_node, p);
++ rb_insert_color(node, root);
++ return NULL;
++}
++
+ /*
+ * find an head entry based on bytenr. This returns the delayed ref
+ * head if it was able to find one, or NULL if nothing was in that spot.
+@@ -216,7 +244,8 @@ static inline void drop_delayed_ref(stru
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+ } else {
+ assert_spin_locked(&head->lock);
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &head->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ }
+@@ -234,24 +263,18 @@ static bool merge_ref(struct btrfs_trans
+ u64 seq)
+ {
+ struct btrfs_delayed_ref_node *next;
++ struct rb_node *node = rb_next(&ref->ref_node);
+ bool done = false;
+
+- next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
+- while (!done && &next->list != &head->ref_list) {
++ while (!done && node) {
+ int mod;
+- struct btrfs_delayed_ref_node *next2;
+-
+- next2 = list_next_entry(next, list);
+-
+- if (next == ref)
+- goto next;
+
++ next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
++ node = rb_next(node);
+ if (seq && next->seq >= seq)
+- goto next;
+-
++ break;
+ if (comp_refs(ref, next, false))
+- goto next;
++ break;
+
+ if (ref->action == next->action) {
+ mod = next->ref_mod;
+@@ -275,8 +298,6 @@ static bool merge_ref(struct btrfs_trans
+ WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
+ ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
+ }
+-next:
+- next = next2;
+ }
+
+ return done;
+@@ -288,11 +309,12 @@ void btrfs_merge_delayed_refs(struct btr
+ struct btrfs_delayed_ref_head *head)
+ {
+ struct btrfs_delayed_ref_node *ref;
++ struct rb_node *node;
+ u64 seq = 0;
+
+ assert_spin_locked(&head->lock);
+
+- if (list_empty(&head->ref_list))
++ if (RB_EMPTY_ROOT(&head->ref_tree))
+ return;
+
+ /* We don't have too many refs to merge for data. */
+@@ -309,22 +331,13 @@ void btrfs_merge_delayed_refs(struct btr
+ }
+ spin_unlock(&fs_info->tree_mod_seq_lock);
+
+- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
+- while (&ref->list != &head->ref_list) {
++again:
++ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
++ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ if (seq && ref->seq >= seq)
+- goto next;
+-
+- if (merge_ref(trans, delayed_refs, head, ref, seq)) {
+- if (list_empty(&head->ref_list))
+- break;
+- ref = list_first_entry(&head->ref_list,
+- struct btrfs_delayed_ref_node,
+- list);
+ continue;
+- }
+-next:
+- ref = list_next_entry(ref, list);
++ if (merge_ref(trans, delayed_refs, head, ref, seq))
++ goto again;
+ }
+ }
+
+@@ -407,25 +420,19 @@ again:
+ * Return 0 for insert.
+ * Return >0 for merge.
+ */
+-static int
+-add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+- struct btrfs_delayed_ref_root *root,
+- struct btrfs_delayed_ref_head *href,
+- struct btrfs_delayed_ref_node *ref)
++static int insert_delayed_ref(struct btrfs_trans_handle *trans,
++ struct btrfs_delayed_ref_root *root,
++ struct btrfs_delayed_ref_head *href,
++ struct btrfs_delayed_ref_node *ref)
+ {
+ struct btrfs_delayed_ref_node *exist;
+ int mod;
+ int ret = 0;
+
+ spin_lock(&href->lock);
+- /* Check whether we can merge the tail node with ref */
+- if (list_empty(&href->ref_list))
+- goto add_tail;
+- exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
+- list);
+- /* No need to compare bytenr nor is_head */
+- if (comp_refs(exist, ref, true))
+- goto add_tail;
++ exist = tree_insert(&href->ref_tree, ref);
++ if (!exist)
++ goto inserted;
+
+ /* Now we are sure we can merge */
+ ret = 1;
+@@ -456,9 +463,7 @@ add_delayed_ref_tail_merge(struct btrfs_
+ drop_delayed_ref(trans, root, href, exist);
+ spin_unlock(&href->lock);
+ return ret;
+-
+-add_tail:
+- list_add_tail(&ref->list, &href->ref_list);
++inserted:
+ if (ref->action == BTRFS_ADD_DELAYED_REF)
+ list_add_tail(&ref->add_list, &href->ref_add_list);
+ atomic_inc(&root->num_entries);
+@@ -610,7 +615,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+ head_ref = btrfs_delayed_node_to_head(ref);
+ head_ref->must_insert_reserved = must_insert_reserved;
+ head_ref->is_data = is_data;
+- INIT_LIST_HEAD(&head_ref->ref_list);
++ head_ref->ref_tree = RB_ROOT;
+ INIT_LIST_HEAD(&head_ref->ref_add_list);
+ head_ref->processing = 0;
+ head_ref->total_ref_mod = count_mod;
+@@ -698,7 +703,7 @@ add_delayed_tree_ref(struct btrfs_fs_inf
+ ref->is_head = 0;
+ ref->in_tree = 1;
+ ref->seq = seq;
+- INIT_LIST_HEAD(&ref->list);
++ RB_CLEAR_NODE(&ref->ref_node);
+ INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_tree_ref(ref);
+@@ -712,7 +717,7 @@ add_delayed_tree_ref(struct btrfs_fs_inf
+
+ trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
+
+- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+
+ /*
+ * XXX: memory should be freed at the same level allocated.
+@@ -755,7 +760,7 @@ add_delayed_data_ref(struct btrfs_fs_inf
+ ref->is_head = 0;
+ ref->in_tree = 1;
+ ref->seq = seq;
+- INIT_LIST_HEAD(&ref->list);
++ RB_CLEAR_NODE(&ref->ref_node);
+ INIT_LIST_HEAD(&ref->add_list);
+
+ full_ref = btrfs_delayed_node_to_data_ref(ref);
+@@ -771,8 +776,7 @@ add_delayed_data_ref(struct btrfs_fs_inf
+
+ trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
+
+- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+-
++ ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
+ }
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -40,8 +40,7 @@ struct btrfs_delayed_ref_node {
+ */
+ struct rb_node rb_node;
+
+- /*data/tree ref use list, stored in ref_head->ref_list. */
+- struct list_head list;
++ struct rb_node ref_node;
+ /*
+ * If action is BTRFS_ADD_DELAYED_REF, also link this node to
+ * ref_head->ref_add_list, then we do not need to iterate the
+@@ -104,7 +103,7 @@ struct btrfs_delayed_ref_head {
+ struct mutex mutex;
+
+ spinlock_t lock;
+- struct list_head ref_list;
++ struct rb_root ref_tree;
+ /* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
+ struct list_head ref_add_list;
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4162,7 +4162,7 @@ static int btrfs_destroy_delayed_refs(st
+
+ while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
+ struct btrfs_delayed_ref_head *head;
+- struct btrfs_delayed_ref_node *tmp;
++ struct rb_node *n;
+ bool pin_bytes = false;
+
+ head = rb_entry(node, struct btrfs_delayed_ref_head,
+@@ -4178,10 +4178,12 @@ static int btrfs_destroy_delayed_refs(st
+ continue;
+ }
+ spin_lock(&head->lock);
+- list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
+- list) {
++ while ((n = rb_first(&head->ref_tree)) != NULL) {
++ ref = rb_entry(n, struct btrfs_delayed_ref_node,
++ ref_node);
+ ref->in_tree = 0;
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &head->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ atomic_dec(&delayed_refs->num_entries);
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2434,7 +2434,7 @@ select_delayed_ref(struct btrfs_delayed_
+ {
+ struct btrfs_delayed_ref_node *ref;
+
+- if (list_empty(&head->ref_list))
++ if (RB_EMPTY_ROOT(&head->ref_tree))
+ return NULL;
+
+ /*
+@@ -2447,8 +2447,8 @@ select_delayed_ref(struct btrfs_delayed_
+ return list_first_entry(&head->ref_add_list,
+ struct btrfs_delayed_ref_node, add_list);
+
+- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
+- list);
++ ref = rb_entry(rb_first(&head->ref_tree),
++ struct btrfs_delayed_ref_node, ref_node);
+ ASSERT(list_empty(&ref->add_list));
+ return ref;
+ }
+@@ -2509,7 +2509,7 @@ static int cleanup_ref_head(struct btrfs
+ spin_unlock(&head->lock);
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+- if (!list_empty(&head->ref_list) || head->extent_op) {
++ if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ return 1;
+@@ -2659,7 +2659,8 @@ static noinline int __btrfs_run_delayed_
+
+ actual_count++;
+ ref->in_tree = 0;
+- list_del(&ref->list);
++ rb_erase(&ref->ref_node, &locked_ref->ref_tree);
++ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ /*
+@@ -3071,6 +3072,7 @@ static noinline int check_delayed_ref(st
+ struct btrfs_delayed_data_ref *data_ref;
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_transaction *cur_trans;
++ struct rb_node *node;
+ int ret = 0;
+
+ cur_trans = root->fs_info->running_transaction;
+@@ -3103,7 +3105,12 @@ static noinline int check_delayed_ref(st
+ spin_unlock(&delayed_refs->lock);
+
+ spin_lock(&head->lock);
+- list_for_each_entry(ref, &head->ref_list, list) {
++ /*
++ * XXX: We should replace this with a proper search function in the
++ * future.
++ */
++ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
++ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ /* If it's a shared ref we know a cross reference exists */
+ if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
+ ret = 1;
+@@ -7078,7 +7085,7 @@ static noinline int check_ref_cleanup(st
+ goto out_delayed_unlock;
+
+ spin_lock(&head->lock);
+- if (!list_empty(&head->ref_list))
++ if (!RB_EMPTY_ROOT(&head->ref_tree))
+ goto out;
+
+ if (head->extent_op) {
diff --git a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
index 102cd4d2f5..3a02b8fa0e 100644
--- a/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
+++ b/patches.suse/btrfs-fix-race-condition-between-delayed-refs-and-blockgroup-removal.patch
@@ -56,7 +56,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
-@@ -557,8 +557,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -562,8 +562,9 @@ add_delayed_ref_head(struct btrfs_fs_inf
struct btrfs_delayed_ref_node *ref,
struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
@@ -67,15 +67,15 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
{
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_head *head_ref = NULL;
-@@ -610,6 +611,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
+@@ -615,6 +616,7 @@ add_delayed_ref_head(struct btrfs_fs_inf
head_ref = btrfs_delayed_node_to_head(ref);
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
+ head_ref->is_system = is_system;
- INIT_LIST_HEAD(&head_ref->ref_list);
+ head_ref->ref_tree = RB_ROOT;
INIT_LIST_HEAD(&head_ref->ref_add_list);
head_ref->processing = 0;
-@@ -793,6 +795,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -797,6 +799,7 @@ int btrfs_add_delayed_tree_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
@@ -83,7 +83,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
BUG_ON(extent_op && extent_op->is_data);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
-@@ -821,7 +824,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -825,7 +828,7 @@ int btrfs_add_delayed_tree_ref(struct bt
*/
head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
bytenr, num_bytes, 0, 0, action, 0,
@@ -92,7 +92,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action);
-@@ -886,7 +889,7 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -890,7 +893,7 @@ int btrfs_add_delayed_data_ref(struct bt
*/
head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
bytenr, num_bytes, ref_root, reserved,
@@ -101,7 +101,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
num_bytes, parent, ref_root, owner, offset,
-@@ -941,9 +944,14 @@ int btrfs_add_delayed_extent_op(struct b
+@@ -945,9 +948,14 @@ int btrfs_add_delayed_extent_op(struct b
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
@@ -119,7 +119,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
return 0;
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
-@@ -144,6 +144,7 @@ struct btrfs_delayed_ref_head {
+@@ -143,6 +143,7 @@ struct btrfs_delayed_ref_head {
*/
unsigned int must_insert_reserved:1;
unsigned int is_data:1;
diff --git a/series.conf b/series.conf
index f02b4fe311..4e91f36add 100644
--- a/series.conf
+++ b/series.conf
@@ -23314,6 +23314,7 @@
patches.suse/0019-btrfs-make-the-delalloc-block-rsv-per-inode.patch
patches.suse/0021-btrfs-switch-args-for-comp_-_refs.patch
patches.suse/0022-btrfs-add-a-comp_refs-helper.patch
+ patches.suse/0001-btrfs-track-refs-in-a-rb_tree-instead-of-a-list.patch
patches.fixes/DLM-fix-double-list_del.patch
patches.fixes/DLM-fix-NULL-pointer-dereference-in-send_to_sock.patch
patches.drivers/0016-bcache-Avoid-nested-function-definition.patch