author		Kent Overstreet <kent.overstreet@gmail.com>	2020-06-02 19:41:47 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:08:40 -0400
commit		f96c0df4dbb0ed845bbc51f341d00bc90368c93c (patch)
tree		e2e2caa6a99307007fc44f0167ec537896847c33 /fs/bcachefs/btree_iter.c
parent		495aabede3ff594c5eda98cb9f4463502cb48cad (diff)
bcachefs: Fix a deadlock in bch2_btree_node_get_sibling()
There was a bad interaction with bch2_btree_iter_set_pos_same_leaf(), which
can leave a btree node locked that is just outside iter->pos, breaking the
lock ordering checks in __bch2_btree_node_lock(). Ideally we should get rid
of this corner case, but for now fix it locally with verbose comments.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
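For context on the check changed in the first hunk below: the patch replaces the
__btree_iter_cmp() call with an explicit two-level comparison that orders locks
first by btree ID and then by key position. The following standalone sketch is
not bcachefs code; struct bpos, cmp_ull(), pos_cmp() and violates_lock_order()
are simplified stand-ins for the real struct bpos, cmp_int(), bkey_cmp() and the
check in __bch2_btree_node_lock(). It only illustrates the ordering rule:

#include <stdio.h>

struct bpos {
	unsigned long long	inode;
	unsigned long long	offset;
};

/* three-way compare of two integers, standing in for the kernel's cmp_int() */
static int cmp_ull(unsigned long long a, unsigned long long b)
{
	return (a > b) - (a < b);
}

/* three-way compare of two positions, standing in for bkey_cmp() */
static int pos_cmp(struct bpos a, struct bpos b)
{
	return cmp_ull(a.inode, b.inode) ?: cmp_ull(a.offset, b.offset);
}

/*
 * The rule being enforced: a new lock must not sort before a lock that is
 * already held, ordering first by btree ID and then by position.  The `?:`
 * with the middle operand omitted is the GNU extension used in the patch.
 */
static int violates_lock_order(unsigned new_btree_id, struct bpos new_pos,
			       unsigned held_btree_id, struct bpos held_pos)
{
	return (cmp_ull(new_btree_id, held_btree_id) ?:
		pos_cmp(new_pos, held_pos)) < 0;
}

int main(void)
{
	struct bpos held	= { .inode = 1, .offset = 100 };
	struct bpos earlier	= { .inode = 1, .offset =  50 };
	struct bpos later	= { .inode = 1, .offset = 200 };

	/* locking a node at an earlier position than one already held is the
	 * ordering violation __bch2_btree_node_lock() has to catch */
	printf("earlier pos violates order: %d\n",
	       violates_lock_order(0, earlier, 0, held));
	printf("later pos violates order:   %d\n",
	       violates_lock_order(0, later, 0, held));
	return 0;
}

A node kept locked just outside iter->pos, as described above, makes the first
comparison come out negative even though the iterator logically hasn't moved
backwards, which is the corner case the XXX comment in the second hunk records.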
Diffstat (limited to 'fs/bcachefs/btree_iter.c')
-rw-r--r--	fs/bcachefs/btree_iter.c	18
1 file changed, 15 insertions, 3 deletions
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index f7de2def58df..43ea3ceafcf2 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -205,8 +205,9 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
if (!linked->nodes_locked)
continue;
- /* * Must lock btree nodes in key order: */
- if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
+ /* Must lock btree nodes in key order: */
+ if ((cmp_int(iter->btree_id, linked->btree_id) ?:
+ bkey_cmp(pos, linked->pos)) < 0)
ret = false;
/*
@@ -1320,6 +1321,16 @@ void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_
btree_iter_advance_to_pos(iter, l, -1);
+ /*
+ * XXX:
+ * keeping a node locked that's outside (even just outside) iter->pos
+ * breaks __bch2_btree_node_lock(). This seems to only affect
+ * bch2_btree_node_get_sibling so for now it's fixed there, but we
+ * should try to get rid of this corner case.
+ *
+ * (this behaviour is currently needed for BTREE_INSERT_NOUNLOCK)
+ */
+
if (bch2_btree_node_iter_end(&l->iter) &&
btree_iter_pos_after_node(iter, l->b))
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
@@ -2195,6 +2206,7 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
bch2_trans_preload_mem(trans, expected_mem_bytes);
#ifdef CONFIG_BCACHEFS_DEBUG
+ trans->pid = current->pid;
mutex_lock(&c->btree_trans_lock);
list_add(&trans->list, &c->btree_trans_list);
mutex_unlock(&c->btree_trans_lock);
@@ -2233,7 +2245,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
mutex_lock(&c->btree_trans_lock);
list_for_each_entry(trans, &c->btree_trans_list, list) {
- pr_buf(out, "%ps\n", (void *) trans->ip);
+ pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
trans_for_each_iter(trans, iter) {
if (!iter->nodes_locked)
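The last two hunks add a debugging aid: bch2_trans_init() records the owning
task's pid under CONFIG_BCACHEFS_DEBUG, and bch2_btree_trans_to_text() prints
that pid next to the caller when dumping in-flight transactions. Below is a
standalone sketch of the same pattern, with hypothetical names (debug_trans,
debug_trans_init() and debug_trans_dump() are not bcachefs functions), assuming
a simple mutex-protected list in place of c->btree_trans_list:

#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/types.h>

struct debug_trans {
	struct debug_trans	*next;
	pid_t			pid;		/* task that set up the transaction */
	const char		*caller;	/* stand-in for trans->ip */
};

static struct debug_trans *trans_list;
static pthread_mutex_t trans_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* mirrors the bch2_trans_init() hunk: record the pid, add to a global list */
static void debug_trans_init(struct debug_trans *trans, const char *caller)
{
	trans->pid	= getpid();
	trans->caller	= caller;

	pthread_mutex_lock(&trans_list_lock);
	trans->next	= trans_list;
	trans_list	= trans;
	pthread_mutex_unlock(&trans_list_lock);
}

/* mirrors the bch2_btree_trans_to_text() hunk: print "<pid> <caller>" */
static void debug_trans_dump(void)
{
	struct debug_trans *trans;

	pthread_mutex_lock(&trans_list_lock);
	for (trans = trans_list; trans; trans = trans->next)
		printf("%i %s\n", (int) trans->pid, trans->caller);
	pthread_mutex_unlock(&trans_list_lock);
}

int main(void)
{
	struct debug_trans a, b;

	debug_trans_init(&a, "some_update_path");
	debug_trans_init(&b, "another_update_path");
	debug_trans_dump();
	return 0;
}

Listing the pid alongside the caller makes it possible to tell which task owns
a stuck transaction when diagnosing lock-ordering deadlocks like the one this
commit fixes.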