From 1bb9123301834fbeb81de9e52181ba71b06a011a Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 3 Sep 2022 22:24:16 -0400
Subject: bcachefs: Ensure intent locks are marked before taking write locks

Locks must be correctly marked for the cycle detector to work.

Signed-off-by: Kent Overstreet
---
 fs/bcachefs/btree_key_cache.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index 0f54db0c1b8a..977c523359a5 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -214,9 +214,13 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path)
 		return ERR_PTR(ret);
 	}

-	ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_write);
+	path->l[0].b = (void *) ck;
+	path->l[0].lock_seq = ck->c.lock.state.seq;
+	mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
+
+	ret = bch2_btree_node_lock_write(trans, path, &ck->c);
 	if (unlikely(ret)) {
-		six_unlock_intent(&ck->c.lock);
+		btree_node_unlock(trans, path, 0);
 		bkey_cached_move_to_freelist(bc, ck);
 		return ERR_PTR(ret);
 	}
@@ -285,6 +289,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
 			return ERR_PTR(-ENOMEM);
 		}

+		mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
 		was_new = false;
 	} else {
 		if (path->btree_id == BTREE_ID_subvolumes)
@@ -311,6 +316,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
 		bkey_cached_free_fast(bc, ck);
 	}

+	mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
 	return NULL;
 }
--
cgit
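
Context for the change (not part of the patch): the deadlock cycle detector can only reason about locks a transaction has recorded in its btree paths, so a write lock taken on a node whose intent lock was never marked is effectively invisible to it. The sketch below is a minimal, self-contained userspace model of the "record and mark first, then take the write lock, and unwind through the path on failure" ordering that the patch establishes. It is not the bcachefs API; every name here (toy_trans, toy_node, toy_upgrade_to_write, ...) is hypothetical and exists only to illustrate the ordering.

/*
 * Simplified, single-threaded model of "mark the lock before taking it".
 * All types and names are hypothetical illustrations, not the bcachefs API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum toy_lock_type { TOY_UNLOCKED, TOY_INTENT, TOY_WRITE };

struct toy_node {
	enum toy_lock_type held;	/* what is actually held on the node */
};

struct toy_path_level {
	struct toy_node *node;
	enum toy_lock_type marked;	/* what the cycle detector will see */
};

struct toy_trans {
	struct toy_path_level l[1];
};

/* A stand-in cycle detector: it only trusts the marks, never the raw locks. */
static void toy_print_visible(struct toy_trans *trans)
{
	printf("detector sees: %s\n",
	       trans->l[0].marked == TOY_INTENT ? "intent" :
	       trans->l[0].marked == TOY_WRITE  ? "write"  : "nothing");
}

/* Pretend the write lock can fail (e.g. taking it would deadlock). */
static int toy_lock_write(struct toy_node *node, bool fail)
{
	if (fail)
		return -1;
	node->held = TOY_WRITE;
	return 0;
}

static int toy_upgrade_to_write(struct toy_trans *trans, struct toy_node *node,
				bool simulate_failure)
{
	/* 1) Record the node and mark the intent lock in the path first... */
	trans->l[0].node   = node;
	trans->l[0].marked = TOY_INTENT;

	/* 2) ...so the lock is visible to the detector before we block here. */
	if (toy_lock_write(node, simulate_failure)) {
		/* 3) On failure, unwind through the path so the marks stay honest. */
		trans->l[0].marked = TOY_UNLOCKED;
		trans->l[0].node   = NULL;
		node->held = TOY_UNLOCKED;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_node n = { .held = TOY_INTENT };	/* intent already taken */
	struct toy_trans trans = { .l = { { NULL, TOY_UNLOCKED } } };

	toy_upgrade_to_write(&trans, &n, false);
	toy_print_visible(&trans);	/* prints "intent": marked before the write lock */

	return 0;
}

The same idea shows up in the patch's error path: instead of dropping the six lock directly (six_unlock_intent), it unlocks through the path (btree_node_unlock), so the recorded marks and the locks actually held never drift apart.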