| | | |
|---|---|---|
| author | Kent Overstreet <kent.overstreet@gmail.com> | 2021-04-16 14:29:26 -0400 |
| committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:09:00 -0400 |
| commit | 5e6a668b19614c44819c4b0f7691da92df973384 (patch) | |
| tree | 371eef2e556c9cd10161185a9456103e883da372 /fs | |
| parent | 96f399d0eed9a63e706c045407675622f32f5a5d (diff) | |
bcachefs: Fix transaction restarts due to upgrading of cloned iterators
This fixes a regression from
52d86202fd ("bcachefs: Improve bch2_btree_iter_traverse_all()")

We want to avoid mucking with other iterators in the btree transaction
in operations that are only supposed to touch individual iterators -
that patch was a cleanup that moved lock ordering handling into
bch2_btree_iter_traverse_all(). But it broke upgrading of cloned
iterators: on transaction restart, changes made to a clone - such as a
raised locks_want - are not preserved, so the restarted transaction
never takes the stronger locks it needs.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
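The failure mode is easiest to see with a toy model. Below is a minimal standalone C sketch - invented names (`model_iter`, `model_clone`), not the bcachefs API - showing why recording an upgrade only on a clone is insufficient: cloning copies the iterator by value, and the clone is discarded on restart.

```c
#include <stdio.h>

/* Toy stand-ins for btree_iter/btree_trans, reduced to what matters */
struct model_iter {
	int		btree_id;
	unsigned	locks_want;	/* how many btree levels we want locked */
};

struct model_trans {
	struct model_iter	iters[8];
	unsigned		nr_iters;
};

/* Clone by value, as copying an iterator conceptually does */
static struct model_iter *model_clone(struct model_trans *trans,
				      struct model_iter *src)
{
	struct model_iter *dst = &trans->iters[trans->nr_iters++];

	*dst = *src;
	return dst;
}

int main(void)
{
	struct model_trans trans = { 0 };
	struct model_iter *orig = &trans.iters[trans.nr_iters++];
	struct model_iter *clone;

	*orig = (struct model_iter) { .btree_id = 1, .locks_want = 1 };
	clone = model_clone(&trans, orig);

	/*
	 * Pre-fix behaviour: a failed upgrade records the new locks_want
	 * only on the iterator it was called on. If that iterator is a
	 * clone, the parent still wants the old, weaker locks - and the
	 * clone is thrown away on restart, so the next traversal retakes
	 * the same insufficient locks and the upgrade fails again:
	 * repeated transaction restarts.
	 */
	clone->locks_want = 2;

	printf("after restart, surviving iterator wants %u (needed 2)\n",
	       orig->locks_want);
	return 0;
}
```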
Diffstat (limited to 'fs')
-rw-r--r-- | fs/bcachefs/btree_iter.c | 35 |
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index f82976aab7d9..11f7b47e3e7f 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -387,11 +387,44 @@ bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
 			       unsigned new_locks_want)
 {
+	struct btree_iter *linked;
+
 	EBUG_ON(iter->locks_want >= new_locks_want);
 
 	iter->locks_want = new_locks_want;
 
-	return btree_iter_get_locks(iter, true, true);
+	if (btree_iter_get_locks(iter, true, true))
+		return true;
+
+	/*
+	 * XXX: this is ugly - we'd prefer to not be mucking with other
+	 * iterators in the btree_trans here.
+	 *
+	 * On failure to upgrade the iterator, setting iter->locks_want and
+	 * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
+	 * get the locks we want on transaction restart.
+	 *
+	 * But if this iterator was a clone, on transaction restart what we did
+	 * to this iterator isn't going to be preserved.
+	 *
+	 * Possibly we could add an iterator field for the parent iterator when
+	 * an iterator is a copy - for now, we'll just upgrade any other
+	 * iterators with the same btree id.
+	 *
+	 * The code below used to be needed to ensure ancestor nodes get locked
+	 * before interior nodes - now that's handled by
+	 * bch2_btree_iter_traverse_all().
+	 */
+	trans_for_each_iter(iter->trans, linked)
+		if (linked != iter &&
+		    btree_iter_type(linked) == btree_iter_type(iter) &&
+		    linked->btree_id == iter->btree_id &&
+		    linked->locks_want < new_locks_want) {
+			linked->locks_want = new_locks_want;
+			btree_iter_get_locks(linked, true, false);
+		}
+
+	return false;
 }
 
 void __bch2_btree_iter_downgrade(struct btree_iter *iter,
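To see what the new loop buys us, here is the same toy model extended with the patch's propagation step - again hypothetical names, with btree_iter_type() and the real lock-taking elided. On upgrade failure, every other iterator over the same btree records the stronger locks_want, so whichever iterator survives the restart re-traverses with the locks we need.

```c
#include <stdbool.h>
#include <stdio.h>

struct model_iter {
	int		btree_id;
	unsigned	locks_want;
};

struct model_trans {
	struct model_iter	iters[8];
	unsigned		nr_iters;
};

/* Simplified analogue of trans_for_each_iter() */
#define model_for_each_iter(_trans, _iter)				\
	for ((_iter) = (_trans)->iters;					\
	     (_iter) < (_trans)->iters + (_trans)->nr_iters;		\
	     (_iter)++)

/* Stand-in for btree_iter_get_locks(); pretend we lost the lock race */
static bool model_get_locks(struct model_iter *iter)
{
	(void) iter;
	return false;
}

static bool model_upgrade(struct model_trans *trans,
			  struct model_iter *iter,
			  unsigned new_locks_want)
{
	struct model_iter *linked;

	iter->locks_want = new_locks_want;
	if (model_get_locks(iter))
		return true;

	/*
	 * The patch's propagation step: raise locks_want on every other
	 * iterator over the same btree, so the want survives a restart
	 * even if @iter is a clone that will be discarded.
	 */
	model_for_each_iter(trans, linked)
		if (linked != iter &&
		    linked->btree_id == iter->btree_id &&
		    linked->locks_want < new_locks_want)
			linked->locks_want = new_locks_want;

	return false;
}

int main(void)
{
	struct model_trans trans = { 0 };
	struct model_iter *orig = &trans.iters[trans.nr_iters++];
	struct model_iter *clone = &trans.iters[trans.nr_iters++];

	*orig = (struct model_iter) { .btree_id = 1, .locks_want = 1 };
	*clone = *orig;

	model_upgrade(&trans, clone, 2);
	printf("parent now wants %u - restart takes the right locks\n",
	       orig->locks_want);
	return 0;
}
```

Note the design trade-off the patch's XXX comment spells out: a cleaner fix would track each clone's parent iterator and upgrade just that one; upgrading every iterator with the same btree id is a conservative over-approximation that is simple and safe.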