author    Kent Overstreet <kent.overstreet@gmail.com>  2021-04-16 21:53:23 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-10-22 17:09:01 -0400
commit    6ad060b0eb45d2eaaa5411be042bd3b53900f992e (patch)
tree      3a97b7284aac84aa554a6cb845187396c5657676 /fs/bcachefs/alloc_background.c
parent    dac1525d9c0d6e69da561dbc2becdcd32230b907 (diff)
bcachefs: Allocator thread doesn't need gc_lock anymore
Even with runtime gc (which currently isn't supported), gc no longer
clears/recalculates the main set of bucket marks - it allocates and
calculates another set, updating the primary at the end.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/alloc_background.c')
-rw-r--r--  fs/bcachefs/alloc_background.c | 26
1 file changed, 5 insertions(+), 21 deletions(-)
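The locking change is safe because of how gc now publishes its results: it
accumulates bucket marks into a second, private set and only installs that
set as the primary once it is complete, so readers never observe a
half-recalculated state. A minimal userspace sketch of that
publish-at-the-end pattern follows; every name in it (bucket_marks,
live_marks, gc_recalc_marks) is invented for illustration, and where the
sketch simply drops the old pointer, real kernel code would free it only
after an RCU grace period.

/*
 * Illustrative sketch, not bcachefs code: gc recalculates into a fresh
 * set of marks and publishes it with one atomic pointer swap, so a
 * reader such as the allocator thread needs no lock against gc.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

struct bucket_marks {
        size_t   nr;
        unsigned sectors_used[];        /* one counter per bucket */
};

/* Readers see either the old set or the new one, never a mix: */
static _Atomic(struct bucket_marks *) live_marks;

/* Reader side: no gc_lock, just one atomic load. */
static unsigned bucket_sectors(size_t b)
{
        struct bucket_marks *m = atomic_load(&live_marks);

        return m && b < m->nr ? m->sectors_used[b] : 0;
}

/* gc side: calculate another set, update the primary at the end. */
static int gc_recalc_marks(size_t nr)
{
        struct bucket_marks *new, *old;

        new = calloc(1, sizeof(*new) + nr * sizeof(new->sectors_used[0]));
        if (!new)
                return -1;
        new->nr = nr;

        /* ... walk the btree, accumulating marks into @new ... */

        old = atomic_exchange(&live_marks, new);
        (void) old;     /* real code would free after an RCU grace period */
        return 0;
}

int main(void)
{
        gc_recalc_marks(128);
        return bucket_sectors(0);       /* freshly calculated marks are empty */
}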
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 6c1da7873295..84a560659413 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -209,7 +209,7 @@ void bch2_alloc_pack(struct bch_fs *c,
         bch2_alloc_pack_v2(dst, src);
 }
 
-static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
+static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
 {
         unsigned i, bytes = offsetof(struct bch_alloc, data);
@@ -229,7 +229,7 @@ const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
                 return "invalid device";
 
         /* allow for unknown fields */
-        if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
+        if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
                 return "incorrect value size";
 
         return NULL;
@@ -293,11 +293,8 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
 {
         int ret;
 
-        down_read(&c->gc_lock);
         ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc,
                                           NULL, bch2_alloc_read_fn);
-        up_read(&c->gc_lock);
-
         if (ret) {
                 bch_err(c, "error reading alloc info: %i", ret);
                 return ret;
@@ -475,10 +472,8 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
                 if (available)
                         break;
 
-                up_read(&c->gc_lock);
                 schedule();
                 try_to_freeze();
-                down_read(&c->gc_lock);
         }
 
         __set_current_state(TASK_RUNNING);
@@ -914,7 +909,6 @@ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
                !fifo_full(&ca->free_inc) &&
                ca->alloc_heap.used)
                 ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
-                                BTREE_INSERT_GC_LOCK_HELD|
                                 (!fifo_empty(&ca->free_inc)
                                  ? BTREE_INSERT_NOWAIT : 0));
@@ -1055,18 +1049,12 @@ static int bch2_allocator_thread(void *arg)
                 if (ret)
                         goto stop;
 
-                down_read(&c->gc_lock);
-
                 ret = bch2_invalidate_buckets(c, ca);
-                if (ret) {
-                        up_read(&c->gc_lock);
+                if (ret)
                         goto stop;
-                }
 
-                if (!fifo_empty(&ca->free_inc)) {
-                        up_read(&c->gc_lock);
+                if (!fifo_empty(&ca->free_inc))
                         continue;
-                }
 
                 pr_debug("free_inc now empty");
@@ -1104,14 +1092,10 @@ static int bch2_allocator_thread(void *arg)
                          * available so we don't spin:
                          */
                         ret = wait_buckets_available(c, ca);
-                        if (ret) {
-                                up_read(&c->gc_lock);
+                        if (ret)
                                 goto stop;
-                        }
                 }
 
-                up_read(&c->gc_lock);
-
                 pr_debug("%zu buckets to invalidate", nr);
 
                 /*