author		Kent Overstreet <kent.overstreet@gmail.com>	2020-07-11 16:28:54 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:08:42 -0400
commit		e6d1161530bcd632ad10b6aa0ad511abb146dbcc (patch)
tree		df05a7dd1dc962373bedc7aa4bd47ddd403ad7ac /fs/bcachefs/alloc_background.c
parent		f793bc15491c04481b3f12a10ff22a53cd126842 (diff)
bcachefs: Make copygc thread global
Per device copygc threads don't move data to different devices and they make fragmentation worse - they don't make much sense anymore.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
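The shape of the change: instead of driving one copygc PD controller per device, pd_controllers_update() now sums free space and reclaimable fragmentation over all member devices and feeds a single filesystem-wide controller. Below is a sketch of the function as it reads after the patch, assembled from the hunks in this diff; the opening container_of() line is inferred from the surrounding context lines, so treat this as a reading aid rather than verbatim final source.

/*
 * Reading aid, not verbatim source: pd_controllers_update() after this
 * patch, assembled from the hunks below.  Per-device stats are summed and
 * a single filesystem-wide controller is updated once per pass.
 */
static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);
	struct bch_dev *ca;
	s64 free = 0, fragmented = 0;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		/* free space in bytes across all devices */
		free += bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/* bytes of internal fragmentation reclaimable by copy GC */
		fragmented += max_t(s64, 0, (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_user] +
					stats.buckets[BCH_DATA_cached]) -
				  (stats.sectors[BCH_DATA_user] +
				   stats.sectors[BCH_DATA_cached])) << 9);
	}

	/* one controller on struct bch_fs, not one per struct bch_dev */
	bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}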
Diffstat (limited to 'fs/bcachefs/alloc_background.c')
-rw-r--r--	fs/bcachefs/alloc_background.c	20
1 file changed, 9 insertions, 11 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index b1a8192f2751..d80e1edf8c44 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -41,29 +41,26 @@ static void pd_controllers_update(struct work_struct *work)
 					struct bch_fs,
 					pd_controllers_update);
 	struct bch_dev *ca;
+	s64 free = 0, fragmented = 0;
 	unsigned i;

 	for_each_member_device(ca, c, i) {
 		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

-		u64 free = bucket_to_sector(ca,
+		free += bucket_to_sector(ca,
 				__dev_buckets_free(ca, stats)) << 9;
 		/*
 		 * Bytes of internal fragmentation, which can be
 		 * reclaimed by copy GC
 		 */
-		s64 fragmented = (bucket_to_sector(ca,
+		fragmented += max_t(s64, 0, (bucket_to_sector(ca,
 					stats.buckets[BCH_DATA_user] +
 					stats.buckets[BCH_DATA_cached]) -
 				  (stats.sectors[BCH_DATA_user] +
-				   stats.sectors[BCH_DATA_cached])) << 9;
-
-		fragmented = max(0LL, fragmented);
-
-		bch2_pd_controller_update(&ca->copygc_pd,
-					  free, fragmented, -1);
+				   stats.sectors[BCH_DATA_cached])) << 9);
 	}

+	bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
 	schedule_delayed_work(&c->pd_controllers_update,
 			      c->pd_controllers_update_seconds * HZ);
 }
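The fragmentation term above is the gap between the space tied up in user and cached buckets and the live sectors actually stored in them, clamped at zero and converted from 512-byte sectors to bytes by the << 9. A small standalone illustration of that arithmetic, using plain integers rather than the bcachefs types:

/* Illustration only: reclaimable fragmentation in bytes, given how many
 * sectors worth of buckets are allocated vs. how many hold live data. */
static inline long long fragmented_bytes(long long bucket_sectors,
					 long long live_sectors)
{
	long long frag_sectors = bucket_sectors - live_sectors;

	if (frag_sectors < 0)		/* nothing to reclaim */
		frag_sectors = 0;

	return frag_sectors << 9;	/* 512-byte sectors -> bytes */
}

For example, fragmented_bytes(1024, 1000) is 12288: 24 stranded sectors, or 12 KiB that copy GC could reclaim.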
@@ -1191,7 +1188,7 @@ stop:
 void bch2_recalc_capacity(struct bch_fs *c)
 {
 	struct bch_dev *ca;
-	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
+	u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
 	unsigned bucket_size_max = 0;
 	unsigned long ra_pages = 0;
 	unsigned i, j;
@@ -1234,7 +1231,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 		dev_reserve *= ca->mi.bucket_size;

-		ca->copygc_threshold = dev_reserve;
+		copygc_threshold += dev_reserve;

 		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
 					     ca->mi.first_bucket);
@@ -1253,6 +1250,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 	reserved_sectors = min(reserved_sectors, capacity);

+	c->copygc_threshold = copygc_threshold;
 	c->capacity = capacity - reserved_sectors;
 	c->bucket_size_max = bucket_size_max;
@@ -1312,7 +1310,7 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
 	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
 		bch2_writepoint_stop(c, ca, &c->write_points[i]);

-	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
+	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
 	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
 	bch2_writepoint_stop(c, ca, &c->btree_write_point);
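Taken together, the hunks move the copygc controller, threshold and write point off the per-device object and onto the filesystem object. The struct definitions themselves are not part of this diff, so the following is only an inferred sketch of the relocated state, with placeholder types standing in for the real bcachefs ones:

/*
 * Illustration only (not from this diff): the copygc state implied by the
 * c-> accesses above, now living on the filesystem rather than on each
 * device.  Field names match the diff; the placeholder types and exact
 * layout are assumptions, the real definitions live in the bcachefs headers.
 */
struct pd_controller_placeholder	{ int unused; };
struct write_point_placeholder		{ int unused; };

struct bch_fs_copygc_state_sketch {
	struct pd_controller_placeholder	copygc_pd;		/* was ca->copygc_pd */
	struct write_point_placeholder		copygc_write_point;	/* was ca->copygc_write_point */
	unsigned long long			copygc_threshold;	/* was ca->copygc_threshold */
};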