author     Kent Overstreet <kent.overstreet@gmail.com>    2018-07-21 22:57:20 -0400
committer  Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:08:08 -0400
commit     c6923995296e5f06a47aa36e684ef0eccd17adea (patch)
tree       d2d2dd9ad4fc9846b79b56544c7714e397f79579 /fs/bcachefs/buckets.c
parent     8bb4dff72d07f4f46e5627870a9614c4cee5a1bb (diff)
bcachefs: don't call bch2_bucket_seq_cleanup from journal_buf_switch
journal_buf_switch is called from the foreground when getting a journal reservation, and is therefore somewhat latency sensitive; bch2_bucket_seq_cleanup only has to run infrequently, but is a bit expensive when it does run. Call it from the journal write path instead, and punt the journal write to workqueue context.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
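The shape of the change, as a minimal sketch: the foreground path only queues work, and the expensive bucket-sequence cleanup runs later in the worker that performs the journal write. The struct and function names below are illustrative only, not the actual bcachefs ones, and the real code's work item and queueing differ.

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    /* Illustrative names only; the real bcachefs structures differ. */
    struct example_fs {
            struct work_struct      journal_write_work;     /* INIT_WORK()ed at fs init */
            /* ... journal state, member devices, buckets ... */
    };

    static void example_bucket_seq_cleanup(struct example_fs *c)
    {
            /* infrequent but somewhat expensive: walks every bucket */
    }

    /* Runs in workqueue (process) context, off the latency-sensitive path. */
    static void example_journal_write_work(struct work_struct *work)
    {
            struct example_fs *c =
                    container_of(work, struct example_fs, journal_write_work);

            example_bucket_seq_cleanup(c);
            /* ... then perform the actual journal write ... */
    }

    /* Foreground path, called while taking a journal reservation: just punt. */
    static void example_journal_buf_switch(struct example_fs *c)
    {
            queue_work(system_long_wq, &c->journal_write_work);
    }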
Diffstat (limited to 'fs/bcachefs/buckets.c')
-rw-r--r--   fs/bcachefs/buckets.c   7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index f347c93e0c6e..4a910f773953 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -147,6 +147,7 @@ static void bch2_disk_reservations_verify(struct bch_fs *c, int flags) {}
  */
 void bch2_bucket_seq_cleanup(struct bch_fs *c)
 {
+	u64 journal_seq = atomic64_read(&c->journal.seq);
 	u16 last_seq_ondisk = c->journal.last_seq_ondisk;
 	struct bch_dev *ca;
 	struct bucket_array *buckets;
@@ -154,6 +155,12 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c)
 	struct bucket_mark m;
 	unsigned i;
 
+	if (journal_seq - c->last_bucket_seq_cleanup <
+	    (1U << (BUCKET_JOURNAL_SEQ_BITS - 2)))
+		return;
+
+	c->last_bucket_seq_cleanup = journal_seq;
+
 	for_each_member_device(ca, c, i) {
 		down_read(&ca->bucket_lock);
 		buckets = bucket_array(ca);
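The added guard makes bch2_bucket_seq_cleanup self-throttling: callers can invoke it opportunistically, and it only does real work once the 64-bit journal sequence has advanced by a quarter of the range of the truncated per-bucket sequence copies (note the u16 last_seq_ondisk in the context above). A standalone sketch of the same rate-limiting pattern, with illustrative names and an assumed 16-bit width standing in for the real BUCKET_JOURNAL_SEQ_BITS value:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative: EXAMPLE_SEQ_BITS stands in for BUCKET_JOURNAL_SEQ_BITS;
     * 16 bits is an assumption based on the u16 fields in the code above. */
    #define EXAMPLE_SEQ_BITS        16

    struct example_state {
            uint64_t last_cleanup_seq;
    };

    /* Run the (expensive) cleanup only when the 64-bit journal sequence has
     * advanced by at least a quarter of the truncated sequence range since
     * the last run; otherwise return immediately, keeping the call cheap. */
    static bool example_maybe_cleanup(struct example_state *s, uint64_t journal_seq)
    {
            if (journal_seq - s->last_cleanup_seq <
                (1U << (EXAMPLE_SEQ_BITS - 2)))
                    return false;

            s->last_cleanup_seq = journal_seq;
            /* ... walk all buckets and clear stale journal sequence numbers ... */
            return true;
    }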