From ae05eb117108428895e75a30e7fe3dbf1016cf93 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Thu, 22 Feb 2024 12:30:44 -0800
Subject: xfs: speed up xfs_iwalk_adjust_start a little bit

Replace the open-coded loop that recomputes freecount with a single call
to a bit weight function.

Signed-off-by: Darrick J. Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/xfs_iwalk.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

diff --git a/fs/xfs/xfs_iwalk.c b/fs/xfs/xfs_iwalk.c
index 6d2eb6364867..c1e9c7bcb6a9 100644
--- a/fs/xfs/xfs_iwalk.c
+++ b/fs/xfs/xfs_iwalk.c
@@ -22,6 +22,7 @@
 #include "xfs_trans.h"
 #include "xfs_pwork.h"
 #include "xfs_ag.h"
+#include "xfs_bit.h"
 
 /*
  * Walking Inodes in the Filesystem
@@ -131,21 +132,11 @@ xfs_iwalk_adjust_start(
 	struct xfs_inobt_rec_incore	*irec)	/* btree record */
 {
 	int				idx;	/* index into inode chunk */
-	int				i;
 
 	idx = agino - irec->ir_startino;
 
-	/*
-	 * We got a right chunk with some left inodes allocated at it. Grab
-	 * the chunk record. Mark all the uninteresting inodes free because
-	 * they're before our start point.
-	 */
-	for (i = 0; i < idx; i++) {
-		if (XFS_INOBT_MASK(i) & ~irec->ir_free)
-			irec->ir_freecount++;
-	}
-
 	irec->ir_free |= xfs_inobt_maskn(0, idx);
+	irec->ir_freecount = hweight64(irec->ir_free);
 }
 
 /* Allocate memory for a walk. */
--
cgit

From 8660c7b74aeab67210064a8dc756960b2adfd613 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Thu, 22 Feb 2024 12:30:45 -0800
Subject: xfs: implement live inode scan for scrub

This patch implements a live file scanner for online fsck functions that
require the ability to walk a filesystem to gather metadata records and
stay informed about metadata changes to files that have already been
visited.

The iscan structure consists of two inode number cursors: one to track
which inode we want to visit next, and a second one to track which inodes
have already been visited.  This second cursor is key to capturing live
updates to files previously scanned while the main thread continues
scanning -- any inode greater than this value hasn't been scanned and can
go on its way; any other update must be incorporated into the collected
data.  It is critical for the scanning thread to hold exclusive access on
the inode until after marking the inode visited.

This new code is a separate patch from the patchsets adding callers for
the sake of enabling the author to move patches around his tree with
ease.

The intended usage model for this code is roughly:

	xchk_iscan_start(iscan, 0, 0);
	while ((error = xchk_iscan_iter(sc, iscan, &ip)) == 1) {
		xfs_ilock(ip, ...);
		/* capture inode metadata */
		xchk_iscan_mark_visited(iscan, ip);
		xfs_iunlock(ip, ...);
		xfs_irele(ip);
	}
	xchk_iscan_stop(iscan);
	if (error)
		return error;

Hook functions for live updates can then do:

	if (xchk_iscan_want_live_update(...))
		/* update the captured inode metadata */
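As an illustration, a live update hook for a hypothetical shadow-count
scan might take the following shape.  This is only a sketch: the xchk_foo
names and the shadow_lock are stand-ins for whatever shadow data
structure the scan caller maintains, not symbols added by this patch:

	/*
	 * Called from the writer thread, which already holds an exclusive
	 * lock on the inode it is modifying.
	 */
	if (xchk_iscan_want_live_update(&foo->iscan, ip->i_ino)) {
		mutex_lock(&foo->shadow_lock);
		xchk_foo_update_shadow(foo, ip);
		mutex_unlock(&foo->shadow_lock);
	}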
Signed-off-by: Darrick J. Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/Makefile      |   1 +
 fs/xfs/scrub/iscan.c | 485 +++++++++++++++++++++++++++++++++++++++++++++++++++
 fs/xfs/scrub/iscan.h |  63 +++++++
 fs/xfs/scrub/trace.c |   1 +
 fs/xfs/scrub/trace.h | 106 +++++++++++
 5 files changed, 656 insertions(+)
 create mode 100644 fs/xfs/scrub/iscan.c
 create mode 100644 fs/xfs/scrub/iscan.h

diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 35a23427055b..17af5bde5308 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -158,6 +158,7 @@ xfs-y += $(addprefix scrub/, \
 				   health.o \
 				   ialloc.o \
 				   inode.o \
+				   iscan.o \
 				   parent.o \
 				   readdir.o \
 				   refcount.o \
diff --git a/fs/xfs/scrub/iscan.c b/fs/xfs/scrub/iscan.c
new file mode 100644
index 000000000000..d13fc3b60f2e
--- /dev/null
+++ b/fs/xfs/scrub/iscan.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
+ * Author: Darrick J. Wong
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_log_format.h"
+#include "xfs_trans.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_ag.h"
+#include "xfs_error.h"
+#include "xfs_bit.h"
+#include "xfs_icache.h"
+#include "scrub/scrub.h"
+#include "scrub/iscan.h"
+#include "scrub/common.h"
+#include "scrub/trace.h"
+
+/*
+ * Live File Scan
+ * ==============
+ *
+ * Live file scans walk every inode in a live filesystem.  This is more or
+ * less like a regular iwalk, except that when we're advancing the scan
+ * cursor, we must ensure that inodes cannot be added or deleted anywhere
+ * between the old cursor value and the new cursor value.  If we're advancing
+ * the cursor by one inode, the caller must hold that inode; if we're finding
+ * the next inode to scan, we must grab the AGI and hold it until we've
+ * updated the scan cursor.
+ *
+ * Callers are expected to use this code to scan all files in the filesystem
+ * to construct a new metadata index of some kind.  The scan races against
+ * other live updates, which means there must be a provision to update the
+ * new index when updates are made to inodes that have already been scanned.
+ * The iscan lock can be used in live update hook code to stop the scan and
+ * protect this data structure.
+ *
+ * To keep the new index up to date with other metadata updates being made
+ * to the live filesystem, it is assumed that the caller will add hooks as
+ * needed to be notified when a metadata update occurs.  The inode scanner
+ * must tell the hook code when an inode has been visited with
+ * xchk_iscan_mark_visited.  Hook functions can use
+ * xchk_iscan_want_live_update to decide if the scanner's observations must
+ * be updated.
+ */
+
+/*
+ * Set *cursor to the next allocated inode after whatever it's set to now.
+ * If there are no more inodes in this AG, cursor is set to NULLAGINO.
+ */
+STATIC int
+xchk_iscan_find_next(
+	struct xchk_iscan	*iscan,
+	struct xfs_buf		*agi_bp,
+	struct xfs_perag	*pag,
+	xfs_agino_t		*cursor)
+{
+	struct xfs_scrub	*sc = iscan->sc;
+	struct xfs_inobt_rec_incore	rec;
+	struct xfs_btree_cur	*cur;
+	struct xfs_mount	*mp = sc->mp;
+	struct xfs_trans	*tp = sc->tp;
+	xfs_agnumber_t		agno = pag->pag_agno;
+	xfs_agino_t		lastino = NULLAGINO;
+	xfs_agino_t		first, last;
+	xfs_agino_t		agino = *cursor;
+	int			has_rec;
+	int			error;
+
+	/* If the cursor is beyond the end of this AG, move to the next one.
*/ + xfs_agino_range(mp, agno, &first, &last); + if (agino > last) { + *cursor = NULLAGINO; + return 0; + } + + /* + * Look up the inode chunk for the current cursor position. If there + * is no chunk here, we want the next one. + */ + cur = xfs_inobt_init_cursor(pag, tp, agi_bp, XFS_BTNUM_INO); + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_rec); + if (!error && !has_rec) + error = xfs_btree_increment(cur, 0, &has_rec); + for (; !error; error = xfs_btree_increment(cur, 0, &has_rec)) { + xfs_inofree_t allocmask; + + /* + * If we've run out of inobt records in this AG, move the + * cursor on to the next AG and exit. The caller can try + * again with the next AG. + */ + if (!has_rec) { + *cursor = NULLAGINO; + break; + } + + error = xfs_inobt_get_rec(cur, &rec, &has_rec); + if (error) + break; + if (!has_rec) { + error = -EFSCORRUPTED; + break; + } + + /* Make sure that we always move forward. */ + if (lastino != NULLAGINO && + XFS_IS_CORRUPT(mp, lastino >= rec.ir_startino)) { + error = -EFSCORRUPTED; + break; + } + lastino = rec.ir_startino + XFS_INODES_PER_CHUNK - 1; + + /* + * If this record only covers inodes that come before the + * cursor, advance to the next record. + */ + if (rec.ir_startino + XFS_INODES_PER_CHUNK <= agino) + continue; + + /* + * If the incoming lookup put us in the middle of an inobt + * record, mark it and the previous inodes "free" so that the + * search for allocated inodes will start at the cursor. + * We don't care about ir_freecount here. + */ + if (agino >= rec.ir_startino) + rec.ir_free |= xfs_inobt_maskn(0, + agino + 1 - rec.ir_startino); + + /* + * If there are allocated inodes in this chunk, find them + * and update the scan cursor. + */ + allocmask = ~rec.ir_free; + if (hweight64(allocmask) > 0) { + int next = xfs_lowbit64(allocmask); + + ASSERT(next >= 0); + *cursor = rec.ir_startino + next; + break; + } + } + + xfs_btree_del_cursor(cur, error); + return error; +} + +/* + * Advance both the scan and the visited cursors. + * + * The inumber address space for a given filesystem is sparse, which means that + * the scan cursor can jump a long ways in a single iter() call. There are no + * inodes in these sparse areas, so we must move the visited cursor forward at + * the same time so that the scan user can receive live updates for inodes that + * may get created once we release the AGI buffer. + */ +static inline void +xchk_iscan_move_cursor( + struct xchk_iscan *iscan, + xfs_agnumber_t agno, + xfs_agino_t agino) +{ + struct xfs_scrub *sc = iscan->sc; + struct xfs_mount *mp = sc->mp; + + mutex_lock(&iscan->lock); + iscan->cursor_ino = XFS_AGINO_TO_INO(mp, agno, agino); + iscan->__visited_ino = iscan->cursor_ino - 1; + trace_xchk_iscan_move_cursor(iscan); + mutex_unlock(&iscan->lock); +} + +/* + * Prepare to return agno/agino to the iscan caller by moving the lastino + * cursor to the previous inode. Do this while we still hold the AGI so that + * no other threads can create or delete inodes in this AG. + */ +static inline void +xchk_iscan_finish( + struct xchk_iscan *iscan) +{ + mutex_lock(&iscan->lock); + iscan->cursor_ino = NULLFSINO; + + /* All live updates will be applied from now on */ + iscan->__visited_ino = NULLFSINO; + + mutex_unlock(&iscan->lock); +} + +/* + * Advance ino to the next inode that the inobt thinks is allocated, being + * careful to jump to the next AG if we've reached the right end of this AG's + * inode btree. 
Advancing ino effectively means that we've pushed the inode + * scan forward, so set the iscan cursor to (ino - 1) so that our live update + * predicates will track inode allocations in that part of the inode number + * key space once we release the AGI buffer. + * + * Returns 1 if there's a new inode to examine, 0 if we've run out of inodes, + * -ECANCELED if the live scan aborted, or the usual negative errno. + */ +STATIC int +xchk_iscan_advance( + struct xchk_iscan *iscan, + struct xfs_perag **pagp, + struct xfs_buf **agi_bpp) +{ + struct xfs_scrub *sc = iscan->sc; + struct xfs_mount *mp = sc->mp; + struct xfs_buf *agi_bp; + struct xfs_perag *pag; + xfs_agnumber_t agno; + xfs_agino_t agino; + int ret; + + ASSERT(iscan->cursor_ino >= iscan->__visited_ino); + + do { + if (xchk_iscan_aborted(iscan)) + return -ECANCELED; + + agno = XFS_INO_TO_AGNO(mp, iscan->cursor_ino); + pag = xfs_perag_get(mp, agno); + if (!pag) + return -ECANCELED; + + ret = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp); + if (ret) + goto out_pag; + + agino = XFS_INO_TO_AGINO(mp, iscan->cursor_ino); + ret = xchk_iscan_find_next(iscan, agi_bp, pag, &agino); + if (ret) + goto out_buf; + + if (agino != NULLAGINO) { + /* + * Found the next inode in this AG, so return it along + * with the AGI buffer and the perag structure to + * ensure it cannot go away. + */ + xchk_iscan_move_cursor(iscan, agno, agino); + *agi_bpp = agi_bp; + *pagp = pag; + return 1; + } + + /* + * Did not find any more inodes in this AG, move on to the next + * AG. + */ + xchk_iscan_move_cursor(iscan, ++agno, 0); + xfs_trans_brelse(sc->tp, agi_bp); + xfs_perag_put(pag); + + trace_xchk_iscan_advance_ag(iscan); + } while (agno < mp->m_sb.sb_agcount); + + xchk_iscan_finish(iscan); + return 0; + +out_buf: + xfs_trans_brelse(sc->tp, agi_bp); +out_pag: + xfs_perag_put(pag); + return ret; +} + +/* + * Grabbing the inode failed, so we need to back up the scan and ask the caller + * to try to _advance the scan again. Returns -EBUSY if we've run out of retry + * opportunities, -ECANCELED if the process has a fatal signal pending, or + * -EAGAIN if we should try again. + */ +STATIC int +xchk_iscan_iget_retry( + struct xchk_iscan *iscan, + bool wait) +{ + ASSERT(iscan->cursor_ino == iscan->__visited_ino + 1); + + if (!iscan->iget_timeout || + time_is_before_jiffies(iscan->__iget_deadline)) + return -EBUSY; + + if (wait) { + unsigned long relax; + + /* + * Sleep for a period of time to let the rest of the system + * catch up. If we return early, someone sent a kill signal to + * the calling process. + */ + relax = msecs_to_jiffies(iscan->iget_retry_delay); + trace_xchk_iscan_iget_retry_wait(iscan); + + if (schedule_timeout_killable(relax) || + xchk_iscan_aborted(iscan)) + return -ECANCELED; + } + + iscan->cursor_ino--; + return -EAGAIN; +} + +/* + * Grab an inode as part of an inode scan. While scanning this inode, the + * caller must ensure that no other threads can modify the inode until a call + * to xchk_iscan_visit succeeds. + * + * Returns 0 and an incore inode; -EAGAIN if the caller should call again + * xchk_iscan_advance; -EBUSY if we couldn't grab an inode; -ECANCELED if + * there's a fatal signal pending; or some other negative errno. 
+ */
+STATIC int
+xchk_iscan_iget(
+	struct xchk_iscan	*iscan,
+	struct xfs_perag	*pag,
+	struct xfs_buf		*agi_bp,
+	struct xfs_inode	**ipp)
+{
+	struct xfs_scrub	*sc = iscan->sc;
+	struct xfs_mount	*mp = sc->mp;
+	int			error;
+
+	error = xfs_iget(sc->mp, sc->tp, iscan->cursor_ino, XFS_IGET_NORETRY,
+			0, ipp);
+	xfs_trans_brelse(sc->tp, agi_bp);
+	xfs_perag_put(pag);
+
+	trace_xchk_iscan_iget(iscan, error);
+
+	if (error == -ENOENT || error == -EAGAIN) {
+		/*
+		 * It's possible that this inode has lost all of its links but
+		 * hasn't yet been inactivated.  If we don't have a transaction
+		 * or it's not writable, flush the inodegc workers and wait.
+		 */
+		xfs_inodegc_flush(mp);
+		return xchk_iscan_iget_retry(iscan, true);
+	}
+
+	if (error == -EINVAL) {
+		/*
+		 * We thought the inode was allocated, but the inode btree
+		 * lookup failed, which means that it was freed since the last
+		 * time we advanced the cursor.  Back up and try again.  This
+		 * should never happen since we still hold the AGI buffer from
+		 * the inobt check, but we need to be careful about infinite
+		 * loops.
+		 */
+		return xchk_iscan_iget_retry(iscan, false);
+	}
+
+	return error;
+}
+
+/*
+ * Advance the inode scan cursor to the next allocated inode and return the
+ * incore inode structure associated with it.
+ *
+ * Returns 1 if there's a new inode to examine, 0 if we've run out of inodes,
+ * -ECANCELED if the live scan aborted, -EBUSY if the incore inode could not
+ * be grabbed, or the usual negative errno.
+ *
+ * If the function returns -EBUSY and the caller can handle skipping an
+ * inode, it may call this function again to continue the scan with the next
+ * allocated inode.
+ */
+int
+xchk_iscan_iter(
+	struct xchk_iscan	*iscan,
+	struct xfs_inode	**ipp)
+{
+	struct xfs_scrub	*sc = iscan->sc;
+	int			ret;
+
+	if (iscan->iget_timeout)
+		iscan->__iget_deadline = jiffies +
+				msecs_to_jiffies(iscan->iget_timeout);
+
+	do {
+		struct xfs_buf	*agi_bp = NULL;
+		struct xfs_perag *pag = NULL;
+
+		ret = xchk_iscan_advance(iscan, &pag, &agi_bp);
+		if (ret != 1)
+			return ret;
+
+		if (xchk_iscan_aborted(iscan)) {
+			xfs_trans_brelse(sc->tp, agi_bp);
+			xfs_perag_put(pag);
+			ret = -ECANCELED;
+			break;
+		}
+
+		ret = xchk_iscan_iget(iscan, pag, agi_bp, ipp);
+	} while (ret == -EAGAIN);
+
+	if (!ret)
+		return 1;
+
+	return ret;
+}
+
+
+/* Mark this inode scan finished and release resources. */
+void
+xchk_iscan_teardown(
+	struct xchk_iscan	*iscan)
+{
+	xchk_iscan_finish(iscan);
+	mutex_destroy(&iscan->lock);
+}
+
+/*
+ * Set ourselves up to start an inode scan.  If the @iget_timeout and
+ * @iget_retry_delay parameters are set, the scan will try to iget each inode
+ * for @iget_timeout milliseconds.  If an iget call indicates that the inode
+ * is waiting to be inactivated, the CPU will relax for @iget_retry_delay
+ * milliseconds after pushing the inactivation workers.
+ */
+void
+xchk_iscan_start(
+	struct xfs_scrub	*sc,
+	unsigned int		iget_timeout,
+	unsigned int		iget_retry_delay,
+	struct xchk_iscan	*iscan)
+{
+	iscan->sc = sc;
+	clear_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
+	iscan->iget_timeout = iget_timeout;
+	iscan->iget_retry_delay = iget_retry_delay;
+	iscan->__visited_ino = 0;
+	iscan->cursor_ino = 0;
+	mutex_init(&iscan->lock);
+
+	trace_xchk_iscan_start(iscan);
+}
+
+/*
+ * Mark this inode as having been visited.  Callers must hold a sufficiently
+ * exclusive lock on the inode to prevent concurrent modifications.
+ */ +void +xchk_iscan_mark_visited( + struct xchk_iscan *iscan, + struct xfs_inode *ip) +{ + mutex_lock(&iscan->lock); + iscan->__visited_ino = ip->i_ino; + trace_xchk_iscan_visit(iscan); + mutex_unlock(&iscan->lock); +} + +/* + * Do we need a live update for this inode? This is true if the scanner thread + * has visited this inode and the scan hasn't been aborted due to errors. + * Callers must hold a sufficiently exclusive lock on the inode to prevent + * scanners from reading any inode metadata. + */ +bool +xchk_iscan_want_live_update( + struct xchk_iscan *iscan, + xfs_ino_t ino) +{ + bool ret; + + if (xchk_iscan_aborted(iscan)) + return false; + + mutex_lock(&iscan->lock); + trace_xchk_iscan_want_live_update(iscan, ino); + ret = iscan->__visited_ino >= ino; + mutex_unlock(&iscan->lock); + + return ret; +} diff --git a/fs/xfs/scrub/iscan.h b/fs/xfs/scrub/iscan.h new file mode 100644 index 000000000000..c25f121859ce --- /dev/null +++ b/fs/xfs/scrub/iscan.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef __XFS_SCRUB_ISCAN_H__ +#define __XFS_SCRUB_ISCAN_H__ + +struct xchk_iscan { + struct xfs_scrub *sc; + + /* Lock to protect the scan cursor. */ + struct mutex lock; + + /* This is the inode that will be examined next. */ + xfs_ino_t cursor_ino; + + /* + * This is the last inode that we've successfully scanned, either + * because the caller scanned it, or we moved the cursor past an empty + * part of the inode address space. Scan callers should only use the + * xchk_iscan_visit function to modify this. + */ + xfs_ino_t __visited_ino; + + /* Operational state of the livescan. */ + unsigned long __opstate; + + /* Give up on iterating @cursor_ino if we can't iget it by this time. */ + unsigned long __iget_deadline; + + /* Amount of time (in ms) that we will try to iget an inode. */ + unsigned int iget_timeout; + + /* Wait this many ms to retry an iget. */ + unsigned int iget_retry_delay; +}; + +/* Set if the scan has been aborted due to some event in the fs. */ +#define XCHK_ISCAN_OPSTATE_ABORTED (1) + +static inline bool +xchk_iscan_aborted(const struct xchk_iscan *iscan) +{ + return test_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate); +} + +static inline void +xchk_iscan_abort(struct xchk_iscan *iscan) +{ + set_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate); +} + +void xchk_iscan_start(struct xfs_scrub *sc, unsigned int iget_timeout, + unsigned int iget_retry_delay, struct xchk_iscan *iscan); +void xchk_iscan_teardown(struct xchk_iscan *iscan); + +int xchk_iscan_iter(struct xchk_iscan *iscan, struct xfs_inode **ipp); + +void xchk_iscan_mark_visited(struct xchk_iscan *iscan, struct xfs_inode *ip); +bool xchk_iscan_want_live_update(struct xchk_iscan *iscan, xfs_ino_t ino); + +#endif /* __XFS_SCRUB_ISCAN_H__ */ diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c index d0e24ffaf754..4542eeebab6f 100644 --- a/fs/xfs/scrub/trace.c +++ b/fs/xfs/scrub/trace.c @@ -20,6 +20,7 @@ #include "scrub/xfile.h" #include "scrub/xfarray.h" #include "scrub/quota.h" +#include "scrub/iscan.h" /* Figure out which block the btree cursor was pointing to. 
*/ static inline xfs_fsblock_t diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index ae6b2385a8cb..29026d1d9293 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -16,10 +16,12 @@ #include #include "xfs_bit.h" +struct xfs_scrub; struct xfile; struct xfarray; struct xfarray_sortinfo; struct xchk_dqiter; +struct xchk_iscan; /* * ftrace's __print_symbolic requires that all enum values be wrapped in the @@ -1146,6 +1148,110 @@ TRACE_EVENT(xchk_rtsum_record_free, ); #endif /* CONFIG_XFS_RT */ +DECLARE_EVENT_CLASS(xchk_iscan_class, + TP_PROTO(struct xchk_iscan *iscan), + TP_ARGS(iscan), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, cursor) + __field(xfs_ino_t, visited) + ), + TP_fast_assign( + __entry->dev = iscan->sc->mp->m_super->s_dev; + __entry->cursor = iscan->cursor_ino; + __entry->visited = iscan->__visited_ino; + ), + TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->cursor, + __entry->visited) +) +#define DEFINE_ISCAN_EVENT(name) \ +DEFINE_EVENT(xchk_iscan_class, name, \ + TP_PROTO(struct xchk_iscan *iscan), \ + TP_ARGS(iscan)) +DEFINE_ISCAN_EVENT(xchk_iscan_move_cursor); +DEFINE_ISCAN_EVENT(xchk_iscan_visit); +DEFINE_ISCAN_EVENT(xchk_iscan_advance_ag); +DEFINE_ISCAN_EVENT(xchk_iscan_start); + +DECLARE_EVENT_CLASS(xchk_iscan_ino_class, + TP_PROTO(struct xchk_iscan *iscan, xfs_ino_t ino), + TP_ARGS(iscan, ino), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, cursor) + __field(xfs_ino_t, visited) + __field(xfs_ino_t, ino) + ), + TP_fast_assign( + __entry->dev = iscan->sc->mp->m_super->s_dev; + __entry->cursor = iscan->cursor_ino; + __entry->visited = iscan->__visited_ino; + __entry->ino = ino; + ), + TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx ino 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->cursor, + __entry->visited, + __entry->ino) +) +#define DEFINE_ISCAN_INO_EVENT(name) \ +DEFINE_EVENT(xchk_iscan_ino_class, name, \ + TP_PROTO(struct xchk_iscan *iscan, xfs_ino_t ino), \ + TP_ARGS(iscan, ino)) +DEFINE_ISCAN_INO_EVENT(xchk_iscan_want_live_update); + +TRACE_EVENT(xchk_iscan_iget, + TP_PROTO(struct xchk_iscan *iscan, int error), + TP_ARGS(iscan, error), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, cursor) + __field(xfs_ino_t, visited) + __field(int, error) + ), + TP_fast_assign( + __entry->dev = iscan->sc->mp->m_super->s_dev; + __entry->cursor = iscan->cursor_ino; + __entry->visited = iscan->__visited_ino; + __entry->error = error; + ), + TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx error %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->cursor, + __entry->visited, + __entry->error) +); + +TRACE_EVENT(xchk_iscan_iget_retry_wait, + TP_PROTO(struct xchk_iscan *iscan), + TP_ARGS(iscan), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, cursor) + __field(xfs_ino_t, visited) + __field(unsigned int, retry_delay) + __field(unsigned long, remaining) + __field(unsigned int, iget_timeout) + ), + TP_fast_assign( + __entry->dev = iscan->sc->mp->m_super->s_dev; + __entry->cursor = iscan->cursor_ino; + __entry->visited = iscan->__visited_ino; + __entry->retry_delay = iscan->iget_retry_delay; + __entry->remaining = jiffies_to_msecs(iscan->__iget_deadline - jiffies); + __entry->iget_timeout = iscan->iget_timeout; + ), + TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx remaining %lu timeout %u delay %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->cursor, + __entry->visited, + 
__entry->remaining, + __entry->iget_timeout, + __entry->retry_delay) +); + /* repair tracepoints */ #if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) -- cgit From 4e98cc905c0fec337416e9fd7ca4f75607a6de99 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:30:45 -0800 Subject: xfs: allow scrub to hook metadata updates in other writers Certain types of filesystem metadata can only be checked by scanning every file in the entire filesystem. Specific examples of this include quota counts, file link counts, and reverse mappings of file extents. Directory and parent pointer reconstruction may also fall into this category. File scanning is much trickier than scanning AG metadata because we have to take inode locks in the same order as the rest of [VX]FS, we can't be holding buffer locks when we do that, and scanning the whole filesystem takes time. Earlier versions of the online repair patchset relied heavily on fsfreeze as a means to quiesce the filesystem so that we could take locks in the proper order without worrying about concurrent updates from other writers. Reviewers of those patches opined that freezing the entire fs to check and repair something was not sufficiently better than unmounting to run fsck offline. I don't agree with that 100%, but the message was clear: find a way to repair things that minimizes the quiet period where nobody can write to the filesystem. Generally, building btree indexes online can be split into two phases: a collection phase where we compute the records that will be put into the new btree; and a construction phase, where we construct the physical btree blocks and persist them. While it's simple to hold resource locks for the entirety of the two phases to ensure that the new index is consistent with the rest of the system, we don't need to hold resource locks during the collection phase if we have a means to receive live updates of other work going on elsewhere in the system. The goal of this patch, then, is to enable online fsck to learn about metadata updates going on in other threads while it constructs a shadow copy of the metadata records to verify or correct the real metadata. To minimize the overhead when online fsck isn't running, we use srcu notifiers because they prioritize fast access to the notifier call chain (particularly when the chain is empty) at a cost to configuring notifiers. Online fsck should be relatively infrequent, so this is acceptable. The intended usage model is fairly simple. Code that modifies a metadata structure of interest should declare a xfs_hook_chain structure in some well defined place, and call xfs_hook_call whenever an update happens. Online fsck code should define a struct notifier_block and use xfs_hook_add to attach the block to the chain, along with a function to be called. This function should synchronize with the fsck scanner to update whatever in-memory data the scanner is collecting. When finished, xfs_hook_del removes the notifier from the list and waits for them all to complete. Originally, I selected srcu notifiers over blocking notifiers to implement live hooks because they seemed to have fewer impacts to scalability. The per-call cost of srcu_notifier_call_chain is higher (19ns) than blocking_notifier_ (4ns) in the single threaded case, but blocking notifiers use an rwsem to stabilize the list. Cacheline bouncing for that rwsem is costly to runtime code when there are a lot of CPUs running regular filesystem operations. If there are no hooks installed, this is a total waste of CPU time. 
Therefore, I stuck with srcu notifiers, despite trading off single threaded performance for multithreaded performance. I also wasn't thrilled with the very high teardown time for srcu notifiers, since the caller has to wait for the next rcu grace period. This can take a long time if there are a lot of CPUs. Then I discovered the jump label implementation of static keys. Jump labels use kernel code patching to replace a branch with a nop sled when the key is disabled. IOWs, they can eliminate the overhead of _call_chain when there are no hooks enabled. This makes blocking notifiers competitive again -- scrub runs faster because teardown of the chain is a lot cheaper, and runtime code only pays the rwsem locking overhead when scrub is actually running. With jump labels enabled, calls to empty notifier chains are elided from the call sites when there are no hooks registered, which means that the overhead is 0.36ns when fsck is not running. This is perfect for most of the architectures that XFS is expected to run on (e.g. x86, powerpc, arm64, s390x, riscv). For architectures that don't support jump labels (e.g. m68k) the runtime overhead of checking the static key is an atomic counter read. This isn't great, but it's still cheaper than taking a shared rwsem. Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig --- fs/xfs/Kconfig | 5 +++++ fs/xfs/Makefile | 1 + fs/xfs/xfs_hooks.c | 52 +++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_hooks.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_linux.h | 1 + 5 files changed, 124 insertions(+) create mode 100644 fs/xfs/xfs_hooks.c create mode 100644 fs/xfs/xfs_hooks.h diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig index 567fb37274d3..fa7eb3e2a248 100644 --- a/fs/xfs/Kconfig +++ b/fs/xfs/Kconfig @@ -124,11 +124,16 @@ config XFS_DRAIN_INTENTS bool select JUMP_LABEL if HAVE_ARCH_JUMP_LABEL +config XFS_LIVE_HOOKS + bool + select JUMP_LABEL if HAVE_ARCH_JUMP_LABEL + config XFS_ONLINE_SCRUB bool "XFS online metadata check support" default n depends on XFS_FS depends on TMPFS && SHMEM + select XFS_LIVE_HOOKS select XFS_DRAIN_INTENTS help If you say Y here you will be able to check metadata on a diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 17af5bde5308..4597c0f19e8e 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -136,6 +136,7 @@ xfs-$(CONFIG_FS_DAX) += xfs_notify_failure.o endif xfs-$(CONFIG_XFS_DRAIN_INTENTS) += xfs_drain.o +xfs-$(CONFIG_XFS_LIVE_HOOKS) += xfs_hooks.o # online scrub/repair ifeq ($(CONFIG_XFS_ONLINE_SCRUB),y) diff --git a/fs/xfs/xfs_hooks.c b/fs/xfs/xfs_hooks.c new file mode 100644 index 000000000000..a58d1de2d37d --- /dev/null +++ b/fs/xfs/xfs_hooks.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_ag.h" +#include "xfs_trace.h" + +/* Initialize a notifier chain. */ +void +xfs_hooks_init( + struct xfs_hooks *chain) +{ + BLOCKING_INIT_NOTIFIER_HEAD(&chain->head); +} + +/* Make it so a function gets called whenever we hit a certain hook point. */ +int +xfs_hooks_add( + struct xfs_hooks *chain, + struct xfs_hook *hook) +{ + ASSERT(hook->nb.notifier_call != NULL); + BUILD_BUG_ON(offsetof(struct xfs_hook, nb) != 0); + + return blocking_notifier_chain_register(&chain->head, &hook->nb); +} + +/* Remove a previously installed hook. 
*/ +void +xfs_hooks_del( + struct xfs_hooks *chain, + struct xfs_hook *hook) +{ + blocking_notifier_chain_unregister(&chain->head, &hook->nb); +} + +/* Call a hook. Returns the NOTIFY_* value returned by the last hook. */ +int +xfs_hooks_call( + struct xfs_hooks *chain, + unsigned long val, + void *priv) +{ + return blocking_notifier_call_chain(&chain->head, val, priv); +} diff --git a/fs/xfs/xfs_hooks.h b/fs/xfs/xfs_hooks.h new file mode 100644 index 000000000000..60b8a5831536 --- /dev/null +++ b/fs/xfs/xfs_hooks.h @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong + */ +#ifndef XFS_HOOKS_H_ +#define XFS_HOOKS_H_ + +#ifdef CONFIG_XFS_LIVE_HOOKS +struct xfs_hooks { + struct blocking_notifier_head head; +}; + +/* + * If jump labels are enabled in Kconfig, the static key uses nop sleds and + * code patching to eliminate the overhead of taking the rwsem in + * blocking_notifier_call_chain when there are no hooks configured. If not, + * the static key per-call overhead is an atomic read. Most arches that can + * handle XFS also support jump labels. + * + * Note: Patching the kernel code requires taking the cpu hotplug lock. Other + * parts of the kernel allocate memory with that lock held, which means that + * XFS callers cannot hold any locks that might be used by memory reclaim or + * writeback when calling the static_branch_{inc,dec} functions. + */ +# define DEFINE_STATIC_XFS_HOOK_SWITCH(name) \ + static DEFINE_STATIC_KEY_FALSE(name) +# define xfs_hooks_switch_on(name) static_branch_inc(name) +# define xfs_hooks_switch_off(name) static_branch_dec(name) +# define xfs_hooks_switched_on(name) static_branch_unlikely(name) + +struct xfs_hook { + /* This must come at the start of the structure. */ + struct notifier_block nb; +}; + +typedef int (*xfs_hook_fn_t)(struct xfs_hook *hook, unsigned long action, + void *data); + +void xfs_hooks_init(struct xfs_hooks *chain); +int xfs_hooks_add(struct xfs_hooks *chain, struct xfs_hook *hook); +void xfs_hooks_del(struct xfs_hooks *chain, struct xfs_hook *hook); +int xfs_hooks_call(struct xfs_hooks *chain, unsigned long action, + void *priv); + +static inline void xfs_hook_setup(struct xfs_hook *hook, notifier_fn_t fn) +{ + hook->nb.notifier_call = fn; + hook->nb.priority = 0; +} + +#else + +struct xfs_hooks { /* empty */ }; + +# define DEFINE_STATIC_XFS_HOOK_SWITCH(name) +# define xfs_hooks_switch_on(name) ((void)0) +# define xfs_hooks_switch_off(name) ((void)0) +# define xfs_hooks_switched_on(name) (false) + +# define xfs_hooks_init(chain) ((void)0) +# define xfs_hooks_call(chain, val, priv) (NOTIFY_DONE) +#endif + +#endif /* XFS_HOOKS_H_ */ diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 75245932e0ad..8f07c9f6157f 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -81,6 +81,7 @@ typedef __u32 xfs_nlink_t; #include "xfs_buf.h" #include "xfs_message.h" #include "xfs_drain.h" +#include "xfs_hooks.h" #ifdef __BIG_ENDIAN #define XFS_NATIVE_HOST 1 -- cgit From c473a3320be32b2273042bfdf0fe8db5da7ae5d0 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:30:46 -0800 Subject: xfs: stagger the starting AG of scrub iscans to reduce contention Online directory and parent repairs on parent-pointer equipped filesystems have shown that starting a large number of parallel iscans causes a lot of AGI buffer contention. 
Try to reduce this by making the iscans wrap around the end of the
filesystem, and by using a rotor to stagger where each scanner begins.
Surprisingly, this boosts CPU utilization (on the author's test machines)
from effectively single-threaded to 160%.  Not great, but see the next
patch.

Signed-off-by: Darrick J. Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/scrub/iscan.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++------
 fs/xfs/scrub/iscan.h |  7 +++++
 fs/xfs/scrub/trace.h |  7 +++--
 3 files changed, 89 insertions(+), 12 deletions(-)

diff --git a/fs/xfs/scrub/iscan.c b/fs/xfs/scrub/iscan.c
index d13fc3b60f2e..3179c299c77f 100644
--- a/fs/xfs/scrub/iscan.c
+++ b/fs/xfs/scrub/iscan.c
@@ -170,10 +170,24 @@ xchk_iscan_move_cursor(
 {
 	struct xfs_scrub	*sc = iscan->sc;
 	struct xfs_mount	*mp = sc->mp;
+	xfs_ino_t		cursor, visited;
+
+	BUILD_BUG_ON(XFS_MAXINUMBER == NULLFSINO);
+
+	/*
+	 * Special-case ino == 0 here so that we never set visited_ino to
+	 * NULLFSINO when wrapping around EOFS, for that will let through all
+	 * live updates.
+	 */
+	cursor = XFS_AGINO_TO_INO(mp, agno, agino);
+	if (cursor == 0)
+		visited = XFS_MAXINUMBER;
+	else
+		visited = cursor - 1;
 
 	mutex_lock(&iscan->lock);
-	iscan->cursor_ino = XFS_AGINO_TO_INO(mp, agno, agino);
-	iscan->__visited_ino = iscan->cursor_ino - 1;
+	iscan->cursor_ino = cursor;
+	iscan->__visited_ino = visited;
 	trace_xchk_iscan_move_cursor(iscan);
 	mutex_unlock(&iscan->lock);
 }
@@ -257,12 +271,13 @@ xchk_iscan_advance(
 		 * Did not find any more inodes in this AG, move on to the next
 		 * AG.
 		 */
-		xchk_iscan_move_cursor(iscan, ++agno, 0);
+		agno = (agno + 1) % mp->m_sb.sb_agcount;
+		xchk_iscan_move_cursor(iscan, agno, 0);
 		xfs_trans_brelse(sc->tp, agi_bp);
 		xfs_perag_put(pag);
 
 		trace_xchk_iscan_advance_ag(iscan);
-	} while (agno < mp->m_sb.sb_agcount);
+	} while (iscan->cursor_ino != iscan->scan_start_ino);
 
 	xchk_iscan_finish(iscan);
 	return 0;
@@ -420,6 +435,23 @@ xchk_iscan_teardown(
 	mutex_destroy(&iscan->lock);
 }
 
+/* Pick an AG from which to start a scan. */
+static inline xfs_ino_t
+xchk_iscan_rotor(
+	struct xfs_mount	*mp)
+{
+	static atomic_t		agi_rotor;
+	unsigned int		r = atomic_inc_return(&agi_rotor) - 1;
+
+	/*
+	 * Rotoring *backwards* through the AGs, so we add one here before
+	 * subtracting from the agcount to arrive at an AG number.
+	 */
+	r = (r % mp->m_sb.sb_agcount) + 1;
+
+	return XFS_AGINO_TO_INO(mp, mp->m_sb.sb_agcount - r, 0);
+}
+
 /*
  * Set ourselves up to start an inode scan.  If the @iget_timeout and
  * @iget_retry_delay parameters are set, the scan will try to iget each inode
@@ -434,15 +466,20 @@ xchk_iscan_start(
 	unsigned int		iget_retry_delay,
 	struct xchk_iscan	*iscan)
 {
+	xfs_ino_t		start_ino;
+
+	start_ino = xchk_iscan_rotor(sc->mp);
+
 	iscan->sc = sc;
 	clear_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
 	iscan->iget_timeout = iget_timeout;
 	iscan->iget_retry_delay = iget_retry_delay;
-	iscan->__visited_ino = 0;
-	iscan->cursor_ino = 0;
+	iscan->__visited_ino = start_ino;
+	iscan->cursor_ino = start_ino;
+	iscan->scan_start_ino = start_ino;
 	mutex_init(&iscan->lock);
 
-	trace_xchk_iscan_start(iscan);
+	trace_xchk_iscan_start(iscan, start_ino);
 }
 
 /*
@@ -471,15 +508,45 @@ xchk_iscan_want_live_update(
 	struct xchk_iscan	*iscan,
 	xfs_ino_t		ino)
 {
-	bool			ret;
+	bool			ret = false;
 
 	if (xchk_iscan_aborted(iscan))
 		return false;
 
 	mutex_lock(&iscan->lock);
+
 	trace_xchk_iscan_want_live_update(iscan, ino);
-	ret = iscan->__visited_ino >= ino;
-	mutex_unlock(&iscan->lock);
 
+	/* Scan is finished, caller should receive all updates.
*/ + if (iscan->__visited_ino == NULLFSINO) { + ret = true; + goto unlock; + } + + /* + * The visited cursor hasn't yet wrapped around the end of the FS. If + * @ino is inside the starred range, the caller should receive updates: + * + * 0 ------------ S ************ V ------------ EOFS + */ + if (iscan->scan_start_ino <= iscan->__visited_ino) { + if (ino >= iscan->scan_start_ino && + ino <= iscan->__visited_ino) + ret = true; + + goto unlock; + } + + /* + * The visited cursor wrapped around the end of the FS. If @ino is + * inside the starred range, the caller should receive updates: + * + * 0 ************ V ------------ S ************ EOFS + */ + if (ino >= iscan->scan_start_ino || ino <= iscan->__visited_ino) + ret = true; + +unlock: + mutex_unlock(&iscan->lock); return ret; } diff --git a/fs/xfs/scrub/iscan.h b/fs/xfs/scrub/iscan.h index c25f121859ce..0db97d98ee8d 100644 --- a/fs/xfs/scrub/iscan.h +++ b/fs/xfs/scrub/iscan.h @@ -12,6 +12,13 @@ struct xchk_iscan { /* Lock to protect the scan cursor. */ struct mutex lock; + /* + * This is the first inode in the inumber address space that we + * examined. When the scan wraps around back to here, the scan is + * finished. + */ + xfs_ino_t scan_start_ino; + /* This is the inode that will be examined next. */ xfs_ino_t cursor_ino; diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 29026d1d9293..5a70968bc3e2 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -1173,25 +1173,27 @@ DEFINE_EVENT(xchk_iscan_class, name, \ DEFINE_ISCAN_EVENT(xchk_iscan_move_cursor); DEFINE_ISCAN_EVENT(xchk_iscan_visit); DEFINE_ISCAN_EVENT(xchk_iscan_advance_ag); -DEFINE_ISCAN_EVENT(xchk_iscan_start); DECLARE_EVENT_CLASS(xchk_iscan_ino_class, TP_PROTO(struct xchk_iscan *iscan, xfs_ino_t ino), TP_ARGS(iscan, ino), TP_STRUCT__entry( __field(dev_t, dev) + __field(xfs_ino_t, startino) __field(xfs_ino_t, cursor) __field(xfs_ino_t, visited) __field(xfs_ino_t, ino) ), TP_fast_assign( __entry->dev = iscan->sc->mp->m_super->s_dev; + __entry->startino = iscan->scan_start_ino; __entry->cursor = iscan->cursor_ino; __entry->visited = iscan->__visited_ino; __entry->ino = ino; ), - TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx ino 0x%llx", + TP_printk("dev %d:%d iscan start 0x%llx cursor 0x%llx visited 0x%llx ino 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->startino, __entry->cursor, __entry->visited, __entry->ino) @@ -1201,6 +1203,7 @@ DEFINE_EVENT(xchk_iscan_ino_class, name, \ TP_PROTO(struct xchk_iscan *iscan, xfs_ino_t ino), \ TP_ARGS(iscan, ino)) DEFINE_ISCAN_INO_EVENT(xchk_iscan_want_live_update); +DEFINE_ISCAN_INO_EVENT(xchk_iscan_start); TRACE_EVENT(xchk_iscan_iget, TP_PROTO(struct xchk_iscan *iscan, int error), -- cgit From a7a686cb07203fc42a38e66324241b7f2fe4fae2 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:30:47 -0800 Subject: xfs: cache a bunch of inodes for repair scans After observing xfs_scrub taking forever to rebuild parent pointers on a pptrs enabled filesystem, I decided to profile what the system was doing. It turns out that when there are a lot of threads trying to scan the filesystem, most of our time is spent contending on AGI buffer locks. Given that we're walking the inobt records anyway, we can often tell ahead of time when there's a bunch of (up to 64) consecutive inodes that we could grab all at once. Do this to amortize the cost of taking the AGI lock across as many inodes as we possibly can. 
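The core of the batching idea is roughly the following sketch, condensed
from the patch below; batch[] stands in for the array where the iscan
stashes inode pointers, and error handling is elided:

	/*
	 * allocmask bit i is set if cursor_ino + i is allocated; it comes
	 * from ~ir_free of the inobt record, shifted down to the cursor.
	 * One trip through the AGI buffer lock can therefore supply up to
	 * an entire 64-inode chunk.
	 */
	for (i = 0; allocmask & 1; allocmask >>= 1, i++) {
		if (xfs_iget(mp, tp, cursor_ino + i, XFS_IGET_NORETRY, 0,
				&batch[i]))
			break;
	}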
On the author's system, this seems to improve parallel throughput from
barely one and a half cores to slightly sublinear scaling.  The obvious
antipattern here, of course, is where the freemask has every other bit
set (e.g. all 0xA's).

Signed-off-by: Darrick J. Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/scrub/iscan.c | 159 +++++++++++++++++++++++++++++++++++++++++----------
 fs/xfs/scrub/iscan.h |   7 +++
 fs/xfs/scrub/trace.h |  23 ++++++++
 3 files changed, 159 insertions(+), 30 deletions(-)

diff --git a/fs/xfs/scrub/iscan.c b/fs/xfs/scrub/iscan.c
index 3179c299c77f..d1c33aba1f10 100644
--- a/fs/xfs/scrub/iscan.c
+++ b/fs/xfs/scrub/iscan.c
@@ -60,6 +60,7 @@ xchk_iscan_find_next(
 	struct xchk_iscan	*iscan,
 	struct xfs_buf		*agi_bp,
 	struct xfs_perag	*pag,
+	xfs_inofree_t		*allocmaskp,
 	xfs_agino_t		*cursor)
 {
 	struct xfs_scrub	*sc = iscan->sc;
@@ -145,6 +146,7 @@ xchk_iscan_find_next(
 
 			ASSERT(next >= 0);
 			*cursor = rec.ir_startino + next;
+			*allocmaskp = allocmask >> next;
 			break;
 		}
 	}
@@ -225,7 +227,8 @@ STATIC int
 xchk_iscan_advance(
 	struct xchk_iscan	*iscan,
 	struct xfs_perag	**pagp,
-	struct xfs_buf		**agi_bpp)
+	struct xfs_buf		**agi_bpp,
+	xfs_inofree_t		*allocmaskp)
 {
 	struct xfs_scrub	*sc = iscan->sc;
 	struct xfs_mount	*mp = sc->mp;
@@ -251,7 +254,8 @@ xchk_iscan_advance(
 			goto out_pag;
 
 		agino = XFS_INO_TO_AGINO(mp, iscan->cursor_ino);
-		ret = xchk_iscan_find_next(iscan, agi_bp, pag, &agino);
+		ret = xchk_iscan_find_next(iscan, agi_bp, pag, allocmaskp,
+				&agino);
 		if (ret)
 			goto out_buf;
 
@@ -331,29 +335,35 @@ xchk_iscan_iget_retry(
  * caller must ensure that no other threads can modify the inode until a call
  * to xchk_iscan_visit succeeds.
  *
- * Returns 0 and an incore inode; -EAGAIN if the caller should call again
- * xchk_iscan_advance; -EBUSY if we couldn't grab an inode; -ECANCELED if
- * there's a fatal signal pending; or some other negative errno.
+ * Returns the number of incore inodes grabbed; -EAGAIN if the caller should
+ * call again xchk_iscan_advance; -EBUSY if we couldn't grab an inode;
+ * -ECANCELED if there's a fatal signal pending; or some other negative errno.
  */
 STATIC int
 xchk_iscan_iget(
 	struct xchk_iscan	*iscan,
 	struct xfs_perag	*pag,
 	struct xfs_buf		*agi_bp,
-	struct xfs_inode	**ipp)
+	xfs_inofree_t		allocmask)
 {
 	struct xfs_scrub	*sc = iscan->sc;
 	struct xfs_mount	*mp = sc->mp;
+	xfs_ino_t		ino = iscan->cursor_ino;
+	unsigned int		idx = 0;
 	int			error;
 
-	error = xfs_iget(sc->mp, sc->tp, iscan->cursor_ino, XFS_IGET_NORETRY,
-			0, ipp);
-	xfs_trans_brelse(sc->tp, agi_bp);
-	xfs_perag_put(pag);
+	ASSERT(iscan->__inodes[0] == NULL);
+
+	/* Fill the first slot in the inode array. */
+	error = xfs_iget(sc->mp, sc->tp, ino, XFS_IGET_NORETRY, 0,
+			&iscan->__inodes[idx]);
 
 	trace_xchk_iscan_iget(iscan, error);
 
 	if (error == -ENOENT || error == -EAGAIN) {
+		xfs_trans_brelse(sc->tp, agi_bp);
+		xfs_perag_put(pag);
+
 		/*
 		 * It's possible that this inode has lost all of its links but
 		 * hasn't yet been inactivated.
If we don't have a transaction @@ -364,6 +374,9 @@ xchk_iscan_iget( } if (error == -EINVAL) { + xfs_trans_brelse(sc->tp, agi_bp); + xfs_perag_put(pag); + /* * We thought the inode was allocated, but the inode btree * lookup failed, which means that it was freed since the last @@ -374,25 +387,47 @@ xchk_iscan_iget( return xchk_iscan_iget_retry(iscan, false); } - return error; + if (error) { + xfs_trans_brelse(sc->tp, agi_bp); + xfs_perag_put(pag); + return error; + } + idx++; + ino++; + allocmask >>= 1; + + /* + * Now that we've filled the first slot in __inodes, try to fill the + * rest of the batch with consecutively ordered inodes. to reduce the + * number of _iter calls. If we can't get an inode, we stop and return + * what we have. + */ + for (; allocmask & 1; allocmask >>= 1, ino++, idx++) { + ASSERT(iscan->__inodes[idx] == NULL); + + error = xfs_iget(sc->mp, sc->tp, ino, XFS_IGET_NORETRY, 0, + &iscan->__inodes[idx]); + if (error) + break; + + mutex_lock(&iscan->lock); + iscan->cursor_ino = ino; + mutex_unlock(&iscan->lock); + } + + trace_xchk_iscan_iget_batch(sc->mp, iscan, idx); + xfs_trans_brelse(sc->tp, agi_bp); + xfs_perag_put(pag); + return idx; } /* - * Advance the inode scan cursor to the next allocated inode and return the - * incore inode structure associated with it. - * - * Returns 1 if there's a new inode to examine, 0 if we've run out of inodes, - * -ECANCELED if the live scan aborted, -EBUSY if the incore inode could not be - * grabbed, or the usual negative errno. - * - * If the function returns -EBUSY and the caller can handle skipping an inode, - * it may call this function again to continue the scan with the next allocated - * inode. + * Advance the inode scan cursor to the next allocated inode and return up to + * 64 consecutive allocated inodes starting with the cursor position. */ -int -xchk_iscan_iter( - struct xchk_iscan *iscan, - struct xfs_inode **ipp) +STATIC int +xchk_iscan_iter_batch( + struct xchk_iscan *iscan) { struct xfs_scrub *sc = iscan->sc; int ret; @@ -404,8 +439,9 @@ xchk_iscan_iter( do { struct xfs_buf *agi_bp = NULL; struct xfs_perag *pag = NULL; + xfs_inofree_t allocmask = 0; - ret = xchk_iscan_advance(iscan, &pag, &agi_bp); + ret = xchk_iscan_advance(iscan, &pag, &agi_bp, &allocmask); if (ret != 1) return ret; @@ -416,21 +452,74 @@ xchk_iscan_iter( break; } - ret = xchk_iscan_iget(iscan, pag, agi_bp, ipp); + ret = xchk_iscan_iget(iscan, pag, agi_bp, allocmask); } while (ret == -EAGAIN); - if (!ret) - return 1; - return ret; } +/* + * Advance the inode scan cursor to the next allocated inode and return the + * incore inode structure associated with it. + * + * Returns 1 if there's a new inode to examine, 0 if we've run out of inodes, + * -ECANCELED if the live scan aborted, -EBUSY if the incore inode could not be + * grabbed, or the usual negative errno. + * + * If the function returns -EBUSY and the caller can handle skipping an inode, + * it may call this function again to continue the scan with the next allocated + * inode. + */ +int +xchk_iscan_iter( + struct xchk_iscan *iscan, + struct xfs_inode **ipp) +{ + unsigned int i; + int error; + + /* Find a cached inode, or go get another batch. */ + for (i = 0; i < XFS_INODES_PER_CHUNK; i++) { + if (iscan->__inodes[i]) + goto foundit; + } + + error = xchk_iscan_iter_batch(iscan); + if (error <= 0) + return error; + + ASSERT(iscan->__inodes[0] != NULL); + i = 0; + +foundit: + /* Give the caller our reference. 
*/ + *ipp = iscan->__inodes[i]; + iscan->__inodes[i] = NULL; + return 1; +} + +/* Clean up an xfs_iscan_iter call by dropping any inodes that we still hold. */ +void +xchk_iscan_iter_finish( + struct xchk_iscan *iscan) +{ + struct xfs_scrub *sc = iscan->sc; + unsigned int i; + + for (i = 0; i < XFS_INODES_PER_CHUNK; i++) { + if (iscan->__inodes[i]) { + xchk_irele(sc, iscan->__inodes[i]); + iscan->__inodes[i] = NULL; + } + } +} /* Mark this inode scan finished and release resources. */ void xchk_iscan_teardown( struct xchk_iscan *iscan) { + xchk_iscan_iter_finish(iscan); xchk_iscan_finish(iscan); mutex_destroy(&iscan->lock); } @@ -478,6 +567,7 @@ xchk_iscan_start( iscan->cursor_ino = start_ino; iscan->scan_start_ino = start_ino; mutex_init(&iscan->lock); + memset(iscan->__inodes, 0, sizeof(iscan->__inodes)); trace_xchk_iscan_start(iscan, start_ino); } @@ -523,6 +613,15 @@ xchk_iscan_want_live_update( goto unlock; } + /* + * No inodes have been visited yet, so the visited cursor points at the + * start of the scan range. The caller should not receive any updates. + */ + if (iscan->scan_start_ino == iscan->__visited_ino) { + ret = false; + goto unlock; + } + /* * The visited cursor hasn't yet wrapped around the end of the FS. If * @ino is inside the starred range, the caller should receive updates: diff --git a/fs/xfs/scrub/iscan.h b/fs/xfs/scrub/iscan.h index 0db97d98ee8d..f7317af807dd 100644 --- a/fs/xfs/scrub/iscan.h +++ b/fs/xfs/scrub/iscan.h @@ -41,6 +41,12 @@ struct xchk_iscan { /* Wait this many ms to retry an iget. */ unsigned int iget_retry_delay; + + /* + * The scan grabs batches of inodes and stashes them here before + * handing them out with _iter. + */ + struct xfs_inode *__inodes[XFS_INODES_PER_CHUNK]; }; /* Set if the scan has been aborted due to some event in the fs. */ @@ -63,6 +69,7 @@ void xchk_iscan_start(struct xfs_scrub *sc, unsigned int iget_timeout, void xchk_iscan_teardown(struct xchk_iscan *iscan); int xchk_iscan_iter(struct xchk_iscan *iscan, struct xfs_inode **ipp); +void xchk_iscan_iter_finish(struct xchk_iscan *iscan); void xchk_iscan_mark_visited(struct xchk_iscan *iscan, struct xfs_inode *ip); bool xchk_iscan_want_live_update(struct xchk_iscan *iscan, xfs_ino_t ino); diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 5a70968bc3e2..38d3356466cd 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -1227,6 +1227,29 @@ TRACE_EVENT(xchk_iscan_iget, __entry->error) ); +TRACE_EVENT(xchk_iscan_iget_batch, + TP_PROTO(struct xfs_mount *mp, struct xchk_iscan *iscan, + unsigned int nr), + TP_ARGS(mp, iscan, nr), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, cursor) + __field(xfs_ino_t, visited) + __field(unsigned int, nr) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->cursor = iscan->cursor_ino; + __entry->visited = iscan->__visited_ino; + __entry->nr = nr; + ), + TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx nr %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->cursor, + __entry->visited, + __entry->nr) +); + TRACE_EVENT(xchk_iscan_iget_retry_wait, TP_PROTO(struct xchk_iscan *iscan), TP_ARGS(iscan), -- cgit From 82334a79c6eb1c18b4b38285cf2693bbc5db3933 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:30:48 -0800 Subject: xfs: iscan batching should handle unallocated inodes too The inode scanner tries to reduce contention on the AGI header buffer lock by grabbing references to consecutive allocated inodes. Batching stops as soon as we encounter an unallocated inode. 
This is unfortunate because in the worst case performance collapses to the old "one at a time" behavior if every other inode is free. This is correct behavior, but we could do better. Unallocated inodes by definition have nothing to scan, which means the iscan can ignore them as long as someone ensures that the scan data will reflect another thread allocating the inode and adding interesting metadata to that inode. That mechanism is, of course, the live update hooks. Therefore, extend the batching mechanism to track unallocated inodes adjacent to the scan cursor. The _want_live_update predicate can tell the caller's live update hook to incorporate all live updates to what the scanner thinks is an unallocated inode if (after dropping the AGI) some other thread allocates one of those inodes and begins using it. Note that we cannot just copy the ir_free bitmap into the scan cursor because the batching stops if iget says the inode is in an intermediate state (e.g. on the inactivation list) and cannot be igrabbed. Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig --- fs/xfs/scrub/iscan.c | 107 ++++++++++++++++++++++++++++++++++++++++++++++----- fs/xfs/scrub/iscan.h | 6 ++- fs/xfs/scrub/trace.h | 21 ++++++++-- 3 files changed, 119 insertions(+), 15 deletions(-) diff --git a/fs/xfs/scrub/iscan.c b/fs/xfs/scrub/iscan.c index d1c33aba1f10..327f7fb83958 100644 --- a/fs/xfs/scrub/iscan.c +++ b/fs/xfs/scrub/iscan.c @@ -61,7 +61,8 @@ xchk_iscan_find_next( struct xfs_buf *agi_bp, struct xfs_perag *pag, xfs_inofree_t *allocmaskp, - xfs_agino_t *cursor) + xfs_agino_t *cursor, + uint8_t *nr_inodesp) { struct xfs_scrub *sc = iscan->sc; struct xfs_inobt_rec_incore rec; @@ -147,6 +148,7 @@ xchk_iscan_find_next( ASSERT(next >= 0); *cursor = rec.ir_startino + next; *allocmaskp = allocmask >> next; + *nr_inodesp = XFS_INODES_PER_CHUNK - next; break; } } @@ -228,7 +230,8 @@ xchk_iscan_advance( struct xchk_iscan *iscan, struct xfs_perag **pagp, struct xfs_buf **agi_bpp, - xfs_inofree_t *allocmaskp) + xfs_inofree_t *allocmaskp, + uint8_t *nr_inodesp) { struct xfs_scrub *sc = iscan->sc; struct xfs_mount *mp = sc->mp; @@ -255,7 +258,7 @@ xchk_iscan_advance( agino = XFS_INO_TO_AGINO(mp, iscan->cursor_ino); ret = xchk_iscan_find_next(iscan, agi_bp, pag, allocmaskp, - &agino); + &agino, nr_inodesp); if (ret) goto out_buf; @@ -344,12 +347,14 @@ xchk_iscan_iget( struct xchk_iscan *iscan, struct xfs_perag *pag, struct xfs_buf *agi_bp, - xfs_inofree_t allocmask) + xfs_inofree_t allocmask, + uint8_t nr_inodes) { struct xfs_scrub *sc = iscan->sc; struct xfs_mount *mp = sc->mp; xfs_ino_t ino = iscan->cursor_ino; unsigned int idx = 0; + unsigned int i; int error; ASSERT(iscan->__inodes[0] == NULL); @@ -399,10 +404,28 @@ xchk_iscan_iget( /* * Now that we've filled the first slot in __inodes, try to fill the * rest of the batch with consecutively ordered inodes. to reduce the - * number of _iter calls. If we can't get an inode, we stop and return - * what we have. + * number of _iter calls. Make a bitmap of unallocated inodes from the + * zeroes in the inuse bitmap; these inodes will not be scanned, but + * the _want_live_update predicate will pass through all live updates. + * + * If we can't iget an allocated inode, stop and return what we have. 
*/ - for (; allocmask & 1; allocmask >>= 1, ino++, idx++) { + mutex_lock(&iscan->lock); + iscan->__batch_ino = ino - 1; + iscan->__skipped_inomask = 0; + mutex_unlock(&iscan->lock); + + for (i = 1; i < nr_inodes; i++, ino++, allocmask >>= 1) { + if (!(allocmask & 1)) { + ASSERT(!(iscan->__skipped_inomask & (1ULL << i))); + + mutex_lock(&iscan->lock); + iscan->cursor_ino = ino; + iscan->__skipped_inomask |= (1ULL << i); + mutex_unlock(&iscan->lock); + continue; + } + ASSERT(iscan->__inodes[idx] == NULL); error = xfs_iget(sc->mp, sc->tp, ino, XFS_IGET_NORETRY, 0, @@ -413,14 +436,42 @@ xchk_iscan_iget( mutex_lock(&iscan->lock); iscan->cursor_ino = ino; mutex_unlock(&iscan->lock); + idx++; } - trace_xchk_iscan_iget_batch(sc->mp, iscan, idx); + trace_xchk_iscan_iget_batch(sc->mp, iscan, nr_inodes, idx); xfs_trans_brelse(sc->tp, agi_bp); xfs_perag_put(pag); return idx; } +/* + * Advance the visit cursor to reflect skipped inodes beyond whatever we + * scanned. + */ +STATIC void +xchk_iscan_finish_batch( + struct xchk_iscan *iscan) +{ + xfs_ino_t highest_skipped; + + mutex_lock(&iscan->lock); + + if (iscan->__batch_ino != NULLFSINO) { + highest_skipped = iscan->__batch_ino + + xfs_highbit64(iscan->__skipped_inomask); + iscan->__visited_ino = max(iscan->__visited_ino, + highest_skipped); + + trace_xchk_iscan_skip(iscan); + } + + iscan->__batch_ino = NULLFSINO; + iscan->__skipped_inomask = 0; + + mutex_unlock(&iscan->lock); +} + /* * Advance the inode scan cursor to the next allocated inode and return up to * 64 consecutive allocated inodes starting with the cursor position. @@ -432,6 +483,8 @@ xchk_iscan_iter_batch( struct xfs_scrub *sc = iscan->sc; int ret; + xchk_iscan_finish_batch(iscan); + if (iscan->iget_timeout) iscan->__iget_deadline = jiffies + msecs_to_jiffies(iscan->iget_timeout); @@ -440,8 +493,10 @@ xchk_iscan_iter_batch( struct xfs_buf *agi_bp = NULL; struct xfs_perag *pag = NULL; xfs_inofree_t allocmask = 0; + uint8_t nr_inodes = 0; - ret = xchk_iscan_advance(iscan, &pag, &agi_bp, &allocmask); + ret = xchk_iscan_advance(iscan, &pag, &agi_bp, &allocmask, + &nr_inodes); if (ret != 1) return ret; @@ -452,7 +507,7 @@ xchk_iscan_iter_batch( break; } - ret = xchk_iscan_iget(iscan, pag, agi_bp, allocmask); + ret = xchk_iscan_iget(iscan, pag, agi_bp, allocmask, nr_inodes); } while (ret == -EAGAIN); return ret; @@ -559,6 +614,9 @@ xchk_iscan_start( start_ino = xchk_iscan_rotor(sc->mp); + iscan->__batch_ino = NULLFSINO; + iscan->__skipped_inomask = 0; + iscan->sc = sc; clear_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate); iscan->iget_timeout = iget_timeout; @@ -587,6 +645,26 @@ xchk_iscan_mark_visited( mutex_unlock(&iscan->lock); } +/* + * Did we skip this inode because it wasn't allocated when we loaded the batch? + * If so, it is newly allocated and will not be scanned. All live updates to + * this inode must be passed to the caller to maintain scan correctness. + */ +static inline bool +xchk_iscan_skipped( + const struct xchk_iscan *iscan, + xfs_ino_t ino) +{ + if (iscan->__batch_ino == NULLFSINO) + return false; + if (ino < iscan->__batch_ino) + return false; + if (ino >= iscan->__batch_ino + XFS_INODES_PER_CHUNK) + return false; + + return iscan->__skipped_inomask & (1ULL << (ino - iscan->__batch_ino)); +} + /* * Do we need a live update for this inode? This is true if the scanner thread * has visited this inode and the scan hasn't been aborted due to errors. 
@@ -622,6 +700,15 @@ xchk_iscan_want_live_update( goto unlock; } + /* + * This inode was not allocated at the time of the iscan batch. + * The caller should receive all updates. + */ + if (xchk_iscan_skipped(iscan, ino)) { + ret = true; + goto unlock; + } + /* * The visited cursor hasn't yet wrapped around the end of the FS. If * @ino is inside the starred range, the caller should receive updates: diff --git a/fs/xfs/scrub/iscan.h b/fs/xfs/scrub/iscan.h index f7317af807dd..365d54e35cd9 100644 --- a/fs/xfs/scrub/iscan.h +++ b/fs/xfs/scrub/iscan.h @@ -44,8 +44,12 @@ struct xchk_iscan { /* * The scan grabs batches of inodes and stashes them here before - * handing them out with _iter. + * handing them out with _iter. Unallocated inodes are set in the + * mask so that all updates to that inode are selected for live + * update propagation. */ + xfs_ino_t __batch_ino; + xfs_inofree_t __skipped_inomask; struct xfs_inode *__inodes[XFS_INODES_PER_CHUNK]; }; diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 38d3356466cd..829c90da59c7 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -1172,6 +1172,7 @@ DEFINE_EVENT(xchk_iscan_class, name, \ TP_ARGS(iscan)) DEFINE_ISCAN_EVENT(xchk_iscan_move_cursor); DEFINE_ISCAN_EVENT(xchk_iscan_visit); +DEFINE_ISCAN_EVENT(xchk_iscan_skip); DEFINE_ISCAN_EVENT(xchk_iscan_advance_ag); DECLARE_EVENT_CLASS(xchk_iscan_ino_class, @@ -1229,25 +1230,37 @@ TRACE_EVENT(xchk_iscan_iget, TRACE_EVENT(xchk_iscan_iget_batch, TP_PROTO(struct xfs_mount *mp, struct xchk_iscan *iscan, - unsigned int nr), - TP_ARGS(mp, iscan, nr), + unsigned int nr, unsigned int avail), + TP_ARGS(mp, iscan, nr, avail), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, cursor) __field(xfs_ino_t, visited) __field(unsigned int, nr) + __field(unsigned int, avail) + __field(unsigned int, unavail) + __field(xfs_ino_t, batch_ino) + __field(unsigned long long, skipmask) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->cursor = iscan->cursor_ino; __entry->visited = iscan->__visited_ino; __entry->nr = nr; + __entry->avail = avail; + __entry->unavail = hweight64(iscan->__skipped_inomask); + __entry->batch_ino = iscan->__batch_ino; + __entry->skipmask = iscan->__skipped_inomask; ), - TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx nr %d", + TP_printk("dev %d:%d iscan cursor 0x%llx visited 0x%llx batchino 0x%llx skipmask 0x%llx nr %u avail %u unavail %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->cursor, __entry->visited, - __entry->nr) + __entry->batch_ino, + __entry->skipmask, + __entry->nr, + __entry->avail, + __entry->unavail) ); TRACE_EVENT(xchk_iscan_iget_retry_wait, -- cgit From e99bfc9e687e208d4ba7e85167b8753e80cf4169 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 22 Feb 2024 12:30:48 -0800 Subject: xfs: create a static name for the dot entry too Create an xfs_name_dot object so that upcoming scrub code can compare against that. Offline repair already has such an object, so we're really just hoisting it to the kernel. Signed-off-by: Darrick J. 
Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/libxfs/xfs_dir2.c | 6 ++++++
 fs/xfs/libxfs/xfs_dir2.h | 1 +
 2 files changed, 7 insertions(+)

diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 8c9403b33191..9018abb38d7f 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -25,6 +25,12 @@ const struct xfs_name xfs_name_dotdot = {
 	.type	= XFS_DIR3_FT_DIR,
 };
 
+const struct xfs_name xfs_name_dot = {
+	.name	= (const unsigned char *)".",
+	.len	= 1,
+	.type	= XFS_DIR3_FT_DIR,
+};
+
 /*
  * Convert inode mode to directory entry filetype
  */
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 19af22a16c41..7d7cd8d808e4 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -22,6 +22,7 @@ struct xfs_dir3_icfree_hdr;
 struct xfs_dir3_icleaf_hdr;
 
 extern const struct xfs_name xfs_name_dotdot;
+extern const struct xfs_name xfs_name_dot;
 
 /*
  * Convert inode mode to directory entry filetype
-- cgit
From d9c0775897147bab54410611ac2659a7477c770c Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Thu, 22 Feb 2024 12:30:49 -0800
Subject: xfs: create a predicate to determine if two xfs_names are the same

Create a simple predicate to determine if two xfs_names are the same
object or have the exact same name. The comparison is always case
sensitive.

Signed-off-by: Darrick J. Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/libxfs/xfs_dir2.h | 12 ++++++++++++
 fs/xfs/scrub/dir.c | 4 ++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 7d7cd8d808e4..8497d041f316 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -24,6 +24,18 @@ struct xfs_dir3_icleaf_hdr;
 extern const struct xfs_name xfs_name_dotdot;
 extern const struct xfs_name xfs_name_dot;
 
+static inline bool
+xfs_dir2_samename(
+	const struct xfs_name	*n1,
+	const struct xfs_name	*n2)
+{
+	if (n1 == n2)
+		return true;
+	if (n1->len != n2->len)
+		return false;
+	return !memcmp(n1->name, n2->name, n1->len);
+}
+
 /*
  * Convert inode mode to directory entry filetype
  */
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index d86ab51af928..076a310b8eb0 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -93,11 +93,11 @@ xchk_dir_actor(
 		return -ECANCELED;
 	}
 
-	if (!strncmp(".", name->name, name->len)) {
+	if (xfs_dir2_samename(name, &xfs_name_dot)) {
 		/* If this is "." then check that the inum matches the dir. */
 		if (ino != dp->i_ino)
 			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
-	} else if (!strncmp("..", name->name, name->len)) {
+	} else if (xfs_dir2_samename(name, &xfs_name_dotdot)) {
 		/*
 		 * If this is ".." in the root inode, check that the inum
 		 * matches this dir.
-- cgit
From 3c79e6a87221e063064e3680946a8b4bcd9fe78d Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Thu, 22 Feb 2024 12:30:50 -0800
Subject: xfs: create a macro for decoding ftypes in tracepoints

Create the XFS_DIR3_FTYPE_STR macro so that we can report ftypes as
strings instead of numbers in tracepoints.

Signed-off-by: Darrick J. 
Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/libxfs/xfs_da_format.h | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 24f9d1461f9a..060e5c96b70f 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -159,6 +159,17 @@ struct xfs_da3_intnode {
 
 #define XFS_DIR3_FT_MAX		9
 
+#define XFS_DIR3_FTYPE_STR \
+	{ XFS_DIR3_FT_UNKNOWN,	"unknown" }, \
+	{ XFS_DIR3_FT_REG_FILE,	"file" }, \
+	{ XFS_DIR3_FT_DIR,	"directory" }, \
+	{ XFS_DIR3_FT_CHRDEV,	"char" }, \
+	{ XFS_DIR3_FT_BLKDEV,	"block" }, \
+	{ XFS_DIR3_FT_FIFO,	"fifo" }, \
+	{ XFS_DIR3_FT_SOCK,	"sock" }, \
+	{ XFS_DIR3_FT_SYMLINK,	"symlink" }, \
+	{ XFS_DIR3_FT_WHT,	"whiteout" }
+
 /*
  * Byte offset in data block and shortform entry.
  */
-- cgit
From 5385f1a60d4e5b73e8ecd2757865352b68f54fb9 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Thu, 22 Feb 2024 12:30:51 -0800
Subject: xfs: repair file modes by scanning for a dirent pointing to us

Repair might encounter an inode with a totally garbage i_mode. To fix
this problem, we have to figure out if the file was a regular file, a
directory, or a special file. One way to figure this out is to check
if there are any directories with entries pointing down to the busted
file.

This patch recovers the file mode by scanning every directory entry on
the filesystem to see if there are any that point to the busted file.
If the ftypes of all such dirents are consistent, the mode is recovered
from the ftype. If no dirents are found, the file becomes a regular
file. In all cases, ACLs are canceled and the file is made accessible
only by root.

A previous patch attempted to guess the mode by reading the beginning
of the file data. This was rejected by Christoph on the grounds that we
cannot trust user-controlled data blocks. Users do not have direct
control over the ondisk contents of directory entries, so this method
should be much safer.

If all the dirents have the same ftype, then we can translate that back
into an S_IFMT flag and fix the file. If not, reset the mode to
S_IFREG.

Signed-off-by: Darrick J. Wong
Reviewed-by: Christoph Hellwig
---
 fs/xfs/scrub/inode_repair.c | 236 ++++++++++++++++++++++++++++++++++++++++++--
 fs/xfs/scrub/iscan.c | 29 ++++++
 fs/xfs/scrub/iscan.h | 3 +
 fs/xfs/scrub/trace.c | 1 +
 fs/xfs/scrub/trace.h | 49 +++++++++
 5 files changed, 312 insertions(+), 6 deletions(-)

diff --git a/fs/xfs/scrub/inode_repair.c b/fs/xfs/scrub/inode_repair.c
index 0ca62d59f84a..7e859c412a5b 100644
--- a/fs/xfs/scrub/inode_repair.c
+++ b/fs/xfs/scrub/inode_repair.c
@@ -43,6 +43,8 @@
 #include "scrub/btree.h"
 #include "scrub/trace.h"
 #include "scrub/repair.h"
+#include "scrub/iscan.h"
+#include "scrub/readdir.h"
 
 /*
  * Inode Record Repair
@@ -126,6 +128,10 @@ struct xrep_inode {
 
 	/* Must we remove all access from this file? */
 	bool			zap_acls;
+
+	/* Inode scanner to see if we can find the ftype from dirents */
+	struct xchk_iscan	ftype_iscan;
+	uint8_t			alleged_ftype;
 };
 
 /*
@@ -227,26 +233,233 @@ xrep_dinode_header(
 	dip->di_gen = cpu_to_be32(sc->sm->sm_gen);
 }
 
-/* Turn di_mode into /something/ recognizable. */
-STATIC void
+/*
+ * If this directory entry points to the scrub target inode, then the directory
+ * we're scanning is the parent of the scrub target inode. 
+ */
+STATIC int
+xrep_dinode_findmode_dirent(
+	struct xfs_scrub	*sc,
+	struct xfs_inode	*dp,
+	xfs_dir2_dataptr_t	dapos,
+	const struct xfs_name	*name,
+	xfs_ino_t		ino,
+	void			*priv)
+{
+	struct xrep_inode	*ri = priv;
+	int			error = 0;
+
+	if (xchk_should_terminate(ri->sc, &error))
+		return error;
+
+	if (ino != sc->sm->sm_ino)
+		return 0;
+
+	/* Ignore garbage directory entry names. */
+	if (name->len == 0 || !xfs_dir2_namecheck(name->name, name->len))
+		return -EFSCORRUPTED;
+
+	/* Don't pick up dot or dotdot entries; we only want child dirents. */
+	if (xfs_dir2_samename(name, &xfs_name_dotdot) ||
+	    xfs_dir2_samename(name, &xfs_name_dot))
+		return 0;
+
+	/*
+	 * Uhoh, more than one parent for this inode and they don't agree on
+	 * the file type?
+	 */
+	if (ri->alleged_ftype != XFS_DIR3_FT_UNKNOWN &&
+	    ri->alleged_ftype != name->type) {
+		trace_xrep_dinode_findmode_dirent_inval(ri->sc, dp, name->type,
+				ri->alleged_ftype);
+		return -EFSCORRUPTED;
+	}
+
+	/* We found a potential parent; remember the ftype. */
+	trace_xrep_dinode_findmode_dirent(ri->sc, dp, name->type);
+	ri->alleged_ftype = name->type;
+	return 0;
+}
+
+/*
+ * If this is a directory, walk the dirents looking for any that point to the
+ * scrub target inode.
+ */
+STATIC int
+xrep_dinode_findmode_walk_directory(
+	struct xrep_inode	*ri,
+	struct xfs_inode	*dp)
+{
+	struct xfs_scrub	*sc = ri->sc;
+	unsigned int		lock_mode;
+	int			error = 0;
+
+	/*
+	 * Scan the directory to see if it contains an entry pointing to the
+	 * inode that we are repairing.
+	 */
+	lock_mode = xfs_ilock_data_map_shared(dp);
+
+	/*
+	 * If this directory is known to be sick, we cannot scan it reliably
+	 * and must abort.
+	 */
+	if (xfs_inode_has_sickness(dp, XFS_SICK_INO_CORE |
+				       XFS_SICK_INO_BMBTD |
+				       XFS_SICK_INO_DIR)) {
+		error = -EFSCORRUPTED;
+		goto out_unlock;
+	}
+
+	/*
+	 * We cannot complete our parent pointer scan if a directory looks as
+	 * though it has been zapped by the inode record repair code.
+	 */
+	if (xchk_dir_looks_zapped(dp)) {
+		error = -EBUSY;
+		goto out_unlock;
+	}
+
+	error = xchk_dir_walk(sc, dp, xrep_dinode_findmode_dirent, ri);
+	if (error)
+		goto out_unlock;
+
+out_unlock:
+	xfs_iunlock(dp, lock_mode);
+	return error;
+}
+
+/*
+ * Try to find the mode of the inode being repaired by looking for directories
+ * that point down to this file.
+ */
+STATIC int
+xrep_dinode_find_mode(
+	struct xrep_inode	*ri,
+	uint16_t		*mode)
+{
+	struct xfs_scrub	*sc = ri->sc;
+	struct xfs_inode	*dp;
+	int			error;
+
+	/* No ftype means we have no other metadata to consult. */
+	if (!xfs_has_ftype(sc->mp)) {
+		*mode = S_IFREG;
+		return 0;
+	}
+
+	/*
+	 * Scan all directories for parents that might point down to this
+	 * inode. Skip the inode being repaired during the scan since it
+	 * cannot be its own parent. Note that we still hold the AGI locked
+	 * so there's a real possibility that _iscan_iter can return EBUSY. 
+	 */
+	xchk_iscan_start(sc, 5000, 100, &ri->ftype_iscan);
+	ri->ftype_iscan.skip_ino = sc->sm->sm_ino;
+	ri->alleged_ftype = XFS_DIR3_FT_UNKNOWN;
+	while ((error = xchk_iscan_iter(&ri->ftype_iscan, &dp)) == 1) {
+		if (S_ISDIR(VFS_I(dp)->i_mode))
+			error = xrep_dinode_findmode_walk_directory(ri, dp);
+		xchk_iscan_mark_visited(&ri->ftype_iscan, dp);
+		xchk_irele(sc, dp);
+		if (error < 0)
+			break;
+		if (xchk_should_terminate(sc, &error))
+			break;
+	}
+	xchk_iscan_iter_finish(&ri->ftype_iscan);
+	xchk_iscan_teardown(&ri->ftype_iscan);
+
+	if (error == -EBUSY) {
+		if (ri->alleged_ftype != XFS_DIR3_FT_UNKNOWN) {
+			/*
+			 * If we got an EBUSY after finding at least one
+			 * dirent, that means the scan found an inode on the
+			 * inactivation list and could not open it. Accept the
+			 * alleged ftype and install a new mode below.
+			 */
+			error = 0;
+		} else if (!(sc->flags & XCHK_TRY_HARDER)) {
+			/*
+			 * Otherwise, retry the operation one time to see if
+			 * the reason for the delay is an inode from the same
+			 * cluster buffer waiting on the inactivation list.
+			 */
+			error = -EDEADLOCK;
+		}
+	}
+	if (error)
+		return error;
+
+	/*
+	 * Convert the discovered ftype into the file mode. If all else fails,
+	 * return S_IFREG.
+	 */
+	switch (ri->alleged_ftype) {
+	case XFS_DIR3_FT_DIR:
+		*mode = S_IFDIR;
+		break;
+	case XFS_DIR3_FT_WHT:
+	case XFS_DIR3_FT_CHRDEV:
+		*mode = S_IFCHR;
+		break;
+	case XFS_DIR3_FT_BLKDEV:
+		*mode = S_IFBLK;
+		break;
+	case XFS_DIR3_FT_FIFO:
+		*mode = S_IFIFO;
+		break;
+	case XFS_DIR3_FT_SOCK:
+		*mode = S_IFSOCK;
+		break;
+	case XFS_DIR3_FT_SYMLINK:
+		*mode = S_IFLNK;
+		break;
+	default:
+		*mode = S_IFREG;
+		break;
+	}
+	return 0;
+}
+
+/* Turn di_mode into /something/ recognizable. Returns 0 if we succeed. */
+STATIC int
 xrep_dinode_mode(
 	struct xrep_inode	*ri,
 	struct xfs_dinode	*dip)
 {
 	struct xfs_scrub	*sc = ri->sc;
 	uint16_t		mode = be16_to_cpu(dip->di_mode);
+	int			error;
 
 	trace_xrep_dinode_mode(sc, dip);
 
 	if (mode == 0 || xfs_mode_to_ftype(mode) != XFS_DIR3_FT_UNKNOWN)
-		return;
+		return 0;
+
+	/* Try to fix the mode. If we cannot, then leave everything alone. */
+	error = xrep_dinode_find_mode(ri, &mode);
+	switch (error) {
+	case -EINTR:
+	case -EBUSY:
+	case -EDEADLOCK:
+		/* temporary failure or fatal signal */
+		return error;
+	case 0:
+		/* found mode */
+		break;
+	default:
+		/* some other error, assume S_IFREG */
+		mode = S_IFREG;
+		break;
+	}
 
 	/* bad mode, so we set it to a file that only root can read */
-	mode = S_IFREG;
 	dip->di_mode = cpu_to_be16(mode);
 	dip->di_uid = 0;
 	dip->di_gid = 0;
 	ri->zap_acls = true;
+	return 0;
 }
 
 /* Fix any conflicting flags that the verifiers complain about. */
@@ -1107,12 +1320,15 @@ xrep_dinode_core(
 	/* Fix everything the verifier will complain about. */
 	dip = xfs_buf_offset(bp, ri->imap.im_boffset);
 	xrep_dinode_header(sc, dip);
-	xrep_dinode_mode(ri, dip);
+	iget_error = xrep_dinode_mode(ri, dip);
+	if (iget_error)
+		goto write;
 	xrep_dinode_flags(sc, dip, ri->rt_extents > 0);
 	xrep_dinode_size(ri, dip);
 	xrep_dinode_extsize_hints(sc, dip);
 	xrep_dinode_zap_forks(ri, dip);
 
+write:
 	/* Write out the inode. */
 	trace_xrep_dinode_fixed(sc, dip);
 	xfs_dinode_calc_crc(sc->mp, dip);
@@ -1128,7 +1344,8 @@ xrep_dinode_core(
 	 * accessing the inode. If iget fails, we still need to commit the
 	 * changes. 
 	 */
-	iget_error = xchk_iget(sc, ino, &sc->ip);
+	if (!iget_error)
+		iget_error = xchk_iget(sc, ino, &sc->ip);
 
 	if (!iget_error)
 		xchk_ilock(sc, XFS_IOLOCK_EXCL);
 
@@ -1496,6 +1713,13 @@ xrep_inode(
 	ASSERT(ri != NULL);
 
 	error = xrep_dinode_problems(ri);
+	if (error == -EBUSY) {
+		/*
+		 * Directory scan to recover inode mode encountered a
+		 * busy inode, so we did not continue repairing things.
+		 */
+		return 0;
+	}
 	if (error)
 		return error;
 
diff --git a/fs/xfs/scrub/iscan.c b/fs/xfs/scrub/iscan.c
index 327f7fb83958..17af89b519b3 100644
--- a/fs/xfs/scrub/iscan.c
+++ b/fs/xfs/scrub/iscan.c
@@ -51,6 +51,32 @@
  * scanner's observations must be updated.
  */
 
+/*
+ * If the inobt record @rec covers @iscan->skip_ino, mark the inode free so
+ * that the scan ignores that inode.
+ */
+STATIC void
+xchk_iscan_mask_skipino(
+	struct xchk_iscan	*iscan,
+	struct xfs_perag	*pag,
+	struct xfs_inobt_rec_incore	*rec,
+	xfs_agino_t		lastrecino)
+{
+	struct xfs_scrub	*sc = iscan->sc;
+	struct xfs_mount	*mp = sc->mp;
+	xfs_agnumber_t		skip_agno = XFS_INO_TO_AGNO(mp, iscan->skip_ino);
+	xfs_agino_t		skip_agino = XFS_INO_TO_AGINO(mp, iscan->skip_ino);
+
+	if (pag->pag_agno != skip_agno)
+		return;
+	if (skip_agino < rec->ir_startino)
+		return;
+	if (skip_agino > lastrecino)
+		return;
+
+	rec->ir_free |= xfs_inobt_maskn(skip_agino - rec->ir_startino, 1);
+}
+
 /*
  * Set *cursor to the next allocated inode after whatever it's set to now.
  * If there are no more inodes in this AG, cursor is set to NULLAGINO.
@@ -127,6 +153,9 @@ xchk_iscan_find_next(
 		if (rec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
 			continue;
 
+		if (iscan->skip_ino)
+			xchk_iscan_mask_skipino(iscan, pag, &rec, lastino);
+
 		/*
 		 * If the incoming lookup put us in the middle of an inobt
 		 * record, mark it and the previous inodes "free" so that the
diff --git a/fs/xfs/scrub/iscan.h b/fs/xfs/scrub/iscan.h
index 365d54e35cd9..71f657552dfa 100644
--- a/fs/xfs/scrub/iscan.h
+++ b/fs/xfs/scrub/iscan.h
@@ -22,6 +22,9 @@ struct xchk_iscan {
 	/* This is the inode that will be examined next. */
 	xfs_ino_t		cursor_ino;
 
+	/* If nonzero, skip this inode when scanning. 
*/ + xfs_ino_t skip_ino; + /* * This is the last inode that we've successfully scanned, either * because the caller scanned it, or we moved the cursor past an empty diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c index 4542eeebab6f..5ed75cc33b92 100644 --- a/fs/xfs/scrub/trace.c +++ b/fs/xfs/scrub/trace.c @@ -16,6 +16,7 @@ #include "xfs_rtbitmap.h" #include "xfs_quota.h" #include "xfs_quota_defs.h" +#include "xfs_da_format.h" #include "scrub/scrub.h" #include "scrub/xfile.h" #include "scrub/xfarray.h" diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 829c90da59c7..9aba60c61880 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -1817,6 +1817,55 @@ TRACE_EVENT(xrep_dinode_count_rmaps, __entry->attr_extents) ); +TRACE_EVENT(xrep_dinode_findmode_dirent, + TP_PROTO(struct xfs_scrub *sc, struct xfs_inode *dp, + unsigned int ftype), + TP_ARGS(sc, dp, ftype), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_ino_t, parent_ino) + __field(unsigned int, ftype) + ), + TP_fast_assign( + __entry->dev = sc->mp->m_super->s_dev; + __entry->ino = sc->sm->sm_ino; + __entry->parent_ino = dp->i_ino; + __entry->ftype = ftype; + ), + TP_printk("dev %d:%d ino 0x%llx parent_ino 0x%llx ftype '%s'", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->parent_ino, + __print_symbolic(__entry->ftype, XFS_DIR3_FTYPE_STR)) +); + +TRACE_EVENT(xrep_dinode_findmode_dirent_inval, + TP_PROTO(struct xfs_scrub *sc, struct xfs_inode *dp, + unsigned int ftype, unsigned int found_ftype), + TP_ARGS(sc, dp, ftype, found_ftype), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_ino_t, parent_ino) + __field(unsigned int, ftype) + __field(unsigned int, found_ftype) + ), + TP_fast_assign( + __entry->dev = sc->mp->m_super->s_dev; + __entry->ino = sc->sm->sm_ino; + __entry->parent_ino = dp->i_ino; + __entry->ftype = ftype; + __entry->found_ftype = found_ftype; + ), + TP_printk("dev %d:%d ino 0x%llx parent_ino 0x%llx ftype '%s' found_ftype '%s'", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->parent_ino, + __print_symbolic(__entry->ftype, XFS_DIR3_FTYPE_STR), + __print_symbolic(__entry->found_ftype, XFS_DIR3_FTYPE_STR)) +); + TRACE_EVENT(xrep_cow_mark_file_range, TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t startblock, xfs_fileoff_t startoff, xfs_filblks_t blockcount), -- cgit
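
For readers following the mode-repair patch above outside the kernel tree, the recovery strategy boils down to two steps: every dirent that points at the broken inode votes on an ftype, and the winning ftype is translated into S_IFMT mode bits. The standalone userspace C sketch below mirrors that logic under stated assumptions; the ftype_vote() and ftype_to_mode() helpers, the FT_* enum copies, and the sample seen[] array are illustrative stand-ins invented for this summary, not kernel code.

/* cc -o ftype-vote ftype-vote.c && ./ftype-vote */
#include <stdio.h>
#include <sys/stat.h>

/* Illustrative copies of the ondisk ftype values from xfs_da_format.h. */
enum {
	FT_UNKNOWN	= 0,
	FT_REG_FILE	= 1,
	FT_DIR		= 2,
	FT_CHRDEV	= 3,
	FT_BLKDEV	= 4,
	FT_FIFO		= 5,
	FT_SOCK		= 6,
	FT_SYMLINK	= 7,
	FT_WHT		= 8,
};

/*
 * Voting rule from the patch: remember the first ftype seen; any later
 * dirent that disagrees poisons the whole scan (the kernel returns
 * -EFSCORRUPTED, modelled as -1 here).
 */
static int ftype_vote(int *alleged, int seen)
{
	if (*alleged != FT_UNKNOWN && *alleged != seen)
		return -1;
	*alleged = seen;
	return 0;
}

/* The same ftype-to-S_IFMT translation as the patch's switch statement. */
static unsigned int ftype_to_mode(int ftype)
{
	switch (ftype) {
	case FT_DIR:		return S_IFDIR;
	case FT_WHT:
	case FT_CHRDEV:		return S_IFCHR;	/* whiteouts are chardevs */
	case FT_BLKDEV:		return S_IFBLK;
	case FT_FIFO:		return S_IFIFO;
	case FT_SOCK:		return S_IFSOCK;
	case FT_SYMLINK:	return S_IFLNK;
	default:		return S_IFREG;	/* no votes: assume regular */
	}
}

int main(void)
{
	/* Pretend the directory scan found two dirents, both saying "dir". */
	int seen[] = { FT_DIR, FT_DIR };
	int alleged = FT_UNKNOWN;
	unsigned int i;

	for (i = 0; i < sizeof(seen) / sizeof(seen[0]); i++) {
		if (ftype_vote(&alleged, seen[i])) {
			fprintf(stderr, "parents disagree; corrupt\n");
			return 1;
		}
	}
	printf("recovered S_IFMT bits: 0%o\n", ftype_to_mode(alleged));
	return 0;
}

With the sample input this prints "recovered S_IFMT bits: 040000" (S_IFDIR); changing one entry of seen[] to FT_REG_FILE trips the disagreement check, the userspace analogue of xrep_dinode_findmode_dirent returning -EFSCORRUPTED when two alleged parents disagree.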