From 302436c27c3fc61c1dab83f4c995dec12eb43161 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Tue, 11 Apr 2023 19:00:21 -0700
Subject: xfs: fix an inode lookup race in xchk_get_inode

In commit d658e72b4a09, we tried to improve the robustness of
xchk_get_inode in the face of EINVAL returns from iget by calling
xfs_imap to see if the inobt itself thinks that the inode is allocated.
Unfortunately, that commit didn't consider the possibility that the
inode gets allocated after iget but before imap.  In this case, the
imap call will succeed, but we turn that into a corruption error and
tell userspace the inode is corrupt.

Avoid this false corruption report by grabbing the AGI header and
retrying the iget before calling imap.  If the iget succeeds, we can
proceed with the usual scrub-by-handle code.  Fix all the incorrect
comments too, since unreadable/corrupt inodes no longer result in
EINVAL returns.
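A sketch of the retry flow described above -- pseudocode only, not the
literal fs/xfs/scrub code; read_agi() is a stand-in helper and the
surrounding declarations are elided:

	/* Normal untrusted lookup; 0 means we can scrub by handle. */
	error = xfs_iget(mp, tp, inum, XFS_IGET_UNTRUSTED, 0, &ip);
	if (error != -EINVAL && error != -ENOENT)
		return error;

	/*
	 * Hold the AGI buffer to stall inode allocation in this AG,
	 * then retry the iget.  XFS_IGET_NORETRY (added by this patch)
	 * makes iget fail fast instead of redoing the EAGAIN loop.
	 */
	error = read_agi(pag, tp, &agi_bp);	/* stand-in helper */
	if (error)
		return error;
	error = xfs_iget(mp, tp, inum,
			XFS_IGET_UNTRUSTED | XFS_IGET_NORETRY, 0, &ip);
	if (!error)
		return 0;	/* raced with reallocation; scrub it */

	/*
	 * iget failed while allocation was locked out, so an xfs_imap
	 * success now really does indicate an inobt/inode mismatch.
	 */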
Fixes: d658e72b4a09 ("xfs: distinguish between corrupt inode and invalid inum in xfs_scrub_get_inode")
Signed-off-by: Darrick J. Wong
Reviewed-by: Dave Chinner
---
 fs/xfs/xfs_icache.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index c9a7e270a428..351849fc18ff 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -767,7 +767,8 @@ again:
 	return 0;
 
 out_error_or_again:
-	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
+	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
+	    error == -EAGAIN) {
 		delay(1);
 		goto again;
 	}
--
cgit

From 03e0add80f4cf3f7393edb574eeb3a89a1db7758 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Tue, 2 May 2023 09:16:05 +1000
Subject: xfs: explicitly specify cpu when forcing inodegc delayed work to
 run immediately

I've been noticing odd racing behavior in the inodegc code that could
only be explained by one cpu adding an inode to its inactivation llist
at the same time that another cpu is processing that cpu's llist.
Preemption is disabled between get/put_cpu_ptr, so the only explanation
is scheduler mayhem.  I inserted the following debug code into
xfs_inodegc_worker (see the next patch):

	ASSERT(gc->cpu == smp_processor_id());

This assertion tripped during overnight tests on the arm64 machines,
but curiously not on x86_64.  I think we haven't observed any resource
leaks here because the lockfree list code can handle simultaneous
llist_add and llist_del_all functions operating on the same list.
However, the whole point of having percpu inodegc lists is to take
advantage of warm memory caches by inactivating inodes on the last
processor to touch the inode.

The incorrect scheduling seems to occur after an inodegc worker is
subjected to mod_delayed_work().  This wraps mod_delayed_work_on with
WORK_CPU_UNBOUND specified as the cpu number.  Unbound allows for
scheduling on any cpu, not necessarily the same one that scheduled the
work.

Because preemption is disabled for as long as we have the gc pointer,
I think it's safe to use current_cpu() (aka smp_processor_id) to queue
the delayed work item on the correct cpu.
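As a standalone illustration of that pattern (generic names, assuming
the usual linux/percpu.h and linux/workqueue.h machinery -- this is not
the XFS code itself):

	struct pcpu_gc {
		struct llist_head	list;
		struct delayed_work	work;
	};

	static void queue_local(struct pcpu_gc __percpu *pcp,
				struct workqueue_struct *wq)
	{
		struct pcpu_gc	*gc = get_cpu_ptr(pcp);

		/*
		 * Preemption is off until put_cpu_ptr(), so the cpu id
		 * is stable and the work lands on this cpu's worker
		 * pool instead of WORK_CPU_UNBOUND picking any cpu.
		 */
		mod_delayed_work_on(smp_processor_id(), wq, &gc->work, 0);
		put_cpu_ptr(pcp);
	}

The patch spells the cpu lookup current_cpu(); smp_processor_id() is
the generic form, and either is only safe here because the get/put
pair keeps the task pinned for the duration.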
Wong" Date: Tue, 2 May 2023 09:16:14 +1000 Subject: xfs: fix xfs_inodegc_stop racing with mod_delayed_work syzbot reported this warning from the faux inodegc shrinker that tries to kick off inodegc work: ------------[ cut here ]------------ WARNING: CPU: 1 PID: 102 at kernel/workqueue.c:1445 __queue_work+0xd44/0x1120 kernel/workqueue.c:1444 RIP: 0010:__queue_work+0xd44/0x1120 kernel/workqueue.c:1444 Call Trace: __queue_delayed_work+0x1c8/0x270 kernel/workqueue.c:1672 mod_delayed_work_on+0xe1/0x220 kernel/workqueue.c:1746 xfs_inodegc_shrinker_scan fs/xfs/xfs_icache.c:2212 [inline] xfs_inodegc_shrinker_scan+0x250/0x4f0 fs/xfs/xfs_icache.c:2191 do_shrink_slab+0x428/0xaa0 mm/vmscan.c:853 shrink_slab+0x175/0x660 mm/vmscan.c:1013 shrink_one+0x502/0x810 mm/vmscan.c:5343 shrink_many mm/vmscan.c:5394 [inline] lru_gen_shrink_node mm/vmscan.c:5511 [inline] shrink_node+0x2064/0x35f0 mm/vmscan.c:6459 kswapd_shrink_node mm/vmscan.c:7262 [inline] balance_pgdat+0xa02/0x1ac0 mm/vmscan.c:7452 kswapd+0x677/0xd60 mm/vmscan.c:7712 kthread+0x2e8/0x3a0 kernel/kthread.c:376 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308 This warning corresponds to this code in __queue_work: /* * For a draining wq, only works from the same workqueue are * allowed. The __WQ_DESTROYING helps to spot the issue that * queues a new work item to a wq after destroy_workqueue(wq). */ if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && WARN_ON_ONCE(!is_chained_work(wq)))) return; For this to trip, we must have a thread draining the inodedgc workqueue and a second thread trying to queue inodegc work to that workqueue. This can happen if freezing or a ro remount race with reclaim poking our faux inodegc shrinker and another thread dropping an unlinked O_RDONLY file: Thread 0 Thread 1 Thread 2 xfs_inodegc_stop xfs_inodegc_shrinker_scan xfs_is_inodegc_enabled xfs_clear_inodegc_enabled xfs_inodegc_queue_all xfs_inodegc_queue xfs_is_inodegc_enabled drain_workqueue llist_empty mod_delayed_work_on(..., 0) __queue_work In other words, everything between the access to inodegc_enabled state and the decision to poke the inodegc workqueue requires some kind of coordination to avoid the WQ_DRAINING state. We could perhaps introduce a lock here, but we could also try to eliminate WQ_DRAINING from the picture. We could replace the drain_workqueue call with a loop that flushes the workqueue and queues workers as long as there is at least one inode present in the per-cpu inodegc llists. We've disabled inodegc at this point, so we know that the number of queued inodes will eventually hit zero as long as xfs_inodegc_start cannot reactivate the workers. There are four callers of xfs_inodegc_start. Three of them come from the VFS with s_umount held: filesystem thawing, failed filesystem freezing, and the rw remount transition. The fourth caller is mounting rw (no remount or freezing possible). There are three callers ofs xfs_inodegc_stop. One is unmounting (no remount or thaw possible). Two of them come from the VFS with s_umount held: fs freezing and ro remount transition. Hence, it is correct to replace the drain_workqueue call with a loop that drains the inodegc llists. Fixes: 6191cf3ad59f ("xfs: flush inodegc workqueue tasks before cancel") Signed-off-by: Darrick J. 
Signed-off-by: Darrick J. Wong
Reviewed-by: Dave Chinner
Signed-off-by: Dave Chinner
---
 fs/xfs/xfs_icache.c | 2 ++
 fs/xfs/xfs_mount.h  | 3 +++
 fs/xfs/xfs_super.c  | 3 +++
 3 files changed, 8 insertions(+)

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 58712113d5d6..4b63c065ef19 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1856,6 +1856,8 @@ xfs_inodegc_worker(
 	struct xfs_inode *ip, *n;
 	unsigned int nofs_flag;
 
+	ASSERT(gc->cpu == smp_processor_id());
+
 	WRITE_ONCE(gc->items, 0);
 
 	if (!node)
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index f3269c0626f0..aaaf5ec13492 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -66,6 +66,9 @@ struct xfs_inodegc {
 	/* approximate count of inodes in the list */
 	unsigned int		items;
 	unsigned int		shrinker_hits;
+#if defined(DEBUG) || defined(XFS_WARN)
+	unsigned int		cpu;
+#endif
 };
 
 /*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 4d2e87462ac4..7e706255f165 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1095,6 +1095,9 @@ xfs_inodegc_init_percpu(
 	for_each_possible_cpu(cpu) {
 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
+#if defined(DEBUG) || defined(XFS_WARN)
+		gc->cpu = cpu;
+#endif
 		init_llist_head(&gc->list);
 		gc->items = 0;
 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
--
cgit

From 2254a7396a0ca6309854948ee1c0a33fa4268cec Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Tue, 2 May 2023 09:16:14 +1000
Subject: xfs: fix xfs_inodegc_stop racing with mod_delayed_work

syzbot reported this warning from the faux inodegc shrinker that tries
to kick off inodegc work:

------------[ cut here ]------------
WARNING: CPU: 1 PID: 102 at kernel/workqueue.c:1445 __queue_work+0xd44/0x1120 kernel/workqueue.c:1444
RIP: 0010:__queue_work+0xd44/0x1120 kernel/workqueue.c:1444
Call Trace:
 __queue_delayed_work+0x1c8/0x270 kernel/workqueue.c:1672
 mod_delayed_work_on+0xe1/0x220 kernel/workqueue.c:1746
 xfs_inodegc_shrinker_scan fs/xfs/xfs_icache.c:2212 [inline]
 xfs_inodegc_shrinker_scan+0x250/0x4f0 fs/xfs/xfs_icache.c:2191
 do_shrink_slab+0x428/0xaa0 mm/vmscan.c:853
 shrink_slab+0x175/0x660 mm/vmscan.c:1013
 shrink_one+0x502/0x810 mm/vmscan.c:5343
 shrink_many mm/vmscan.c:5394 [inline]
 lru_gen_shrink_node mm/vmscan.c:5511 [inline]
 shrink_node+0x2064/0x35f0 mm/vmscan.c:6459
 kswapd_shrink_node mm/vmscan.c:7262 [inline]
 balance_pgdat+0xa02/0x1ac0 mm/vmscan.c:7452
 kswapd+0x677/0xd60 mm/vmscan.c:7712
 kthread+0x2e8/0x3a0 kernel/kthread.c:376
 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308

This warning corresponds to this code in __queue_work:

	/*
	 * For a draining wq, only works from the same workqueue are
	 * allowed. The __WQ_DESTROYING helps to spot the issue that
	 * queues a new work item to a wq after destroy_workqueue(wq).
	 */
	if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) &&
		     WARN_ON_ONCE(!is_chained_work(wq))))
		return;

For this to trip, we must have a thread draining the inodegc workqueue
and a second thread trying to queue inodegc work to that workqueue.
This can happen if freezing or a ro remount race with reclaim poking
our faux inodegc shrinker and another thread dropping an unlinked
O_RDONLY file:

Thread 0                    Thread 1                    Thread 2

xfs_inodegc_stop
                            xfs_inodegc_shrinker_scan
                            xfs_is_inodegc_enabled
xfs_clear_inodegc_enabled
xfs_inodegc_queue_all
                                                        xfs_inodegc_queue
                                                        xfs_is_inodegc_enabled
drain_workqueue
                            llist_empty
                            mod_delayed_work_on(..., 0)
                            __queue_work

In other words, everything between the access to inodegc_enabled state
and the decision to poke the inodegc workqueue requires some kind of
coordination to avoid the WQ_DRAINING state.  We could perhaps
introduce a lock here, but we could also try to eliminate WQ_DRAINING
from the picture.

We could replace the drain_workqueue call with a loop that flushes the
workqueue and queues workers as long as there is at least one inode
present in the per-cpu inodegc llists.  We've disabled inodegc at this
point, so we know that the number of queued inodes will eventually hit
zero as long as xfs_inodegc_start cannot reactivate the workers.

There are four callers of xfs_inodegc_start.  Three of them come from
the VFS with s_umount held: filesystem thawing, failed filesystem
freezing, and the rw remount transition.  The fourth caller is
mounting rw (no remount or freezing possible).

There are three callers of xfs_inodegc_stop.  One is unmounting (no
remount or thaw possible).  Two of them come from the VFS with
s_umount held: fs freezing and ro remount transition.

Hence, it is correct to replace the drain_workqueue call with a loop
that drains the inodegc llists.
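Shorn of the XFS specifics, the replacement loop has this shape (a
sketch with stand-in names; queue_all_nonempty() plays the role of
xfs_inodegc_queue_all and returns true if it found anything to
requeue):

	if (!clear_enabled_flag(mp))	/* fences off new queuers */
		return;

	queue_all_nonempty(mp);
	do {
		flush_workqueue(mp->wq);
		/* catch stragglers added by threads that raced the flag */
	} while (queue_all_nonempty(mp));

The loop terminates because the cleared flag stops new work from being
queued once the in-flight racers finish, so each pass can only drain
the remaining population.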
Fixes: 6191cf3ad59f ("xfs: flush inodegc workqueue tasks before cancel")
Signed-off-by: Darrick J. Wong
Reviewed-by: Dave Chinner
Signed-off-by: Dave Chinner
---
 fs/xfs/xfs_icache.c | 32 +++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 4b63c065ef19..0f60e301eb1f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -435,18 +435,23 @@ xfs_iget_check_free_state(
 }
 
 /* Make all pending inactivation work start immediately. */
-static void
+static bool
 xfs_inodegc_queue_all(
 	struct xfs_mount	*mp)
 {
 	struct xfs_inodegc	*gc;
 	int			cpu;
+	bool			ret = false;
 
 	for_each_online_cpu(cpu) {
 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
-		if (!llist_empty(&gc->list))
+		if (!llist_empty(&gc->list)) {
 			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
+			ret = true;
+		}
 	}
+
+	return ret;
 }
 
 /*
@@ -1911,24 +1916,41 @@ xfs_inodegc_flush(
 
 /*
  * Flush all the pending work and then disable the inode inactivation background
- * workers and wait for them to stop.
+ * workers and wait for them to stop.  Caller must hold sb->s_umount to
+ * coordinate changes in the inodegc_enabled state.
  */
 void
 xfs_inodegc_stop(
 	struct xfs_mount	*mp)
 {
+	bool			rerun;
+
 	if (!xfs_clear_inodegc_enabled(mp))
 		return;
 
+	/*
+	 * Drain all pending inodegc work, including inodes that could be
+	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
+	 * threads that sample the inodegc state just prior to us clearing it.
+	 * The inodegc flag state prevents new threads from queuing more
+	 * inodes, so we queue pending work items and flush the workqueue until
+	 * all inodegc lists are empty.  IOWs, we cannot use drain_workqueue
+	 * here because it does not allow other unserialized mechanisms to
+	 * reschedule inodegc work while this draining is in progress.
+	 */
 	xfs_inodegc_queue_all(mp);
-	drain_workqueue(mp->m_inodegc_wq);
+	do {
+		flush_workqueue(mp->m_inodegc_wq);
+		rerun = xfs_inodegc_queue_all(mp);
+	} while (rerun);
 
 	trace_xfs_inodegc_stop(mp, __return_address);
 }
 
 /*
  * Enable the inode inactivation background workers and schedule deferred inode
- * inactivation work if there is any.
+ * inactivation work if there is any.  Caller must hold sb->s_umount to
+ * coordinate changes in the inodegc_enabled state.
  */
 void
 xfs_inodegc_start(
--
cgit

From d4d12c02bf5f768f1b423c7ae2909c5afdfe0d5f Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Mon, 5 Jun 2023 14:48:15 +1000
Subject: xfs: collect errors from inodegc for unlinked inode recovery

Unlinked list recovery requires that errors removing the inode from
the unlinked list get fed back to the main recovery loop.  Now that we
offload the unlinking to the inodegc work, we don't get errors being
fed back when we trip over a corruption that prevents the inode from
being removed from the unlinked list.

This means we never clear the corrupt unlinked list bucket, resulting
in runtime operations eventually tripping over it and shutting down.

Fix this by collecting inodegc worker errors and feeding them back to
the flush caller.  This is largely best effort - the only context that
really cares is log recovery, and it only flushes a single inode at a
time so we don't need complex synchronised handling.

Essentially the inodegc workers will capture the first error that
occurs and the next flush will gather them and clear them.  The flush
itself will only report the first gathered error.

In the cases where callers can return errors, propagate the collected
inodegc flush error up the error handling chain.

In the case of inode unlinked list recovery, there are several
superfluous calls to flush queued unlinked inodes -
xlog_recover_iunlink_bucket() guarantees that it has flushed the
inodegc and collected errors before it returns.  Hence nothing in the
calling path needs to run a flush, even when an error is returned.
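The collection scheme is only a few moving parts.  As a generic sketch
(stand-in variables, not the XFS code verbatim): a worker records the
first error it sees, and the flusher harvests and resets the per-cpu
slots once the workqueue is idle:

	/* worker side: the first error sticks */
	if (error && !gc->error)
		gc->error = error;

	/* flush side: harvest after the workqueue goes idle */
	flush_workqueue(wq);
	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(pcp, cpu);
		if (gc->error && !error)
			error = gc->error;
		gc->error = 0;
	}
	return error;

No locking is needed on gc->error: flush_workqueue() guarantees every
worker that could have written the field has completed before the
harvest reads it.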
Signed-off-by: Dave Chinner
Reviewed-by: Darrick J. Wong
Signed-off-by: Dave Chinner
---
 fs/xfs/xfs_icache.c      | 46 +++++++++++++++++++++++++++++++++++++---------
 fs/xfs/xfs_icache.h      |  4 ++--
 fs/xfs/xfs_inode.c       | 20 ++++++--------------
 fs/xfs/xfs_inode.h       |  2 +-
 fs/xfs/xfs_log_recover.c | 19 +++++++++----------
 fs/xfs/xfs_mount.h       |  1 +
 fs/xfs/xfs_super.c       |  1 +
 fs/xfs/xfs_trans.c       |  4 +++-
 8 files changed, 60 insertions(+), 37 deletions(-)

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 0f60e301eb1f..453890942d9f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -454,6 +454,27 @@ xfs_inodegc_queue_all(
 	return ret;
 }
 
+/* Wait for all queued work and collect errors */
+static int
+xfs_inodegc_wait_all(
+	struct xfs_mount	*mp)
+{
+	int			cpu;
+	int			error = 0;
+
+	flush_workqueue(mp->m_inodegc_wq);
+	for_each_online_cpu(cpu) {
+		struct xfs_inodegc	*gc;
+
+		gc = per_cpu_ptr(mp->m_inodegc, cpu);
+		if (gc->error && !error)
+			error = gc->error;
+		gc->error = 0;
+	}
+
+	return error;
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -1491,15 +1512,14 @@ xfs_blockgc_free_space(
 	if (error)
 		return error;
 
-	xfs_inodegc_flush(mp);
-	return 0;
+	return xfs_inodegc_flush(mp);
 }
 
 /*
  * Reclaim all the free space that we can by scheduling the background blockgc
  * and inodegc workers immediately and waiting for them all to clear.
  */
-void
+int
 xfs_blockgc_flush_all(
 	struct xfs_mount	*mp)
 {
@@ -1520,7 +1540,7 @@ xfs_blockgc_flush_all(
 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
 		flush_delayed_work(&pag->pag_blockgc_work);
 
-	xfs_inodegc_flush(mp);
+	return xfs_inodegc_flush(mp);
 }
 
 /*
@@ -1842,13 +1862,17 @@ xfs_inodegc_set_reclaimable(
  * This is the last chance to make changes to an otherwise unreferenced file
  * before incore reclamation happens.
  */
-static void
+static int
 xfs_inodegc_inactivate(
 	struct xfs_inode *ip)
 {
+	int			error;
+
 	trace_xfs_inode_inactivating(ip);
-	xfs_inactive(ip);
+	error = xfs_inactive(ip);
 	xfs_inodegc_set_reclaimable(ip);
+	return error;
+
 }
 
 void
@@ -1880,8 +1904,12 @@ xfs_inodegc_worker(
 	WRITE_ONCE(gc->shrinker_hits, 0);
 
 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
+		int	error;
+
 		xfs_iflags_set(ip, XFS_INACTIVATING);
-		xfs_inodegc_inactivate(ip);
+		error = xfs_inodegc_inactivate(ip);
+		if (error && !gc->error)
+			gc->error = error;
 	}
 
 	memalloc_nofs_restore(nofs_flag);
@@ -1905,13 +1933,13 @@ xfs_inodegc_push(
 /*
  * Force all currently queued inode inactivation work to run immediately and
  * wait for the work to finish.
  */
-void
+int
 xfs_inodegc_flush(
 	struct xfs_mount	*mp)
 {
 	xfs_inodegc_push(mp);
 	trace_xfs_inodegc_flush(mp, __return_address);
-	flush_workqueue(mp->m_inodegc_wq);
+	return xfs_inodegc_wait_all(mp);
 }
 
 /*
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 87910191a9dd..1dcdcb23796e 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -62,7 +62,7 @@ int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
 		unsigned int iwalk_flags);
 int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
 int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icm);
-void xfs_blockgc_flush_all(struct xfs_mount *mp);
+int xfs_blockgc_flush_all(struct xfs_mount *mp);
 
 void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
 void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
@@ -80,7 +80,7 @@ void xfs_blockgc_start(struct xfs_mount *mp);
 
 void xfs_inodegc_worker(struct work_struct *work);
 void xfs_inodegc_push(struct xfs_mount *mp);
-void xfs_inodegc_flush(struct xfs_mount *mp);
+int xfs_inodegc_flush(struct xfs_mount *mp);
 void xfs_inodegc_stop(struct xfs_mount *mp);
 void xfs_inodegc_start(struct xfs_mount *mp);
 void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5808abab786c..9e62cc500140 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1620,16 +1620,7 @@ xfs_inactive_ifree(
 	 */
 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
 
-	/*
-	 * Just ignore errors at this point.  There is nothing we can do except
-	 * to try to keep going. Make sure it's not a silent error.
-	 */
-	error = xfs_trans_commit(tp);
-	if (error)
-		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
-			__func__, error);
-
-	return 0;
+	return xfs_trans_commit(tp);
 }
 
 /*
@@ -1693,12 +1684,12 @@ xfs_inode_needs_inactive(
 * now be truncated. Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
-void
+int
 xfs_inactive(
 	xfs_inode_t *ip)
 {
 	struct xfs_mount	*mp;
-	int			error;
+	int			error = 0;
 	int			truncate = 0;
 
 	/*
@@ -1736,7 +1727,7 @@ xfs_inactive(
 		 * reference to the inode at this point anyways.
 		 */
 		if (xfs_can_free_eofblocks(ip, true))
-			xfs_free_eofblocks(ip);
+			error = xfs_free_eofblocks(ip);
 
 		goto out;
 	}
@@ -1773,7 +1764,7 @@ xfs_inactive(
 	/*
 	 * Free the inode.
 	 */
-	xfs_inactive_ifree(ip);
+	error = xfs_inactive_ifree(ip);
 
 out:
 	/*
@@ -1781,6 +1772,7 @@ out:
 	 * the attached dquots.
 	 */
 	xfs_qm_dqdetach(ip);
+	return error;
 }
 
 /*
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 69d21e42c10a..7547caf2f2ab 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -470,7 +470,7 @@ enum layout_break_reason {
 	(xfs_has_grpid((pip)->i_mount) || (VFS_I(pip)->i_mode & S_ISGID))
 
 int		xfs_release(struct xfs_inode *ip);
-void		xfs_inactive(struct xfs_inode *ip);
+int		xfs_inactive(struct xfs_inode *ip);
 int		xfs_lookup(struct xfs_inode *dp, const struct xfs_name *name,
 			   struct xfs_inode **ipp, struct xfs_name *ci_name);
 int		xfs_create(struct mnt_idmap *idmap,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 322eb2ee6c55..82c81d20459d 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2711,7 +2711,9 @@ xlog_recover_iunlink_bucket(
 			 * just to flush the inodegc queue and wait for it to
 			 * complete.
 			 */
-			xfs_inodegc_flush(mp);
+			error = xfs_inodegc_flush(mp);
+			if (error)
+				break;
 		}
 
 		prev_agino = agino;
@@ -2719,10 +2721,15 @@ xlog_recover_iunlink_bucket(
 	}
 
 	if (prev_ip) {
+		int	error2;
+
 		ip->i_prev_unlinked = prev_agino;
 		xfs_irele(prev_ip);
+
+		error2 = xfs_inodegc_flush(mp);
+		if (error2 && !error)
+			return error2;
 	}
-	xfs_inodegc_flush(mp);
 	return error;
 }
 
@@ -2789,7 +2796,6 @@ xlog_recover_iunlink_ag(
 			 * bucket and remaining inodes on it unreferenced and
 			 * unfreeable.
 			 */
-			xfs_inodegc_flush(pag->pag_mount);
 			xlog_recover_clear_agi_bucket(pag, bucket);
 		}
 	}
@@ -2806,13 +2812,6 @@ xlog_recover_process_iunlinks(
 	for_each_perag(log->l_mp, agno, pag)
 		xlog_recover_iunlink_ag(pag);
-
-	/*
-	 * Flush the pending unlinked inodes to ensure that the inactivations
-	 * are fully completed on disk and the incore inodes can be reclaimed
-	 * before we signal that recovery is complete.
-	 */
-	xfs_inodegc_flush(log->l_mp);
 }
 
 STATIC void
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index aaaf5ec13492..6c09f89534d3 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -62,6 +62,7 @@ struct xfs_error_cfg {
 struct xfs_inodegc {
 	struct llist_head	list;
 	struct delayed_work	work;
+	int			error;
 
 	/* approximate count of inodes in the list */
 	unsigned int		items;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 7e706255f165..4120bd1cba90 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1100,6 +1100,7 @@ xfs_inodegc_init_percpu(
 #endif
 		init_llist_head(&gc->list);
 		gc->items = 0;
+		gc->error = 0;
 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
 	}
 	return 0;
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f81fbf081b01..8c0bfc9a33b1 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -290,7 +290,9 @@ retry:
 		 * Do not perform a synchronous scan because callers can hold
 		 * other locks.
 		 */
-		xfs_blockgc_flush_all(mp);
+		error = xfs_blockgc_flush_all(mp);
+		if (error)
+			return error;
 		want_retry = false;
 		goto retry;
 	}
--
cgit