Diffstat (limited to 'fs/xfs/xfs_icache.c')
-rw-r--r--	fs/xfs/xfs_icache.c	86
1 file changed, 70 insertions, 16 deletions
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 351849fc18ff..453890942d9f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -435,18 +435,44 @@ xfs_iget_check_free_state(
 }
 
 /* Make all pending inactivation work start immediately. */
-static void
+static bool
 xfs_inodegc_queue_all(
 	struct xfs_mount	*mp)
 {
 	struct xfs_inodegc	*gc;
 	int			cpu;
+	bool			ret = false;
 
 	for_each_online_cpu(cpu) {
 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
-		if (!llist_empty(&gc->list))
+		if (!llist_empty(&gc->list)) {
 			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
+			ret = true;
+		}
+	}
+
+	return ret;
+}
+
+/* Wait for all queued work and collect errors */
+static int
+xfs_inodegc_wait_all(
+	struct xfs_mount	*mp)
+{
+	int			cpu;
+	int			error = 0;
+
+	flush_workqueue(mp->m_inodegc_wq);
+	for_each_online_cpu(cpu) {
+		struct xfs_inodegc	*gc;
+
+		gc = per_cpu_ptr(mp->m_inodegc, cpu);
+		if (gc->error && !error)
+			error = gc->error;
+		gc->error = 0;
 	}
+
+	return error;
 }
 
 /*
@@ -1486,15 +1512,14 @@ xfs_blockgc_free_space(
 	if (error)
 		return error;
 
-	xfs_inodegc_flush(mp);
-	return 0;
+	return xfs_inodegc_flush(mp);
 }
 
 /*
  * Reclaim all the free space that we can by scheduling the background blockgc
  * and inodegc workers immediately and waiting for them all to clear.
  */
-void
+int
 xfs_blockgc_flush_all(
 	struct xfs_mount	*mp)
 {
@@ -1515,7 +1540,7 @@ xfs_blockgc_flush_all(
 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
 		flush_delayed_work(&pag->pag_blockgc_work);
 
-	xfs_inodegc_flush(mp);
+	return xfs_inodegc_flush(mp);
 }
 
 /*
@@ -1837,13 +1862,17 @@ xfs_inodegc_set_reclaimable(
  * This is the last chance to make changes to an otherwise unreferenced file
  * before incore reclamation happens.
  */
-static void
+static int
 xfs_inodegc_inactivate(
 	struct xfs_inode	*ip)
 {
+	int			error;
+
 	trace_xfs_inode_inactivating(ip);
-	xfs_inactive(ip);
+	error = xfs_inactive(ip);
 	xfs_inodegc_set_reclaimable(ip);
+	return error;
+
 }
 
 void
@@ -1856,6 +1885,8 @@ xfs_inodegc_worker(
 	struct xfs_inode	*ip, *n;
 	unsigned int		nofs_flag;
 
+	ASSERT(gc->cpu == smp_processor_id());
+
 	WRITE_ONCE(gc->items, 0);
 
 	if (!node)
@@ -1873,8 +1904,12 @@ xfs_inodegc_worker(
 	WRITE_ONCE(gc->shrinker_hits, 0);
 
 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
+		int	error;
+
 		xfs_iflags_set(ip, XFS_INACTIVATING);
-		xfs_inodegc_inactivate(ip);
+		error = xfs_inodegc_inactivate(ip);
+		if (error && !gc->error)
+			gc->error = error;
 	}
 
 	memalloc_nofs_restore(nofs_flag);
@@ -1898,35 +1933,52 @@ xfs_inodegc_push(
  * Force all currently queued inode inactivation work to run immediately and
  * wait for the work to finish.
  */
-void
+int
 xfs_inodegc_flush(
 	struct xfs_mount	*mp)
 {
 	xfs_inodegc_push(mp);
 	trace_xfs_inodegc_flush(mp, __return_address);
-	flush_workqueue(mp->m_inodegc_wq);
+	return xfs_inodegc_wait_all(mp);
 }
 
 /*
  * Flush all the pending work and then disable the inode inactivation background
- * workers and wait for them to stop.
+ * workers and wait for them to stop.  Caller must hold sb->s_umount to
+ * coordinate changes in the inodegc_enabled state.
  */
 void
 xfs_inodegc_stop(
 	struct xfs_mount	*mp)
 {
+	bool			rerun;
+
 	if (!xfs_clear_inodegc_enabled(mp))
 		return;
 
+	/*
+	 * Drain all pending inodegc work, including inodes that could be
+	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
+	 * threads that sample the inodegc state just prior to us clearing it.
+	 * The inodegc flag state prevents new threads from queuing more
+	 * inodes, so we queue pending work items and flush the workqueue until
+	 * all inodegc lists are empty.  IOWs, we cannot use drain_workqueue
+	 * here because it does not allow other unserialized mechanisms to
+	 * reschedule inodegc work while this draining is in progress.
+	 */
 	xfs_inodegc_queue_all(mp);
-	drain_workqueue(mp->m_inodegc_wq);
+	do {
+		flush_workqueue(mp->m_inodegc_wq);
+		rerun = xfs_inodegc_queue_all(mp);
+	} while (rerun);
 
 	trace_xfs_inodegc_stop(mp, __return_address);
 }
 
 /*
  * Enable the inode inactivation background workers and schedule deferred inode
- * inactivation work if there is any.
+ * inactivation work if there is any.  Caller must hold sb->s_umount to
+ * coordinate changes in the inodegc_enabled state.
  */
 void
 xfs_inodegc_start(
@@ -2069,7 +2121,8 @@ xfs_inodegc_queue(
 		queue_delay = 0;
 
 	trace_xfs_inodegc_queue(mp, __return_address);
-	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
+	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
+			queue_delay);
 	put_cpu_ptr(gc);
 
 	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
@@ -2113,7 +2166,8 @@ xfs_inodegc_cpu_dead(
 
 	if (xfs_is_inodegc_enabled(mp)) {
 		trace_xfs_inodegc_queue(mp, __return_address);
-		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
+		mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
+				0);
 	}
 	put_cpu_ptr(gc);
 }
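The error plumbing in this patch follows a "first error wins" pattern: each per-CPU worker records only the first failure it sees in gc->error, and xfs_inodegc_wait_all() harvests and clears those slots after flushing the workqueue. Below is a minimal userspace sketch of that pattern, for illustration only; struct gc_ctx, gc_record_error, gc_wait_all, and NWORKERS are invented names standing in for the per-CPU inodegc state, not kernel API.

/*
 * Sketch: per-worker "first error wins" collection, harvested by a
 * single waiter.  Zero means no error, matching kernel convention.
 */
#include <stdio.h>

#define NWORKERS 4

struct gc_ctx {
	int	error;	/* first error seen by this worker, 0 if none */
};

static struct gc_ctx gc[NWORKERS];

/* Worker side: keep the first nonzero error, drop later ones. */
static void gc_record_error(struct gc_ctx *ctx, int error)
{
	if (error && !ctx->error)
		ctx->error = error;
}

/* Waiter side: return the first error found across all slots, resetting each. */
static int gc_wait_all(void)
{
	int	error = 0;

	for (int i = 0; i < NWORKERS; i++) {
		if (gc[i].error && !error)
			error = gc[i].error;
		gc[i].error = 0;
	}
	return error;
}

int main(void)
{
	gc_record_error(&gc[2], -5);	/* worker 2 hits -EIO */
	gc_record_error(&gc[2], -28);	/* ignored: slot already holds -5 */
	gc_record_error(&gc[3], -28);	/* worker 3 hits -ENOSPC */

	printf("collected error: %d\n", gc_wait_all());	/* prints -5 */
	printf("second wait: %d\n", gc_wait_all());	/* slots cleared: 0 */
	return 0;
}

The design choice mirrors the patch: later errors are deliberately discarded rather than overwriting earlier ones, so callers of xfs_inodegc_flush() see the first failure a worker encountered, and the slots are zeroed on harvest so a later flush starts clean.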
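The shutdown path replaces drain_workqueue() with a flush/re-queue loop because, as the added comment explains, threads that sampled the enabled flag just before it was cleared may still legitimately queue work, which drain_workqueue() forbids. Here is a runnable userspace analogue of that loop, under the assumption that a queue_all-style helper reports whether it found (and scheduled) stragglers; pending, flush_pending, and queue_stragglers are invented names for illustration.

/*
 * Sketch: flush, then re-check for work that raced in during the
 * flush, and repeat until a pass finds nothing queued.
 */
#include <stdbool.h>
#include <stdio.h>

static int pending;			/* items waiting for the worker */
static int straggler_rounds = 2;	/* simulate two racing enqueues */

/* Stand-in for flush_workqueue(): run everything currently queued. */
static void flush_pending(void)
{
	printf("flushed %d item(s)\n", pending);
	pending = 0;
}

/*
 * Stand-in for xfs_inodegc_queue_all(): report whether any work
 * arrived since the last flush, scheduling it if so.
 */
static bool queue_stragglers(void)
{
	if (straggler_rounds-- > 0) {
		pending++;	/* a racing thread snuck one in */
		return true;
	}
	return false;
}

int main(void)
{
	bool	rerun;

	do {
		flush_pending();
		rerun = queue_stragglers();
	} while (rerun);	/* exit only once a pass found no new work */

	printf("drained: %d pending\n", pending);
	return 0;
}

This is also why the patch changes xfs_inodegc_queue_all() to return bool: the stop path needs to know whether a flush pass uncovered more work, which the old void signature could not convey.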