author    Linus Torvalds <torvalds@linux-foundation.org>  2024-09-19 07:03:55 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-09-19 07:03:55 +0200
commit    8751b21ad9dc33f31dff20297dcae2063cbbcfc9 (patch)
tree      df0b13c8d1409687f3d9303373f5a1a11c014cf4 /fs/xfs/xfs_icache.c
parent    4e0373f1f920811a67fef0c3383f1ad602b3845e (diff)
parent    90fa22da6d6b41dc17435aff7b800f9ca3c00401 (diff)
Merge tag 'xfs-6.12-merge-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull xfs updates from Chandan Babu:
 "New code:

   - Introduce new ioctls to exchange contents of two files. The first
     ioctl does the preparation work to exchange the contents of two
     files while the second ioctl performs the actual exchange if the
     target file has not been changed since a given sampling point.

  Fixes:

   - Fix bugs associated with calculating the maximum range of realtime
     extents to scan for free space.

   - Copy keys instead of records when resizing the incore BMBT root
     block.

   - Do not report FITRIMming more bytes than possibly exist in the
     filesystem.

   - Modify xfs_fs.h to prevent C++ compilation errors.

   - Do not over eagerly free post-EOF speculative preallocation.

   - Ensure st_blocks never goes to zero during COW writes

  Cleanups/refactors:

   - Use Xarray to hold per-AG data instead of a Radix tree.

   - Cleanups to:
       - realtime bitmap
       - inode allocator
       - quota
       - inode rooted btree code"

* tag 'xfs-6.12-merge-1' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (61 commits)
  xfs: ensure st_blocks never goes to zero during COW writes
  xfs: use xas_for_each_marked in xfs_reclaim_inodes_count
  xfs: convert perag lookup to xarray
  xfs: simplify tagged perag iteration
  xfs: move the tagged perag lookup helpers to xfs_icache.c
  xfs: use kfree_rcu_mightsleep to free the perag structures
  xfs: use LIST_HEAD() to simplify code
  xfs: Remove duplicate xfs_trans_priv.h header
  xfs: remove unnecessary check
  xfs: Use xfs set and clear mp state helpers
  xfs: reclaim speculative preallocations for append only files
  xfs: simplify extent lookup in xfs_can_free_eofblocks
  xfs: check XFS_EOFBLOCKS_RELEASED earlier in xfs_release_eofblocks
  xfs: only free posteof blocks on first close
  xfs: don't free post-EOF blocks on read close
  xfs: skip all of xfs_file_release when shut down
  xfs: don't bother returning errors from xfs_file_release
  xfs: refactor f_op->release handling
  xfs: remove the i_mode check in xfs_release
  xfs: standardize the btree maxrecs function parameters
  ...
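Of the cleanups listed above, the one that drives nearly all of the changes to fs/xfs/xfs_icache.c shown below is the conversion of the per-AG lookup structure from a radix tree (with tags) to an xarray (with marks). As a rough, hedged sketch of the generic xarray calls the conversion builds on (the names are illustrative placeholders, not the XFS code):

#include <linux/xarray.h>

struct foo {
	unsigned long	index;
	long		count;
};

static DEFINE_XARRAY(foo_array);

static int foo_insert(struct foo *f, gfp_t gfp)
{
	/* fails with -EBUSY if something is already stored at f->index */
	return xa_insert(&foo_array, f->index, f, gfp);
}

static struct foo *foo_lookup(unsigned long index)
{
	/* lockless lookup; the caller supplies the lifetime rules (e.g. RCU) */
	return xa_load(&foo_array, index);
}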
Diffstat (limited to 'fs/xfs/xfs_icache.c')
-rw-r--r--  fs/xfs/xfs_icache.c | 89
1 file changed, 61 insertions(+), 28 deletions(-)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index cf629302d48e..20d9924f28c2 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -65,6 +65,18 @@ static int xfs_icwalk_ag(struct xfs_perag *pag,
XFS_ICWALK_FLAG_RECLAIM_SICK | \
XFS_ICWALK_FLAG_UNION)
+/* Marks for the perag xarray */
+#define XFS_PERAG_RECLAIM_MARK XA_MARK_0
+#define XFS_PERAG_BLOCKGC_MARK XA_MARK_1
+
+static inline xa_mark_t ici_tag_to_mark(unsigned int tag)
+{
+ if (tag == XFS_ICI_RECLAIM_TAG)
+ return XFS_PERAG_RECLAIM_MARK;
+ ASSERT(tag == XFS_ICI_BLOCKGC_TAG);
+ return XFS_PERAG_BLOCKGC_MARK;
+}
+
/*
* Allocate and initialise an xfs_inode.
*/
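Each xarray carries a small fixed set of marks (XA_MARK_0 through XA_MARK_2), so the two inode-cache tags map one-to-one onto two marks via ici_tag_to_mark() above. A hedged sketch of how a mark behaves on a generic xarray (illustrative names, and it assumes an entry is already stored at the index):

#include <linux/xarray.h>

static void foo_mark_demo(struct xarray *xa, unsigned long index)
{
	xa_set_mark(xa, index, XA_MARK_0);		/* mark one entry */
	WARN_ON(!xa_get_mark(xa, index, XA_MARK_0));	/* per-entry test */
	WARN_ON(!xa_marked(xa, XA_MARK_0));		/* "is anything marked?" */
	xa_clear_mark(xa, index, XA_MARK_0);
}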
@@ -191,7 +203,7 @@ xfs_reclaim_work_queue(
{
rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ if (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) {
queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
}
@@ -241,9 +253,7 @@ xfs_perag_set_inode_tag(
return;
/* propagate the tag up into the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
- spin_unlock(&mp->m_perag_lock);
+ xa_set_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag));
/* start background work */
switch (tag) {
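The old code needed mp->m_perag_lock because radix tree tag updates bring no locking of their own; xa_set_mark() takes the xarray's internal xa_lock, so the external lock round-trip disappears. A hedged sketch of the two equivalent forms in the generic API (not XFS code):

static void foo_mark_locking_demo(struct xarray *xa, unsigned long index)
{
	/* plain form: the xarray takes and drops its own spinlock */
	xa_set_mark(xa, index, XA_MARK_0);

	/* __-prefixed form, for callers that already hold xa_lock, e.g. to
	 * batch several updates under one acquisition */
	xa_lock(xa);
	__xa_set_mark(xa, index, XA_MARK_0);
	__xa_clear_mark(xa, index, XA_MARK_1);
	xa_unlock(xa);
}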
@@ -285,14 +295,39 @@ xfs_perag_clear_inode_tag(
return;
/* clear the tag from the perag radix tree */
- spin_lock(&mp->m_perag_lock);
- radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
- spin_unlock(&mp->m_perag_lock);
+ xa_clear_mark(&mp->m_perags, pag->pag_agno, ici_tag_to_mark(tag));
trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}
/*
+ * Find the next AG after @pag, or the first AG if @pag is NULL.
+ */
+static struct xfs_perag *
+xfs_perag_grab_next_tag(
+ struct xfs_mount *mp,
+ struct xfs_perag *pag,
+ int tag)
+{
+ unsigned long index = 0;
+
+ if (pag) {
+ index = pag->pag_agno + 1;
+ xfs_perag_rele(pag);
+ }
+
+ rcu_read_lock();
+ pag = xa_find(&mp->m_perags, &index, ULONG_MAX, ici_tag_to_mark(tag));
+ if (pag) {
+ trace_xfs_perag_grab_next_tag(pag, _RET_IP_);
+ if (!atomic_inc_not_zero(&pag->pag_active_ref))
+ pag = NULL;
+ }
+ rcu_read_unlock();
+ return pag;
+}
+
+/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
* information about the on-disk values in the VFS inode and so we can't just
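The helper looks up the next marked perag under rcu_read_lock() and only takes an active reference if pag_active_ref is still non-zero; if that fails it returns NULL and the walk ends. It also releases the perag passed in, which makes it a self-advancing cursor: a caller that breaks out of the loop early still holds a reference on the current pag and must drop it itself. A hedged usage sketch (should_stop() is a hypothetical predicate, not XFS code):

	struct xfs_perag	*pag = NULL;

	while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) {
		if (should_stop(pag)) {		/* hypothetical early-exit test */
			xfs_perag_rele(pag);	/* drop the ref the helper took */
			break;
		}
		/* ... per-AG work ... */
	}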
@@ -755,7 +790,7 @@ xfs_iget(
ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
/* reject inode numbers outside existing AGs */
- if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
+ if (!xfs_verify_ino(mp, ino))
return -EINVAL;
XFS_STATS_INC(mp, xs_ig_attempts);
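xfs_verify_ino() subsumes the old open-coded test; roughly, it rejects a zero inode number, an AG number at or beyond sb_agcount, and an inode offset that cannot exist within that AG. A hedged approximation of the check (the real helper lives in libxfs and is stricter):

/* hedged approximation; see xfs_verify_ino() for the real check */
static bool ino_looks_valid(struct xfs_mount *mp, xfs_ino_t ino)
{
	xfs_agnumber_t	agno = XFS_INO_TO_AGNO(mp, ino);

	if (!ino || agno >= mp->m_sb.sb_agcount)
		return false;
	/* the real helper also bounds-checks the inode's offset in the AG */
	return true;
}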
@@ -977,7 +1012,7 @@ xfs_reclaim_inodes(
if (xfs_want_reclaim_sick(mp))
icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
- while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ while (xa_marked(&mp->m_perags, XFS_PERAG_RECLAIM_MARK)) {
xfs_ail_push_all_sync(mp->m_ail);
xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
}
@@ -1019,15 +1054,17 @@ long
xfs_reclaim_inodes_count(
struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t ag = 0;
+ XA_STATE (xas, &mp->m_perags, 0);
long reclaimable = 0;
+ struct xfs_perag *pag;
- while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
- ag = pag->pag_agno + 1;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, pag, ULONG_MAX, XFS_PERAG_RECLAIM_MARK) {
+ trace_xfs_reclaim_inodes_count(pag, _THIS_IP_);
reclaimable += pag->pag_ici_reclaimable;
- xfs_perag_put(pag);
}
+ rcu_read_unlock();
+
return reclaimable;
}
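Compared with calling xa_find() repeatedly, the XA_STATE/xas_for_each_marked() form keeps a cursor in the xa_state, so each step resumes where the previous one left off instead of re-walking from the root. The whole walk runs under rcu_read_lock() and only reads a counter, so no reference is taken on each perag. A hedged generic sketch of the same shape, reusing the illustrative foo_array/struct foo placeholders from the earlier sketch:

static long foo_count_marked(void)
{
	XA_STATE(xas, &foo_array, 0);
	struct foo	*f;
	long		total = 0;

	rcu_read_lock();
	xas_for_each_marked(&xas, f, ULONG_MAX, XA_MARK_0)
		total += f->count;	/* read-only, so no per-entry refcount */
	rcu_read_unlock();

	return total;
}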
@@ -1159,7 +1196,7 @@ xfs_inode_free_eofblocks(
if (xfs_can_free_eofblocks(ip))
return xfs_free_eofblocks(ip);
- /* inode could be preallocated or append-only */
+ /* inode could be preallocated */
trace_xfs_inode_free_eofblocks_invalid(ip);
xfs_inode_clear_eofblocks_tag(ip);
return 0;
@@ -1369,14 +1406,13 @@ void
xfs_blockgc_start(
struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t agno;
+ struct xfs_perag *pag = NULL;
if (xfs_set_blockgc_enabled(mp))
return;
trace_xfs_blockgc_start(mp, __return_address);
- for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
xfs_blockgc_queue(pag);
}
@@ -1492,21 +1528,19 @@ int
xfs_blockgc_flush_all(
struct xfs_mount *mp)
{
- struct xfs_perag *pag;
- xfs_agnumber_t agno;
+ struct xfs_perag *pag = NULL;
trace_xfs_blockgc_flush_all(mp, __return_address);
/*
- * For each blockgc worker, move its queue time up to now. If it
- * wasn't queued, it will not be requeued. Then flush whatever's
- * left.
+ * For each blockgc worker, move its queue time up to now. If it wasn't
+ * queued, it will not be requeued. Then flush whatever is left.
*/
- for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
mod_delayed_work(pag->pag_mount->m_blockgc_wq,
&pag->pag_blockgc_work, 0);
- for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG)))
flush_delayed_work(&pag->pag_blockgc_work);
return xfs_inodegc_flush(mp);
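The two passes split "make every queued blockgc worker run now" from "wait until each of them has finished", so the flush only returns once all per-AG work is done. A hedged sketch of that kick-then-wait pattern on a single generic delayed work item (names are illustrative):

#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
	/* ... one round of background work ... */
}
static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static void demo_kick_and_wait(void)
{
	mod_delayed_work(system_wq, &demo_work, 0);	/* run as soon as possible */
	flush_delayed_work(&demo_work);			/* return only once it has run */
}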
@@ -1752,12 +1786,11 @@ xfs_icwalk(
enum xfs_icwalk_goal goal,
struct xfs_icwalk *icw)
{
- struct xfs_perag *pag;
+ struct xfs_perag *pag = NULL;
int error = 0;
int last_error = 0;
- xfs_agnumber_t agno;
- for_each_perag_tag(mp, agno, pag, goal) {
+ while ((pag = xfs_perag_grab_next_tag(mp, pag, goal))) {
error = xfs_icwalk_ag(pag, goal, icw);
if (error) {
last_error = error;
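The loop records the last per-AG error and keeps walking rather than stopping at the first failure. A hedged sketch of the usual walk-everything error idiom (do_one_ag() is a hypothetical per-AG body, not the XFS code):

	struct xfs_perag	*pag = NULL;
	int			error, last_error = 0;

	while ((pag = xfs_perag_grab_next_tag(mp, pag, goal))) {
		error = do_one_ag(pag);			/* hypothetical per-AG work */
		if (error) {
			last_error = error;		/* remember, keep walking */
			if (error == -EFSCORRUPTED) {	/* corruption: stop now */
				xfs_perag_rele(pag);	/* drop the ref the helper took */
				break;
			}
		}
	}
	return last_error;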