-rw-r--r--   fs/xfs/libxfs/xfs_alloc.c           |  11
-rw-r--r--   fs/xfs/libxfs/xfs_alloc.h           |   4
-rw-r--r--   fs/xfs/libxfs/xfs_alloc_btree.c     |  28
-rw-r--r--   fs/xfs/libxfs/xfs_bmap_btree.c      |  19
-rw-r--r--   fs/xfs/libxfs/xfs_btree.c           | 204
-rw-r--r--   fs/xfs/libxfs/xfs_btree.h           | 141
-rw-r--r--   fs/xfs/libxfs/xfs_ialloc_btree.c    |  22
-rw-r--r--   fs/xfs/libxfs/xfs_refcount.c        |  11
-rw-r--r--   fs/xfs/libxfs/xfs_refcount.h        |   4
-rw-r--r--   fs/xfs/libxfs/xfs_refcount_btree.c  |  21
-rw-r--r--   fs/xfs/libxfs/xfs_rmap.c            |  15
-rw-r--r--   fs/xfs/libxfs/xfs_rmap.h            |   4
-rw-r--r--   fs/xfs/libxfs/xfs_rmap_btree.c      |  61
-rw-r--r--   fs/xfs/libxfs/xfs_types.h           |  12
-rw-r--r--   fs/xfs/scrub/agheader.c             |   5
-rw-r--r--   fs/xfs/scrub/alloc.c                |   7
-rw-r--r--   fs/xfs/scrub/bmap.c                 |  11
-rw-r--r--   fs/xfs/scrub/btree.c                |  24
-rw-r--r--   fs/xfs/scrub/ialloc.c               |   2
-rw-r--r--   fs/xfs/scrub/inode.c                |   1
-rw-r--r--   fs/xfs/scrub/refcount.c             | 124
-rw-r--r--   fs/xfs/scrub/rmap.c                 |   6
-rw-r--r--   fs/xfs/scrub/scrub.h                |   2
23 files changed, 610 insertions(+), 129 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 23f0acfc2a64..fdfa08cbf4db 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -3745,13 +3745,16 @@ xfs_alloc_query_all(
return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
}
-/* Is there a record covering a given extent? */
+/*
+ * Scan part of the keyspace of the free space btree and tell us if the area has no
+ * records, is fully mapped by records, or is partially filled.
+ */
int
-xfs_alloc_has_record(
+xfs_alloc_has_records(
struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len,
- bool *exists)
+ enum xbtree_recpacking *outcome)
{
union xfs_btree_irec low;
union xfs_btree_irec high;
@@ -3761,7 +3764,7 @@ xfs_alloc_has_record(
memset(&high, 0xFF, sizeof(high));
high.a.ar_startblock = bno + len - 1;
- return xfs_btree_has_record(cur, &low, &high, exists);
+ return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
}
/*
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 56bd05900b35..5dbb25546d0b 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -213,8 +213,8 @@ int xfs_alloc_query_range(struct xfs_btree_cur *cur,
int xfs_alloc_query_all(struct xfs_btree_cur *cur, xfs_alloc_query_range_fn fn,
void *priv);
-int xfs_alloc_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len, bool *exist);
+int xfs_alloc_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, enum xbtree_recpacking *outcome);
typedef int (*xfs_agfl_walk_fn)(struct xfs_mount *mp, xfs_agblock_t bno,
void *priv);
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index 8e8416c14cec..c65228efed4a 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -260,20 +260,27 @@ STATIC int64_t
xfs_bnobt_diff_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
- const union xfs_btree_key *k2)
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
{
+ ASSERT(!mask || mask->alloc.ar_startblock);
+
return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
- be32_to_cpu(k2->alloc.ar_startblock);
+ be32_to_cpu(k2->alloc.ar_startblock);
}
STATIC int64_t
xfs_cntbt_diff_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
- const union xfs_btree_key *k2)
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
{
int64_t diff;
+ ASSERT(!mask || (mask->alloc.ar_blockcount &&
+ mask->alloc.ar_startblock));
+
diff = be32_to_cpu(k1->alloc.ar_blockcount) -
be32_to_cpu(k2->alloc.ar_blockcount);
if (diff)
@@ -423,6 +430,19 @@ xfs_cntbt_recs_inorder(
be32_to_cpu(r2->alloc.ar_startblock));
}
+STATIC enum xbtree_key_contig
+xfs_allocbt_keys_contiguous(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->alloc.ar_startblock);
+
+ return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
+ be32_to_cpu(key2->alloc.ar_startblock));
+}
+
static const struct xfs_btree_ops xfs_bnobt_ops = {
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
@@ -443,6 +463,7 @@ static const struct xfs_btree_ops xfs_bnobt_ops = {
.diff_two_keys = xfs_bnobt_diff_two_keys,
.keys_inorder = xfs_bnobt_keys_inorder,
.recs_inorder = xfs_bnobt_recs_inorder,
+ .keys_contiguous = xfs_allocbt_keys_contiguous,
};
static const struct xfs_btree_ops xfs_cntbt_ops = {
@@ -465,6 +486,7 @@ static const struct xfs_btree_ops xfs_cntbt_ops = {
.diff_two_keys = xfs_cntbt_diff_two_keys,
.keys_inorder = xfs_cntbt_keys_inorder,
.recs_inorder = xfs_cntbt_recs_inorder,
+ .keys_contiguous = NULL, /* not needed right now */
};
/* Allocate most of a new allocation btree cursor. */
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index b8ad95050c9b..1b40e5f8b1ec 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -382,11 +382,14 @@ STATIC int64_t
xfs_bmbt_diff_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
- const union xfs_btree_key *k2)
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
{
uint64_t a = be64_to_cpu(k1->bmbt.br_startoff);
uint64_t b = be64_to_cpu(k2->bmbt.br_startoff);
+ ASSERT(!mask || mask->bmbt.br_startoff);
+
/*
* Note: This routine previously cast a and b to int64 and subtracted
* them to generate a result. This led to problems if b was the
@@ -500,6 +503,19 @@ xfs_bmbt_recs_inorder(
xfs_bmbt_disk_get_startoff(&r2->bmbt);
}
+STATIC enum xbtree_key_contig
+xfs_bmbt_keys_contiguous(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->bmbt.br_startoff);
+
+ return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff),
+ be64_to_cpu(key2->bmbt.br_startoff));
+}
+
static const struct xfs_btree_ops xfs_bmbt_ops = {
.rec_len = sizeof(xfs_bmbt_rec_t),
.key_len = sizeof(xfs_bmbt_key_t),
@@ -520,6 +536,7 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
.buf_ops = &xfs_bmbt_buf_ops,
.keys_inorder = xfs_bmbt_keys_inorder,
.recs_inorder = xfs_bmbt_recs_inorder,
+ .keys_contiguous = xfs_bmbt_keys_contiguous,
};
/*
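
The comment preserved above in xfs_bmbt_diff_two_keys explains why the function compares br_startoff values instead of subtracting them. A minimal standalone sketch of the failure mode it describes, with illustrative values that are not taken from the patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Comparison-based diff, in the style the kernel uses. */
static int64_t diff_by_compare(uint64_t a, uint64_t b)
{
        if (a > b)
                return 1;
        if (b > a)
                return -1;
        return 0;
}

int main(void)
{
        uint64_t a = 0;
        uint64_t b = (1ULL << 63) + 1;  /* b is far larger than a */

        /* Naive subtraction wraps and reports a > b. */
        assert((int64_t)(a - b) > 0);

        /* The explicit comparison gets the right answer. */
        assert(diff_by_compare(a, b) < 0);

        printf("64-bit key subtraction is unsafe\n");
        return 0;
}
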
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index c4649cc624e1..6a6503ab0cd7 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -2067,8 +2067,7 @@ xfs_btree_get_leaf_keys(
for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
rec = xfs_btree_rec_addr(cur, n, block);
cur->bc_ops->init_high_key_from_rec(&hkey, rec);
- if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
- > 0)
+ if (xfs_btree_keycmp_gt(cur, &hkey, &max_hkey))
max_hkey = hkey;
}
@@ -2096,7 +2095,7 @@ xfs_btree_get_node_keys(
max_hkey = xfs_btree_high_key_addr(cur, 1, block);
for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
hkey = xfs_btree_high_key_addr(cur, n, block);
- if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
+ if (xfs_btree_keycmp_gt(cur, hkey, max_hkey))
max_hkey = hkey;
}
@@ -2183,8 +2182,8 @@ __xfs_btree_updkeys(
nlkey = xfs_btree_key_addr(cur, ptr, block);
nhkey = xfs_btree_high_key_addr(cur, ptr, block);
if (!force_all &&
- !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
- cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
+ xfs_btree_keycmp_eq(cur, nlkey, lkey) &&
+ xfs_btree_keycmp_eq(cur, nhkey, hkey))
break;
xfs_btree_copy_keys(cur, nlkey, lkey, 1);
xfs_btree_log_keys(cur, bp, ptr, ptr);
@@ -4716,7 +4715,6 @@ xfs_btree_simple_query_range(
{
union xfs_btree_rec *recp;
union xfs_btree_key rec_key;
- int64_t diff;
int stat;
bool firstrec = true;
int error;
@@ -4746,20 +4744,17 @@ xfs_btree_simple_query_range(
if (error || !stat)
break;
- /* Skip if high_key(rec) < low_key. */
+ /* Skip if low_key > high_key(rec). */
if (firstrec) {
cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
firstrec = false;
- diff = cur->bc_ops->diff_two_keys(cur, low_key,
- &rec_key);
- if (diff > 0)
+ if (xfs_btree_keycmp_gt(cur, low_key, &rec_key))
goto advloop;
}
- /* Stop if high_key < low_key(rec). */
+ /* Stop if low_key(rec) > high_key. */
cur->bc_ops->init_key_from_rec(&rec_key, recp);
- diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
- if (diff > 0)
+ if (xfs_btree_keycmp_gt(cur, &rec_key, high_key))
break;
/* Callback */
@@ -4813,8 +4808,6 @@ xfs_btree_overlapped_query_range(
union xfs_btree_key *hkp;
union xfs_btree_rec *recp;
struct xfs_btree_block *block;
- int64_t ldiff;
- int64_t hdiff;
int level;
struct xfs_buf *bp;
int i;
@@ -4854,25 +4847,23 @@ pop_up:
block);
cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
- ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
- low_key);
-
cur->bc_ops->init_key_from_rec(&rec_key, recp);
- hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
- &rec_key);
/*
+ * If (query's high key < record's low key), then there
+ * are no more interesting records in this block. Pop
+ * up to the leaf level to find more record blocks.
+ *
* If (record's high key >= query's low key) and
* (query's high key >= record's low key), then
* this record overlaps the query range; callback.
*/
- if (ldiff >= 0 && hdiff >= 0) {
+ if (xfs_btree_keycmp_lt(cur, high_key, &rec_key))
+ goto pop_up;
+ if (xfs_btree_keycmp_ge(cur, &rec_hkey, low_key)) {
error = fn(cur, recp, priv);
if (error)
break;
- } else if (hdiff < 0) {
- /* Record is larger than high key; pop. */
- goto pop_up;
}
cur->bc_levels[level].ptr++;
continue;
@@ -4884,15 +4875,18 @@ pop_up:
block);
pp = xfs_btree_ptr_addr(cur, cur->bc_levels[level].ptr, block);
- ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
- hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);
-
/*
+ * If (query's high key < pointer's low key), then there are no
+ * more interesting keys in this block. Pop up one leaf level
+ * to continue looking for records.
+ *
* If (pointer's high key >= query's low key) and
* (query's high key >= pointer's low key), then
* this record overlaps the query range; follow pointer.
*/
- if (ldiff >= 0 && hdiff >= 0) {
+ if (xfs_btree_keycmp_lt(cur, high_key, lkp))
+ goto pop_up;
+ if (xfs_btree_keycmp_ge(cur, hkp, low_key)) {
level--;
error = xfs_btree_lookup_get_block(cur, level, pp,
&block);
@@ -4907,9 +4901,6 @@ pop_up:
#endif
cur->bc_levels[level].ptr = 1;
continue;
- } else if (hdiff < 0) {
- /* The low key is larger than the upper range; pop. */
- goto pop_up;
}
cur->bc_levels[level].ptr++;
}
@@ -4937,6 +4928,19 @@ out:
return error;
}
+static inline void
+xfs_btree_key_from_irec(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key,
+ const union xfs_btree_irec *irec)
+{
+ union xfs_btree_rec rec;
+
+ cur->bc_rec = *irec;
+ cur->bc_ops->init_rec_from_cur(cur, &rec);
+ cur->bc_ops->init_key_from_rec(key, &rec);
+}
+
/*
* Query a btree for all records overlapping a given interval of keys. The
* supplied function will be called with each record found; return one of the
@@ -4951,21 +4955,15 @@ xfs_btree_query_range(
xfs_btree_query_range_fn fn,
void *priv)
{
- union xfs_btree_rec rec;
union xfs_btree_key low_key;
union xfs_btree_key high_key;
/* Find the keys of both ends of the interval. */
- cur->bc_rec = *high_rec;
- cur->bc_ops->init_rec_from_cur(cur, &rec);
- cur->bc_ops->init_key_from_rec(&high_key, &rec);
+ xfs_btree_key_from_irec(cur, &high_key, high_rec);
+ xfs_btree_key_from_irec(cur, &low_key, low_rec);
- cur->bc_rec = *low_rec;
- cur->bc_ops->init_rec_from_cur(cur, &rec);
- cur->bc_ops->init_key_from_rec(&low_key, &rec);
-
- /* Enforce low key < high key. */
- if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
+ /* Enforce low key <= high key. */
+ if (!xfs_btree_keycmp_le(cur, &low_key, &high_key))
return -EINVAL;
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
@@ -5027,34 +5025,132 @@ xfs_btree_diff_two_ptrs(
return (int64_t)be32_to_cpu(a->s) - be32_to_cpu(b->s);
}
-/* If there's an extent, we're done. */
+struct xfs_btree_has_records {
+ /* Keys for the start and end of the range we want to know about. */
+ union xfs_btree_key start_key;
+ union xfs_btree_key end_key;
+
+ /* Mask for key comparisons, if desired. */
+ const union xfs_btree_key *key_mask;
+
+ /* Highest record key we've seen so far. */
+ union xfs_btree_key high_key;
+
+ enum xbtree_recpacking outcome;
+};
+
STATIC int
-xfs_btree_has_record_helper(
+xfs_btree_has_records_helper(
struct xfs_btree_cur *cur,
const union xfs_btree_rec *rec,
void *priv)
{
- return -ECANCELED;
+ union xfs_btree_key rec_key;
+ union xfs_btree_key rec_high_key;
+ struct xfs_btree_has_records *info = priv;
+ enum xbtree_key_contig key_contig;
+
+ cur->bc_ops->init_key_from_rec(&rec_key, rec);
+
+ if (info->outcome == XBTREE_RECPACKING_EMPTY) {
+ info->outcome = XBTREE_RECPACKING_SPARSE;
+
+ /*
+ * If the first record we find does not overlap the start key,
+ * then there is a hole at the start of the search range.
+ * Classify this as sparse and stop immediately.
+ */
+ if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key,
+ info->key_mask))
+ return -ECANCELED;
+ } else {
+ /*
+ * If a subsequent record does not overlap with any record
+ * we've seen so far, there is a hole in the middle of the
+ * search range. Classify this as sparse and stop.
+ *
+ * If the keys overlap and this btree does not allow overlap,
+ * signal corruption.
+ */
+ key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key,
+ &rec_key, info->key_mask);
+ if (key_contig == XBTREE_KEY_OVERLAP &&
+ !(cur->bc_flags & XFS_BTREE_OVERLAPPING))
+ return -EFSCORRUPTED;
+ if (key_contig == XBTREE_KEY_GAP)
+ return -ECANCELED;
+ }
+
+ /*
+ * If high_key(rec) is larger than any other high key we've seen,
+ * remember it for later.
+ */
+ cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec);
+ if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key,
+ info->key_mask))
+ info->high_key = rec_high_key; /* struct copy */
+
+ return 0;
}
-/* Is there a record covering a given range of keys? */
+/*
+ * Scan part of the keyspace of a btree and tell us if that keyspace does not
+ * map to any records; is fully mapped to records; or is partially mapped to
+ * records. This is the btree record equivalent to determining if a file is
+ * sparse.
+ *
+ * For most btree types, the record scan should use all available btree key
+ * fields to compare the keys encountered. These callers should pass NULL for
+ * @mask. However, some callers (e.g. scanning physical space in the rmapbt)
+ * want to ignore some part of the btree record keyspace when performing the
+ * comparison. These callers should pass in a union xfs_btree_key object with
+ * the fields that *should* be a part of the comparison set to any nonzero
+ * value, and the rest zeroed.
+ */
int
-xfs_btree_has_record(
+xfs_btree_has_records(
struct xfs_btree_cur *cur,
const union xfs_btree_irec *low,
const union xfs_btree_irec *high,
- bool *exists)
+ const union xfs_btree_key *mask,
+ enum xbtree_recpacking *outcome)
{
+ struct xfs_btree_has_records info = {
+ .outcome = XBTREE_RECPACKING_EMPTY,
+ .key_mask = mask,
+ };
int error;
- error = xfs_btree_query_range(cur, low, high,
- &xfs_btree_has_record_helper, NULL);
- if (error == -ECANCELED) {
- *exists = true;
- return 0;
+ /* Not all btrees support this operation. */
+ if (!cur->bc_ops->keys_contiguous) {
+ ASSERT(0);
+ return -EOPNOTSUPP;
}
- *exists = false;
- return error;
+
+ xfs_btree_key_from_irec(cur, &info.start_key, low);
+ xfs_btree_key_from_irec(cur, &info.end_key, high);
+
+ error = xfs_btree_query_range(cur, low, high,
+ xfs_btree_has_records_helper, &info);
+ if (error == -ECANCELED)
+ goto out;
+ if (error)
+ return error;
+
+ if (info.outcome == XBTREE_RECPACKING_EMPTY)
+ goto out;
+
+ /*
+ * If the largest high_key(rec) we saw during the walk is greater than
+ * the end of the search range, classify this as full. Otherwise,
+ * there is a hole at the end of the search range.
+ */
+ if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key,
+ mask))
+ info.outcome = XBTREE_RECPACKING_FULL;
+
+out:
+ *outcome = info.outcome;
+ return 0;
}
/* Are there more records in this btree? */
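
The heart of the new xfs_btree_has_records is the three-way classification that xfs_btree_has_records_helper builds up record by record. Below is a hedged userspace model of that walk over inclusive block intervals; the record layout and names are invented for illustration, and the kernel version additionally returns -EFSCORRUPTED when it sees overlapping keys in a btree type that does not permit overlap:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

enum recpacking { EMPTY, SPARSE, FULL };

struct rec { uint64_t start, end; };    /* inclusive block interval */

static enum recpacking classify(const struct rec *recs, size_t nr,
                                uint64_t lo, uint64_t hi)
{
        enum recpacking outcome = EMPTY;
        uint64_t high_key = 0;

        for (size_t i = 0; i < nr; i++) {
                /* Only records overlapping the query range matter. */
                if (recs[i].end < lo || recs[i].start > hi)
                        continue;

                if (outcome == EMPTY) {
                        outcome = SPARSE;
                        /* Hole before the first record? */
                        if (lo < recs[i].start)
                                return SPARSE;
                } else if (recs[i].start > high_key + 1) {
                        /* Hole between two records? */
                        return SPARSE;
                }
                /* Track the largest high key seen so far. */
                if (recs[i].end > high_key)
                        high_key = recs[i].end;
        }

        if (outcome == EMPTY)
                return EMPTY;
        /* Hole after the last record? */
        return high_key >= hi ? FULL : SPARSE;
}

int main(void)
{
        struct rec recs[] = { { 10, 19 }, { 20, 29 }, { 40, 49 } };

        assert(classify(recs, 3, 50, 59) == EMPTY);   /* nothing mapped */
        assert(classify(recs, 3, 10, 29) == FULL);    /* contiguous cover */
        assert(classify(recs, 3, 10, 45) == SPARSE);  /* gap at 30..39 */
        assert(classify(recs, 3, 5, 19) == SPARSE);   /* hole at the start */
        return 0;
}
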
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 29c4b4ccb909..a2aa36b23e25 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -90,6 +90,27 @@ uint32_t xfs_btree_magic(int crc, xfs_btnum_t btnum);
#define XFS_BTREE_STATS_ADD(cur, stat, val) \
XFS_STATS_ADD_OFF((cur)->bc_mp, (cur)->bc_statoff + __XBTS_ ## stat, val)
+enum xbtree_key_contig {
+ XBTREE_KEY_GAP = 0,
+ XBTREE_KEY_CONTIGUOUS,
+ XBTREE_KEY_OVERLAP,
+};
+
+/*
+ * Decide if these two numeric btree key fields are contiguous, overlapping,
+ * or if there's a gap between them. @x should be the field from the high
+ * key and @y should be the field from the low key.
+ */
+static inline enum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
+{
+ x++;
+ if (x < y)
+ return XBTREE_KEY_GAP;
+ if (x == y)
+ return XBTREE_KEY_CONTIGUOUS;
+ return XBTREE_KEY_OVERLAP;
+}
+
struct xfs_btree_ops {
/* size of the key and record structures */
size_t key_len;
@@ -140,11 +161,14 @@ struct xfs_btree_ops {
/*
* Difference between key2 and key1 -- positive if key1 > key2,
- * negative if key1 < key2, and zero if equal.
+ * negative if key1 < key2, and zero if equal. If the @mask parameter
+ * is non-NULL, each key field to be used in the comparison must
+ * contain a nonzero value.
*/
int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
const union xfs_btree_key *key1,
- const union xfs_btree_key *key2);
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask);
const struct xfs_buf_ops *buf_ops;
@@ -157,6 +181,22 @@ struct xfs_btree_ops {
int (*recs_inorder)(struct xfs_btree_cur *cur,
const union xfs_btree_rec *r1,
const union xfs_btree_rec *r2);
+
+ /*
+ * Are these two btree keys immediately adjacent?
+ *
+ * Given two btree keys @key1 and @key2, decide if it is impossible for
+ * there to be a third btree key K satisfying the relationship
+ * @key1 < K < @key2. To determine if two btree records are
+ * immediately adjacent, @key1 should be the high key of the first
+ * record and @key2 should be the low key of the second record.
+ * If the @mask parameter is non-NULL, each key field to be used in the
+ * comparison must contain a nonzero value.
+ */
+ enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask);
};
/*
@@ -540,12 +580,105 @@ void xfs_btree_get_keys(struct xfs_btree_cur *cur,
struct xfs_btree_block *block, union xfs_btree_key *key);
union xfs_btree_key *xfs_btree_high_key_from_key(struct xfs_btree_cur *cur,
union xfs_btree_key *key);
-int xfs_btree_has_record(struct xfs_btree_cur *cur,
+typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2);
+
+int xfs_btree_has_records(struct xfs_btree_cur *cur,
const union xfs_btree_irec *low,
- const union xfs_btree_irec *high, bool *exists);
+ const union xfs_btree_irec *high,
+ const union xfs_btree_key *mask,
+ enum xbtree_recpacking *outcome);
+
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
struct xfs_ifork *xfs_btree_ifork_ptr(struct xfs_btree_cur *cur);
+/* Key comparison helpers */
+static inline bool
+xfs_btree_keycmp_lt(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2)
+{
+ return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0;
+}
+
+static inline bool
+xfs_btree_keycmp_gt(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2)
+{
+ return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0;
+}
+
+static inline bool
+xfs_btree_keycmp_eq(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2)
+{
+ return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0;
+}
+
+static inline bool
+xfs_btree_keycmp_le(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2)
+{
+ return !xfs_btree_keycmp_gt(cur, key1, key2);
+}
+
+static inline bool
+xfs_btree_keycmp_ge(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2)
+{
+ return !xfs_btree_keycmp_lt(cur, key1, key2);
+}
+
+static inline bool
+xfs_btree_keycmp_ne(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2)
+{
+ return !xfs_btree_keycmp_eq(cur, key1, key2);
+}
+
+/* Masked key comparison helpers */
+static inline bool
+xfs_btree_masked_keycmp_lt(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) < 0;
+}
+
+static inline bool
+xfs_btree_masked_keycmp_gt(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) > 0;
+}
+
+static inline bool
+xfs_btree_masked_keycmp_ge(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
+}
+
/* Does this cursor point to the last block in the given level? */
static inline bool
xfs_btree_islastblock(
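
A quick standalone check of the xbtree_key_contig contract defined above, using invented record intervals; the xfs_btree_keycmp_* and xfs_btree_masked_keycmp_* helpers added in the same header are thin boolean wrappers around the three-way diff_two_keys result:

#include <assert.h>
#include <stdint.h>

enum key_contig { KEY_GAP = 0, KEY_CONTIGUOUS, KEY_OVERLAP };

/* x is the field from the high key, y the field from the low key. */
static enum key_contig key_contig(uint64_t x, uint64_t y)
{
        x++;
        if (x < y)
                return KEY_GAP;
        if (x == y)
                return KEY_CONTIGUOUS;
        return KEY_OVERLAP;
}

int main(void)
{
        /* Records covering [0,9] and [20,29] leave a gap. */
        assert(key_contig(9, 20) == KEY_GAP);

        /* [0,9] and [10,19] touch exactly. */
        assert(key_contig(9, 10) == KEY_CONTIGUOUS);

        /* [0,9] and [5,14] share blocks. */
        assert(key_contig(9, 5) == KEY_OVERLAP);
        return 0;
}
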
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index f900c056b82c..5a945ae21b5d 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -269,10 +269,13 @@ STATIC int64_t
xfs_inobt_diff_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
- const union xfs_btree_key *k2)
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
{
+ ASSERT(!mask || mask->inobt.ir_startino);
+
return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
- be32_to_cpu(k2->inobt.ir_startino);
+ be32_to_cpu(k2->inobt.ir_startino);
}
static xfs_failaddr_t
@@ -383,6 +386,19 @@ xfs_inobt_recs_inorder(
be32_to_cpu(r2->inobt.ir_startino);
}
+STATIC enum xbtree_key_contig
+xfs_inobt_keys_contiguous(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->inobt.ir_startino);
+
+ return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino),
+ be32_to_cpu(key2->inobt.ir_startino));
+}
+
static const struct xfs_btree_ops xfs_inobt_ops = {
.rec_len = sizeof(xfs_inobt_rec_t),
.key_len = sizeof(xfs_inobt_key_t),
@@ -402,6 +418,7 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.diff_two_keys = xfs_inobt_diff_two_keys,
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
+ .keys_contiguous = xfs_inobt_keys_contiguous,
};
static const struct xfs_btree_ops xfs_finobt_ops = {
@@ -423,6 +440,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.diff_two_keys = xfs_inobt_diff_two_keys,
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
+ .keys_contiguous = xfs_inobt_keys_contiguous,
};
/*
diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c
index 335f84bef81c..c1c65774dcc2 100644
--- a/fs/xfs/libxfs/xfs_refcount.c
+++ b/fs/xfs/libxfs/xfs_refcount.c
@@ -1998,14 +1998,17 @@ out_free:
return error;
}
-/* Is there a record covering a given extent? */
+/*
+ * Scan part of the keyspace of the refcount records and tell us if the area
+ * has no records, is fully mapped by records, or is partially filled.
+ */
int
-xfs_refcount_has_record(
+xfs_refcount_has_records(
struct xfs_btree_cur *cur,
enum xfs_refc_domain domain,
xfs_agblock_t bno,
xfs_extlen_t len,
- bool *exists)
+ enum xbtree_recpacking *outcome)
{
union xfs_btree_irec low;
union xfs_btree_irec high;
@@ -2016,7 +2019,7 @@ xfs_refcount_has_record(
high.rc.rc_startblock = bno + len - 1;
low.rc.rc_domain = high.rc.rc_domain = domain;
- return xfs_btree_has_record(cur, &low, &high, exists);
+ return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
}
int __init
diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h
index fc0b58d4c379..783cd89ca195 100644
--- a/fs/xfs/libxfs/xfs_refcount.h
+++ b/fs/xfs/libxfs/xfs_refcount.h
@@ -111,9 +111,9 @@ extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp,
*/
#define XFS_REFCOUNT_ITEM_OVERHEAD 32
-extern int xfs_refcount_has_record(struct xfs_btree_cur *cur,
+extern int xfs_refcount_has_records(struct xfs_btree_cur *cur,
enum xfs_refc_domain domain, xfs_agblock_t bno,
- xfs_extlen_t len, bool *exists);
+ xfs_extlen_t len, enum xbtree_recpacking *outcome);
union xfs_btree_rec;
extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec,
struct xfs_refcount_irec *irec);
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 03d2b01487a1..d4afc5f4e6a5 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -202,10 +202,13 @@ STATIC int64_t
xfs_refcountbt_diff_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
- const union xfs_btree_key *k2)
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
{
+ ASSERT(!mask || mask->refc.rc_startblock);
+
return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
- be32_to_cpu(k2->refc.rc_startblock);
+ be32_to_cpu(k2->refc.rc_startblock);
}
STATIC xfs_failaddr_t
@@ -300,6 +303,19 @@ xfs_refcountbt_recs_inorder(
be32_to_cpu(r2->refc.rc_startblock);
}
+STATIC enum xbtree_key_contig
+xfs_refcountbt_keys_contiguous(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->refc.rc_startblock);
+
+ return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock),
+ be32_to_cpu(key2->refc.rc_startblock));
+}
+
static const struct xfs_btree_ops xfs_refcountbt_ops = {
.rec_len = sizeof(struct xfs_refcount_rec),
.key_len = sizeof(struct xfs_refcount_key),
@@ -319,6 +335,7 @@ static const struct xfs_btree_ops xfs_refcountbt_ops = {
.diff_two_keys = xfs_refcountbt_diff_two_keys,
.keys_inorder = xfs_refcountbt_keys_inorder,
.recs_inorder = xfs_refcountbt_recs_inorder,
+ .keys_contiguous = xfs_refcountbt_keys_contiguous,
};
/*
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index da008d317f83..308b81f321eb 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -2709,14 +2709,21 @@ xfs_rmap_compare(
return 0;
}
-/* Is there a record covering a given extent? */
+/*
+ * Scan the physical storage part of the keyspace of the reverse mapping index
+ * and tell us if the area has no records, is fully mapped by records, or is
+ * partially filled.
+ */
int
-xfs_rmap_has_record(
+xfs_rmap_has_records(
struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len,
- bool *exists)
+ enum xbtree_recpacking *outcome)
{
+ union xfs_btree_key mask = {
+ .rmap.rm_startblock = cpu_to_be32(-1U),
+ };
union xfs_btree_irec low;
union xfs_btree_irec high;
@@ -2725,7 +2732,7 @@ xfs_rmap_has_record(
memset(&high, 0xFF, sizeof(high));
high.r.rm_startblock = bno + len - 1;
- return xfs_btree_has_record(cur, &low, &high, exists);
+ return xfs_btree_has_records(cur, &low, &high, &mask, outcome);
}
/*
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 7fb298bcc15f..4cbe50cf522e 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -198,8 +198,8 @@ xfs_failaddr_t xfs_rmap_btrec_to_irec(const union xfs_btree_rec *rec,
xfs_failaddr_t xfs_rmap_check_irec(struct xfs_btree_cur *cur,
const struct xfs_rmap_irec *irec);
-int xfs_rmap_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len, bool *exists);
+int xfs_rmap_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, enum xbtree_recpacking *outcome);
int xfs_rmap_record_exists(struct xfs_btree_cur *cur, xfs_agblock_t bno,
xfs_extlen_t len, const struct xfs_owner_info *oinfo,
bool *has_rmap);
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
index 84e2b692f034..6c81b20e97d2 100644
--- a/fs/xfs/libxfs/xfs_rmap_btree.c
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -273,31 +273,43 @@ STATIC int64_t
xfs_rmapbt_diff_two_keys(
struct xfs_btree_cur *cur,
const union xfs_btree_key *k1,
- const union xfs_btree_key *k2)
+ const union xfs_btree_key *k2,
+ const union xfs_btree_key *mask)
{
const struct xfs_rmap_key *kp1 = &k1->rmap;
const struct xfs_rmap_key *kp2 = &k2->rmap;
int64_t d;
__u64 x, y;
+ /* Doesn't make sense to mask off the physical space part */
+ ASSERT(!mask || mask->rmap.rm_startblock);
+
d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
- be32_to_cpu(kp2->rm_startblock);
+ be32_to_cpu(kp2->rm_startblock);
if (d)
return d;
- x = be64_to_cpu(kp1->rm_owner);
- y = be64_to_cpu(kp2->rm_owner);
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
+ if (!mask || mask->rmap.rm_owner) {
+ x = be64_to_cpu(kp1->rm_owner);
+ y = be64_to_cpu(kp2->rm_owner);
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ }
+
+ if (!mask || mask->rmap.rm_offset) {
+ /* Doesn't make sense to allow offset but not owner */
+ ASSERT(!mask || mask->rmap.rm_owner);
+
+ x = offset_keymask(be64_to_cpu(kp1->rm_offset));
+ y = offset_keymask(be64_to_cpu(kp2->rm_offset));
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ }
- x = offset_keymask(be64_to_cpu(kp1->rm_offset));
- y = offset_keymask(be64_to_cpu(kp2->rm_offset));
- if (x > y)
- return 1;
- else if (y > x)
- return -1;
return 0;
}
@@ -444,6 +456,26 @@ xfs_rmapbt_recs_inorder(
return 0;
}
+STATIC enum xbtree_key_contig
+xfs_rmapbt_keys_contiguous(
+ struct xfs_btree_cur *cur,
+ const union xfs_btree_key *key1,
+ const union xfs_btree_key *key2,
+ const union xfs_btree_key *mask)
+{
+ ASSERT(!mask || mask->rmap.rm_startblock);
+
+ /*
+ * We only support checking contiguity of the physical space component.
+ * If any callers ever need more specificity than that, they'll have to
+ * implement it here.
+ */
+ ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));
+
+ return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
+ be32_to_cpu(key2->rmap.rm_startblock));
+}
+
static const struct xfs_btree_ops xfs_rmapbt_ops = {
.rec_len = sizeof(struct xfs_rmap_rec),
.key_len = 2 * sizeof(struct xfs_rmap_key),
@@ -463,6 +495,7 @@ static const struct xfs_btree_ops xfs_rmapbt_ops = {
.diff_two_keys = xfs_rmapbt_diff_two_keys,
.keys_inorder = xfs_rmapbt_keys_inorder,
.recs_inorder = xfs_rmapbt_recs_inorder,
+ .keys_contiguous = xfs_rmapbt_keys_contiguous,
};
static struct xfs_btree_cur *
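
To see what the new @mask argument buys the rmapbt, here is a simplified userspace sketch; the struct and helper are stand-ins for the on-disk big-endian keys, not kernel code. A mask with only the startblock field set, as xfs_rmap_has_records builds, makes two keys that differ only in owner or offset compare equal, which is exactly what a physical-space scan wants:

#include <assert.h>
#include <stdint.h>

struct rmap_key {
        uint32_t rm_startblock;
        uint64_t rm_owner;
        uint64_t rm_offset;
};

static int64_t diff_two_keys(const struct rmap_key *k1,
                             const struct rmap_key *k2,
                             const struct rmap_key *mask)
{
        int64_t d = (int64_t)k1->rm_startblock - k2->rm_startblock;

        if (d)
                return d;
        if (!mask || mask->rm_owner) {
                if (k1->rm_owner != k2->rm_owner)
                        return k1->rm_owner > k2->rm_owner ? 1 : -1;
        }
        if (!mask || mask->rm_offset) {
                if (k1->rm_offset != k2->rm_offset)
                        return k1->rm_offset > k2->rm_offset ? 1 : -1;
        }
        return 0;
}

int main(void)
{
        /* Two mappings of the same block by different owners. */
        struct rmap_key a = { .rm_startblock = 100, .rm_owner = 1 };
        struct rmap_key b = { .rm_startblock = 100, .rm_owner = 2 };
        struct rmap_key startblock_only = { .rm_startblock = ~0U };

        /* An unmasked comparison sees two distinct keys... */
        assert(diff_two_keys(&a, &b, NULL) != 0);

        /* ...but the startblock-only mask treats them as equal. */
        assert(diff_two_keys(&a, &b, &startblock_only) == 0);
        return 0;
}
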
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index 5ebdda7e1078..851220021484 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -204,6 +204,18 @@ enum xfs_ag_resv_type {
XFS_AG_RESV_RMAPBT,
};
+/* Results of scanning a btree keyspace to check occupancy. */
+enum xbtree_recpacking {
+ /* None of the keyspace maps to records. */
+ XBTREE_RECPACKING_EMPTY = 0,
+
+ /* Some, but not all, of the keyspace maps to records. */
+ XBTREE_RECPACKING_SPARSE,
+
+ /* The entire keyspace maps to records. */
+ XBTREE_RECPACKING_FULL,
+};
+
/*
* Type verifier functions
*/
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 87cb13a6e84a..1a84153afa91 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -53,6 +53,7 @@ xchk_superblock_xref(
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_not_cow_staging(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */
}
@@ -517,6 +518,7 @@ xchk_agf_xref(
xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_agf_xref_btreeblks(sc);
xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_not_cow_staging(sc, agbno, 1);
xchk_agf_xref_refcblks(sc);
/* scrub teardown will take care of sc->sa for us */
@@ -644,6 +646,7 @@ xchk_agfl_block_xref(
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_not_cow_staging(sc, agbno, 1);
}
/* Scrub an AGFL block. */
@@ -700,6 +703,7 @@ xchk_agfl_xref(
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_not_cow_staging(sc, agbno, 1);
/*
* Scrub teardown will take care of sc->sa for us. Leave sc->sa
@@ -855,6 +859,7 @@ xchk_agi_xref(
xchk_agi_xref_icounts(sc);
xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_not_cow_staging(sc, agbno, 1);
xchk_agi_xref_fiblocks(sc);
/* scrub teardown will take care of sc->sa for us */
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 53de04c6027c..12dd55ac2a4f 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -90,6 +90,7 @@ xchk_allocbt_xref(
xchk_xref_is_not_inode_chunk(sc, agbno, len);
xchk_xref_has_no_owner(sc, agbno, len);
xchk_xref_is_not_shared(sc, agbno, len);
+ xchk_xref_is_not_cow_staging(sc, agbno, len);
}
/* Scrub a bnobt/cntbt record. */
@@ -144,15 +145,15 @@ xchk_xref_is_used_space(
xfs_agblock_t agbno,
xfs_extlen_t len)
{
- bool is_freesp;
+ enum xbtree_recpacking outcome;
int error;
if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
return;
- error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
+ error = xfs_alloc_has_records(sc->sa.bno_cur, agbno, len, &outcome);
if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
return;
- if (is_freesp)
+ if (outcome != XBTREE_RECPACKING_EMPTY)
xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
}
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 6188eba672e5..be2c4da2808b 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -328,12 +328,17 @@ xchk_bmap_iextent_xref(
xchk_bmap_xref_rmap(info, irec, agbno);
switch (info->whichfork) {
case XFS_DATA_FORK:
- if (xfs_is_reflink_inode(info->sc->ip))
- break;
- fallthrough;
+ if (!xfs_is_reflink_inode(info->sc->ip))
+ xchk_xref_is_not_shared(info->sc, agbno,
+ irec->br_blockcount);
+ xchk_xref_is_not_cow_staging(info->sc, agbno,
+ irec->br_blockcount);
+ break;
case XFS_ATTR_FORK:
xchk_xref_is_not_shared(info->sc, agbno,
irec->br_blockcount);
+ xchk_xref_is_not_cow_staging(info->sc, agbno,
+ irec->br_blockcount);
break;
case XFS_COW_FORK:
xchk_xref_is_cow_staging(info->sc, agbno,
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 4ec3b1cab018..1165dc05a179 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -161,20 +161,20 @@ xchk_btree_rec(
if (cur->bc_nlevels == 1)
return;
- /* Is this at least as large as the parent low key? */
+ /* Is low_key(rec) at least as large as the parent low key? */
cur->bc_ops->init_key_from_rec(&key, rec);
keyblock = xfs_btree_get_block(cur, 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_levels[1].ptr, keyblock);
- if (cur->bc_ops->diff_two_keys(cur, &key, keyp) < 0)
+ if (xfs_btree_keycmp_lt(cur, &key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
- /* Is this no larger than the parent high key? */
+ /* Is high_key(rec) no larger than the parent high key? */
cur->bc_ops->init_high_key_from_rec(&hkey, rec);
keyp = xfs_btree_high_key_addr(cur, cur->bc_levels[1].ptr, keyblock);
- if (cur->bc_ops->diff_two_keys(cur, keyp, &hkey) < 0)
+ if (xfs_btree_keycmp_lt(cur, keyp, &hkey))
xchk_btree_set_corrupt(bs->sc, cur, 1);
}
@@ -209,20 +209,20 @@ xchk_btree_key(
if (level + 1 >= cur->bc_nlevels)
return;
- /* Is this at least as large as the parent low key? */
+ /* Is this block's low key at least as large as the parent low key? */
keyblock = xfs_btree_get_block(cur, level + 1, &bp);
keyp = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr, keyblock);
- if (cur->bc_ops->diff_two_keys(cur, key, keyp) < 0)
+ if (xfs_btree_keycmp_lt(cur, key, keyp))
xchk_btree_set_corrupt(bs->sc, cur, level);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return;
- /* Is this no larger than the parent high key? */
+ /* Is this block's high key no larger than the parent high key? */
key = xfs_btree_high_key_addr(cur, cur->bc_levels[level].ptr, block);
keyp = xfs_btree_high_key_addr(cur, cur->bc_levels[level + 1].ptr,
keyblock);
- if (cur->bc_ops->diff_two_keys(cur, keyp, key) < 0)
+ if (xfs_btree_keycmp_lt(cur, keyp, key))
xchk_btree_set_corrupt(bs->sc, cur, level);
}
@@ -557,7 +557,7 @@ xchk_btree_block_check_keys(
parent_block = xfs_btree_get_block(cur, level + 1, &bp);
parent_low_key = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr,
parent_block);
- if (cur->bc_ops->diff_two_keys(cur, &block_key, parent_low_key)) {
+ if (xfs_btree_keycmp_ne(cur, &block_key, parent_low_key)) {
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
return;
}
@@ -569,7 +569,7 @@ xchk_btree_block_check_keys(
parent_high_key = xfs_btree_high_key_addr(cur,
cur->bc_levels[level + 1].ptr, parent_block);
block_high_key = xfs_btree_high_key_from_key(cur, &block_key);
- if (cur->bc_ops->diff_two_keys(cur, block_high_key, parent_high_key))
+ if (xfs_btree_keycmp_ne(cur, block_high_key, parent_high_key))
xchk_btree_set_corrupt(bs->sc, bs->cur, level);
}
@@ -661,7 +661,7 @@ xchk_btree_block_keys(
parent_keys = xfs_btree_key_addr(cur, cur->bc_levels[level + 1].ptr,
parent_block);
- if (cur->bc_ops->diff_two_keys(cur, &block_keys, parent_keys) != 0)
+ if (xfs_btree_keycmp_ne(cur, &block_keys, parent_keys))
xchk_btree_set_corrupt(bs->sc, cur, 1);
if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
@@ -672,7 +672,7 @@ xchk_btree_block_keys(
high_pk = xfs_btree_high_key_addr(cur, cur->bc_levels[level + 1].ptr,
parent_block);
- if (cur->bc_ops->diff_two_keys(cur, high_bk, high_pk) != 0)
+ if (xfs_btree_keycmp_ne(cur, high_bk, high_pk))
xchk_btree_set_corrupt(bs->sc, cur, 1);
}
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index ca5a7e0f5451..6d08613db32f 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -115,7 +115,7 @@ xchk_iallocbt_chunk(
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
-
+ xchk_xref_is_not_cow_staging(bs->sc, bno, len);
return true;
}
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index bbf9432c02c2..50ebd72f6d95 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -558,6 +558,7 @@ xchk_inode_xref(
xchk_inode_xref_finobt(sc, ino);
xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_INODES);
xchk_xref_is_not_shared(sc, agbno, 1);
+ xchk_xref_is_not_cow_staging(sc, agbno, 1);
xchk_inode_xref_bmap(sc, dip);
out_free:
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index 4d77049dfce2..db9e46a4f8d4 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -332,6 +332,64 @@ xchk_refcountbt_xref(
xchk_refcountbt_xref_rmap(sc, irec);
}
+struct xchk_refcbt_records {
+ /* The next AG block where we aren't expecting shared extents. */
+ xfs_agblock_t next_unshared_agbno;
+
+ /* Number of CoW blocks we expect. */
+ xfs_agblock_t cow_blocks;
+
+ /* Was the last record a shared or CoW staging extent? */
+ enum xfs_refc_domain prev_domain;
+};
+
+STATIC int
+xchk_refcountbt_rmap_check_gap(
+ struct xfs_btree_cur *cur,
+ const struct xfs_rmap_irec *rec,
+ void *priv)
+{
+ xfs_agblock_t *next_bno = priv;
+
+ if (*next_bno != NULLAGBLOCK && rec->rm_startblock < *next_bno)
+ return -ECANCELED;
+
+ *next_bno = rec->rm_startblock + rec->rm_blockcount;
+ return 0;
+}
+
+/*
+ * Make sure that a gap in the reference count records does not correspond to
+ * overlapping records (i.e. shared extents) in the reverse mappings.
+ */
+static inline void
+xchk_refcountbt_xref_gaps(
+ struct xfs_scrub *sc,
+ struct xchk_refcbt_records *rrc,
+ xfs_agblock_t bno)
+{
+ struct xfs_rmap_irec low;
+ struct xfs_rmap_irec high;
+ xfs_agblock_t next_bno = NULLAGBLOCK;
+ int error;
+
+ if (bno <= rrc->next_unshared_agbno || !sc->sa.rmap_cur ||
+ xchk_skip_xref(sc->sm))
+ return;
+
+ memset(&low, 0, sizeof(low));
+ low.rm_startblock = rrc->next_unshared_agbno;
+ memset(&high, 0xFF, sizeof(high));
+ high.rm_startblock = bno - 1;
+
+ error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
+ xchk_refcountbt_rmap_check_gap, &next_bno);
+ if (error == -ECANCELED)
+ xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
+ else
+ xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur);
+}
+
/* Scrub a refcountbt record. */
STATIC int
xchk_refcountbt_rec(
@@ -339,7 +397,7 @@ xchk_refcountbt_rec(
const union xfs_btree_rec *rec)
{
struct xfs_refcount_irec irec;
- xfs_agblock_t *cow_blocks = bs->private;
+ struct xchk_refcbt_records *rrc = bs->private;
xfs_refcount_btrec_to_irec(rec, &irec);
if (xfs_refcount_check_irec(bs->cur, &irec) != NULL) {
@@ -348,10 +406,27 @@ xchk_refcountbt_rec(
}
if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
- (*cow_blocks) += irec.rc_blockcount;
+ rrc->cow_blocks += irec.rc_blockcount;
+
+ /* Shared records always come before CoW records. */
+ if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
+ rrc->prev_domain == XFS_REFC_DOMAIN_COW)
+ xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
+ rrc->prev_domain = irec.rc_domain;
xchk_refcountbt_xref(bs->sc, &irec);
+ /*
+ * If this is a record for a shared extent, check that all blocks
+ * between the previous record and this one have at most one reverse
+ * mapping.
+ */
+ if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
+ xchk_refcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
+ rrc->next_unshared_agbno = irec.rc_startblock +
+ irec.rc_blockcount;
+ }
+
return 0;
}
@@ -393,15 +468,25 @@ int
xchk_refcountbt(
struct xfs_scrub *sc)
{
- xfs_agblock_t cow_blocks = 0;
+ struct xchk_refcbt_records rrc = {
+ .cow_blocks = 0,
+ .next_unshared_agbno = 0,
+ .prev_domain = XFS_REFC_DOMAIN_SHARED,
+ };
int error;
error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
- &XFS_RMAP_OINFO_REFC, &cow_blocks);
+ &XFS_RMAP_OINFO_REFC, &rrc);
if (error)
return error;
- xchk_refcount_xref_rmap(sc, cow_blocks);
+ /*
+ * Check that all blocks between the last refcount > 1 record and the
+ * end of the AG have at most one reverse mapping.
+ */
+ xchk_refcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_agblocks);
+
+ xchk_refcount_xref_rmap(sc, rrc.cow_blocks);
return 0;
}
@@ -457,16 +542,37 @@ xchk_xref_is_not_shared(
xfs_agblock_t agbno,
xfs_extlen_t len)
{
- bool shared;
+ enum xbtree_recpacking outcome;
+ int error;
+
+ if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
+ return;
+
+ error = xfs_refcount_has_records(sc->sa.refc_cur,
+ XFS_REFC_DOMAIN_SHARED, agbno, len, &outcome);
+ if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
+ return;
+ if (outcome != XBTREE_RECPACKING_EMPTY)
+ xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
+}
+
+/* xref check that the extent is not being used for CoW staging. */
+void
+xchk_xref_is_not_cow_staging(
+ struct xfs_scrub *sc,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len)
+{
+ enum xbtree_recpacking outcome;
int error;
if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
return;
- error = xfs_refcount_has_record(sc->sa.refc_cur, XFS_REFC_DOMAIN_SHARED,
- agbno, len, &shared);
+ error = xfs_refcount_has_records(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW,
+ agbno, len, &outcome);
if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
return;
- if (shared)
+ if (outcome != XBTREE_RECPACKING_EMPTY)
xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
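
A userspace model of the gap cross-check added above; the kernel walks the rmapbt with xfs_rmap_query_range and returns -ECANCELED from the helper, while this sketch uses a plain sorted array and a bool in place of the NULLAGBLOCK sentinel. In any keyspace gap where the refcountbt recorded no shared extents, no two reverse mappings may overlap:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct rmap { uint32_t startblock, blockcount; };

/* Returns true if any two mappings overlap, i.e. some block is shared. */
static bool gap_has_shared_blocks(const struct rmap *rmaps, size_t nr)
{
        uint32_t next_bno = 0;
        bool seen = false;

        for (size_t i = 0; i < nr; i++) {
                if (seen && rmaps[i].startblock < next_bno)
                        return true;    /* kernel returns -ECANCELED here */
                next_bno = rmaps[i].startblock + rmaps[i].blockcount;
                seen = true;
        }
        return false;
}

int main(void)
{
        struct rmap disjoint[] = { { 10, 5 }, { 15, 5 }, { 30, 2 } };
        struct rmap shared[]   = { { 10, 5 }, { 12, 5 } };

        assert(!gap_has_shared_blocks(disjoint, 3));
        assert(gap_has_shared_blocks(shared, 2));
        return 0;
}
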
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index 8e78e1bc9eef..2f9e4f77db6b 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -219,15 +219,15 @@ xchk_xref_has_no_owner(
xfs_agblock_t bno,
xfs_extlen_t len)
{
- bool has_rmap;
+ enum xbtree_recpacking outcome;
int error;
if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
- error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
+ error = xfs_rmap_has_records(sc->sa.rmap_cur, bno, len, &outcome);
if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
- if (has_rmap)
+ if (outcome != XBTREE_RECPACKING_EMPTY)
xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index d85c3b883b4c..b6f452eb9645 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -172,6 +172,8 @@ void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
xfs_extlen_t len);
void xchk_xref_is_not_shared(struct xfs_scrub *sc, xfs_agblock_t bno,
xfs_extlen_t len);
+void xchk_xref_is_not_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
+ xfs_extlen_t len);
#ifdef CONFIG_XFS_RT
void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno,
xfs_extlen_t len);