Diffstat (limited to 'fs/xfs/xfs_discard.c')
-rw-r--r--  fs/xfs/xfs_discard.c  424
1 file changed, 356 insertions, 68 deletions
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 268bb734dc0a..6f0fc7fe1f2b 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -20,6 +20,7 @@
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_health.h"
+#include "xfs_rtbitmap.h"
/*
* Notes on an efficient, low latency fstrim algorithm
@@ -145,14 +146,18 @@ xfs_discard_extents(
return error;
}
+struct xfs_trim_cur {
+ xfs_agblock_t start;
+ xfs_extlen_t count;
+ xfs_agblock_t end;
+ xfs_extlen_t minlen;
+ bool by_bno;
+};
static int
xfs_trim_gather_extents(
struct xfs_perag *pag,
- xfs_daddr_t start,
- xfs_daddr_t end,
- xfs_daddr_t minlen,
- struct xfs_alloc_rec_incore *tcur,
+ struct xfs_trim_cur *tcur,
struct xfs_busy_extents *extents,
uint64_t *blocks_trimmed)
{
@@ -179,21 +184,26 @@ xfs_trim_gather_extents(
if (error)
goto out_trans_cancel;
- cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
-
- /*
- * Look up the extent length requested in the AGF and start with it.
- */
- if (tcur->ar_startblock == NULLAGBLOCK)
- error = xfs_alloc_lookup_ge(cur, 0, tcur->ar_blockcount, &i);
- else
- error = xfs_alloc_lookup_le(cur, tcur->ar_startblock,
- tcur->ar_blockcount, &i);
+ if (tcur->by_bno) {
+ /* sub-AG discard request always starts at tcur->start */
+ cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
+ error = xfs_alloc_lookup_le(cur, tcur->start, 0, &i);
+ if (!error && !i)
+ error = xfs_alloc_lookup_ge(cur, tcur->start, 0, &i);
+ } else if (tcur->start == 0) {
+ /* first time through a by-len starts with max length */
+ cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
+ error = xfs_alloc_lookup_ge(cur, 0, tcur->count, &i);
+ } else {
+ /* nth time through a by-len starts where we left off */
+ cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
+ error = xfs_alloc_lookup_le(cur, tcur->start, tcur->count, &i);
+ }
if (error)
goto out_del_cursor;
if (i == 0) {
/* nothing of that length left in the AG, we are done */
- tcur->ar_blockcount = 0;
+ tcur->count = 0;
goto out_del_cursor;
}
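
The lookup fallback above is the usual way to resume a scan of a sorted btree at an arbitrary key: try a less-than-or-equal lookup first, and if the start key precedes the first record, fall back to a greater-than-or-equal lookup so the walk still begins at the first candidate. A minimal sketch of the same pattern over a plain sorted array (illustrative only, not XFS code):

#include <stddef.h>

/*
 * Illustration only: find the record whose key is <= start, or failing
 * that the first record >= start, mirroring the xfs_alloc_lookup_le()
 * then xfs_alloc_lookup_ge() fallback above.  Returns -1 if empty.
 */
static int lookup_le_then_ge(const unsigned int *keys, size_t n,
                             unsigned int start)
{
        size_t i;

        for (i = n; i > 0; i--)
                if (keys[i - 1] <= start)
                        return (int)(i - 1);    /* LE hit */
        return n ? 0 : -1;                      /* GE fallback: first record */
}
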
@@ -204,8 +214,6 @@ xfs_trim_gather_extents(
while (i) {
xfs_agblock_t fbno;
xfs_extlen_t flen;
- xfs_daddr_t dbno;
- xfs_extlen_t dlen;
error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
if (error)
@@ -221,38 +229,46 @@ xfs_trim_gather_extents(
* Update the cursor to point at this extent so we
* restart the next batch from this extent.
*/
- tcur->ar_startblock = fbno;
- tcur->ar_blockcount = flen;
- break;
- }
-
- /*
- * use daddr format for all range/len calculations as that is
- * the format the range/len variables are supplied in by
- * userspace.
- */
- dbno = XFS_AGB_TO_DADDR(mp, pag->pag_agno, fbno);
- dlen = XFS_FSB_TO_BB(mp, flen);
-
- /*
- * Too small? Give up.
- */
- if (dlen < minlen) {
- trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
- tcur->ar_blockcount = 0;
+ tcur->start = fbno;
+ tcur->count = flen;
break;
}
/*
* If the extent is entirely outside of the range we are
- * supposed to discard skip it. Do not bother to trim
- * down partially overlapping ranges for now.
+ * supposed to discard, skip it. Do not bother to trim down
+ * partially overlapping ranges for now.
*/
- if (dbno + dlen < start || dbno > end) {
+ if (fbno + flen < tcur->start) {
+ trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
+ goto next_extent;
+ }
+ if (fbno > tcur->end) {
trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
+ if (tcur->by_bno) {
+ tcur->count = 0;
+ break;
+ }
goto next_extent;
}
+ /* Trim the extent returned to the range we want. */
+ if (fbno < tcur->start) {
+ flen -= tcur->start - fbno;
+ fbno = tcur->start;
+ }
+ if (fbno + flen > tcur->end + 1)
+ flen = tcur->end - fbno + 1;
+
+ /* Too small? Give up. */
+ if (flen < tcur->minlen) {
+ trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
+ if (tcur->by_bno)
+ goto next_extent;
+ tcur->count = 0;
+ break;
+ }
+
/*
* If any blocks in the range are still busy, skip the
* discard and try again the next time.
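
The clamping added above trims each free-space record to the caller's range before the minimum-length check; tcur->start and tcur->end are both inclusive block numbers, which is why the length calculation uses end - fbno + 1. A small worked sketch of the same arithmetic (hypothetical helper, sample numbers):

/*
 * Illustration of the clamping above: trim a free extent [fbno, fbno + flen)
 * to the inclusive request range [start, end].  With start = 100, end = 199,
 * a record fbno = 90, flen = 50 becomes fbno = 100, flen = 40.
 */
static void clamp_extent(unsigned long long *fbno, unsigned long long *flen,
                         unsigned long long start, unsigned long long end)
{
        if (*fbno < start) {
                *flen -= start - *fbno;
                *fbno = start;
        }
        if (*fbno + *flen > end + 1)
                *flen = end - *fbno + 1;
}
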
@@ -266,7 +282,10 @@ xfs_trim_gather_extents(
&extents->extent_list);
*blocks_trimmed += flen;
next_extent:
- error = xfs_btree_decrement(cur, 0, &i);
+ if (tcur->by_bno)
+ error = xfs_btree_increment(cur, 0, &i);
+ else
+ error = xfs_btree_decrement(cur, 0, &i);
if (error)
break;
@@ -276,7 +295,7 @@ next_extent:
 * are no more extents to search.
*/
if (i == 0)
- tcur->ar_blockcount = 0;
+ tcur->count = 0;
}
/*
@@ -304,19 +323,24 @@ xfs_trim_should_stop(void)
* we found in the last batch as the key to start the next.
*/
static int
-xfs_trim_extents(
+xfs_trim_perag_extents(
struct xfs_perag *pag,
- xfs_daddr_t start,
- xfs_daddr_t end,
- xfs_daddr_t minlen,
+ xfs_agblock_t start,
+ xfs_agblock_t end,
+ xfs_extlen_t minlen,
uint64_t *blocks_trimmed)
{
- struct xfs_alloc_rec_incore tcur = {
- .ar_blockcount = pag->pagf_longest,
- .ar_startblock = NULLAGBLOCK,
+ struct xfs_trim_cur tcur = {
+ .start = start,
+ .count = pag->pagf_longest,
+ .end = end,
+ .minlen = minlen,
};
int error = 0;
+ if (start != 0 || end != pag->block_count)
+ tcur.by_bno = true;
+
do {
struct xfs_busy_extents *extents;
@@ -330,8 +354,8 @@ xfs_trim_extents(
extents->owner = extents;
INIT_LIST_HEAD(&extents->extent_list);
- error = xfs_trim_gather_extents(pag, start, end, minlen,
- &tcur, extents, blocks_trimmed);
+ error = xfs_trim_gather_extents(pag, &tcur, extents,
+ blocks_trimmed);
if (error) {
kfree(extents);
break;
@@ -354,12 +378,265 @@ xfs_trim_extents(
if (xfs_trim_should_stop())
break;
- } while (tcur.ar_blockcount != 0);
+ } while (tcur.count != 0);
return error;
}
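
The loop above is the batching contract for the whole trim: gather a bounded set of candidates while holding the AGF, issue and wait for the discards without holding it, then resume from the saved cursor until tcur.count reaches zero or the caller is interrupted. A stripped-down sketch of that resumable-batch structure (illustrative; the function pointers stand in for the gather and discard steps):

struct cursor { unsigned long long start; unsigned long long count; };

/* Shape of the batched trim loop above, with the XFS details elided. */
static int trim_in_batches(struct cursor *cur,
                           int (*gather)(struct cursor *cur),
                           int (*discard)(void),
                           int (*should_stop)(void))
{
        int error;

        do {
                error = gather(cur);    /* updates cur for the next batch */
                if (error)
                        return error;
                error = discard();      /* issue and wait outside the gather */
                if (error)
                        return error;
        } while (cur->count != 0 && !should_stop());

        return 0;
}
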
+static int
+xfs_trim_datadev_extents(
+ struct xfs_mount *mp,
+ xfs_daddr_t start,
+ xfs_daddr_t end,
+ xfs_extlen_t minlen,
+ uint64_t *blocks_trimmed)
+{
+ xfs_agnumber_t start_agno, end_agno;
+ xfs_agblock_t start_agbno, end_agbno;
+ xfs_daddr_t ddev_end;
+ struct xfs_perag *pag;
+ int last_error = 0, error;
+
+ ddev_end = min_t(xfs_daddr_t, end,
+ XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1);
+
+ start_agno = xfs_daddr_to_agno(mp, start);
+ start_agbno = xfs_daddr_to_agbno(mp, start);
+ end_agno = xfs_daddr_to_agno(mp, ddev_end);
+ end_agbno = xfs_daddr_to_agbno(mp, ddev_end);
+
+ for_each_perag_range(mp, start_agno, end_agno, pag) {
+ xfs_agblock_t agend = pag->block_count;
+
+ if (start_agno == end_agno)
+ agend = end_agbno;
+ error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen,
+ blocks_trimmed);
+ if (error)
+ last_error = error;
+
+ if (xfs_trim_should_stop()) {
+ xfs_perag_rele(pag);
+ break;
+ }
+ start_agbno = 0;
+ }
+
+ return last_error;
+}
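
xfs_trim_datadev_extents() splits the linear daddr range into per-AG pieces: only the first AG keeps the caller's start offset, only the last AG keeps the end offset, and start_agbno is reset to zero for every AG after the first. A toy example of the same split with a made-up group size:

#include <stdio.h>

/*
 * Illustration of the per-AG split above with a toy group size: a linear
 * range is broken into per-group pieces, where only the first group keeps
 * the start offset and only the last keeps the end offset.
 */
int main(void)
{
        const unsigned long long group_blocks = 1000;   /* toy AG size */
        unsigned long long start = 2500, end = 5200;    /* linear range */
        unsigned long long g;

        for (g = start / group_blocks; g <= end / group_blocks; g++) {
                unsigned long long lo = (g == start / group_blocks) ?
                                        start % group_blocks : 0;
                unsigned long long hi = (g == end / group_blocks) ?
                                        end % group_blocks : group_blocks - 1;

                printf("group %llu: blocks %llu..%llu\n", g, lo, hi);
        }
        return 0;
}

This prints group 2 starting at block 500, groups 3 and 4 in full, and group 5 ending at block 200, which is the same shape the AG loop above produces.
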
+
+#ifdef CONFIG_XFS_RT
+struct xfs_trim_rtdev {
+ /* list of rt extents to free */
+ struct list_head extent_list;
+
+ /* pointer to count of blocks trimmed */
+ uint64_t *blocks_trimmed;
+
+ /* minimum length that caller allows us to trim */
+ xfs_rtblock_t minlen_fsb;
+
+ /* restart point for the rtbitmap walk */
+ xfs_rtxnum_t restart_rtx;
+
+ /* stopping point for the current rtbitmap walk */
+ xfs_rtxnum_t stop_rtx;
+};
+
+struct xfs_rtx_busy {
+ struct list_head list;
+ xfs_rtblock_t bno;
+ xfs_rtblock_t length;
+};
+
+static void
+xfs_discard_free_rtdev_extents(
+ struct xfs_trim_rtdev *tr)
+{
+ struct xfs_rtx_busy *busyp, *n;
+
+ list_for_each_entry_safe(busyp, n, &tr->extent_list, list) {
+ list_del_init(&busyp->list);
+ kfree(busyp);
+ }
+}
+
+/*
+ * Walk the discard list and issue discards on all the busy extents in the
+ * list. We plug and chain the bios so that we only need a single completion
+ * call to clear all the busy extents once the discards are complete.
+ */
+static int
+xfs_discard_rtdev_extents(
+ struct xfs_mount *mp,
+ struct xfs_trim_rtdev *tr)
+{
+ struct block_device *bdev = mp->m_rtdev_targp->bt_bdev;
+ struct xfs_rtx_busy *busyp;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
+ xfs_rtblock_t start = NULLRTBLOCK, length = 0;
+ int error = 0;
+
+ blk_start_plug(&plug);
+ list_for_each_entry(busyp, &tr->extent_list, list) {
+ if (start == NULLRTBLOCK)
+ start = busyp->bno;
+ length += busyp->length;
+
+ trace_xfs_discard_rtextent(mp, busyp->bno, busyp->length);
+
+ error = __blkdev_issue_discard(bdev,
+ XFS_FSB_TO_BB(mp, busyp->bno),
+ XFS_FSB_TO_BB(mp, busyp->length),
+ GFP_NOFS, &bio);
+ if (error)
+ break;
+ }
+ xfs_discard_free_rtdev_extents(tr);
+
+ if (bio) {
+ error = submit_bio_wait(bio);
+ if (error == -EOPNOTSUPP)
+ error = 0;
+ if (error)
+ xfs_info(mp,
+ "discard failed for rtextent [0x%llx,%llu], error %d",
+ (unsigned long long)start,
+ (unsigned long long)length,
+ error);
+ bio_put(bio);
+ }
+ blk_finish_plug(&plug);
+
+ return error;
+}
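
The helper above relies on bio chaining: each __blkdev_issue_discard() call links a new bio onto the chain it is handed back, so a single submit_bio_wait() on the final bio waits for every discard in the batch, and the plug keeps the requests batched until dispatch. The pattern reduced to its essentials (a sketch; the extent bookkeeping and the sector_of()/sectors_of() helpers are placeholders, not real APIs):

/* Sketch of the plug/chain/single-wait pattern used above. */
struct bio *bio = NULL;
struct blk_plug plug;
int i, error = 0;

blk_start_plug(&plug);
for (i = 0; i < nr_extents && !error; i++)
        /* each call may allocate and chain another bio onto 'bio' */
        error = __blkdev_issue_discard(bdev, sector_of(&extents[i]),
                                       sectors_of(&extents[i]),
                                       GFP_NOFS, &bio);
if (bio) {
        /* one synchronous wait covers the whole chain */
        error = submit_bio_wait(bio);
        bio_put(bio);
}
blk_finish_plug(&plug);
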
+
+static int
+xfs_trim_gather_rtextent(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ const struct xfs_rtalloc_rec *rec,
+ void *priv)
+{
+ struct xfs_trim_rtdev *tr = priv;
+ struct xfs_rtx_busy *busyp;
+ xfs_rtblock_t rbno, rlen;
+
+ if (rec->ar_startext > tr->stop_rtx) {
+ /*
+ * If we've scanned a large number of rtbitmap blocks, update
+ * the cursor to point at this extent so we restart the next
+ * batch from this extent.
+ */
+ tr->restart_rtx = rec->ar_startext;
+ return -ECANCELED;
+ }
+
+ rbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
+ rlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);
+
+ /* Ignore too small. */
+ if (rlen < tr->minlen_fsb) {
+ trace_xfs_discard_rttoosmall(mp, rbno, rlen);
+ return 0;
+ }
+
+ busyp = kzalloc(sizeof(struct xfs_rtx_busy), GFP_KERNEL);
+ if (!busyp)
+ return -ENOMEM;
+
+ busyp->bno = rbno;
+ busyp->length = rlen;
+ INIT_LIST_HEAD(&busyp->list);
+ list_add_tail(&busyp->list, &tr->extent_list);
+ *tr->blocks_trimmed += rlen;
+
+ tr->restart_rtx = rec->ar_startext + rec->ar_extcount;
+ return 0;
+}
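
The callback above bounds how long the rtbitmap lock is held: each batch may scan roughly one bitmap block's worth of rt extents (sb_blocksize * NBBY bits), and once the walk passes stop_rtx it records a restart point and returns -ECANCELED, which the outer loop treats as "batch full" rather than a failure. A minimal sketch of that convention (illustrative callback and context, not the XFS query interface):

#include <errno.h>

/*
 * Illustration of the "batch full" convention above: a range-query callback
 * that records where to resume and aborts the walk once it passes the
 * per-batch stopping point.
 */
struct batch_ctl {
        unsigned long long stop;        /* last key this batch may touch */
        unsigned long long restart;     /* first key of the next batch */
};

static int gather_one(unsigned long long key, void *priv)
{
        struct batch_ctl *bc = priv;

        if (key > bc->stop) {
                bc->restart = key;
                return -ECANCELED;      /* stop the walk, not an error */
        }
        /* ... record the extent ... */
        bc->restart = key + 1;
        return 0;
}
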
+
+static int
+xfs_trim_rtdev_extents(
+ struct xfs_mount *mp,
+ xfs_daddr_t start,
+ xfs_daddr_t end,
+ xfs_daddr_t minlen,
+ uint64_t *blocks_trimmed)
+{
+ struct xfs_rtalloc_rec low = { };
+ struct xfs_rtalloc_rec high = { };
+ struct xfs_trim_rtdev tr = {
+ .blocks_trimmed = blocks_trimmed,
+ .minlen_fsb = XFS_BB_TO_FSB(mp, minlen),
+ };
+ struct xfs_trans *tp;
+ xfs_daddr_t rtdev_daddr;
+ int error;
+
+ INIT_LIST_HEAD(&tr.extent_list);
+
+ /* Shift the start and end downwards to match the rt device. */
+ rtdev_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
+ if (start > rtdev_daddr)
+ start -= rtdev_daddr;
+ else
+ start = 0;
+
+ if (end <= rtdev_daddr)
+ return 0;
+ end -= rtdev_daddr;
+
+ error = xfs_trans_alloc_empty(mp, &tp);
+ if (error)
+ return error;
+
+ end = min_t(xfs_daddr_t, end,
+ XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);
+
+ /* Convert the rt blocks to rt extents */
+ low.ar_startext = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
+ high.ar_startext = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));
+
+ /*
+ * Walk the free ranges between low and high. The query_range function
+ * trims the extents returned.
+ */
+ do {
+ tr.stop_rtx = low.ar_startext + (mp->m_sb.sb_blocksize * NBBY);
+ xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
+ error = xfs_rtalloc_query_range(mp, tp, &low, &high,
+ xfs_trim_gather_rtextent, &tr);
+
+ if (error == -ECANCELED)
+ error = 0;
+ if (error) {
+ xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+ xfs_discard_free_rtdev_extents(&tr);
+ break;
+ }
+
+ if (list_empty(&tr.extent_list)) {
+ xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+ break;
+ }
+
+ error = xfs_discard_rtdev_extents(mp, &tr);
+ xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
+ if (error)
+ break;
+
+ low.ar_startext = tr.restart_rtx;
+ } while (!xfs_trim_should_stop() && low.ar_startext <= high.ar_startext);
+
+ xfs_trans_cancel(tp);
+ return error;
+}
+#else
+# define xfs_trim_rtdev_extents(m,s,e,n,b) (-EOPNOTSUPP)
+#endif /* CONFIG_XFS_RT */
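
Because the rt device sits in the FITRIM address space immediately after the data device, xfs_trim_rtdev_extents() first shifts the incoming daddr range down by the size of the data device. A worked example with made-up geometry (4 KiB blocks, so one filesystem block is 8 daddrs, and sb_dblocks = 1,000,000):

#include <stdio.h>

/* Worked example of the daddr shift above; the geometry is hypothetical. */
int main(void)
{
        unsigned long long rtdev_daddr = 1000000ULL * 8;        /* 8,000,000 */
        unsigned long long start = 8000100, end = 9000000;

        if (end <= rtdev_daddr) {
                printf("request never reaches the rt device\n");
                return 0;
        }
        end -= rtdev_daddr;                                     /* 1,000,000 */
        start = start > rtdev_daddr ? start - rtdev_daddr : 0;  /* 100 */
        printf("rt daddr range %llu..%llu\n", start, end);
        return 0;
}
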
+
/*
* trim a range of the filesystem.
*
@@ -368,26 +645,37 @@ xfs_trim_extents(
* addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
* is a linear address range. Hence we need to use DADDR based conversions and
* comparisons for determining the correct offset and regions to trim.
+ *
+ * The realtime device is mapped into the FITRIM "address space" immediately
+ * after the data device.
*/
int
xfs_ioc_trim(
struct xfs_mount *mp,
struct fstrim_range __user *urange)
{
- struct xfs_perag *pag;
unsigned int granularity =
bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
+ struct block_device *rt_bdev = NULL;
struct fstrim_range range;
- xfs_daddr_t start, end, minlen;
- xfs_agnumber_t agno;
+ xfs_daddr_t start, end;
+ xfs_extlen_t minlen;
+ xfs_rfsblock_t max_blocks;
uint64_t blocks_trimmed = 0;
int error, last_error = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev))
+ if (mp->m_rtdev_targp &&
+ bdev_max_discard_sectors(mp->m_rtdev_targp->bt_bdev))
+ rt_bdev = mp->m_rtdev_targp->bt_bdev;
+ if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev) && !rt_bdev)
return -EOPNOTSUPP;
+ if (rt_bdev)
+ granularity = max(granularity,
+ bdev_discard_granularity(rt_bdev));
+
/*
* We haven't recovered the log, so we cannot use our bnobt-guided
* storage zapping commands.
@@ -399,7 +687,8 @@ xfs_ioc_trim(
return -EFAULT;
range.minlen = max_t(u64, granularity, range.minlen);
- minlen = BTOBB(range.minlen);
+ minlen = XFS_B_TO_FSB(mp, range.minlen);
+
/*
* Truncating down the len isn't actually quite correct, but using
* BBTOB would mean we trivially get overflows for values
@@ -407,7 +696,8 @@ xfs_ioc_trim(
* used by the fstrim application. In the end it really doesn't
* matter as trimming blocks is an advisory interface.
*/
- if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
+ max_blocks = mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks;
+ if (range.start >= XFS_FSB_TO_B(mp, max_blocks) ||
range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
range.len < mp->m_sb.sb_blocksize)
return -EINVAL;
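
With the rt device included, a single FITRIM call can now cover both devices: userspace passes byte offsets into the combined data-plus-realtime range and the kernel clamps and splits them as above. A minimal caller using the standard FITRIM ioctl (the mount point path is only an example):

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */

int main(void)
{
        struct fstrim_range range;
        int fd = open("/mnt/xfs", O_RDONLY);    /* example mount point */

        if (fd < 0)
                return 1;
        memset(&range, 0, sizeof(range));
        range.start = 0;
        range.len = ULLONG_MAX;         /* whole filesystem, data + rt */
        range.minlen = 0;               /* kernel raises this to the granularity */

        if (ioctl(fd, FITRIM, &range) < 0)
                perror("FITRIM");
        else
                printf("trimmed %llu bytes\n",
                       (unsigned long long)range.len);
        close(fd);
        return 0;
}
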
@@ -415,20 +705,18 @@ xfs_ioc_trim(
start = BTOBB(range.start);
end = start + BTOBBT(range.len) - 1;
- if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1)
- end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1;
-
- agno = xfs_daddr_to_agno(mp, start);
- for_each_perag_range(mp, agno, xfs_daddr_to_agno(mp, end), pag) {
- error = xfs_trim_extents(pag, start, end, minlen,
- &blocks_trimmed);
+ if (bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev)) {
+ error = xfs_trim_datadev_extents(mp, start, end, minlen,
+ &blocks_trimmed);
if (error)
last_error = error;
+ }
- if (xfs_trim_should_stop()) {
- xfs_perag_rele(pag);
- break;
- }
+ if (rt_bdev && !xfs_trim_should_stop()) {
+ error = xfs_trim_rtdev_extents(mp, start, end, minlen,
+ &blocks_trimmed);
+ if (error)
+ last_error = error;
}
if (last_error)