Diffstat (limited to 'fs/xfs/libxfs/xfs_bmap.c')
-rw-r--r--	fs/xfs/libxfs/xfs_bmap.c	709
1 files changed, 565 insertions, 144 deletions
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index b060bca93402..c27344cf38e1 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -47,6 +47,8 @@
 #include "xfs_attr_leaf.h"
 #include "xfs_filestream.h"
 #include "xfs_rmap.h"
+#include "xfs_ag_resv.h"
+#include "xfs_refcount.h"
 
 kmem_zone_t		*xfs_bmap_free_item_zone;
 
@@ -139,7 +141,8 @@ xfs_bmbt_lookup_ge(
  */
 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
 {
-	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+	return whichfork != XFS_COW_FORK &&
+		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
 		XFS_IFORK_NEXTENTS(ip, whichfork) >
 			XFS_IFORK_MAXEXT(ip, whichfork);
 }
@@ -149,7 +152,8 @@ static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
  */
 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
 {
-	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+	return whichfork != XFS_COW_FORK &&
+		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
 		XFS_IFORK_NEXTENTS(ip, whichfork) <=
 			XFS_IFORK_MAXEXT(ip, whichfork);
 }
@@ -639,6 +643,7 @@ xfs_bmap_btree_to_extents(
 
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
+	ASSERT(whichfork != XFS_COW_FORK);
 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
 	rblock = ifp->if_broot;
@@ -705,6 +710,7 @@ xfs_bmap_extents_to_btree(
 	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
 
 	mp = ip->i_mount;
+	ASSERT(whichfork != XFS_COW_FORK);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
 
@@ -747,6 +753,7 @@ xfs_bmap_extents_to_btree(
 		args.type = XFS_ALLOCTYPE_START_BNO;
 		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
 	} else if (dfops->dop_low) {
+try_another_ag:
 		args.type = XFS_ALLOCTYPE_START_BNO;
 		args.fsbno = *firstblock;
 	} else {
@@ -761,6 +768,21 @@ xfs_bmap_extents_to_btree(
 		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 		return error;
 	}
+
+	/*
+	 * During a CoW operation, the allocation and bmbt updates occur in
+	 * different transactions.  The mapping code tries to put new bmbt
+	 * blocks near extents being mapped, but the only way to guarantee this
+	 * is if the alloc and the mapping happen in a single transaction that
+	 * has a block reservation.  That isn't the case here, so if we run out
+	 * of space we'll try again with another AG.
+	 */
+	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
+	    args.fsbno == NULLFSBLOCK &&
+	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
+		dfops->dop_low = true;
+		goto try_another_ag;
+	}
 	/*
 	 * Allocation can't fail, the space was reserved.
 	 */
@@ -836,6 +858,7 @@ xfs_bmap_local_to_extents_empty(
 {
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 
+	ASSERT(whichfork != XFS_COW_FORK);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
 	ASSERT(ifp->if_bytes == 0);
 	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
@@ -895,6 +918,7 @@ xfs_bmap_local_to_extents(
 	 * file currently fits in an inode.
 	 */
 	if (*firstblock == NULLFSBLOCK) {
+try_another_ag:
 		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
 		args.type = XFS_ALLOCTYPE_START_BNO;
 	} else {
@@ -907,6 +931,19 @@ xfs_bmap_local_to_extents(
 	if (error)
 		goto done;
 
+	/*
+	 * During a CoW operation, the allocation and bmbt updates occur in
+	 * different transactions.  The mapping code tries to put new bmbt
+	 * blocks near extents being mapped, but the only way to guarantee this
+	 * is if the alloc and the mapping happen in a single transaction that
+	 * has a block reservation.  That isn't the case here, so if we run out
+	 * of space we'll try again with another AG.
+	 */
+	if (xfs_sb_version_hasreflink(&ip->i_mount->m_sb) &&
+	    args.fsbno == NULLFSBLOCK &&
+	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
+		goto try_another_ag;
+	}
 	/* Can't fail, the space was reserved. */
 	ASSERT(args.fsbno != NULLFSBLOCK);
 	ASSERT(args.len == 1);
@@ -1388,7 +1425,7 @@ xfs_bmap_search_multi_extents(
  * Else, *lastxp will be set to the index of the found
  * entry; *gotp will contain the entry.
  */
-STATIC xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
+xfs_bmbt_rec_host_t *                 /* pointer to found extent entry */
 xfs_bmap_search_extents(
 	xfs_inode_t     *ip,            /* incore inode pointer */
 	xfs_fileoff_t   bno,            /* block number searched for */
@@ -1669,7 +1706,8 @@ xfs_bmap_one_block(
  */
 STATIC int				/* error */
 xfs_bmap_add_extent_delay_real(
-	struct xfs_bmalloca	*bma)
+	struct xfs_bmalloca	*bma,
+	int			whichfork)
 {
 	struct xfs_bmbt_irec	*new = &bma->got;
 	int			diff;	/* temp value */
@@ -1687,11 +1725,14 @@ xfs_bmap_add_extent_delay_real(
 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
 	xfs_filblks_t		temp2=0;/* value for da_new calculations */
 	int			tmp_rval;	/* partial logging flags */
-	int			whichfork = XFS_DATA_FORK;
 	struct xfs_mount	*mp;
+	xfs_extnum_t		*nextents;
 
 	mp = bma->ip->i_mount;
 	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+	ASSERT(whichfork != XFS_ATTR_FORK);
+	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
+						&bma->ip->i_d.di_nextents);
 
 	ASSERT(bma->idx >= 0);
 	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
@@ -1705,6 +1746,9 @@ xfs_bmap_add_extent_delay_real(
 #define	RIGHT		r[1]
 #define	PREV		r[2]
 
+	if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
+
 	/*
 	 * Set up a bunch of variables to make the tests simpler.
 	 */
@@ -1791,7 +1835,7 @@ xfs_bmap_add_extent_delay_real(
 		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
 		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
-		bma->ip->i_d.di_nextents--;
+		(*nextents)--;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -1893,7 +1937,7 @@ xfs_bmap_add_extent_delay_real(
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
 		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -1963,7 +2007,7 @@ xfs_bmap_add_extent_delay_real(
 		temp = PREV.br_blockcount - new->br_blockcount;
 		xfs_bmbt_set_blockcount(ep, temp);
 		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2047,7 +2091,7 @@ xfs_bmap_add_extent_delay_real(
 		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_blockcount(ep, temp);
 		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2116,7 +2160,7 @@ xfs_bmap_add_extent_delay_real(
 		RIGHT.br_blockcount = temp2;
 		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
 		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
-		bma->ip->i_d.di_nextents++;
+		(*nextents)++;
 		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2214,7 +2258,8 @@ xfs_bmap_add_extent_delay_real(
 
 	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
 done:
-	bma->logflags |= rval;
+	if (whichfork != XFS_COW_FORK)
+		bma->logflags |= rval;
 	return error;
 #undef	LEFT
 #undef	RIGHT
@@ -2758,6 +2803,7 @@ done:
 STATIC void
 xfs_bmap_add_extent_hole_delay(
 	xfs_inode_t		*ip,	/* incore inode pointer */
+	int			whichfork,
 	xfs_extnum_t		*idx,	/* extent number to update/insert */
 	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
 {
@@ -2769,8 +2815,10 @@ xfs_bmap_add_extent_hole_delay(
 	int			state;  /* state bits, accessed thru macros */
 	xfs_filblks_t		temp=0;	/* temp for indirect calculations */
 
-	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
 	state = 0;
+	if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
 	ASSERT(isnullstartblock(new->br_startblock));
 
 	/*
@@ -2788,7 +2836,7 @@ xfs_bmap_add_extent_hole_delay(
 	 * Check and set flags if the current (right) segment exists.
 	 * If it doesn't exist, we're converting the hole at end-of-file.
 	 */
-	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+	if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
 
@@ -2922,6 +2970,7 @@ xfs_bmap_add_extent_hole_real(
 	ASSERT(!isnullstartblock(new->br_startblock));
 	ASSERT(!bma->cur ||
 	       !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+	ASSERT(whichfork != XFS_COW_FORK);
 
 	XFS_STATS_INC(mp, xs_add_exlist);
 
@@ -3347,7 +3396,8 @@ xfs_bmap_adjacent(
 
 	mp = ap->ip->i_mount;
 	nullfb = *ap->firstblock == NULLFSBLOCK;
-	rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
+	rt = XFS_IS_REALTIME_INODE(ap->ip) &&
+		xfs_alloc_is_userdata(ap->datatype);
 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
 	/*
 	 * If allocating at eof, and there's a previous real block,
@@ -3501,7 +3551,8 @@ xfs_bmap_longest_free_extent(
 	}
 
 	longest = xfs_alloc_longest_free_extent(mp, pag,
-					xfs_alloc_min_freelist(mp, pag));
+				xfs_alloc_min_freelist(mp, pag),
+				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
 	if (*blen < longest)
 		*blen = longest;
 
@@ -3622,7 +3673,7 @@ xfs_bmap_btalloc(
 {
 	xfs_mount_t	*mp;		/* mount point structure */
 	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
-	xfs_extlen_t	align;		/* minimum allocation alignment */
+	xfs_extlen_t	align = 0;	/* minimum allocation alignment */
 	xfs_agnumber_t	fb_agno;	/* ag number of ap->firstblock */
 	xfs_agnumber_t	ag;
 	xfs_alloc_arg_t	args;
@@ -3645,7 +3696,10 @@ xfs_bmap_btalloc(
 	else if (mp->m_dalign)
 		stripe_align = mp->m_dalign;
 
-	align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
+	if (ap->flags & XFS_BMAPI_COWFORK)
+		align = xfs_get_cowextsz_hint(ap->ip);
+	else if (xfs_alloc_is_userdata(ap->datatype))
+		align = xfs_get_extsz_hint(ap->ip);
 	if (unlikely(align)) {
 		error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
 						align, 0, ap->eof, 0, ap->conv,
@@ -3658,7 +3712,8 @@ xfs_bmap_btalloc(
 	nullfb = *ap->firstblock == NULLFSBLOCK;
 	fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
 	if (nullfb) {
-		if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
+		if (xfs_alloc_is_userdata(ap->datatype) &&
+		    xfs_inode_is_filestream(ap->ip)) {
 			ag = xfs_filestream_lookup_ag(ap->ip);
 			ag = (ag != NULLAGNUMBER) ? ag : 0;
 			ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
@@ -3698,7 +3753,8 @@ xfs_bmap_btalloc(
 		 * enough for the request.  If one isn't found, then adjust
 		 * the minimum allocation size to the largest space found.
 		 */
-		if (ap->userdata && xfs_inode_is_filestream(ap->ip))
+		if (xfs_alloc_is_userdata(ap->datatype) &&
+		    xfs_inode_is_filestream(ap->ip))
			error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
 		else
 			error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
@@ -3781,9 +3837,9 @@ xfs_bmap_btalloc(
 	}
 	args.minleft = ap->minleft;
 	args.wasdel = ap->wasdel;
-	args.isfl = 0;
-	args.userdata = ap->userdata;
-	if (ap->userdata & XFS_ALLOC_USERDATA_ZERO)
+	args.resv = XFS_AG_RESV_NONE;
+	args.datatype = ap->datatype;
+	if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
 		args.ip = ap->ip;
 
 	error = xfs_alloc_vextent(&args);
@@ -3850,7 +3906,8 @@ xfs_bmap_btalloc(
 		ASSERT(nullfb || fb_agno == args.agno ||
 		       (ap->dfops->dop_low && fb_agno < args.agno));
 		ap->length = args.len;
-		ap->ip->i_d.di_nblocks += args.len;
+		if (!(ap->flags & XFS_BMAPI_COWFORK))
+			ap->ip->i_d.di_nblocks += args.len;
 		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
 		if (ap->wasdel)
 			ap->ip->i_delayed_blks -= args.len;
@@ -3870,6 +3927,63 @@ xfs_bmap_btalloc(
 }
 
 /*
+ * For a remap operation, just "allocate" an extent at the address that the
+ * caller passed in, and ensure that the AGFL is the right size.  The caller
+ * will then map the "allocated" extent into the file somewhere.
+ */
+STATIC int
+xfs_bmap_remap_alloc(
+	struct xfs_bmalloca	*ap)
+{
+	struct xfs_trans	*tp = ap->tp;
+	struct xfs_mount	*mp = tp->t_mountp;
+	xfs_agblock_t		bno;
+	struct xfs_alloc_arg	args;
+	int			error;
+
+	/*
+	 * validate that the block number is legal - the enables us to detect
+	 * and handle a silent filesystem corruption rather than crashing.
+	 */
+	memset(&args, 0, sizeof(struct xfs_alloc_arg));
+	args.tp = ap->tp;
+	args.mp = ap->tp->t_mountp;
+	bno = *ap->firstblock;
+	args.agno = XFS_FSB_TO_AGNO(mp, bno);
+	args.agbno = XFS_FSB_TO_AGBNO(mp, bno);
+	if (args.agno >= mp->m_sb.sb_agcount ||
+	    args.agbno >= mp->m_sb.sb_agblocks)
+		return -EFSCORRUPTED;
+
+	/* "Allocate" the extent from the range we passed in. */
+	trace_xfs_bmap_remap_alloc(ap->ip, *ap->firstblock, ap->length);
+	ap->blkno = bno;
+	ap->ip->i_d.di_nblocks += ap->length;
+	xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
+
+	/* Fix the freelist, like a real allocator does. */
+	args.datatype = ap->datatype;
+	args.pag = xfs_perag_get(args.mp, args.agno);
+	ASSERT(args.pag);
+
+	/*
+	 * The freelist fixing code will decline the allocation if
+	 * the size and shape of the free space doesn't allow for
+	 * allocating the extent and updating all the metadata that
+	 * happens during an allocation.  We're remapping, not
+	 * allocating, so skip that check by pretending to be freeing.
+	 */
+	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
+	if (error)
+		goto error0;
+error0:
+	xfs_perag_put(args.pag);
+	if (error)
+		trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
+	return error;
+}
+
+/*
  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
  * It figures out where to ask the underlying allocator to put the new extent.
  */
@@ -3877,7 +3991,10 @@ STATIC int
 xfs_bmap_alloc(
 	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
 {
-	if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
+	if (ap->flags & XFS_BMAPI_REMAP)
+		return xfs_bmap_remap_alloc(ap);
+	if (XFS_IS_REALTIME_INODE(ap->ip) &&
+	    xfs_alloc_is_userdata(ap->datatype))
 		return xfs_bmap_rtalloc(ap);
 	return xfs_bmap_btalloc(ap);
 }
@@ -4005,12 +4122,11 @@ xfs_bmapi_read(
 	int			error;
 	int			eof;
 	int			n = 0;
-	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-						XFS_ATTR_FORK : XFS_DATA_FORK;
+	int			whichfork = xfs_bmapi_whichfork(flags);
 
 	ASSERT(*nmap >= 1);
 	ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
-			   XFS_BMAPI_IGSTATE)));
+			   XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
 
 	if (unlikely(XFS_TEST_ERROR(
@@ -4028,6 +4144,16 @@ xfs_bmapi_read(
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 
+	/* No CoW fork?  Return a hole. */
+	if (whichfork == XFS_COW_FORK && !ifp) {
+		mval->br_startoff = bno;
+		mval->br_startblock = HOLESTARTBLOCK;
+		mval->br_blockcount = len;
+		mval->br_state = XFS_EXT_NORM;
+		*nmap = 1;
+		return 0;
+	}
+
 	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 		error = xfs_iread_extents(NULL, ip, whichfork);
 		if (error)
@@ -4074,9 +4200,10 @@ xfs_bmapi_read(
 	return 0;
 }
 
-STATIC int
+int
 xfs_bmapi_reserve_delalloc(
 	struct xfs_inode	*ip,
+	int			whichfork,
 	xfs_fileoff_t		aoff,
 	xfs_filblks_t		len,
 	struct xfs_bmbt_irec	*got,
@@ -4085,7 +4212,7 @@ xfs_bmapi_reserve_delalloc(
 	int			eof)
 {
 	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
 	xfs_extlen_t		alen;
 	xfs_extlen_t		indlen;
 	char			rt = XFS_IS_REALTIME_INODE(ip);
@@ -4097,7 +4224,10 @@ xfs_bmapi_reserve_delalloc(
 		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
 
 	/* Figure out the extent size, adjust alen */
-	extsz = xfs_get_extsz_hint(ip);
+	if (whichfork == XFS_COW_FORK)
+		extsz = xfs_get_cowextsz_hint(ip);
+	else
+		extsz = xfs_get_extsz_hint(ip);
 	if (extsz) {
 		error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
 					       1, 0, &aoff, &alen);
@@ -4144,7 +4274,7 @@ xfs_bmapi_reserve_delalloc(
 	got->br_startblock = nullstartblock(indlen);
 	got->br_blockcount = alen;
 	got->br_state = XFS_EXT_NORM;
-	xfs_bmap_add_extent_hole_delay(ip, lastx, got);
+	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
 	/*
 	 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
@@ -4170,98 +4300,12 @@ out_unreserve_quota:
 	return error;
 }
 
-/*
- * Map file blocks to filesystem blocks, adding delayed allocations as needed.
- */
-int
-xfs_bmapi_delay(
-	struct xfs_inode	*ip,	/* incore inode */
-	xfs_fileoff_t		bno,	/* starting file offs. mapped */
-	xfs_filblks_t		len,	/* length to map in file */
-	struct xfs_bmbt_irec	*mval,	/* output: map values */
-	int			*nmap,	/* i/o: mval size/count */
-	int			flags)	/* XFS_BMAPI_... */
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-	struct xfs_bmbt_irec	got;	/* current file extent record */
-	struct xfs_bmbt_irec	prev;	/* previous file extent record */
-	xfs_fileoff_t		obno;	/* old block number (offset) */
-	xfs_fileoff_t		end;	/* end of mapped file region */
-	xfs_extnum_t		lastx;	/* last useful extent number */
-	int			eof;	/* we've hit the end of extents */
-	int			n = 0;	/* current extent index */
-	int			error = 0;
-
-	ASSERT(*nmap >= 1);
-	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
-	ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
-	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
-	if (unlikely(XFS_TEST_ERROR(
-	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
-	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
-	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
-		XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
-		return -EFSCORRUPTED;
-	}
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return -EIO;
-
-	XFS_STATS_INC(mp, xs_blk_mapw);
-
-	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
-		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
-		if (error)
-			return error;
-	}
-
-	xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
-	end = bno + len;
-	obno = bno;
-
-	while (bno < end && n < *nmap) {
-		if (eof || got.br_startoff > bno) {
-			error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
-							   &prev, &lastx, eof);
-			if (error) {
-				if (n == 0) {
-					*nmap = 0;
-					return error;
-				}
-				break;
-			}
-		}
-
-		/* set up the extent map to return. */
-		xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
-		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
-
-		/* If we're done, stop now. */
-		if (bno >= end || n >= *nmap)
-			break;
-
-		/* Else go on to the next record. */
-		prev = got;
-		if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
-			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
-		else
-			eof = 1;
-	}
-
-	*nmap = n;
-	return 0;
-}
-
-
 static int
 xfs_bmapi_allocate(
 	struct xfs_bmalloca	*bma)
 {
 	struct xfs_mount	*mp = bma->ip->i_mount;
-	int			whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
-						XFS_ATTR_FORK : XFS_DATA_FORK;
+	int			whichfork = xfs_bmapi_whichfork(bma->flags);
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 	int			tmp_logflags = 0;
 	int			error;
@@ -4287,15 +4331,21 @@ xfs_bmapi_allocate(
 	}
 
 	/*
-	 * Indicate if this is the first user data in the file, or just any
-	 * user data. And if it is userdata, indicate whether it needs to
-	 * be initialised to zero during allocation.
+	 * Set the data type being allocated. For the data fork, the first data
+	 * in the file is treated differently to all other allocations. For the
+	 * attribute fork, we only need to ensure the allocated range is not on
+	 * the busy list.
 	 */
 	if (!(bma->flags & XFS_BMAPI_METADATA)) {
-		bma->userdata = (bma->offset == 0) ?
-			XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
+		bma->datatype = XFS_ALLOC_NOBUSY;
+		if (whichfork == XFS_DATA_FORK) {
+			if (bma->offset == 0)
+				bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
+			else
+				bma->datatype |= XFS_ALLOC_USERDATA;
+		}
 		if (bma->flags & XFS_BMAPI_ZERO)
-			bma->userdata |= XFS_ALLOC_USERDATA_ZERO;
+			bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
 	}
 
 	bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
@@ -4350,7 +4400,7 @@ xfs_bmapi_allocate(
 		bma->got.br_state = XFS_EXT_UNWRITTEN;
 
 	if (bma->wasdel)
-		error = xfs_bmap_add_extent_delay_real(bma);
+		error = xfs_bmap_add_extent_delay_real(bma, whichfork);
 	else
 		error = xfs_bmap_add_extent_hole_real(bma, whichfork);
 
@@ -4380,8 +4430,7 @@ xfs_bmapi_convert_unwritten(
 	xfs_filblks_t		len,
 	int			flags)
 {
-	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-						XFS_ATTR_FORK : XFS_DATA_FORK;
+	int			whichfork = xfs_bmapi_whichfork(flags);
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 	int			tmp_logflags = 0;
 	int			error;
@@ -4397,6 +4446,8 @@ xfs_bmapi_convert_unwritten(
 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
 		return 0;
 
+	ASSERT(whichfork != XFS_COW_FORK);
+
 	/*
 	 * Modify (by adding) the state flag, if writing.
 	 */
@@ -4503,8 +4554,7 @@ xfs_bmapi_write(
 	orig_mval = mval;
 	orig_nmap = *nmap;
 #endif
-	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-		XFS_ATTR_FORK : XFS_DATA_FORK;
+	whichfork = xfs_bmapi_whichfork(flags);
 
 	ASSERT(*nmap >= 1);
 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
@@ -4513,6 +4563,11 @@ xfs_bmapi_write(
 	ASSERT(len > 0);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+	ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
+	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
+	ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
+	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || whichfork != XFS_COW_FORK);
+	ASSERT(!(flags & XFS_BMAPI_CONVERT) || whichfork != XFS_COW_FORK);
 
 	/* zeroing is for currently only for data extents, not metadata */
 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
@@ -4565,7 +4620,7 @@ xfs_bmapi_write(
 	bma.tp = tp;
 	bma.ip = ip;
 	bma.total = total;
-	bma.userdata = 0;
+	bma.datatype = 0;
 	bma.dfops = dfops;
 	bma.firstblock = firstblock;
 
@@ -4574,6 +4629,14 @@ xfs_bmapi_write(
 		wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
 
 		/*
+		 * Make sure we only reflink into a hole.
+		 */
+		if (flags & XFS_BMAPI_REMAP)
+			ASSERT(inhole);
+		if (flags & XFS_BMAPI_COWFORK)
+			ASSERT(!inhole);
+
+		/*
 		 * First, deal with the hole before the allocated space
 		 * that we found, if any.
 		 */
@@ -4603,6 +4666,17 @@ xfs_bmapi_write(
 				goto error0;
 			if (bma.blkno == NULLFSBLOCK)
 				break;
+
+			/*
+			 * If this is a CoW allocation, record the data in
+			 * the refcount btree for orphan recovery.
+			 */
+			if (whichfork == XFS_COW_FORK) {
+				error = xfs_refcount_alloc_cow_extent(mp, dfops,
+						bma.blkno, bma.length);
+				if (error)
+					goto error0;
+			}
 		}
 
 		/* Deal with the allocated space we found.  */
@@ -4768,7 +4842,8 @@ xfs_bmap_del_extent(
 	xfs_btree_cur_t		*cur,	/* if null, not a btree */
 	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
 	int			*logflagsp, /* inode logging flags */
-	int			whichfork) /* data or attr fork */
+	int			whichfork, /* data or attr fork */
+	int			bflags)	/* bmapi flags */
 {
 	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
 	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
@@ -4797,6 +4872,8 @@ xfs_bmap_del_extent(
 
 	if (whichfork == XFS_ATTR_FORK)
 		state |= BMAP_ATTRFORK;
+	else if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
@@ -4877,6 +4954,7 @@ xfs_bmap_del_extent(
 		/*
 		 * Matches the whole extent.  Delete the entry.
 		 */
+		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
 		xfs_iext_remove(ip, *idx, 1,
 				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
 		--*idx;
@@ -5060,9 +5138,16 @@ xfs_bmap_del_extent(
 	/*
 	 * If we need to, add to list of extents to delete.
 	 */
-	if (do_fx)
-		xfs_bmap_add_free(mp, dfops, del->br_startblock,
-				del->br_blockcount, NULL);
+	if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
+		if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
+			error = xfs_refcount_decrease_extent(mp, dfops, del);
+			if (error)
+				goto done;
+		} else
+			xfs_bmap_add_free(mp, dfops, del->br_startblock,
+					del->br_blockcount, NULL);
+	}
+
 	/*
 	 * Adjust inode # blocks in the file.
 	 */
@@ -5071,7 +5156,7 @@ xfs_bmap_del_extent(
 	/*
 	 * Adjust quota data.
 	 */
-	if (qfield)
+	if (qfield && !(bflags & XFS_BMAPI_REMAP))
 		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
 
 	/*
@@ -5086,6 +5171,175 @@ done:
 	return error;
 }
 
+/* Remove an extent from the CoW fork.  Similar to xfs_bmap_del_extent. */
+int
+xfs_bunmapi_cow(
+	struct xfs_inode		*ip,
+	struct xfs_bmbt_irec		*del)
+{
+	xfs_filblks_t			da_new;
+	xfs_filblks_t			da_old;
+	xfs_fsblock_t			del_endblock = 0;
+	xfs_fileoff_t			del_endoff;
+	int				delay;
+	struct xfs_bmbt_rec_host	*ep;
+	int				error;
+	struct xfs_bmbt_irec		got;
+	xfs_fileoff_t			got_endoff;
+	struct xfs_ifork		*ifp;
+	struct xfs_mount		*mp;
+	xfs_filblks_t			nblks;
+	struct xfs_bmbt_irec		new;
+	/* REFERENCED */
+	uint				qfield;
+	xfs_filblks_t			temp;
+	xfs_filblks_t			temp2;
+	int				state = BMAP_COWFORK;
+	int				eof;
+	xfs_extnum_t			eidx;
+
+	mp = ip->i_mount;
+	XFS_STATS_INC(mp, xs_del_exlist);
+
+	ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof,
+			&eidx, &got, &new);
+
+	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp;
+	ASSERT((eidx >= 0) && (eidx < ifp->if_bytes /
+		(uint)sizeof(xfs_bmbt_rec_t)));
+	ASSERT(del->br_blockcount > 0);
+	ASSERT(got.br_startoff <= del->br_startoff);
+	del_endoff = del->br_startoff + del->br_blockcount;
+	got_endoff = got.br_startoff + got.br_blockcount;
+	ASSERT(got_endoff >= del_endoff);
+	delay = isnullstartblock(got.br_startblock);
+	ASSERT(isnullstartblock(del->br_startblock) == delay);
+	qfield = 0;
+	error = 0;
+	/*
+	 * If deleting a real allocation, must free up the disk space.
+	 */
+	if (!delay) {
+		nblks = del->br_blockcount;
+		qfield = XFS_TRANS_DQ_BCOUNT;
+		/*
+		 * Set up del_endblock and cur for later.
+		 */
+		del_endblock = del->br_startblock + del->br_blockcount;
+		da_old = da_new = 0;
+	} else {
+		da_old = startblockval(got.br_startblock);
+		da_new = 0;
+		nblks = 0;
+	}
+	qfield = qfield;
+	nblks = nblks;
+
+	/*
+	 * Set flag value to use in switch statement.
+	 * Left-contig is 2, right-contig is 1.
+	 */
+	switch (((got.br_startoff == del->br_startoff) << 1) |
+		(got_endoff == del_endoff)) {
+	case 3:
+		/*
+		 * Matches the whole extent.  Delete the entry.
+		 */
+		xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK);
+		--eidx;
+		break;
+
+	case 2:
+		/*
+		 * Deleting the first part of the extent.
+		 */
+		trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
+		xfs_bmbt_set_startoff(ep, del_endoff);
+		temp = got.br_blockcount - del->br_blockcount;
+		xfs_bmbt_set_blockcount(ep, temp);
+		if (delay) {
+			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+				da_old);
+			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+			trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+			da_new = temp;
+			break;
+		}
+		xfs_bmbt_set_startblock(ep, del_endblock);
+		trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+		break;
+
+	case 1:
+		/*
+		 * Deleting the last part of the extent.
+		 */
+		temp = got.br_blockcount - del->br_blockcount;
+		trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
+		xfs_bmbt_set_blockcount(ep, temp);
+		if (delay) {
+			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+				da_old);
+			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+			trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+			da_new = temp;
+			break;
+		}
+		trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+		break;
+
+	case 0:
+		/*
+		 * Deleting the middle of the extent.
+		 */
+		temp = del->br_startoff - got.br_startoff;
+		trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
+		xfs_bmbt_set_blockcount(ep, temp);
+		new.br_startoff = del_endoff;
+		temp2 = got_endoff - del_endoff;
+		new.br_blockcount = temp2;
+		new.br_state = got.br_state;
+		if (!delay) {
+			new.br_startblock = del_endblock;
+		} else {
+			temp = xfs_bmap_worst_indlen(ip, temp);
+			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
+			temp2 = xfs_bmap_worst_indlen(ip, temp2);
+			new.br_startblock = nullstartblock((int)temp2);
+			da_new = temp + temp2;
+			while (da_new > da_old) {
+				if (temp) {
+					temp--;
+					da_new--;
+					xfs_bmbt_set_startblock(ep,
+						nullstartblock((int)temp));
+				}
+				if (da_new == da_old)
+					break;
+				if (temp2) {
+					temp2--;
+					da_new--;
+					new.br_startblock =
+						nullstartblock((int)temp2);
+				}
+			}
+		}
+		trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
+		xfs_iext_insert(ip, eidx + 1, 1, &new, state);
+		++eidx;
+		break;
+	}
+
+	/*
+	 * Account for change in delayed indirect blocks.
+	 * Nothing to do for disk quota accounting here.
+	 */
+	ASSERT(da_old >= da_new);
+	if (da_old > da_new)
+		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
+
+	return error;
+}
+
 /*
  * Unmap (remove) blocks from a file.
  * If nexts is nonzero then the number of extents to remove is limited to
@@ -5093,17 +5347,16 @@ done:
  * *done is set.
  */
 int						/* error */
-xfs_bunmapi(
+__xfs_bunmapi(
 	xfs_trans_t		*tp,		/* transaction pointer */
 	struct xfs_inode	*ip,		/* incore inode */
 	xfs_fileoff_t		bno,		/* starting offset to unmap */
-	xfs_filblks_t		len,		/* length to unmap in file */
+	xfs_filblks_t		*rlen,		/* i/o: amount remaining */
 	int			flags,		/* misc flags */
 	xfs_extnum_t		nexts,		/* number of extents max */
 	xfs_fsblock_t		*firstblock,	/* first allocated block
 						   controls a.g. for allocs */
-	struct xfs_defer_ops	*dfops,		/* i/o: list extents to free */
-	int			*done)		/* set if not done yet */
+	struct xfs_defer_ops	*dfops)		/* i/o: deferred updates */
 {
 	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
 	xfs_bmbt_irec_t		del;		/* extent being deleted */
@@ -5125,11 +5378,12 @@ xfs_bunmapi(
 	int			wasdel;		/* was a delayed alloc extent */
 	int			whichfork;	/* data or attribute fork */
 	xfs_fsblock_t		sum;
+	xfs_filblks_t		len = *rlen;	/* length to unmap in file */
 
 	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
 
-	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
-		XFS_ATTR_FORK : XFS_DATA_FORK;
+	whichfork = xfs_bmapi_whichfork(flags);
+	ASSERT(whichfork != XFS_COW_FORK);
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	if (unlikely(
 	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
@@ -5151,7 +5405,7 @@ xfs_bunmapi(
 		return error;
 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 	if (nextents == 0) {
-		*done = 1;
+		*rlen = 0;
 		return 0;
 	}
 	XFS_STATS_INC(mp, xs_blk_unmap);
@@ -5396,7 +5650,7 @@ xfs_bunmapi(
 			cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
 
 		error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
-				&tmp_logflags, whichfork);
+				&tmp_logflags, whichfork, flags);
 		logflags |= tmp_logflags;
 		if (error)
 			goto error0;
@@ -5422,7 +5676,10 @@ nodelete:
 			extno++;
 		}
 	}
-	*done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
+	if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
+		*rlen = 0;
+	else
+		*rlen = bno - start + 1;
 
 	/*
 	 * Convert to a btree if necessary.
@@ -5478,6 +5735,27 @@ error0:
 	return error;
 }
 
+/* Unmap a range of a file. */
+int
+xfs_bunmapi(
+	xfs_trans_t		*tp,
+	struct xfs_inode	*ip,
+	xfs_fileoff_t		bno,
+	xfs_filblks_t		len,
+	int			flags,
+	xfs_extnum_t		nexts,
+	xfs_fsblock_t		*firstblock,
+	struct xfs_defer_ops	*dfops,
+	int			*done)
+{
+	int			error;
+
+	error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
+			dfops);
+	*done = (len == 0);
+	return error;
+}
+
 /*
  * Determine whether an extent shift can be accomplished by a merge with the
  * extent that precedes the target hole of the shift.
@@ -6057,3 +6335,146 @@ out:
 	xfs_trans_cancel(tp);
 	return error;
 }
+
+/* Deferred mapping is only for real extents in the data fork. */
+static bool
+xfs_bmap_is_update_needed(
+	struct xfs_bmbt_irec	*bmap)
+{
+	return  bmap->br_startblock != HOLESTARTBLOCK &&
+		bmap->br_startblock != DELAYSTARTBLOCK;
+}
+
+/* Record a bmap intent. */
+static int
+__xfs_bmap_add(
+	struct xfs_mount		*mp,
+	struct xfs_defer_ops		*dfops,
+	enum xfs_bmap_intent_type	type,
+	struct xfs_inode		*ip,
+	int				whichfork,
+	struct xfs_bmbt_irec		*bmap)
+{
+	int				error;
+	struct xfs_bmap_intent		*bi;
+
+	trace_xfs_bmap_defer(mp,
+			XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+			type,
+			XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+			ip->i_ino, whichfork,
+			bmap->br_startoff,
+			bmap->br_blockcount,
+			bmap->br_state);
+
+	bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
+	INIT_LIST_HEAD(&bi->bi_list);
+	bi->bi_type = type;
+	bi->bi_owner = ip;
+	bi->bi_whichfork = whichfork;
+	bi->bi_bmap = *bmap;
+
+	error = xfs_defer_join(dfops, bi->bi_owner);
+	if (error) {
+		kmem_free(bi);
+		return error;
+	}
+
+	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
+	return 0;
+}
+
+/* Map an extent into a file. */
+int
+xfs_bmap_map_extent(
+	struct xfs_mount	*mp,
+	struct xfs_defer_ops	*dfops,
+	struct xfs_inode	*ip,
+	struct xfs_bmbt_irec	*PREV)
+{
+	if (!xfs_bmap_is_update_needed(PREV))
+		return 0;
+
+	return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
+			XFS_DATA_FORK, PREV);
+}
+
+/* Unmap an extent out of a file. */
+int
+xfs_bmap_unmap_extent(
+	struct xfs_mount	*mp,
+	struct xfs_defer_ops	*dfops,
+	struct xfs_inode	*ip,
+	struct xfs_bmbt_irec	*PREV)
+{
+	if (!xfs_bmap_is_update_needed(PREV))
+		return 0;
+
+	return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
+			XFS_DATA_FORK, PREV);
+}
+
+/*
+ * Process one of the deferred bmap operations.  We pass back the
+ * btree cursor to maintain our lock on the bmapbt between calls.
+ */
+int
+xfs_bmap_finish_one(
+	struct xfs_trans		*tp,
+	struct xfs_defer_ops		*dfops,
+	struct xfs_inode		*ip,
+	enum xfs_bmap_intent_type	type,
+	int				whichfork,
+	xfs_fileoff_t			startoff,
+	xfs_fsblock_t			startblock,
+	xfs_filblks_t			blockcount,
+	xfs_exntst_t			state)
+{
+	struct xfs_bmbt_irec		bmap;
+	int				nimaps = 1;
+	xfs_fsblock_t			firstfsb;
+	int				flags = XFS_BMAPI_REMAP;
+	int				done;
+	int				error = 0;
+
+	bmap.br_startblock = startblock;
+	bmap.br_startoff = startoff;
+	bmap.br_blockcount = blockcount;
+	bmap.br_state = state;
+
+	trace_xfs_bmap_deferred(tp->t_mountp,
+			XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
+			XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
+			ip->i_ino, whichfork, startoff, blockcount, state);
+
+	if (whichfork != XFS_DATA_FORK && whichfork != XFS_ATTR_FORK)
+		return -EFSCORRUPTED;
+	if (whichfork == XFS_ATTR_FORK)
+		flags |= XFS_BMAPI_ATTRFORK;
+
+	if (XFS_TEST_ERROR(false, tp->t_mountp,
+			XFS_ERRTAG_BMAP_FINISH_ONE,
+			XFS_RANDOM_BMAP_FINISH_ONE))
+		return -EIO;
+
+	switch (type) {
+	case XFS_BMAP_MAP:
+		firstfsb = bmap.br_startblock;
+		error = xfs_bmapi_write(tp, ip, bmap.br_startoff,
+					bmap.br_blockcount, flags, &firstfsb,
+					bmap.br_blockcount, &bmap, &nimaps,
+					dfops);
+		break;
+	case XFS_BMAP_UNMAP:
+		error = xfs_bunmapi(tp, ip, bmap.br_startoff,
+				bmap.br_blockcount, flags, 1, &firstfsb,
+				dfops, &done);
+		ASSERT(done);
+		break;
+	default:
+		ASSERT(0);
+		error = -EFSCORRUPTED;
+	}
+
+	return error;
+}
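One interface change in this diff that callers will notice: __xfs_bunmapi() reports progress through its *rlen in/out argument instead of a *done flag, and the old xfs_bunmapi() prototype survives only as a thin wrapper over it. The sketch below is an illustration of that contract, not code from the patch; the function name, the nexts value of 2 and the -EAGAIN return convention are invented for the example.

/*
 * Illustration only -- not part of this patch.  A caller of the new
 * __xfs_bunmapi() passes the length by reference and reads back how much
 * of the range is still mapped; the compatibility wrapper xfs_bunmapi()
 * added above derives its *done flag as exactly (len == 0).
 */
static int
example_unmap_range(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops)
{
	xfs_filblks_t		rlen = len;	/* in/out: blocks left to unmap */
	int			error;

	error = __xfs_bunmapi(tp, ip, bno, &rlen, 0, 2, firstblock, dfops);
	if (error)
		return error;

	/* rlen now holds the portion of the range that is still mapped. */
	if (rlen == 0)
		return 0;		/* whole range unmapped */
	return -EAGAIN;			/* caller decides how to retry */
}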