Diffstat (limited to 'fs/xfs/xfs_ialloc.c')
-rw-r--r--   fs/xfs/xfs_ialloc.c | 53
1 file changed, 23 insertions, 30 deletions
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index e87719c5bebe..5d7f105a1c82 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -52,7 +52,7 @@ xfs_ialloc_cluster_alignment(
 {
 	if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
 	    args->mp->m_sb.sb_inoalignmt >=
-	     XFS_B_TO_FSBT(args->mp, XFS_INODE_CLUSTER_SIZE(args->mp)))
+	     XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size))
 		return args->mp->m_sb.sb_inoalignmt;
 	return 1;
 }
@@ -170,27 +170,20 @@ xfs_ialloc_inode_init(
 {
 	struct xfs_buf		*fbuf;
 	struct xfs_dinode	*free;
-	int			blks_per_cluster, nbufs, ninodes;
+	int			nbufs, blks_per_cluster, inodes_per_cluster;
 	int			version;
 	int			i, j;
 	xfs_daddr_t		d;
 	xfs_ino_t		ino = 0;
 
 	/*
-	 * Loop over the new block(s), filling in the inodes.
-	 * For small block sizes, manipulate the inodes in buffers
-	 * which are multiples of the blocks size.
+	 * Loop over the new block(s), filling in the inodes.  For small block
+	 * sizes, manipulate the inodes in buffers  which are multiples of the
+	 * blocks size.
 	 */
-	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
-		blks_per_cluster = 1;
-		nbufs = length;
-		ninodes = mp->m_sb.sb_inopblock;
-	} else {
-		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
-				   mp->m_sb.sb_blocksize;
-		nbufs = length / blks_per_cluster;
-		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
-	}
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
+	inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
+	nbufs = length / blks_per_cluster;
 
 	/*
 	 * Figure out what version number to use in the inodes we create.  If
@@ -225,7 +218,7 @@ xfs_ialloc_inode_init(
 		 * they track in the AIL as if they were physically logged.
 		 */
 		if (tp)
-			xfs_icreate_log(tp, agno, agbno, XFS_IALLOC_INODES(mp),
+			xfs_icreate_log(tp, agno, agbno, mp->m_ialloc_inos,
 					mp->m_sb.sb_inodesize, length, gen);
 	} else if (xfs_sb_version_hasnlink(&mp->m_sb))
 		version = 2;
@@ -246,7 +239,7 @@ xfs_ialloc_inode_init(
 		/* Initialize the inode buffers and log them appropriately. */
 		fbuf->b_ops = &xfs_inode_buf_ops;
 		xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
-		for (i = 0; i < ninodes; i++) {
+		for (i = 0; i < inodes_per_cluster; i++) {
 			int	ioffset = i << mp->m_sb.sb_inodelog;
 			uint	isize = xfs_dinode_size(version);
 
@@ -329,11 +322,11 @@ xfs_ialloc_ag_alloc(
 	 * Locking will ensure that we don't have two callers in here
 	 * at one time.
 	 */
-	newlen = XFS_IALLOC_INODES(args.mp);
+	newlen = args.mp->m_ialloc_inos;
 	if (args.mp->m_maxicount &&
 	    args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
 		return XFS_ERROR(ENOSPC);
-	args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
+	args.minlen = args.maxlen = args.mp->m_ialloc_blks;
 	/*
 	 * First try to allocate inodes contiguous with the last-allocated
 	 * chunk of inodes.  If the filesystem is striped, this will fill
@@ -343,7 +336,7 @@ xfs_ialloc_ag_alloc(
 	newino = be32_to_cpu(agi->agi_newino);
 	agno = be32_to_cpu(agi->agi_seqno);
 	args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
-			XFS_IALLOC_BLOCKS(args.mp);
+		     args.mp->m_ialloc_blks;
 	if (likely(newino != NULLAGINO &&
 		  (args.agbno < be32_to_cpu(agi->agi_length)))) {
 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
@@ -585,7 +578,7 @@ xfs_ialloc_ag_select(
 		 * Is there enough free space for the file plus a block of
 		 * inodes? (if we need to allocate some)?
 		 */
-		ineed = XFS_IALLOC_BLOCKS(mp);
+		ineed = mp->m_ialloc_blks;
 		longest = pag->pagf_longest;
 		if (!longest)
 			longest = pag->pagf_flcount > 0;
@@ -999,7 +992,7 @@ xfs_dialloc(
 	 * inode.
 	 */
 	if (mp->m_maxicount &&
-	    mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
+	    mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) {
 		noroom = 1;
 		okalloc = 0;
 	}
@@ -1202,7 +1195,7 @@ xfs_difree(
 	 * When an inode cluster is free, it becomes eligible for removal
 	 */
 	if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
-	    (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
+	    (rec.ir_freecount == mp->m_ialloc_inos)) {
 
 		*delete = 1;
 		*first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
@@ -1212,7 +1205,7 @@ xfs_difree(
 		 * AGI and Superblock inode counts, and mark the disk space
 		 * to be freed when the transaction is committed.
		 */
-		ilen = XFS_IALLOC_INODES(mp);
+		ilen = mp->m_ialloc_inos;
 		be32_add_cpu(&agi->agi_count, -ilen);
 		be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
 		xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
@@ -1228,9 +1221,9 @@ xfs_difree(
 			goto error0;
 		}
 
-		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp,
-				agno, XFS_INO_TO_AGBNO(mp,rec.ir_startino)),
-				XFS_IALLOC_BLOCKS(mp), flist, mp);
+		xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno,
+				  XFS_AGINO_TO_AGBNO(mp, rec.ir_startino)),
+				  mp->m_ialloc_blks, flist, mp);
 	} else {
 		*delete = 0;
 
@@ -1311,7 +1304,7 @@ xfs_imap_lookup(
 
 	/* check that the returned record contains the required inode */
 	if (rec.ir_startino > agino ||
-	    rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino)
+	    rec.ir_startino + mp->m_ialloc_inos <= agino)
 		return EINVAL;
 
 	/* for untrusted inodes check it is allocated first */
@@ -1384,7 +1377,7 @@ xfs_imap(
 		return XFS_ERROR(EINVAL);
 	}
 
-	blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog;
+	blks_per_cluster = xfs_icluster_size_fsb(mp);
 
 	/*
 	 * For bulkstat and handle lookups, we have an untrusted inode number
@@ -1405,7 +1398,7 @@ xfs_imap(
 	 * If the inode cluster size is the same as the blocksize or
 	 * smaller we get to the buffer by simple arithmetics.
 	 */
-	if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) {
+	if (blks_per_cluster == 1) {
 		offset = XFS_INO_TO_OFFSET(mp, ino);
 		ASSERT(offset < mp->m_sb.sb_inopblock);
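To see what the replacement arithmetic works out to, the sketch below is a standalone user-space approximation of the new per-cluster calculation in xfs_ialloc_inode_init(). It assumes xfs_icluster_size_fsb() returns the inode cluster size in filesystem blocks, clamped to 1 when a block is at least as large as a cluster; the fake_mount structure and its field names are illustrative stand-ins for the relevant xfs_mount/xfs_sb fields, not the kernel definitions.

/*
 * Standalone sketch (not kernel code): models the geometry that the patch
 * now derives from precomputed mount fields instead of the removed
 * open-coded if/else and the XFS_IALLOC_*/XFS_INODE_CLUSTER_SIZE macros.
 */
#include <assert.h>
#include <stdio.h>

struct fake_mount {
	unsigned int	sb_blocksize;		/* fs block size in bytes */
	unsigned int	sb_blocklog;		/* log2 of sb_blocksize */
	unsigned int	sb_inopblock;		/* inodes per fs block */
	unsigned int	sb_inopblog;		/* log2 of sb_inopblock */
	unsigned int	m_inode_cluster_size;	/* inode buffer size in bytes */
};

/* Assumed behaviour of xfs_icluster_size_fsb(): fs blocks per inode
 * cluster buffer, clamped to 1 for large block sizes. */
static unsigned int
icluster_size_fsb(const struct fake_mount *mp)
{
	if (mp->sb_blocksize >= mp->m_inode_cluster_size)
		return 1;
	return mp->m_inode_cluster_size >> mp->sb_blocklog;
}

int main(void)
{
	/* Example geometry: 512-byte inodes, 4k blocks, 8k inode clusters. */
	struct fake_mount mp = {
		.sb_blocksize		= 4096,
		.sb_blocklog		= 12,
		.sb_inopblock		= 8,
		.sb_inopblog		= 3,
		.m_inode_cluster_size	= 8192,
	};
	unsigned int length = 16;	/* blocks in the new inode chunk */

	unsigned int blks_per_cluster = icluster_size_fsb(&mp);
	unsigned int inodes_per_cluster = blks_per_cluster << mp.sb_inopblog;
	unsigned int nbufs = length / blks_per_cluster;

	/* The old open-coded branch reduces to the same cluster size. */
	unsigned int old_blks = (mp.sb_blocksize >= mp.m_inode_cluster_size) ?
				1 : mp.m_inode_cluster_size / mp.sb_blocksize;
	assert(old_blks == blks_per_cluster);

	printf("blks_per_cluster=%u inodes_per_cluster=%u nbufs=%u\n",
	       blks_per_cluster, inodes_per_cluster, nbufs);
	return 0;
}

The rest of the patch follows the same pattern: the per-call macro expansions XFS_IALLOC_INODES(mp), XFS_IALLOC_BLOCKS(mp) and XFS_INODE_CLUSTER_SIZE(mp) are replaced by the precomputed mount fields m_ialloc_inos, m_ialloc_blks and m_inode_cluster_size shown in the diff above.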