Diffstat (limited to 'fs/direct-io.c')
-rw-r--r--	fs/direct-io.c	44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/fs/direct-io.c b/fs/direct-io.c
index e181b6b2e297..c3b560b24a46 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,7 +37,6 @@
 #include <linux/uio.h>
 #include <linux/atomic.h>
 #include <linux/prefetch.h>
-#include <linux/aio.h>
 
 /*
  * How many user pages to map in one call to get_user_pages().  This determines
@@ -265,7 +264,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
 				ret = err;
 		}
 
-		aio_complete(dio->iocb, ret, 0);
+		dio->iocb->ki_complete(dio->iocb, ret, 0);
 	}
 
 	kmem_cache_free(dio_cache, dio);
@@ -1056,7 +1055,7 @@ static inline int drop_refcount(struct dio *dio)
 	 * operation.  AIO can if it was a broken operation described above or
 	 * in fact if all the bios race to complete before we get here.  In
 	 * that case dio_complete() translates the EIOCBQUEUED into the proper
-	 * return code that the caller will hand to aio_complete().
+	 * return code that the caller will hand to ->complete().
 	 *
 	 * This is managed by the bio_lock instead of being an atomic_t so that
 	 * completion paths can drop their ref and use the remaining count to
@@ -1094,10 +1093,10 @@ static inline int drop_refcount(struct dio *dio)
  * for the whole file.
  */
 static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-	get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io,	int flags)
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+		      struct block_device *bdev, struct iov_iter *iter,
+		      loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+		      dio_submit_t submit_io, int flags)
 {
 	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
 	unsigned blkbits = i_blkbits;
@@ -1111,9 +1110,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	struct blk_plug plug;
 	unsigned long align = offset | iov_iter_alignment(iter);
 
-	if (rw & WRITE)
-		rw = WRITE_ODIRECT;
-
 	/*
 	 * Avoid references to bdev if not absolutely needed to give
 	 * the early prefetch in the caller enough time.
@@ -1128,7 +1124,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	}
 
 	/* watch out for a 0 len io from a tricksy fs */
-	if (rw == READ && !iov_iter_count(iter))
+	if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
 		return 0;
 
 	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1144,7 +1140,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 
 	dio->flags = flags;
 	if (dio->flags & DIO_LOCKING) {
-		if (rw == READ) {
+		if (iov_iter_rw(iter) == READ) {
 			struct address_space *mapping =
 					iocb->ki_filp->f_mapping;
 
@@ -1170,19 +1166,19 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	if (is_sync_kiocb(iocb))
 		dio->is_async = false;
 	else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
-            (rw & WRITE) && end > i_size_read(inode))
+		 iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
 		dio->is_async = false;
 	else
 		dio->is_async = true;
 
 	dio->inode = inode;
-	dio->rw = rw;
+	dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
 
 	/*
 	 * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
 	 * so that we can call ->fsync.
 	 */
-	if (dio->is_async && (rw & WRITE) &&
+	if (dio->is_async && iov_iter_rw(iter) == WRITE &&
 	    ((iocb->ki_filp->f_flags & O_DSYNC) ||
 	     IS_SYNC(iocb->ki_filp->f_mapping->host))) {
 		retval = dio_set_defer_completion(dio);
@@ -1275,7 +1271,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 * we can let i_mutex go now that its achieved its purpose
 	 * of protecting us from looking up uninitialized blocks.
 	 */
-	if (rw == READ && (dio->flags & DIO_LOCKING))
+	if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
 		mutex_unlock(&dio->inode->i_mutex);
 
 	/*
@@ -1287,7 +1283,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 */
 	BUG_ON(retval == -EIOCBQUEUED);
 	if (dio->is_async && retval == 0 && dio->result &&
-	    (rw == READ || dio->result == count))
+	    (iov_iter_rw(iter) == READ || dio->result == count))
 		retval = -EIOCBQUEUED;
 	else
 		dio_await_completion(dio);
@@ -1301,11 +1297,11 @@ out:
 	return retval;
 }
 
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, struct iov_iter *iter, loff_t offset,
-	get_block_t get_block, dio_iodone_t end_io,
-	dio_submit_t submit_io,	int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+			     struct block_device *bdev, struct iov_iter *iter,
+			     loff_t offset, get_block_t get_block,
+			     dio_iodone_t end_io, dio_submit_t submit_io,
+			     int flags)
 {
 	/*
 	 * The block device state is needed in the end to finally
@@ -1319,8 +1315,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	prefetch(bdev->bd_queue);
 	prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-	return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
-				     get_block, end_io, submit_io, flags);
+	return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+				     end_io, submit_io, flags);
 }
 EXPORT_SYMBOL(__blockdev_direct_IO);
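
For context, a minimal sketch of how a filesystem's ->direct_IO method might call the new __blockdev_direct_IO() prototype after this change. This is not part of the diff above: myfs_direct_IO and myfs_get_block are hypothetical names, the flag choice is an example, and it assumes the rw-less ->direct_IO(kiocb, iter, offset) prototype from the same patch series. The point it illustrates is that callers no longer pass a read/write argument; the helper derives the direction from iov_iter_rw(iter).

/* Illustrative sketch only; myfs_* names and flags are assumptions. */
#include <linux/fs.h>
#include <linux/uio.h>

/* Hypothetical get_block_t callback provided by the filesystem. */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* No rw argument: the direction comes from the iov_iter itself. */
	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				    offset, myfs_get_block, NULL, NULL,
				    DIO_LOCKING | DIO_SKIP_HOLES);
}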