Diffstat (limited to 'fs/netfs/buffered_read.c')
 -rw-r--r--  fs/netfs/buffered_read.c | 229 ++++++++++++++++++++++++++++----
 1 file changed, 213 insertions(+), 16 deletions(-)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 2cd3ccf4c439..a59e7b2edaac 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -16,6 +16,7 @@
 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
 	struct netfs_io_subrequest *subreq;
+	struct netfs_folio *finfo;
 	struct folio *folio;
 	pgoff_t start_page = rreq->start / PAGE_SIZE;
 	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
@@ -63,6 +64,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 				break;
 			}
 			if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
 				folio_start_fscache(folio);
 				folio_started = true;
 			}
@@ -86,6 +88,15 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 
 		if (!pg_failed) {
 			flush_dcache_folio(folio);
+			finfo = netfs_folio_info(folio);
+			if (finfo) {
+				trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
+				if (finfo->netfs_group)
+					folio_change_private(folio, finfo->netfs_group);
+				else
+					folio_detach_private(folio);
+				kfree(finfo);
+			}
 			folio_mark_uptodate(folio);
 		}
 
@@ -147,6 +158,15 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
 	}
 }
 
+/*
+ * Begin an operation, and fetch the stored zero point value from the cookie if
+ * available.
+ */
+static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx)
+{
+	return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx));
+}
+
 /**
  * netfs_readahead - Helper to manage a read request
  * @ractl: The description of the readahead request
@@ -180,11 +200,9 @@ void netfs_readahead(struct readahead_control *ractl)
 	if (IS_ERR(rreq))
 		return;
 
-	if (ctx->ops->begin_cache_operation) {
-		ret = ctx->ops->begin_cache_operation(rreq);
-		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-			goto cleanup_free;
-	}
+	ret = netfs_begin_cache_read(rreq, ctx);
+	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+		goto cleanup_free;
 
 	netfs_stat(&netfs_n_rh_readahead);
 	trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
@@ -192,6 +210,10 @@ void netfs_readahead(struct readahead_control *ractl)
 
 	netfs_rreq_expand(rreq, ractl);
 
+	/* Set up the output buffer */
+	iov_iter_xarray(&rreq->iter, ITER_DEST, &ractl->mapping->i_pages,
+			rreq->start, rreq->len);
+
 	/* Drop the refs on the folios here rather than in the cache or
 	 * filesystem.  The locks will be dropped in netfs_rreq_unlock().
 	 */
@@ -199,6 +221,7 @@ void netfs_readahead(struct readahead_control *ractl)
 		;
 
 	netfs_begin_read(rreq, false);
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 	return;
 
 cleanup_free:
@@ -226,6 +249,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 	struct address_space *mapping = folio_file_mapping(folio);
 	struct netfs_io_request *rreq;
 	struct netfs_inode *ctx = netfs_inode(mapping->host);
+	struct folio *sink = NULL;
 	int ret;
 
 	_enter("%lx", folio_index(folio));
@@ -238,15 +262,64 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 		goto alloc_error;
 	}
 
-	if (ctx->ops->begin_cache_operation) {
-		ret = ctx->ops->begin_cache_operation(rreq);
-		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-			goto discard;
-	}
+	ret = netfs_begin_cache_read(rreq, ctx);
+	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+		goto discard;
 
 	netfs_stat(&netfs_n_rh_readpage);
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
-	return netfs_begin_read(rreq, true);
+
+	/* Set up the output buffer */
+	if (folio_test_dirty(folio)) {
+		/* Handle someone trying to read from an unflushed streaming
+		 * write.  We fiddle the buffer so that a gap at the beginning
+		 * and/or a gap at the end get copied to, but the middle is
+		 * discarded.
+		 */
+		struct netfs_folio *finfo = netfs_folio_info(folio);
+		struct bio_vec *bvec;
+		unsigned int from = finfo->dirty_offset;
+		unsigned int to = from + finfo->dirty_len;
+		unsigned int off = 0, i = 0;
+		size_t flen = folio_size(folio);
+		size_t nr_bvec = flen / PAGE_SIZE + 2;
+		size_t part;
+
+		ret = -ENOMEM;
+		bvec = kmalloc_array(nr_bvec, sizeof(*bvec), GFP_KERNEL);
+		if (!bvec)
+			goto discard;
+
+		sink = folio_alloc(GFP_KERNEL, 0);
+		if (!sink)
+			goto discard;
+
+		trace_netfs_folio(folio, netfs_folio_trace_read_gaps);
+
+		rreq->direct_bv = bvec;
+		rreq->direct_bv_count = nr_bvec;
+		if (from > 0) {
+			bvec_set_folio(&bvec[i++], folio, from, 0);
+			off = from;
+		}
+		while (off < to) {
+			part = min_t(size_t, to - off, PAGE_SIZE);
+			bvec_set_folio(&bvec[i++], sink, part, 0);
+			off += part;
+		}
+		if (to < flen)
+			bvec_set_folio(&bvec[i++], folio, flen - to, to);
+		iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len);
+	} else {
+		iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+				rreq->start, rreq->len);
+	}
+
+	ret = netfs_begin_read(rreq, true);
+	if (sink)
+		folio_put(sink);
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+	return ret < 0 ? ret : 0;
 
 discard:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
@@ -390,11 +463,9 @@ retry:
 	rreq->no_unlock_folio	= folio_index(folio);
 	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
 
-	if (ctx->ops->begin_cache_operation) {
-		ret = ctx->ops->begin_cache_operation(rreq);
-		if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
-			goto error_put;
-	}
+	ret = netfs_begin_cache_read(rreq, ctx);
+	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+		goto error_put;
 
 	netfs_stat(&netfs_n_rh_write_begin);
 	trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
@@ -405,6 +476,10 @@ retry:
 	ractl._nr_pages = folio_nr_pages(folio);
 	netfs_rreq_expand(rreq, &ractl);
 
+	/* Set up the output buffer */
+	iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+			rreq->start, rreq->len);
+
 	/* We hold the folio locks, so we can drop the references */
 	folio_get(folio);
 	while (readahead_folio(&ractl))
@@ -413,6 +488,7 @@ retry:
 	ret = netfs_begin_read(rreq, true);
 	if (ret < 0)
 		goto error;
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
 	ret = folio_wait_fscache_killable(folio);
@@ -434,3 +510,124 @@ error:
 	return ret;
 }
 EXPORT_SYMBOL(netfs_write_begin);
+
+/*
+ * Preload the data into a page we're proposing to write into.
+ */
+int netfs_prefetch_for_write(struct file *file, struct folio *folio,
+			     size_t offset, size_t len)
+{
+	struct netfs_io_request *rreq;
+	struct address_space *mapping = folio_file_mapping(folio);
+	struct netfs_inode *ctx = netfs_inode(mapping->host);
+	unsigned long long start = folio_pos(folio);
+	size_t flen = folio_size(folio);
+	int ret;
+
+	_enter("%zx @%llx", flen, start);
+
+	ret = -ENOMEM;
+
+	rreq = netfs_alloc_request(mapping, file, start, flen,
+				   NETFS_READ_FOR_WRITE);
+	if (IS_ERR(rreq)) {
+		ret = PTR_ERR(rreq);
+		goto error;
+	}
+
+	rreq->no_unlock_folio = folio_index(folio);
+	__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+	ret = netfs_begin_cache_read(rreq, ctx);
+	if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+		goto error_put;
+
+	netfs_stat(&netfs_n_rh_write_begin);
+	trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write);
+
+	/* Set up the output buffer */
+	iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
+			rreq->start, rreq->len);
+
+	ret = netfs_begin_read(rreq, true);
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+	return ret;
+
+error_put:
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+error:
+	_leave(" = %d", ret);
+	return ret;
+}
+
+/**
+ * netfs_buffered_read_iter - Filesystem buffered I/O read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead.  When no data can be read,
+ * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_buffered_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+	struct netfs_inode *ictx = netfs_inode(inode);
+	ssize_t ret;
+
+	if (WARN_ON_ONCE((iocb->ki_flags & IOCB_DIRECT) ||
+			 test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags)))
+		return -EINVAL;
+
+	ret = netfs_start_io_read(inode);
+	if (ret == 0) {
+		ret = filemap_read(iocb, iter, 0);
+		netfs_end_io_read(inode);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(netfs_buffered_read_iter);
+
+/**
+ * netfs_file_read_iter - Generic filesystem read routine
+ * @iocb: kernel I/O control block
+ * @iter: destination for the data read
+ *
+ * This is the ->read_iter() routine for all filesystems that can use the page
+ * cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall be
+ * returned when no data can be read without waiting for I/O requests to
+ * complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O requests
+ * shall be made for the read or for readahead.  When no data can be read,
+ * -EAGAIN shall be returned.  When readahead would be triggered, a partial,
+ * possibly empty read shall be returned.
+ *
+ * Return:
+ * * number of bytes copied, even for partial reads
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
+ */
+ssize_t netfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	struct netfs_inode *ictx = netfs_inode(iocb->ki_filp->f_mapping->host);
+
+	if ((iocb->ki_flags & IOCB_DIRECT) ||
+	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
+		return netfs_unbuffered_read_iter(iocb, iter);
+
+	return netfs_buffered_read_iter(iocb, iter);
+}
+EXPORT_SYMBOL(netfs_file_read_iter);
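
A note on the trickiest hunk above: when netfs_read_folio() finds the folio dirty from an unflushed streaming write, it builds a bio_vec array so that the clean head and tail of the folio receive data from the server, while anything overlapping the dirty span [dirty_offset, dirty_offset + dirty_len) is diverted into a single throwaway sink page, keeping the unflushed bytes intact. The standalone sketch below (userspace C with made-up values and a 4 KiB page size, not kernel code) replays just that segment arithmetic:

/* Standalone illustration of the segment layout netfs_read_folio() builds
 * for a folio holding an unflushed streaming write.  Userspace sketch with
 * invented values; the kernel fills struct bio_vec entries instead of
 * printing them.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int flen = 4 * PAGE_SIZE;	/* folio_size(folio) */
	unsigned int from = 1000;		/* finfo->dirty_offset */
	unsigned int to   = from + 6000;	/* from + finfo->dirty_len */
	unsigned int off, part, i = 0;

	/* Worst case: one head segment, one tail segment, and one sink
	 * segment per page of the dirty span: flen / PAGE_SIZE + 2. */
	printf("nr_bvec upper bound = %u\n", flen / PAGE_SIZE + 2);

	if (from > 0)		/* clean head: read into the folio itself */
		printf("bvec[%u]: folio bytes [0, %u)\n", i++, from);

	for (off = from; off < to; off += part) {	/* dirty middle */
		part = to - off < PAGE_SIZE ? to - off : PAGE_SIZE;
		printf("bvec[%u]: sink  bytes [%u, %u) (discarded)\n",
		       i++, off, off + part);
	}

	if (to < flen)		/* clean tail: read into the folio itself */
		printf("bvec[%u]: folio bytes [%u, %u)\n", i++, to, flen);

	return 0;
}

With these values it prints one head segment, two sink segments and one tail segment, comfortably under the flen / PAGE_SIZE + 2 bound that sizes the kmalloc_array() allocation in the patch.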
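
For context on how the exported entry points are meant to be consumed: a filesystem converting to these helpers plugs them straight into its operation tables. The sketch below is illustrative only; "myfs" and its tables are invented, and only the netfs_* functions come from the code above.

#include <linux/fs.h>
#include <linux/netfs.h>

/* Hypothetical netfs-backed filesystem ("myfs") wiring up the helpers. */
static const struct file_operations myfs_file_ops = {
	.llseek		= generic_file_llseek,
	/* Routes O_DIRECT/unbuffered inodes to netfs_unbuffered_read_iter()
	 * and everything else to netfs_buffered_read_iter(). */
	.read_iter	= netfs_file_read_iter,
};

static const struct address_space_operations myfs_aops = {
	/* Both helpers now set up their own output iterator with
	 * iov_iter_xarray() (or iov_iter_bvec() for the dirty-folio case)
	 * and drop their request ref via netfs_put_request() when done. */
	.readahead	= netfs_readahead,
	.read_folio	= netfs_read_folio,
};

This mirrors how in-tree netfs users hook these helpers up; the point of the patch is that the library now manages the iterator and the request lifetime itself, so the per-filesystem glue stays this thin.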