Diffstat (limited to 'fs/netfs')

 -rw-r--r--  fs/netfs/buffered_read.c | 51
 -rw-r--r--  fs/netfs/internal.h      |  2
 -rw-r--r--  fs/netfs/objects.c       |  8

 3 files changed, 30 insertions(+), 31 deletions(-)
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 281a88a5b8dc..0ce535852151 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -155,7 +155,7 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq,
 void netfs_readahead(struct readahead_control *ractl)
 {
 	struct netfs_io_request *rreq;
-	struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host);
+	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
 	int ret;
 
 	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
@@ -198,25 +198,24 @@ cleanup_free:
 EXPORT_SYMBOL(netfs_readahead);
 
 /**
- * netfs_readpage - Helper to manage a readpage request
+ * netfs_read_folio - Helper to manage a read_folio request
  * @file: The file to read from
- * @subpage: A subpage of the folio to read
+ * @folio: The folio to read
  *
- * Fulfil a readpage request by drawing data from the cache if possible, or the
- * netfs if not.  Space beyond the EOF is zero-filled.  Multiple I/O requests
- * from different sources will get munged together.
+ * Fulfil a read_folio request by drawing data from the cache if
+ * possible, or the netfs if not.  Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.
  *
  * The calling netfs must initialise a netfs context contiguous to the vfs
  * inode before calling this.
  *
  * This is usable whether or not caching is enabled.
  */
-int netfs_readpage(struct file *file, struct page *subpage)
+int netfs_read_folio(struct file *file, struct folio *folio)
 {
-	struct folio *folio = page_folio(subpage);
 	struct address_space *mapping = folio_file_mapping(folio);
 	struct netfs_io_request *rreq;
-	struct netfs_i_context *ctx = netfs_i_context(mapping->host);
+	struct netfs_inode *ctx = netfs_inode(mapping->host);
 	int ret;
 
 	_enter("%lx", folio_index(folio));
@@ -245,7 +244,7 @@ alloc_error:
 	folio_unlock(folio);
 	return ret;
 }
-EXPORT_SYMBOL(netfs_readpage);
+EXPORT_SYMBOL(netfs_read_folio);
 
 /*
  * Prepare a folio for writing without reading first
@@ -298,11 +297,11 @@ zero_out:
 
 /**
  * netfs_write_begin - Helper to prepare for writing
+ * @ctx: The netfs context
  * @file: The file to read from
  * @mapping: The mapping to read from
  * @pos: File position at which the write will begin
  * @len: The length of the write (may extend beyond the end of the folio chosen)
- * @aop_flags: AOP_* flags
  * @_folio: Where to put the resultant folio
  * @_fsdata: Place for the netfs to store a cookie
  *
@@ -320,31 +319,29 @@ zero_out:
  * conflicting writes once the folio is grabbed and locked.  It is passed a
  * pointer to the fsdata cookie that gets returned to the VM to be passed to
  * write_end.  It is permitted to sleep.  It should return 0 if the request
- * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
- * be regot; or return an error.
+ * should go ahead or it may return an error.  It may also unlock and put the
+ * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
+ * will cause the folio to be re-got and the process to be retried.
  *
  * The calling netfs must initialise a netfs context contiguous to the vfs
  * inode before calling this.
  *
  * This is usable whether or not caching is enabled.
  */
-int netfs_write_begin(struct file *file, struct address_space *mapping,
-		      loff_t pos, unsigned int len, unsigned int aop_flags,
-		      struct folio **_folio, void **_fsdata)
+int netfs_write_begin(struct netfs_inode *ctx,
+		      struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned int len, struct folio **_folio,
+		      void **_fsdata)
 {
 	struct netfs_io_request *rreq;
-	struct netfs_i_context *ctx = netfs_i_context(file_inode(file));
 	struct folio *folio;
-	unsigned int fgp_flags;
+	unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
 	pgoff_t index = pos >> PAGE_SHIFT;
 	int ret;
 
 	DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
 
 retry:
-	fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
-	if (aop_flags & AOP_FLAG_NOFS)
-		fgp_flags |= FGP_NOFS;
 	folio = __filemap_get_folio(mapping, index, fgp_flags,
				    mapping_gfp_mask(mapping));
 	if (!folio)
@@ -352,13 +349,13 @@ retry:
 
 	if (ctx->ops->check_write_begin) {
 		/* Allow the netfs (eg. ceph) to flush conflicts. */
-		ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata);
+		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
 		if (ret < 0) {
 			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
 			goto error;
 		}
+		if (!folio)
+			goto retry;
 	}
 
 	if (folio_test_uptodate(folio))
@@ -420,8 +417,10 @@ have_folio_no_wait:
 error_put:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
 error:
-	folio_unlock(folio);
-	folio_put(folio);
+	if (folio) {
+		folio_unlock(folio);
+		folio_put(folio);
+	}
 	_leave(" = %d", ret);
 	return ret;
 }
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index b7b0e3d18d9e..43fac1b14e40 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -91,7 +91,7 @@ static inline void netfs_stat_d(atomic_t *stat)
 /*
  * Miscellaneous functions.
  */
-static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx)
+static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
 {
 #if IS_ENABLED(CONFIG_FSCACHE)
 	struct fscache_cookie *cookie = ctx->cache;
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index e86107b30ba4..e17cdf53f6a7 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -18,7 +18,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 {
 	static atomic_t debug_ids;
 	struct inode *inode = file ? file_inode(file) : mapping->host;
-	struct netfs_i_context *ctx = netfs_i_context(inode);
+	struct netfs_inode *ctx = netfs_inode(inode);
 	struct netfs_io_request *rreq;
 	int ret;
 
@@ -75,10 +75,10 @@ static void netfs_free_request(struct work_struct *work)
 	struct netfs_io_request *rreq =
 		container_of(work, struct netfs_io_request, work);
 
-	netfs_clear_subrequests(rreq, false);
-	if (rreq->netfs_priv)
-		rreq->netfs_ops->cleanup(rreq->mapping, rreq->netfs_priv);
 	trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+	netfs_clear_subrequests(rreq, false);
+	if (rreq->netfs_ops->free_request)
+		rreq->netfs_ops->free_request(rreq);
 	if (rreq->cache_resources.ops)
 		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
 	kfree(rreq);
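
Usage note (not part of the diff): with netfs_readpage() renamed to netfs_read_folio() and @aop_flags gone, a consuming filesystem's glue shrinks to something like the sketch below. The myfs_* names are hypothetical; the point is that the netfs_inode pointer is now passed to netfs_write_begin() explicitly instead of being derived from file_inode(file) inside the helper, so the helper also works when @file is NULL.

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **pagep, void **fsdata)
{
	struct folio *folio;
	int ret;

	/* Pass the netfs_inode in explicitly; derive it from the mapping
	 * rather than from @file.
	 */
	ret = netfs_write_begin(netfs_inode(mapping->host), file, mapping,
				pos, len, &folio, fsdata);
	if (ret < 0)
		return ret;

	*pagep = &folio->page;	/* folio is returned locked on success */
	return 0;
}

static const struct address_space_operations myfs_aops = {
	.read_folio	= netfs_read_folio,	/* was .readpage = netfs_readpage */
	.readahead	= netfs_readahead,
	.write_begin	= myfs_write_begin,
	/* ... */
};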
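The check_write_begin() change deserves an implementation-side illustration too: the op now receives struct folio ** rather than struct folio *, and the old "return -EAGAIN to retry" convention becomes "consume the folio, set *foliop to NULL, return 0". A minimal sketch under the new contract; myfs_has_conflict() and myfs_flush_conflicts() are invented stand-ins for whatever conflict handling the netfs (eg. ceph) actually does:

static int myfs_check_write_begin(struct file *file, loff_t pos,
				  unsigned int len, struct folio **foliop,
				  void **fsdata)
{
	struct inode *inode = file_inode(file);

	if (!myfs_has_conflict(inode, pos, len))	/* hypothetical */
		return 0;	/* go ahead with the folio as-is */

	/* Give the folio back before sleeping on the flush.  Returning 0
	 * with *foliop == NULL makes netfs_write_begin() re-get the folio
	 * and call this op again; returning an error aborts the write.
	 */
	folio_unlock(*foliop);
	folio_put(*foliop);
	*foliop = NULL;
	return myfs_flush_conflicts(inode, pos, len);	/* hypothetical */
}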
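Similarly for the objects.c hunk: the old ->cleanup(mapping, netfs_priv) op, invoked only when rreq->netfs_priv was set, becomes ->free_request(rreq), invoked whenever the op is present, after the free trace point and before the cache resources are torn down. A sketch of a migrated op table; myfs_* is hypothetical and the ops structure is assumed to be the struct netfs_request_ops that these rreq->netfs_ops calls dereference:

static void myfs_free_request(struct netfs_io_request *rreq)
{
	/* The whole request is passed in, so private state comes from
	 * rreq->netfs_priv rather than a separate argument.
	 */
	if (rreq->netfs_priv)
		kfree(rreq->netfs_priv);
}

static const struct netfs_request_ops myfs_req_ops = {
	.free_request		= myfs_free_request,	/* was .cleanup */
	.check_write_begin	= myfs_check_write_begin,
	/* ... */
};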