Diffstat (limited to 'fs/netfs')
-rw-r--r--   fs/netfs/Kconfig            23
-rw-r--r--   fs/netfs/Makefile           12
-rw-r--r--   fs/netfs/buffered_read.c   436
-rw-r--r--   fs/netfs/internal.h        145
-rw-r--r--   fs/netfs/io.c              660
-rw-r--r--   fs/netfs/iterator.c        103
-rw-r--r--   fs/netfs/main.c             20
-rw-r--r--   fs/netfs/objects.c         160
-rw-r--r--   fs/netfs/stats.c            58
9 files changed, 1617 insertions, 0 deletions
diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig
new file mode 100644
index 0000000000..b4db21022c
--- /dev/null
+++ b/fs/netfs/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NETFS_SUPPORT
+ tristate
+ help
+ This option enables support for network filesystems, including
+ helpers for high-level buffered I/O, abstracting out read
+ segmentation, local caching and transparent huge page support.
+
+config NETFS_STATS
+ bool "Gather statistical information on local caching"
+ depends on NETFS_SUPPORT && PROC_FS
+ help
+ This option causes statistical information to be gathered on local
+ caching and exported through the file:
+
+ /proc/fs/fscache/stats
+
+ The gathering of statistics adds a certain amount of overhead to
+ execution as quite a few stats are gathered, and on a
+ multi-CPU system these may be on cachelines that keep bouncing
+ between CPUs. On the other hand, the stats are very useful for
+ debugging purposes. Saying 'Y' here is recommended.
diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile
new file mode 100644
index 0000000000..386d6fb927
--- /dev/null
+++ b/fs/netfs/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+netfs-y := \
+ buffered_read.o \
+ io.o \
+ iterator.o \
+ main.o \
+ objects.o
+
+netfs-$(CONFIG_NETFS_STATS) += stats.o
+
+obj-$(CONFIG_NETFS_SUPPORT) := netfs.o
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
new file mode 100644
index 0000000000..2cd3ccf4c4
--- /dev/null
+++ b/fs/netfs/buffered_read.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level buffered read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Unlock the folios in a read operation. We need to set PG_fscache on any
+ * folios we're going to write back before we unlock them.
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+ struct folio *folio;
+ pgoff_t start_page = rreq->start / PAGE_SIZE;
+ pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+ size_t account = 0;
+ bool subreq_failed = false;
+
+ XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+ if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
+ __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
+ }
+ }
+
+ /* Walk through the pagecache and the I/O request lists simultaneously.
+ * We may have a mixture of cached and uncached sections and we only
+ * really want to write out the uncached sections. This is slightly
+ * complicated by the possibility that we might have huge pages with a
+ * mixture inside.
+ */
+ subreq = list_first_entry(&rreq->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ subreq_failed = (subreq->error < 0);
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, last_page) {
+ loff_t pg_end;
+ bool pg_failed = false;
+ bool folio_started;
+
+ if (xas_retry(&xas, folio))
+ continue;
+
+ pg_end = folio_pos(folio) + folio_size(folio) - 1;
+
+ folio_started = false;
+ for (;;) {
+ loff_t sreq_end;
+
+ if (!subreq) {
+ pg_failed = true;
+ break;
+ }
+ if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ folio_start_fscache(folio);
+ folio_started = true;
+ }
+ pg_failed |= subreq_failed;
+ sreq_end = subreq->start + subreq->len - 1;
+ if (pg_end < sreq_end)
+ break;
+
+ account += subreq->transferred;
+ if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+ subreq = list_next_entry(subreq, rreq_link);
+ subreq_failed = (subreq->error < 0);
+ } else {
+ subreq = NULL;
+ subreq_failed = false;
+ }
+
+ if (pg_end == sreq_end)
+ break;
+ }
+
+ if (!pg_failed) {
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+ }
+
+ if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+ if (folio_index(folio) == rreq->no_unlock_folio &&
+ test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
+ _debug("no unlock");
+ else
+ folio_unlock(folio);
+ }
+ }
+ rcu_read_unlock();
+
+ task_io_account_read(account);
+ if (rreq->netfs_ops->done)
+ rreq->netfs_ops->done(rreq);
+}
+
+static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
+ loff_t *_start, size_t *_len, loff_t i_size)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ if (cres->ops && cres->ops->expand_readahead)
+ cres->ops->expand_readahead(cres, _start, _len, i_size);
+}
+
+static void netfs_rreq_expand(struct netfs_io_request *rreq,
+ struct readahead_control *ractl)
+{
+ /* Give the cache a chance to change the request parameters. The
+ * resultant request must contain the original region.
+ */
+ netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
+
+ /* Give the netfs a chance to change the request parameters. The
+ * resultant request must contain the original region.
+ */
+ if (rreq->netfs_ops->expand_readahead)
+ rreq->netfs_ops->expand_readahead(rreq);
+
+ /* Expand the request if the cache wants it to start earlier. Note
+ * that the expansion may get further extended if the VM wishes to
+ * insert THPs and the preferred start and/or end wind up in the middle
+ * of THPs.
+ *
+ * If this is the case, however, the THP size should be an integer
+ * multiple of the cache granule size, so we get a whole number of
+ * granules to deal with.
+ */
+ if (rreq->start != readahead_pos(ractl) ||
+ rreq->len != readahead_length(ractl)) {
+ readahead_expand(ractl, rreq->start, rreq->len);
+ rreq->start = readahead_pos(ractl);
+ rreq->len = readahead_length(ractl);
+
+ trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+ netfs_read_trace_expanded);
+ }
+}
+
+/**
+ * netfs_readahead - Helper to manage a read request
+ * @ractl: The description of the readahead request
+ *
+ * Fulfil a readahead request by drawing data from the cache if possible, or
+ * the netfs if not. Space beyond the EOF is zero-filled. Multiple I/O
+ * requests from different sources will get munged together. If necessary, the
+ * readahead window can be expanded in either direction to a more convenient
+ * alignment for RPC efficiency or to make storage in the cache feasible.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+void netfs_readahead(struct readahead_control *ractl)
+{
+ struct netfs_io_request *rreq;
+ struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
+ int ret;
+
+ _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+
+ if (readahead_count(ractl) == 0)
+ return;
+
+ rreq = netfs_alloc_request(ractl->mapping, ractl->file,
+ readahead_pos(ractl),
+ readahead_length(ractl),
+ NETFS_READAHEAD);
+ if (IS_ERR(rreq))
+ return;
+
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto cleanup_free;
+ }
+
+ netfs_stat(&netfs_n_rh_readahead);
+ trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
+ netfs_read_trace_readahead);
+
+ netfs_rreq_expand(rreq, ractl);
+
+ /* Drop the refs on the folios here rather than in the cache or
+ * filesystem. The locks will be dropped in netfs_rreq_unlock().
+ */
+ while (readahead_folio(ractl))
+ ;
+
+ netfs_begin_read(rreq, false);
+ return;
+
+cleanup_free:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+ return;
+}
+EXPORT_SYMBOL(netfs_readahead);
+
+/**
+ * netfs_read_folio - Helper to manage a read_folio request
+ * @file: The file to read from
+ * @folio: The folio to read
+ *
+ * Fulfil a read_folio request by drawing data from the cache if
+ * possible, or the netfs if not. Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_read_folio(struct file *file, struct folio *folio)
+{
+ struct address_space *mapping = folio_file_mapping(folio);
+ struct netfs_io_request *rreq;
+ struct netfs_inode *ctx = netfs_inode(mapping->host);
+ int ret;
+
+ _enter("%lx", folio_index(folio));
+
+ rreq = netfs_alloc_request(mapping, file,
+ folio_file_pos(folio), folio_size(folio),
+ NETFS_READPAGE);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto alloc_error;
+ }
+
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto discard;
+ }
+
+ netfs_stat(&netfs_n_rh_readpage);
+ trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
+ return netfs_begin_read(rreq, true);
+
+discard:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
+alloc_error:
+ folio_unlock(folio);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_read_folio);
+
+/*
+ * Prepare a folio for writing without reading first
+ * @folio: The folio being prepared
+ * @pos: starting position for the write
+ * @len: length of write
+ * @always_fill: T if the folio should always be completely filled/cleared
+ *
+ * In some cases, write_begin doesn't need to read at all:
+ * - full folio write
+ * - write that lies in a folio that is completely beyond EOF
+ * - write that covers the folio from start to EOF or beyond it
+ *
+ * If any of these criteria are met, then zero out the unwritten parts
+ * of the folio and return true. Otherwise, return false.
+ */
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
+ bool always_fill)
+{
+ struct inode *inode = folio_inode(folio);
+ loff_t i_size = i_size_read(inode);
+ size_t offset = offset_in_folio(folio, pos);
+ size_t plen = folio_size(folio);
+
+ if (unlikely(always_fill)) {
+ if (pos - offset + len <= i_size)
+ return false; /* Page entirely before EOF */
+ zero_user_segment(&folio->page, 0, plen);
+ folio_mark_uptodate(folio);
+ return true;
+ }
+
+ /* Full folio write */
+ if (offset == 0 && len >= plen)
+ return true;
+
+ /* Page entirely beyond the end of the file */
+ if (pos - offset >= i_size)
+ goto zero_out;
+
+ /* Write that covers from the start of the folio to EOF or beyond */
+ if (offset == 0 && (pos + len) >= i_size)
+ goto zero_out;
+
+ return false;
+zero_out:
+ zero_user_segments(&folio->page, 0, offset, offset + len, plen);
+ return true;
+}
+
+/**
+ * netfs_write_begin - Helper to prepare for writing
+ * @ctx: The netfs context
+ * @file: The file to read from
+ * @mapping: The mapping to read from
+ * @pos: File position at which the write will begin
+ * @len: The length of the write (may extend beyond the end of the folio chosen)
+ * @_folio: Where to put the resultant folio
+ * @_fsdata: Place for the netfs to store a cookie
+ *
+ * Pre-read data for a write-begin request by drawing data from the cache if
+ * possible, or the netfs if not. Space beyond the EOF is zero-filled.
+ * Multiple I/O requests from different sources will get munged together. If
+ * necessary, the readahead window can be expanded in either direction to a
+ * more convenient alignment for RPC efficiency or to make storage in the cache
+ * feasible.
+ *
+ * The calling netfs must provide a table of operations, only one of which,
+ * issue_read, is mandatory.
+ *
+ * The check_write_begin() operation can be provided to check for and flush
+ * conflicting writes once the folio is grabbed and locked. It is passed a
+ * pointer to the fsdata cookie that gets returned to the VM to be passed to
+ * write_end. It is permitted to sleep. It should return 0 if the request
+ * should go ahead or it may return an error. It may also unlock and put the
+ * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0
+ * will cause the folio to be re-got and the process to be retried.
+ *
+ * The calling netfs must initialise a netfs context contiguous to the vfs
+ * inode before calling this.
+ *
+ * This is usable whether or not caching is enabled.
+ */
+int netfs_write_begin(struct netfs_inode *ctx,
+ struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, struct folio **_folio,
+ void **_fsdata)
+{
+ struct netfs_io_request *rreq;
+ struct folio *folio;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ int ret;
+
+ DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
+
+retry:
+ folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ mapping_gfp_mask(mapping));
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
+
+ if (ctx->ops->check_write_begin) {
+ /* Allow the netfs (eg. ceph) to flush conflicts. */
+ ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);
+ if (ret < 0) {
+ trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
+ goto error;
+ }
+ if (!folio)
+ goto retry;
+ }
+
+ if (folio_test_uptodate(folio))
+ goto have_folio;
+
+ /* If the page is beyond the EOF, we want to clear it - unless it's
+ * within the cache granule containing the EOF, in which case we need
+ * to preload the granule.
+ */
+ if (!netfs_is_cache_enabled(ctx) &&
+ netfs_skip_folio_read(folio, pos, len, false)) {
+ netfs_stat(&netfs_n_rh_write_zskip);
+ goto have_folio_no_wait;
+ }
+
+ rreq = netfs_alloc_request(mapping, file,
+ folio_file_pos(folio), folio_size(folio),
+ NETFS_READ_FOR_WRITE);
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
+ goto error;
+ }
+ rreq->no_unlock_folio = folio_index(folio);
+ __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
+
+ if (ctx->ops->begin_cache_operation) {
+ ret = ctx->ops->begin_cache_operation(rreq);
+ if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
+ goto error_put;
+ }
+
+ netfs_stat(&netfs_n_rh_write_begin);
+ trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
+
+ /* Expand the request to meet caching requirements and download
+ * preferences.
+ */
+ ractl._nr_pages = folio_nr_pages(folio);
+ netfs_rreq_expand(rreq, &ractl);
+
+ /* We hold the folio locks, so we can drop the references */
+ folio_get(folio);
+ while (readahead_folio(&ractl))
+ ;
+
+ ret = netfs_begin_read(rreq, true);
+ if (ret < 0)
+ goto error;
+
+have_folio:
+ ret = folio_wait_fscache_killable(folio);
+ if (ret < 0)
+ goto error;
+have_folio_no_wait:
+ *_folio = folio;
+ _leave(" = 0");
+ return 0;
+
+error_put:
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
+error:
+ if (folio) {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(netfs_write_begin);
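
The read-side helpers above are designed to be plugged straight into a network filesystem's address_space_operations, with the netfs context wrapping the VFS inode. A minimal sketch follows; it is not part of this patch, and the "myfs" names are hypothetical:

#include <linux/fs.h>
#include <linux/netfs.h>

/* The netfs context must be allocated contiguous with the VFS inode so
 * that netfs_inode() can recover it from a struct inode pointer.
 */
struct myfs_inode {
	struct netfs_inode	netfs;	/* netfs context wrapping struct inode */
	/* ... filesystem-private fields ... */
};

/* netfs_read_folio() and netfs_readahead() match the ->read_folio and
 * ->readahead method signatures, so they can be used as-is.
 */
static const struct address_space_operations myfs_aops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	/* ... plus the filesystem's own write-side methods ... */
};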
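
netfs_write_begin() is not itself an address_space operation, so a filesystem typically wraps it in its own ->write_begin. A hedged sketch, again with hypothetical "myfs" names:

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len,
			    struct page **pagep, void **fsdata)
{
	struct folio *folio;
	int ret;

	/* Let the helper grab the folio and pre-read or zero it as needed;
	 * check_write_begin(), if provided in the netfs ops, is called from
	 * inside this.
	 */
	ret = netfs_write_begin(netfs_inode(mapping->host), file, mapping,
				pos, len, &folio, fsdata);
	if (ret < 0)
		return ret;

	/* Hand back the page containing @pos; the folio is left locked. */
	*pagep = folio_file_page(folio, pos / PAGE_SIZE);
	return 0;
}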
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
new file mode 100644
index 0000000000..43fac1b14e
--- /dev/null
+++ b/fs/netfs/internal.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Internal definitions for network filesystem support
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/netfs.h>
+#include <linux/fscache.h>
+#include <trace/events/netfs.h>
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "netfs: " fmt
+
+/*
+ * buffered_read.c
+ */
+void netfs_rreq_unlock_folios(struct netfs_io_request *rreq);
+
+/*
+ * io.c
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
+
+/*
+ * main.c
+ */
+extern unsigned int netfs_debug;
+
+/*
+ * objects.c
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+ struct file *file,
+ loff_t start, size_t len,
+ enum netfs_io_origin origin);
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+ enum netfs_rreq_ref_trace what);
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
+
+static inline void netfs_see_request(struct netfs_io_request *rreq,
+ enum netfs_rreq_ref_trace what)
+{
+ trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
+}
+
+/*
+ * stats.c
+ */
+#ifdef CONFIG_NETFS_STATS
+extern atomic_t netfs_n_rh_readahead;
+extern atomic_t netfs_n_rh_readpage;
+extern atomic_t netfs_n_rh_rreq;
+extern atomic_t netfs_n_rh_sreq;
+extern atomic_t netfs_n_rh_download;
+extern atomic_t netfs_n_rh_download_done;
+extern atomic_t netfs_n_rh_download_failed;
+extern atomic_t netfs_n_rh_download_instead;
+extern atomic_t netfs_n_rh_read;
+extern atomic_t netfs_n_rh_read_done;
+extern atomic_t netfs_n_rh_read_failed;
+extern atomic_t netfs_n_rh_zero;
+extern atomic_t netfs_n_rh_short_read;
+extern atomic_t netfs_n_rh_write;
+extern atomic_t netfs_n_rh_write_begin;
+extern atomic_t netfs_n_rh_write_done;
+extern atomic_t netfs_n_rh_write_failed;
+extern atomic_t netfs_n_rh_write_zskip;
+
+
+static inline void netfs_stat(atomic_t *stat)
+{
+ atomic_inc(stat);
+}
+
+static inline void netfs_stat_d(atomic_t *stat)
+{
+ atomic_dec(stat);
+}
+
+#else
+#define netfs_stat(x) do {} while(0)
+#define netfs_stat_d(x) do {} while(0)
+#endif
+
+/*
+ * Miscellaneous functions.
+ */
+static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
+{
+#if IS_ENABLED(CONFIG_FSCACHE)
+ struct fscache_cookie *cookie = ctx->cache;
+
+ return fscache_cookie_valid(cookie) && cookie->cache_priv &&
+ fscache_cookie_enabled(cookie);
+#else
+ return false;
+#endif
+}
+
+/*****************************************************************************/
+/*
+ * debug tracing
+ */
+#define dbgprintk(FMT, ...) \
+ printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+
+#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
+
+#ifdef __KDEBUG
+#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
+#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
+#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
+
+#elif defined(CONFIG_NETFS_DEBUG)
+#define _enter(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kenter(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _leave(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kleave(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _debug(FMT, ...) \
+do { \
+ if (netfs_debug) \
+ kdebug(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#else
+#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
+#endif
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
new file mode 100644
index 0000000000..7f753380e0
--- /dev/null
+++ b/fs/netfs/io.c
@@ -0,0 +1,660 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Network filesystem high-level read support.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+#include "internal.h"
+
+/*
+ * Clear the unread part of an I/O request.
+ */
+static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
+{
+ struct iov_iter iter;
+
+ iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
+ subreq->start + subreq->transferred,
+ subreq->len - subreq->transferred);
+ iov_iter_zero(iov_iter_count(&iter), &iter);
+}
+
+static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct netfs_io_subrequest *subreq = priv;
+
+ netfs_subreq_terminated(subreq, transferred_or_error, was_async);
+}
+
+/*
+ * Issue a read against the cache.
+ * - Eats the caller's ref on subreq.
+ */
+static void netfs_read_from_cache(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq,
+ enum netfs_read_from_hole read_hole)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+ struct iov_iter iter;
+
+ netfs_stat(&netfs_n_rh_read);
+ iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
+ subreq->start + subreq->transferred,
+ subreq->len - subreq->transferred);
+
+ cres->ops->read(cres, subreq->start, &iter, read_hole,
+ netfs_cache_read_terminated, subreq);
+}
+
+/*
+ * Fill a subrequest region with zeroes.
+ */
+static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ netfs_stat(&netfs_n_rh_zero);
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ netfs_subreq_terminated(subreq, 0, false);
+}
+
+/*
+ * Ask the netfs to issue a read request to the server for us.
+ *
+ * The netfs is expected to read from subreq->pos + subreq->transferred to
+ * subreq->pos + subreq->len - 1. It may not backtrack and write data into the
+ * buffer prior to the transferred point as it might clobber dirty data
+ * obtained from the cache.
+ *
+ * Alternatively, the netfs is allowed to indicate one of two things:
+ *
+ * - NETFS_SREQ_SHORT_READ: A short read - it will get called again to try and
+ * make progress.
+ *
+ * - NETFS_SREQ_CLEAR_TAIL: A short read - the rest of the buffer will be
+ * cleared.
+ */
+static void netfs_read_from_server(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ netfs_stat(&netfs_n_rh_download);
+ rreq->netfs_ops->issue_read(subreq);
+}
+
+/*
+ * Release those waiting.
+ */
+static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
+{
+ trace_netfs_rreq(rreq, netfs_rreq_trace_done);
+ netfs_clear_subrequests(rreq, was_async);
+ netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
+}
+
+/*
+ * Deal with the completion of writing the data to the cache. We have to clear
+ * the PG_fscache bits on the folios involved and release the caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+ bool was_async)
+{
+ struct netfs_io_subrequest *subreq;
+ struct folio *folio;
+ pgoff_t unlocked = 0;
+ bool have_unlocked = false;
+
+ rcu_read_lock();
+
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+ xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+ if (xas_retry(&xas, folio))
+ continue;
+
+ /* We might have multiple writes from the same huge
+ * folio, but we mustn't unlock a folio more than once.
+ */
+ if (have_unlocked && folio_index(folio) <= unlocked)
+ continue;
+ unlocked = folio_index(folio);
+ folio_end_fscache(folio);
+ have_unlocked = true;
+ }
+ }
+
+ rcu_read_unlock();
+ netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct netfs_io_subrequest *subreq = priv;
+ struct netfs_io_request *rreq = subreq->rreq;
+
+ if (IS_ERR_VALUE(transferred_or_error)) {
+ netfs_stat(&netfs_n_rh_write_failed);
+ trace_netfs_failure(rreq, subreq, transferred_or_error,
+ netfs_fail_copy_to_cache);
+ } else {
+ netfs_stat(&netfs_n_rh_write_done);
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+ /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+ if (atomic_dec_and_test(&rreq->nr_copy_ops))
+ netfs_rreq_unmark_after_write(rreq, was_async);
+
+ netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * Perform any outstanding writes to the cache. We inherit a ref from the
+ * caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+ struct netfs_io_subrequest *subreq, *next, *p;
+ struct iov_iter iter;
+ int ret;
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+ /* We don't want terminating writes trying to wake us up whilst we're
+ * still going through the list.
+ */
+ atomic_inc(&rreq->nr_copy_ops);
+
+ list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+ if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ list_del_init(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, false,
+ netfs_sreq_trace_put_no_copy);
+ }
+ }
+
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ /* Amalgamate adjacent writes */
+ while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+ next = list_next_entry(subreq, rreq_link);
+ if (next->start != subreq->start + subreq->len)
+ break;
+ subreq->len += next->len;
+ list_del_init(&next->rreq_link);
+ netfs_put_subrequest(next, false,
+ netfs_sreq_trace_put_merged);
+ }
+
+ ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+ rreq->i_size, true);
+ if (ret < 0) {
+ trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+ continue;
+ }
+
+ iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
+ subreq->start, subreq->len);
+
+ atomic_inc(&rreq->nr_copy_ops);
+ netfs_stat(&netfs_n_rh_write);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+ cres->ops->write(cres, subreq->start, &iter,
+ netfs_rreq_copy_terminated, subreq);
+ }
+
+ /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+ if (atomic_dec_and_test(&rreq->nr_copy_ops))
+ netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+{
+ struct netfs_io_request *rreq =
+ container_of(work, struct netfs_io_request, work);
+
+ netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
+{
+ rreq->work.func = netfs_rreq_write_to_cache_work;
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ BUG();
+}
+
+/*
+ * Handle a short read.
+ */
+static void netfs_rreq_short_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ __clear_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+ __set_bit(NETFS_SREQ_SEEK_DATA_READ, &subreq->flags);
+
+ netfs_stat(&netfs_n_rh_short_read);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
+
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
+ atomic_inc(&rreq->nr_outstanding);
+ if (subreq->source == NETFS_READ_FROM_CACHE)
+ netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
+ else
+ netfs_read_from_server(rreq, subreq);
+}
+
+/*
+ * Resubmit any short or failed operations. Returns true if we got the rreq
+ * ref back.
+ */
+static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+
+ WARN_ON(in_interrupt());
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
+
+ /* We don't want terminating submissions trying to wake us up whilst
+ * we're still going through the list.
+ */
+ atomic_inc(&rreq->nr_outstanding);
+
+ __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ if (subreq->error) {
+ if (subreq->source != NETFS_READ_FROM_CACHE)
+ break;
+ subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
+ subreq->error = 0;
+ netfs_stat(&netfs_n_rh_download_instead);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
+ atomic_inc(&rreq->nr_outstanding);
+ netfs_read_from_server(rreq, subreq);
+ } else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
+ netfs_rreq_short_read(rreq, subreq);
+ }
+ }
+
+ /* If we decrement nr_outstanding to 0, the usage ref belongs to us. */
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
+ return true;
+
+ wake_up_var(&rreq->nr_outstanding);
+ return false;
+}
+
+/*
+ * Check to see if the data read is still valid.
+ */
+static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+
+ if (!rreq->netfs_ops->is_still_valid ||
+ rreq->netfs_ops->is_still_valid(rreq))
+ return;
+
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ if (subreq->source == NETFS_READ_FROM_CACHE) {
+ subreq->error = -ESTALE;
+ __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+ }
+ }
+}
+
+/*
+ * Assess the state of a read request and decide what to do next.
+ *
+ * Note that we could be in an ordinary kernel thread, on a workqueue or in
+ * softirq context at this point. We inherit a ref from the caller.
+ */
+static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
+{
+ trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
+
+again:
+ netfs_rreq_is_still_valid(rreq);
+
+ if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
+ test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
+ if (netfs_rreq_perform_resubmissions(rreq))
+ goto again;
+ return;
+ }
+
+ netfs_rreq_unlock_folios(rreq);
+
+ clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+ wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+
+ if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
+ return netfs_rreq_write_to_cache(rreq);
+
+ netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_work(struct work_struct *work)
+{
+ struct netfs_io_request *rreq =
+ container_of(work, struct netfs_io_request, work);
+ netfs_rreq_assess(rreq, false);
+}
+
+/*
+ * Handle the completion of all outstanding I/O operations on a read request.
+ * We inherit a ref from the caller.
+ */
+static void netfs_rreq_terminated(struct netfs_io_request *rreq,
+ bool was_async)
+{
+ if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
+ was_async) {
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ BUG();
+ } else {
+ netfs_rreq_assess(rreq, was_async);
+ }
+}
+
+/**
+ * netfs_subreq_terminated - Note the termination of an I/O operation.
+ * @subreq: The I/O request that has terminated.
+ * @transferred_or_error: The amount of data transferred or an error code.
+ * @was_async: The termination was asynchronous
+ *
+ * This tells the read helper that a contributory I/O operation has terminated,
+ * one way or another, and that it should integrate the results.
+ *
+ * The caller indicates in @transferred_or_error the outcome of the operation,
+ * supplying a positive value to indicate the number of bytes transferred, 0 to
+ * indicate a failure to transfer anything that should be retried or a negative
+ * error code. The helper will look after reissuing I/O operations as
+ * appropriate and writing downloaded data to the cache.
+ *
+ * If @was_async is true, the caller might be running in softirq or interrupt
+ * context and we can't sleep.
+ */
+void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
+ ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ int u;
+
+ _enter("[%u]{%llx,%lx},%zd",
+ subreq->debug_index, subreq->start, subreq->flags,
+ transferred_or_error);
+
+ switch (subreq->source) {
+ case NETFS_READ_FROM_CACHE:
+ netfs_stat(&netfs_n_rh_read_done);
+ break;
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ netfs_stat(&netfs_n_rh_download_done);
+ break;
+ default:
+ break;
+ }
+
+ if (IS_ERR_VALUE(transferred_or_error)) {
+ subreq->error = transferred_or_error;
+ trace_netfs_failure(rreq, subreq, transferred_or_error,
+ netfs_fail_read);
+ goto failed;
+ }
+
+ if (WARN(transferred_or_error > subreq->len - subreq->transferred,
+ "Subreq overread: R%x[%x] %zd > %zu - %zu",
+ rreq->debug_id, subreq->debug_index,
+ transferred_or_error, subreq->len, subreq->transferred))
+ transferred_or_error = subreq->len - subreq->transferred;
+
+ subreq->error = 0;
+ subreq->transferred += transferred_or_error;
+ if (subreq->transferred < subreq->len)
+ goto incomplete;
+
+complete:
+ __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
+ set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
+
+out:
+ trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
+
+ /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+ u = atomic_dec_return(&rreq->nr_outstanding);
+ if (u == 0)
+ netfs_rreq_terminated(rreq, was_async);
+ else if (u == 1)
+ wake_up_var(&rreq->nr_outstanding);
+
+ netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+ return;
+
+incomplete:
+ if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) {
+ netfs_clear_unread(subreq);
+ subreq->transferred = subreq->len;
+ goto complete;
+ }
+
+ if (transferred_or_error == 0) {
+ if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
+ subreq->error = -ENODATA;
+ goto failed;
+ }
+ } else {
+ __clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
+ }
+
+ __set_bit(NETFS_SREQ_SHORT_IO, &subreq->flags);
+ set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+ goto out;
+
+failed:
+ if (subreq->source == NETFS_READ_FROM_CACHE) {
+ netfs_stat(&netfs_n_rh_read_failed);
+ set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
+ } else {
+ netfs_stat(&netfs_n_rh_download_failed);
+ set_bit(NETFS_RREQ_FAILED, &rreq->flags);
+ rreq->error = subreq->error;
+ }
+ goto out;
+}
+EXPORT_SYMBOL(netfs_subreq_terminated);
+
+static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_subrequest *subreq,
+ loff_t i_size)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+
+ if (cres->ops)
+ return cres->ops->prepare_read(subreq, i_size);
+ if (subreq->start >= rreq->i_size)
+ return NETFS_FILL_WITH_ZEROES;
+ return NETFS_DOWNLOAD_FROM_SERVER;
+}
+
+/*
+ * Work out what sort of subrequest the next one will be.
+ */
+static enum netfs_io_source
+netfs_rreq_prepare_read(struct netfs_io_request *rreq,
+ struct netfs_io_subrequest *subreq)
+{
+ enum netfs_io_source source;
+
+ _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+
+ source = netfs_cache_prepare_read(subreq, rreq->i_size);
+ if (source == NETFS_INVALID_READ)
+ goto out;
+
+ if (source == NETFS_DOWNLOAD_FROM_SERVER) {
+ /* Call out to the netfs to let it shrink the request to fit
+ * its own I/O sizes and boundaries. If it shrinks it here, it
+ * will be called again to make simultaneous calls; if it wants
+ * to make serial calls, it can indicate a short read and then
+ * we will call it again.
+ */
+ if (subreq->len > rreq->i_size - subreq->start)
+ subreq->len = rreq->i_size - subreq->start;
+
+ if (rreq->netfs_ops->clamp_length &&
+ !rreq->netfs_ops->clamp_length(subreq)) {
+ source = NETFS_INVALID_READ;
+ goto out;
+ }
+ }
+
+ if (WARN_ON(subreq->len == 0))
+ source = NETFS_INVALID_READ;
+
+out:
+ subreq->source = source;
+ trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);
+ return source;
+}
+
+/*
+ * Slice off a piece of a read request and submit an I/O request for it.
+ */
+static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
+ unsigned int *_debug_index)
+{
+ struct netfs_io_subrequest *subreq;
+ enum netfs_io_source source;
+
+ subreq = netfs_alloc_subrequest(rreq);
+ if (!subreq)
+ return false;
+
+ subreq->debug_index = (*_debug_index)++;
+ subreq->start = rreq->start + rreq->submitted;
+ subreq->len = rreq->len - rreq->submitted;
+
+ _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+ list_add_tail(&subreq->rreq_link, &rreq->subrequests);
+
+ /* Call out to the cache to find out what it can do with the remaining
+ * subset. It tells us in subreq->flags what it decided should be done
+ * and adjusts subreq->len down if the subset crosses a cache boundary.
+ *
+ * Then when we hand the subset, it can choose to take a subset of that
+ * (the starts must coincide), in which case, we go around the loop
+ * again and ask it to download the next piece.
+ */
+ source = netfs_rreq_prepare_read(rreq, subreq);
+ if (source == NETFS_INVALID_READ)
+ goto subreq_failed;
+
+ atomic_inc(&rreq->nr_outstanding);
+
+ rreq->submitted += subreq->len;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+ switch (source) {
+ case NETFS_FILL_WITH_ZEROES:
+ netfs_fill_with_zeroes(rreq, subreq);
+ break;
+ case NETFS_DOWNLOAD_FROM_SERVER:
+ netfs_read_from_server(rreq, subreq);
+ break;
+ case NETFS_READ_FROM_CACHE:
+ netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
+ break;
+ default:
+ BUG();
+ }
+
+ return true;
+
+subreq_failed:
+ rreq->error = subreq->error;
+ netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
+ return false;
+}
+
+/*
+ * Begin the process of reading in a chunk of data, where that data may be
+ * stitched together from multiple sources, including multiple servers and the
+ * local cache.
+ */
+int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
+{
+ unsigned int debug_index = 0;
+ int ret;
+
+ _enter("R=%x %llx-%llx",
+ rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
+
+ if (rreq->len == 0) {
+ pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
+ return -EIO;
+ }
+
+ INIT_WORK(&rreq->work, netfs_rreq_work);
+
+ if (sync)
+ netfs_get_request(rreq, netfs_rreq_trace_get_hold);
+
+ /* Chop the read into slices according to what the cache and the netfs
+ * want and submit each one.
+ */
+ atomic_set(&rreq->nr_outstanding, 1);
+ do {
+ if (!netfs_rreq_submit_slice(rreq, &debug_index))
+ break;
+
+ } while (rreq->submitted < rreq->len);
+
+ if (sync) {
+ /* Keep nr_outstanding incremented so that the ref always belongs to
+ * us, and the service code isn't punted off to a random thread pool to
+ * process.
+ */
+ for (;;) {
+ wait_var_event(&rreq->nr_outstanding,
+ atomic_read(&rreq->nr_outstanding) == 1);
+ netfs_rreq_assess(rreq, false);
+ if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
+ break;
+ cond_resched();
+ }
+
+ ret = rreq->error;
+ if (ret == 0 && rreq->submitted < rreq->len) {
+ trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
+ ret = -EIO;
+ }
+ netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
+ } else {
+ /* If we decrement nr_outstanding to 0, the ref belongs to us. */
+ if (atomic_dec_and_test(&rreq->nr_outstanding))
+ netfs_rreq_assess(rreq, false);
+ ret = 0;
+ }
+ return ret;
+}
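
For completeness, a sketch of the netfs side of this contract: the filesystem's ->issue_read reads from subreq->start + subreq->transferred into the request's pagecache region and then reports the outcome through netfs_subreq_terminated(). This is illustrative only; myfs_read_from_server_blob() is a stand-in for the real transport call, not an actual function:

static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct iov_iter iter;
	ssize_t ret;

	/* Aim the iterator at the still-unfilled part of the subrequest. */
	iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
			subreq->start + subreq->transferred,
			subreq->len - subreq->transferred);

	ret = myfs_read_from_server_blob(rreq->inode, &iter,
					 subreq->start + subreq->transferred);
	if (ret == 0)
		/* Hit EOF: ask the helper to clear the rest of the buffer. */
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	/* Report bytes transferred (>0), a retryable zero, or -errno. */
	netfs_subreq_terminated(subreq, ret, false);
}

static const struct netfs_request_ops myfs_req_ops = {
	.issue_read	= myfs_issue_read,	/* the one mandatory op */
	/* optional hooks used by this code: .init_request, .free_request,
	 * .begin_cache_operation, .expand_readahead, .clamp_length,
	 * .is_still_valid, .check_write_begin, .done
	 */
};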
diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
new file mode 100644
index 0000000000..2ff07ba655
--- /dev/null
+++ b/fs/netfs/iterator.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Iterator helpers.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uio.h>
+#include <linux/scatterlist.h>
+#include <linux/netfs.h>
+#include "internal.h"
+
+/**
+ * netfs_extract_user_iter - Extract the pages from a user iterator into a bvec
+ * @orig: The original iterator
+ * @orig_len: The amount of iterator to copy
+ * @new: The iterator to be set up
+ * @extraction_flags: Flags to qualify the request
+ *
+ * Extract the page fragments from the given amount of the source iterator and
+ * build up a second iterator that refers to all of those bits. This allows
+ * the original iterator to be disposed of.
+ *
+ * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA be
+ * allowed on the pages extracted.
+ *
+ * On success, the number of elements in the bvec is returned, the original
+ * iterator will have been advanced by the amount extracted.
+ *
+ * The iov_iter_extract_mode() function should be used to query how cleanup
+ * should be performed.
+ */
+ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
+ struct iov_iter *new,
+ iov_iter_extraction_t extraction_flags)
+{
+ struct bio_vec *bv = NULL;
+ struct page **pages;
+ unsigned int cur_npages;
+ unsigned int max_pages;
+ unsigned int npages = 0;
+ unsigned int i;
+ ssize_t ret;
+ size_t count = orig_len, offset, len;
+ size_t bv_size, pg_size;
+
+ if (WARN_ON_ONCE(!iter_is_ubuf(orig) && !iter_is_iovec(orig)))
+ return -EIO;
+
+ max_pages = iov_iter_npages(orig, INT_MAX);
+ bv_size = array_size(max_pages, sizeof(*bv));
+ bv = kvmalloc(bv_size, GFP_KERNEL);
+ if (!bv)
+ return -ENOMEM;
+
+ /* Put the page list at the end of the bvec list storage. bvec
+ * elements are larger than page pointers, so as long as we work
+ * 0->last, we should be fine.
+ */
+ pg_size = array_size(max_pages, sizeof(*pages));
+ pages = (void *)bv + bv_size - pg_size;
+
+ while (count && npages < max_pages) {
+ ret = iov_iter_extract_pages(orig, &pages, count,
+ max_pages - npages, extraction_flags,
+ &offset);
+ if (ret < 0) {
+ pr_err("Couldn't get user pages (rc=%zd)\n", ret);
+ break;
+ }
+
+ if (ret > count) {
+ pr_err("get_pages rc=%zd more than %zu\n", ret, count);
+ break;
+ }
+
+ count -= ret;
+ ret += offset;
+ cur_npages = DIV_ROUND_UP(ret, PAGE_SIZE);
+
+ if (npages + cur_npages > max_pages) {
+ pr_err("Out of bvec array capacity (%u vs %u)\n",
+ npages + cur_npages, max_pages);
+ break;
+ }
+
+ for (i = 0; i < cur_npages; i++) {
+ len = ret > PAGE_SIZE ? PAGE_SIZE : ret;
+ bvec_set_page(bv + npages + i, *pages++, len - offset, offset);
+ ret -= len;
+ offset = 0;
+ }
+
+ npages += cur_npages;
+ }
+
+ iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);
+ return npages;
+}
+EXPORT_SYMBOL_GPL(netfs_extract_user_iter);
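
A usage sketch: a caller that wants to pin down a user buffer for an asynchronous, direct-I/O style operation can extract it into a private bvec-backed iterator first. Only netfs_extract_user_iter() and the generic iov_iter helpers are real; the surrounding names are hypothetical, and the bvec array backing the new iterator must be freed by the caller once the I/O completes:

static ssize_t myfs_pin_user_buffer(struct iov_iter *user_iter, size_t len,
				    struct iov_iter *pinned)
{
	ssize_t nbv;

	/* Extract 'len' bytes of the user iterator into 'pinned'.  The
	 * pages are held as indicated by iov_iter_extract_mode() and
	 * 'user_iter' is advanced past the extracted region.
	 */
	nbv = netfs_extract_user_iter(user_iter, len, pinned, 0);
	if (nbv < 0)
		return nbv;

	/*
	 * ... issue the I/O against 'pinned'; on completion, release the
	 * pages per iov_iter_extract_mode() and kvfree() the bvec array
	 * that backs 'pinned' ...
	 */
	return nbv;
}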
diff --git a/fs/netfs/main.c b/fs/netfs/main.c
new file mode 100644
index 0000000000..0685687029
--- /dev/null
+++ b/fs/netfs/main.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Miscellaneous bits for the netfs support library.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/module.h>
+#include <linux/export.h>
+#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/netfs.h>
+
+MODULE_DESCRIPTION("Network fs support");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+unsigned netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
new file mode 100644
index 0000000000..e17cdf53f6
--- /dev/null
+++ b/fs/netfs/objects.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Object lifetime handling and tracing.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * Allocate an I/O request and initialise it.
+ */
+struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
+ struct file *file,
+ loff_t start, size_t len,
+ enum netfs_io_origin origin)
+{
+ static atomic_t debug_ids;
+ struct inode *inode = file ? file_inode(file) : mapping->host;
+ struct netfs_inode *ctx = netfs_inode(inode);
+ struct netfs_io_request *rreq;
+ int ret;
+
+ rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
+ if (!rreq)
+ return ERR_PTR(-ENOMEM);
+
+ rreq->start = start;
+ rreq->len = len;
+ rreq->origin = origin;
+ rreq->netfs_ops = ctx->ops;
+ rreq->mapping = mapping;
+ rreq->inode = inode;
+ rreq->i_size = i_size_read(inode);
+ rreq->debug_id = atomic_inc_return(&debug_ids);
+ INIT_LIST_HEAD(&rreq->subrequests);
+ refcount_set(&rreq->ref, 1);
+ __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
+ if (rreq->netfs_ops->init_request) {
+ ret = rreq->netfs_ops->init_request(rreq, file);
+ if (ret < 0) {
+ kfree(rreq);
+ return ERR_PTR(ret);
+ }
+ }
+
+ netfs_stat(&netfs_n_rh_rreq);
+ return rreq;
+}
+
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
+{
+ int r;
+
+ __refcount_inc(&rreq->ref, &r);
+ trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
+}
+
+void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
+{
+ struct netfs_io_subrequest *subreq;
+
+ while (!list_empty(&rreq->subrequests)) {
+ subreq = list_first_entry(&rreq->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ list_del(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, was_async,
+ netfs_sreq_trace_put_clear);
+ }
+}
+
+static void netfs_free_request(struct work_struct *work)
+{
+ struct netfs_io_request *rreq =
+ container_of(work, struct netfs_io_request, work);
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_free);
+ netfs_clear_subrequests(rreq, false);
+ if (rreq->netfs_ops->free_request)
+ rreq->netfs_ops->free_request(rreq);
+ if (rreq->cache_resources.ops)
+ rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
+ kfree(rreq);
+ netfs_stat_d(&netfs_n_rh_rreq);
+}
+
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+ enum netfs_rreq_ref_trace what)
+{
+ unsigned int debug_id = rreq->debug_id;
+ bool dead;
+ int r;
+
+ dead = __refcount_dec_and_test(&rreq->ref, &r);
+ trace_netfs_rreq_ref(debug_id, r - 1, what);
+ if (dead) {
+ if (was_async) {
+ rreq->work.func = netfs_free_request;
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ BUG();
+ } else {
+ netfs_free_request(&rreq->work);
+ }
+ }
+}
+
+/*
+ * Allocate and partially initialise an I/O request structure.
+ */
+struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
+{
+ struct netfs_io_subrequest *subreq;
+
+ subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
+ if (subreq) {
+ INIT_LIST_HEAD(&subreq->rreq_link);
+ refcount_set(&subreq->ref, 2);
+ subreq->rreq = rreq;
+ netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+ netfs_stat(&netfs_n_rh_sreq);
+ }
+
+ return subreq;
+}
+
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+ enum netfs_sreq_ref_trace what)
+{
+ int r;
+
+ __refcount_inc(&subreq->ref, &r);
+ trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
+ what);
+}
+
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
+ bool was_async)
+{
+ struct netfs_io_request *rreq = subreq->rreq;
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_free);
+ kfree(subreq);
+ netfs_stat_d(&netfs_n_rh_sreq);
+ netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
+}
+
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
+ enum netfs_sreq_ref_trace what)
+{
+ unsigned int debug_index = subreq->debug_index;
+ unsigned int debug_id = subreq->rreq->debug_id;
+ bool dead;
+ int r;
+
+ dead = __refcount_dec_and_test(&subreq->ref, &r);
+ trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
+ if (dead)
+ netfs_free_subrequest(subreq, was_async);
+}
diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c
new file mode 100644
index 0000000000..5510a7a14a
--- /dev/null
+++ b/fs/netfs/stats.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Netfs support statistics
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/seq_file.h>
+#include "internal.h"
+
+atomic_t netfs_n_rh_readahead;
+atomic_t netfs_n_rh_readpage;
+atomic_t netfs_n_rh_rreq;
+atomic_t netfs_n_rh_sreq;
+atomic_t netfs_n_rh_download;
+atomic_t netfs_n_rh_download_done;
+atomic_t netfs_n_rh_download_failed;
+atomic_t netfs_n_rh_download_instead;
+atomic_t netfs_n_rh_read;
+atomic_t netfs_n_rh_read_done;
+atomic_t netfs_n_rh_read_failed;
+atomic_t netfs_n_rh_zero;
+atomic_t netfs_n_rh_short_read;
+atomic_t netfs_n_rh_write;
+atomic_t netfs_n_rh_write_begin;
+atomic_t netfs_n_rh_write_done;
+atomic_t netfs_n_rh_write_failed;
+atomic_t netfs_n_rh_write_zskip;
+
+void netfs_stats_show(struct seq_file *m)
+{
+ seq_printf(m, "RdHelp : RA=%u RP=%u WB=%u WBZ=%u rr=%u sr=%u\n",
+ atomic_read(&netfs_n_rh_readahead),
+ atomic_read(&netfs_n_rh_readpage),
+ atomic_read(&netfs_n_rh_write_begin),
+ atomic_read(&netfs_n_rh_write_zskip),
+ atomic_read(&netfs_n_rh_rreq),
+ atomic_read(&netfs_n_rh_sreq));
+ seq_printf(m, "RdHelp : ZR=%u sh=%u sk=%u\n",
+ atomic_read(&netfs_n_rh_zero),
+ atomic_read(&netfs_n_rh_short_read),
+ atomic_read(&netfs_n_rh_write_zskip));
+ seq_printf(m, "RdHelp : DL=%u ds=%u df=%u di=%u\n",
+ atomic_read(&netfs_n_rh_download),
+ atomic_read(&netfs_n_rh_download_done),
+ atomic_read(&netfs_n_rh_download_failed),
+ atomic_read(&netfs_n_rh_download_instead));
+ seq_printf(m, "RdHelp : RD=%u rs=%u rf=%u\n",
+ atomic_read(&netfs_n_rh_read),
+ atomic_read(&netfs_n_rh_read_done),
+ atomic_read(&netfs_n_rh_read_failed));
+ seq_printf(m, "RdHelp : WR=%u ws=%u wf=%u\n",
+ atomic_read(&netfs_n_rh_write),
+ atomic_read(&netfs_n_rh_write_done),
+ atomic_read(&netfs_n_rh_write_failed));
+}
+EXPORT_SYMBOL(netfs_stats_show);