author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
commit | ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch) |
tree | b2d64bc10158fdd5497876388cd68142ca374ed3 /fs/nfs/flexfilelayout |
parent | Initial commit. (diff) |
download | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip |
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fs/nfs/flexfilelayout')
-rw-r--r-- | fs/nfs/flexfilelayout/Makefile | 6 |
-rw-r--r-- | fs/nfs/flexfilelayout/flexfilelayout.c | 2623 |
-rw-r--r-- | fs/nfs/flexfilelayout/flexfilelayout.h | 226 |
-rw-r--r-- | fs/nfs/flexfilelayout/flexfilelayoutdev.c | 621 |
4 files changed, 3476 insertions(+), 0 deletions(-)
diff --git a/fs/nfs/flexfilelayout/Makefile b/fs/nfs/flexfilelayout/Makefile new file mode 100644 index 0000000000..49f03422b6 --- /dev/null +++ b/fs/nfs/flexfilelayout/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the pNFS Flexfile Layout Driver kernel module +# +obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += nfs_layout_flexfiles.o +nfs_layout_flexfiles-y := flexfilelayout.o flexfilelayoutdev.o diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c new file mode 100644 index 0000000000..ef817a0475 --- /dev/null +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -0,0 +1,2623 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Module for pnfs flexfile layout driver. + * + * Copyright (c) 2014, Primary Data, Inc. All rights reserved. + * + * Tao Peng <bergwolf@primarydata.com> + */ + +#include <linux/nfs_fs.h> +#include <linux/nfs_mount.h> +#include <linux/nfs_page.h> +#include <linux/module.h> +#include <linux/sched/mm.h> + +#include <linux/sunrpc/metrics.h> + +#include "flexfilelayout.h" +#include "../nfs4session.h" +#include "../nfs4idmap.h" +#include "../internal.h" +#include "../delegation.h" +#include "../nfs4trace.h" +#include "../iostat.h" +#include "../nfs.h" +#include "../nfs42.h" + +#define NFSDBG_FACILITY NFSDBG_PNFS_LD + +#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ) +#define FF_LAYOUTRETURN_MAXERR 20 + +enum nfs4_ff_op_type { + NFS4_FF_OP_LAYOUTSTATS, + NFS4_FF_OP_LAYOUTRETURN, +}; + +static unsigned short io_maxretrans; + +static const struct pnfs_commit_ops ff_layout_commit_ops; +static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, + struct nfs_pgio_header *hdr); +static int +ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, + struct nfs42_layoutstat_devinfo *devinfo, + int dev_limit, enum nfs4_ff_op_type type); +static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr, + const struct nfs42_layoutstat_devinfo *devinfo, + struct nfs4_ff_layout_mirror *mirror); + +static struct pnfs_layout_hdr * +ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags) +{ + struct nfs4_flexfile_layout *ffl; + + ffl = kzalloc(sizeof(*ffl), gfp_flags); + if (ffl) { + pnfs_init_ds_commit_info(&ffl->commit_info); + INIT_LIST_HEAD(&ffl->error_list); + INIT_LIST_HEAD(&ffl->mirrors); + ffl->last_report_time = ktime_get(); + ffl->commit_info.ops = &ff_layout_commit_ops; + return &ffl->generic_hdr; + } else + return NULL; +} + +static void +ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo) +{ + struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo); + struct nfs4_ff_layout_ds_err *err, *n; + + list_for_each_entry_safe(err, n, &ffl->error_list, list) { + list_del(&err->list); + kfree(err); + } + kfree_rcu(ffl, generic_hdr.plh_rcu); +} + +static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE); + if (unlikely(p == NULL)) + return -ENOBUFS; + stateid->type = NFS4_PNFS_DS_STATEID_TYPE; + memcpy(stateid->data, p, NFS4_STATEID_SIZE); + dprintk("%s: stateid id= [%x%x%x%x]\n", __func__, + p[0], p[1], p[2], p[3]); + return 0; +} + +static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE); + if (unlikely(!p)) + return -ENOBUFS; + memcpy(devid, p, NFS4_DEVICEID4_SIZE); + nfs4_print_deviceid(devid); + return 0; +} + +static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh) +{ + __be32 *p; + + p = 
xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -ENOBUFS; + fh->size = be32_to_cpup(p++); + if (fh->size > NFS_MAXFHSIZE) { + printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n", + fh->size); + return -EOVERFLOW; + } + /* fh.data */ + p = xdr_inline_decode(xdr, fh->size); + if (unlikely(!p)) + return -ENOBUFS; + memcpy(&fh->data, p, fh->size); + dprintk("%s: fh len %d\n", __func__, fh->size); + + return 0; +} + +/* + * Currently only stringified uids and gids are accepted. + * I.e., kerberos is not supported to the DSes, so no pricipals. + * + * That means that one common function will suffice, but when + * principals are added, this should be split to accomodate + * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid(). + */ +static int +decode_name(struct xdr_stream *xdr, u32 *id) +{ + __be32 *p; + int len; + + /* opaque_length(4)*/ + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -ENOBUFS; + len = be32_to_cpup(p++); + if (len < 0) + return -EINVAL; + + dprintk("%s: len %u\n", __func__, len); + + /* opaque body */ + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -ENOBUFS; + + if (!nfs_map_string_to_numeric((char *)p, len, id)) + return -EINVAL; + + return 0; +} + +static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1, + const struct nfs4_ff_layout_mirror *m2) +{ + int i, j; + + if (m1->fh_versions_cnt != m2->fh_versions_cnt) + return false; + for (i = 0; i < m1->fh_versions_cnt; i++) { + bool found_fh = false; + for (j = 0; j < m2->fh_versions_cnt; j++) { + if (nfs_compare_fh(&m1->fh_versions[i], + &m2->fh_versions[j]) == 0) { + found_fh = true; + break; + } + } + if (!found_fh) + return false; + } + return true; +} + +static struct nfs4_ff_layout_mirror * +ff_layout_add_mirror(struct pnfs_layout_hdr *lo, + struct nfs4_ff_layout_mirror *mirror) +{ + struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo); + struct nfs4_ff_layout_mirror *pos; + struct inode *inode = lo->plh_inode; + + spin_lock(&inode->i_lock); + list_for_each_entry(pos, &ff_layout->mirrors, mirrors) { + if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0) + continue; + if (!ff_mirror_match_fh(mirror, pos)) + continue; + if (refcount_inc_not_zero(&pos->ref)) { + spin_unlock(&inode->i_lock); + return pos; + } + } + list_add(&mirror->mirrors, &ff_layout->mirrors); + mirror->layout = lo; + spin_unlock(&inode->i_lock); + return mirror; +} + +static void +ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror) +{ + struct inode *inode; + if (mirror->layout == NULL) + return; + inode = mirror->layout->plh_inode; + spin_lock(&inode->i_lock); + list_del(&mirror->mirrors); + spin_unlock(&inode->i_lock); + mirror->layout = NULL; +} + +static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags) +{ + struct nfs4_ff_layout_mirror *mirror; + + mirror = kzalloc(sizeof(*mirror), gfp_flags); + if (mirror != NULL) { + spin_lock_init(&mirror->lock); + refcount_set(&mirror->ref, 1); + INIT_LIST_HEAD(&mirror->mirrors); + } + return mirror; +} + +static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror) +{ + const struct cred *cred; + + ff_layout_remove_mirror(mirror); + kfree(mirror->fh_versions); + cred = rcu_access_pointer(mirror->ro_cred); + put_cred(cred); + cred = rcu_access_pointer(mirror->rw_cred); + put_cred(cred); + nfs4_ff_layout_put_deviceid(mirror->mirror_ds); + kfree(mirror); +} + +static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror) +{ + if (mirror != NULL && 
refcount_dec_and_test(&mirror->ref)) + ff_layout_free_mirror(mirror); +} + +static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls) +{ + u32 i; + + for (i = 0; i < fls->mirror_array_cnt; i++) + ff_layout_put_mirror(fls->mirror_array[i]); +} + +static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls) +{ + if (fls) { + ff_layout_free_mirror_array(fls); + kfree(fls); + } +} + +static bool +ff_lseg_match_mirrors(struct pnfs_layout_segment *l1, + struct pnfs_layout_segment *l2) +{ + const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1); + const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l1); + u32 i; + + if (fl1->mirror_array_cnt != fl2->mirror_array_cnt) + return false; + for (i = 0; i < fl1->mirror_array_cnt; i++) { + if (fl1->mirror_array[i] != fl2->mirror_array[i]) + return false; + } + return true; +} + +static bool +ff_lseg_range_is_after(const struct pnfs_layout_range *l1, + const struct pnfs_layout_range *l2) +{ + u64 end1, end2; + + if (l1->iomode != l2->iomode) + return l1->iomode != IOMODE_READ; + end1 = pnfs_calc_offset_end(l1->offset, l1->length); + end2 = pnfs_calc_offset_end(l2->offset, l2->length); + if (end1 < l2->offset) + return false; + if (end2 < l1->offset) + return true; + return l2->offset <= l1->offset; +} + +static bool +ff_lseg_merge(struct pnfs_layout_segment *new, + struct pnfs_layout_segment *old) +{ + u64 new_end, old_end; + + if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags)) + return false; + if (new->pls_range.iomode != old->pls_range.iomode) + return false; + old_end = pnfs_calc_offset_end(old->pls_range.offset, + old->pls_range.length); + if (old_end < new->pls_range.offset) + return false; + new_end = pnfs_calc_offset_end(new->pls_range.offset, + new->pls_range.length); + if (new_end < old->pls_range.offset) + return false; + if (!ff_lseg_match_mirrors(new, old)) + return false; + + /* Mergeable: copy info from 'old' to 'new' */ + if (new_end < old_end) + new_end = old_end; + if (new->pls_range.offset < old->pls_range.offset) + new->pls_range.offset = old->pls_range.offset; + new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset, + new_end); + if (test_bit(NFS_LSEG_ROC, &old->pls_flags)) + set_bit(NFS_LSEG_ROC, &new->pls_flags); + return true; +} + +static void +ff_layout_add_lseg(struct pnfs_layout_hdr *lo, + struct pnfs_layout_segment *lseg, + struct list_head *free_me) +{ + pnfs_generic_layout_insert_lseg(lo, lseg, + ff_lseg_range_is_after, + ff_lseg_merge, + free_me); +} + +static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls) +{ + int i, j; + + for (i = 0; i < fls->mirror_array_cnt - 1; i++) { + for (j = i + 1; j < fls->mirror_array_cnt; j++) + if (fls->mirror_array[i]->efficiency < + fls->mirror_array[j]->efficiency) + swap(fls->mirror_array[i], + fls->mirror_array[j]); + } +} + +static struct pnfs_layout_segment * +ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, + struct nfs4_layoutget_res *lgr, + gfp_t gfp_flags) +{ + struct pnfs_layout_segment *ret; + struct nfs4_ff_layout_segment *fls = NULL; + struct xdr_stream stream; + struct xdr_buf buf; + struct page *scratch; + u64 stripe_unit; + u32 mirror_array_cnt; + __be32 *p; + int i, rc; + + dprintk("--> %s\n", __func__); + scratch = alloc_page(gfp_flags); + if (!scratch) + return ERR_PTR(-ENOMEM); + + xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, + lgr->layoutp->len); + xdr_set_scratch_page(&stream, scratch); + + /* stripe unit and mirror_array_cnt */ + rc = -EIO; + p = xdr_inline_decode(&stream, 8 + 4); + 
if (!p) + goto out_err_free; + + p = xdr_decode_hyper(p, &stripe_unit); + mirror_array_cnt = be32_to_cpup(p++); + dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__, + stripe_unit, mirror_array_cnt); + + if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT || + mirror_array_cnt == 0) + goto out_err_free; + + rc = -ENOMEM; + fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt), + gfp_flags); + if (!fls) + goto out_err_free; + + fls->mirror_array_cnt = mirror_array_cnt; + fls->stripe_unit = stripe_unit; + + for (i = 0; i < fls->mirror_array_cnt; i++) { + struct nfs4_ff_layout_mirror *mirror; + struct cred *kcred; + const struct cred __rcu *cred; + kuid_t uid; + kgid_t gid; + u32 ds_count, fh_count, id; + int j; + + rc = -EIO; + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_err_free; + ds_count = be32_to_cpup(p); + + /* FIXME: allow for striping? */ + if (ds_count != 1) + goto out_err_free; + + fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags); + if (fls->mirror_array[i] == NULL) { + rc = -ENOMEM; + goto out_err_free; + } + + fls->mirror_array[i]->ds_count = ds_count; + + /* deviceid */ + rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid); + if (rc) + goto out_err_free; + + /* efficiency */ + rc = -EIO; + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_err_free; + fls->mirror_array[i]->efficiency = be32_to_cpup(p); + + /* stateid */ + rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid); + if (rc) + goto out_err_free; + + /* fh */ + rc = -EIO; + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_err_free; + fh_count = be32_to_cpup(p); + + fls->mirror_array[i]->fh_versions = + kcalloc(fh_count, sizeof(struct nfs_fh), + gfp_flags); + if (fls->mirror_array[i]->fh_versions == NULL) { + rc = -ENOMEM; + goto out_err_free; + } + + for (j = 0; j < fh_count; j++) { + rc = decode_nfs_fh(&stream, + &fls->mirror_array[i]->fh_versions[j]); + if (rc) + goto out_err_free; + } + + fls->mirror_array[i]->fh_versions_cnt = fh_count; + + /* user */ + rc = decode_name(&stream, &id); + if (rc) + goto out_err_free; + + uid = make_kuid(&init_user_ns, id); + + /* group */ + rc = decode_name(&stream, &id); + if (rc) + goto out_err_free; + + gid = make_kgid(&init_user_ns, id); + + if (gfp_flags & __GFP_FS) + kcred = prepare_kernel_cred(&init_task); + else { + unsigned int nofs_flags = memalloc_nofs_save(); + kcred = prepare_kernel_cred(&init_task); + memalloc_nofs_restore(nofs_flags); + } + rc = -ENOMEM; + if (!kcred) + goto out_err_free; + kcred->fsuid = uid; + kcred->fsgid = gid; + cred = RCU_INITIALIZER(kcred); + + if (lgr->range.iomode == IOMODE_READ) + rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred); + else + rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred); + + mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]); + if (mirror != fls->mirror_array[i]) { + /* swap cred ptrs so free_mirror will clean up old */ + if (lgr->range.iomode == IOMODE_READ) { + cred = xchg(&mirror->ro_cred, cred); + rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred); + } else { + cred = xchg(&mirror->rw_cred, cred); + rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred); + } + ff_layout_free_mirror(fls->mirror_array[i]); + fls->mirror_array[i] = mirror; + } + + dprintk("%s: iomode %s uid %u gid %u\n", __func__, + lgr->range.iomode == IOMODE_READ ? 
"READ" : "RW", + from_kuid(&init_user_ns, uid), + from_kgid(&init_user_ns, gid)); + } + + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_sort_mirrors; + fls->flags = be32_to_cpup(p); + + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_sort_mirrors; + for (i=0; i < fls->mirror_array_cnt; i++) + fls->mirror_array[i]->report_interval = be32_to_cpup(p); + +out_sort_mirrors: + ff_layout_sort_mirrors(fls); + ret = &fls->generic_hdr; + dprintk("<-- %s (success)\n", __func__); +out_free_page: + __free_page(scratch); + return ret; +out_err_free: + _ff_layout_free_lseg(fls); + ret = ERR_PTR(rc); + dprintk("<-- %s (%d)\n", __func__, rc); + goto out_free_page; +} + +static void +ff_layout_free_lseg(struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); + + dprintk("--> %s\n", __func__); + + if (lseg->pls_range.iomode == IOMODE_RW) { + struct nfs4_flexfile_layout *ffl; + struct inode *inode; + + ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout); + inode = ffl->generic_hdr.plh_inode; + spin_lock(&inode->i_lock); + pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg); + spin_unlock(&inode->i_lock); + } + _ff_layout_free_lseg(fls); +} + +static void +nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) +{ + /* first IO request? */ + if (atomic_inc_return(&timer->n_ops) == 1) { + timer->start_time = now; + } +} + +static ktime_t +nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) +{ + ktime_t start; + + if (atomic_dec_return(&timer->n_ops) < 0) + WARN_ON_ONCE(1); + + start = timer->start_time; + timer->start_time = now; + return ktime_sub(now, start); +} + +static bool +nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror, + struct nfs4_ff_layoutstat *layoutstat, + ktime_t now) +{ + s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL; + struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout); + + nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now); + if (!mirror->start_time) + mirror->start_time = now; + if (mirror->report_interval != 0) + report_interval = (s64)mirror->report_interval * 1000LL; + else if (layoutstats_timer != 0) + report_interval = (s64)layoutstats_timer * 1000LL; + if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >= + report_interval) { + ffl->last_report_time = now; + return true; + } + + return false; +} + +static void +nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat, + __u64 requested) +{ + struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat; + + iostat->ops_requested++; + iostat->bytes_requested += requested; +} + +static void +nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat, + __u64 requested, + __u64 completed, + ktime_t time_completed, + ktime_t time_started) +{ + struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat; + ktime_t completion_time = ktime_sub(time_completed, time_started); + ktime_t timer; + + iostat->ops_completed++; + iostat->bytes_completed += completed; + iostat->bytes_not_delivered += requested - completed; + + timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed); + iostat->total_busy_time = + ktime_add(iostat->total_busy_time, timer); + iostat->aggregate_completion_time = + ktime_add(iostat->aggregate_completion_time, + completion_time); +} + +static void +nfs4_ff_layout_stat_io_start_read(struct inode *inode, + struct nfs4_ff_layout_mirror *mirror, + __u64 requested, ktime_t now) +{ + bool report; + + spin_lock(&mirror->lock); + report = 
nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now); + nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested); + set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); + spin_unlock(&mirror->lock); + + if (report) + pnfs_report_layoutstat(inode, nfs_io_gfp_mask()); +} + +static void +nfs4_ff_layout_stat_io_end_read(struct rpc_task *task, + struct nfs4_ff_layout_mirror *mirror, + __u64 requested, + __u64 completed) +{ + spin_lock(&mirror->lock); + nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat, + requested, completed, + ktime_get(), task->tk_start); + set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); + spin_unlock(&mirror->lock); +} + +static void +nfs4_ff_layout_stat_io_start_write(struct inode *inode, + struct nfs4_ff_layout_mirror *mirror, + __u64 requested, ktime_t now) +{ + bool report; + + spin_lock(&mirror->lock); + report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now); + nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested); + set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); + spin_unlock(&mirror->lock); + + if (report) + pnfs_report_layoutstat(inode, nfs_io_gfp_mask()); +} + +static void +nfs4_ff_layout_stat_io_end_write(struct rpc_task *task, + struct nfs4_ff_layout_mirror *mirror, + __u64 requested, + __u64 completed, + enum nfs3_stable_how committed) +{ + if (committed == NFS_UNSTABLE) + requested = completed = 0; + + spin_lock(&mirror->lock); + nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat, + requested, completed, ktime_get(), task->tk_start); + set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); + spin_unlock(&mirror->lock); +} + +static void +ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx) +{ + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + + if (devid) + nfs4_mark_deviceid_unavailable(devid); +} + +static void +ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx) +{ + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + + if (devid) + nfs4_mark_deviceid_available(devid); +} + +static struct nfs4_pnfs_ds * +ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg, + u32 start_idx, u32 *best_idx, + bool check_device) +{ + struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_pnfs_ds *ds; + u32 idx; + + /* mirrors are initially sorted by efficiency */ + for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) { + mirror = FF_LAYOUT_COMP(lseg, idx); + ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false); + if (!ds) + continue; + + if (check_device && + nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) + continue; + + *best_idx = idx; + return ds; + } + + return NULL; +} + +static struct nfs4_pnfs_ds * +ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg, + u32 start_idx, u32 *best_idx) +{ + return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false); +} + +static struct nfs4_pnfs_ds * +ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg, + u32 start_idx, u32 *best_idx) +{ + return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true); +} + +static struct nfs4_pnfs_ds * +ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg, + u32 start_idx, u32 *best_idx) +{ + struct nfs4_pnfs_ds *ds; + + ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx); + if (ds) + return ds; + return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx); +} + +static struct 
nfs4_pnfs_ds * +ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio, + u32 *best_idx) +{ + struct pnfs_layout_segment *lseg = pgio->pg_lseg; + struct nfs4_pnfs_ds *ds; + + ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx, + best_idx); + if (ds || !pgio->pg_mirror_idx) + return ds; + return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx); +} + +static void +ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req, + bool strict_iomode) +{ + pnfs_put_lseg(pgio->pg_lseg); + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), req->wb_bytes, IOMODE_READ, + strict_iomode, nfs_io_gfp_mask()); + if (IS_ERR(pgio->pg_lseg)) { + pgio->pg_error = PTR_ERR(pgio->pg_lseg); + pgio->pg_lseg = NULL; + } +} + +static void +ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + pnfs_generic_pg_check_layout(pgio); + pnfs_generic_pg_check_range(pgio, req); +} + +static void +ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + struct nfs_pgio_mirror *pgm; + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_pnfs_ds *ds; + u32 ds_idx; + +retry: + ff_layout_pg_check_layout(pgio, req); + /* Use full layout for now */ + if (!pgio->pg_lseg) { + ff_layout_pg_get_read(pgio, req, false); + if (!pgio->pg_lseg) + goto out_nolseg; + } + if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) { + ff_layout_pg_get_read(pgio, req, true); + if (!pgio->pg_lseg) + goto out_nolseg; + } + + ds = ff_layout_get_ds_for_read(pgio, &ds_idx); + if (!ds) { + if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) + goto out_mds; + pnfs_generic_pg_cleanup(pgio); + /* Sleep for 1 second before retrying */ + ssleep(1); + goto retry; + } + + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx); + pgm = &pgio->pg_mirrors[0]; + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; + + pgio->pg_mirror_idx = ds_idx; + + if (NFS_SERVER(pgio->pg_inode)->flags & + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) + pgio->pg_maxretrans = io_maxretrans; + return; +out_nolseg: + if (pgio->pg_error < 0) + return; +out_mds: + trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode, + 0, NFS4_MAX_UINT64, IOMODE_READ, + NFS_I(pgio->pg_inode)->layout, + pgio->pg_lseg); + pgio->pg_maxretrans = 0; + nfs_pageio_reset_read_mds(pgio); +} + +static void +ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + struct nfs4_ff_layout_mirror *mirror; + struct nfs_pgio_mirror *pgm; + struct nfs4_pnfs_ds *ds; + u32 i; + +retry: + ff_layout_pg_check_layout(pgio, req); + if (!pgio->pg_lseg) { + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), req->wb_bytes, + IOMODE_RW, false, nfs_io_gfp_mask()); + if (IS_ERR(pgio->pg_lseg)) { + pgio->pg_error = PTR_ERR(pgio->pg_lseg); + pgio->pg_lseg = NULL; + return; + } + } + /* If no lseg, fall back to write through mds */ + if (pgio->pg_lseg == NULL) + goto out_mds; + + /* Use a direct mapping of ds_idx to pgio mirror_idx */ + if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)) + goto out_eagain; + + for (i = 0; i < pgio->pg_mirror_count; i++) { + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); + ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true); + if (!ds) { + if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) + goto out_mds; + pnfs_generic_pg_cleanup(pgio); + /* Sleep for 1 second before retrying */ + ssleep(1); + goto retry; + } + pgm = &pgio->pg_mirrors[i]; + pgm->pg_bsize = 
mirror->mirror_ds->ds_versions[0].wsize; + } + + if (NFS_SERVER(pgio->pg_inode)->flags & + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) + pgio->pg_maxretrans = io_maxretrans; + return; +out_eagain: + pnfs_generic_pg_cleanup(pgio); + pgio->pg_error = -EAGAIN; + return; +out_mds: + trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode, + 0, NFS4_MAX_UINT64, IOMODE_RW, + NFS_I(pgio->pg_inode)->layout, + pgio->pg_lseg); + pgio->pg_maxretrans = 0; + nfs_pageio_reset_write_mds(pgio); + pgio->pg_error = -EAGAIN; +} + +static unsigned int +ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + if (!pgio->pg_lseg) { + pgio->pg_lseg = + pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), + req_offset(req), req->wb_bytes, + IOMODE_RW, false, nfs_io_gfp_mask()); + if (IS_ERR(pgio->pg_lseg)) { + pgio->pg_error = PTR_ERR(pgio->pg_lseg); + pgio->pg_lseg = NULL; + goto out; + } + } + if (pgio->pg_lseg) + return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg); + + trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode, + 0, NFS4_MAX_UINT64, IOMODE_RW, + NFS_I(pgio->pg_inode)->layout, + pgio->pg_lseg); + /* no lseg means that pnfs is not in use, so no mirroring here */ + nfs_pageio_reset_write_mds(pgio); +out: + return 1; +} + +static u32 +ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx) +{ + u32 old = desc->pg_mirror_idx; + + desc->pg_mirror_idx = idx; + return old; +} + +static struct nfs_pgio_mirror * +ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx) +{ + return &desc->pg_mirrors[idx]; +} + +static const struct nfs_pageio_ops ff_layout_pg_read_ops = { + .pg_init = ff_layout_pg_init_read, + .pg_test = pnfs_generic_pg_test, + .pg_doio = pnfs_generic_pg_readpages, + .pg_cleanup = pnfs_generic_pg_cleanup, +}; + +static const struct nfs_pageio_ops ff_layout_pg_write_ops = { + .pg_init = ff_layout_pg_init_write, + .pg_test = pnfs_generic_pg_test, + .pg_doio = pnfs_generic_pg_writepages, + .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write, + .pg_cleanup = pnfs_generic_pg_cleanup, + .pg_get_mirror = ff_layout_pg_get_mirror_write, + .pg_set_mirror = ff_layout_pg_set_mirror_write, +}; + +static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) +{ + struct rpc_task *task = &hdr->task; + + pnfs_layoutcommit_inode(hdr->inode, false); + + if (retry_pnfs) { + dprintk("%s Reset task %5u for i/o through pNFS " + "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, + hdr->task.tk_pid, + hdr->inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(hdr->inode), + hdr->args.count, + (unsigned long long)hdr->args.offset); + + hdr->completion_ops->reschedule_io(hdr); + return; + } + + if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { + dprintk("%s Reset task %5u for i/o through MDS " + "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, + hdr->task.tk_pid, + hdr->inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(hdr->inode), + hdr->args.count, + (unsigned long long)hdr->args.offset); + + trace_pnfs_mds_fallback_write_done(hdr->inode, + hdr->args.offset, hdr->args.count, + IOMODE_RW, NFS_I(hdr->inode)->layout, + hdr->lseg); + task->tk_status = pnfs_write_done_resend_to_mds(hdr); + } +} + +static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr) +{ + u32 idx = hdr->pgio_mirror_idx + 1; + u32 new_idx = 0; + + if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx)) + ff_layout_send_layouterror(hdr->lseg); + else + pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg); + 
pnfs_read_resend_pnfs(hdr, new_idx); +} + +static void ff_layout_reset_read(struct nfs_pgio_header *hdr) +{ + struct rpc_task *task = &hdr->task; + + pnfs_layoutcommit_inode(hdr->inode, false); + pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg); + + if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { + dprintk("%s Reset task %5u for i/o through MDS " + "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, + hdr->task.tk_pid, + hdr->inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(hdr->inode), + hdr->args.count, + (unsigned long long)hdr->args.offset); + + trace_pnfs_mds_fallback_read_done(hdr->inode, + hdr->args.offset, hdr->args.count, + IOMODE_READ, NFS_I(hdr->inode)->layout, + hdr->lseg); + task->tk_status = pnfs_read_done_resend_to_mds(hdr); + } +} + +static int ff_layout_async_handle_error_v4(struct rpc_task *task, + struct nfs4_state *state, + struct nfs_client *clp, + struct pnfs_layout_segment *lseg, + u32 idx) +{ + struct pnfs_layout_hdr *lo = lseg->pls_layout; + struct inode *inode = lo->plh_inode; + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; + + switch (task->tk_status) { + case -NFS4ERR_BADSESSION: + case -NFS4ERR_BADSLOT: + case -NFS4ERR_BAD_HIGH_SLOT: + case -NFS4ERR_DEADSESSION: + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + case -NFS4ERR_SEQ_FALSE_RETRY: + case -NFS4ERR_SEQ_MISORDERED: + dprintk("%s ERROR %d, Reset session. Exchangeid " + "flags 0x%x\n", __func__, task->tk_status, + clp->cl_exchange_flags); + nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); + break; + case -NFS4ERR_DELAY: + case -NFS4ERR_GRACE: + rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); + break; + case -NFS4ERR_RETRY_UNCACHED_REP: + break; + /* Invalidate Layout errors */ + case -NFS4ERR_PNFS_NO_LAYOUT: + case -ESTALE: /* mapped NFS4ERR_STALE */ + case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ + case -EISDIR: /* mapped NFS4ERR_ISDIR */ + case -NFS4ERR_FHEXPIRED: + case -NFS4ERR_WRONG_TYPE: + dprintk("%s Invalid layout error %d\n", __func__, + task->tk_status); + /* + * Destroy layout so new i/o will get a new layout. + * Layout will not be destroyed until all current lseg + * references are put. Mark layout as invalid to resend failed + * i/o and all i/o waiting on the slot table to the MDS until + * layout is destroyed and a new valid layout is obtained. + */ + pnfs_destroy_layout(NFS_I(inode)); + rpc_wake_up(&tbl->slot_tbl_waitq); + goto reset; + /* RPC connection errors */ + case -ECONNREFUSED: + case -EHOSTDOWN: + case -EHOSTUNREACH: + case -ENETUNREACH: + case -EIO: + case -ETIMEDOUT: + case -EPIPE: + case -EPROTO: + case -ENODEV: + dprintk("%s DS connection error %d\n", __func__, + task->tk_status); + nfs4_delete_deviceid(devid->ld, devid->nfs_client, + &devid->deviceid); + rpc_wake_up(&tbl->slot_tbl_waitq); + fallthrough; + default: + if (ff_layout_avoid_mds_available_ds(lseg)) + return -NFS4ERR_RESET_TO_PNFS; +reset: + dprintk("%s Retry through MDS. Error %d\n", __func__, + task->tk_status); + return -NFS4ERR_RESET_TO_MDS; + } + task->tk_status = 0; + return -EAGAIN; +} + +/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ +static int ff_layout_async_handle_error_v3(struct rpc_task *task, + struct pnfs_layout_segment *lseg, + u32 idx) +{ + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + + switch (task->tk_status) { + /* File access problems. 
Don't mark the device as unavailable */ + case -EACCES: + case -ESTALE: + case -EISDIR: + case -EBADHANDLE: + case -ELOOP: + case -ENOSPC: + break; + case -EJUKEBOX: + nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); + goto out_retry; + default: + dprintk("%s DS connection error %d\n", __func__, + task->tk_status); + nfs4_delete_deviceid(devid->ld, devid->nfs_client, + &devid->deviceid); + } + /* FIXME: Need to prevent infinite looping here. */ + return -NFS4ERR_RESET_TO_PNFS; +out_retry: + task->tk_status = 0; + rpc_restart_call_prepare(task); + rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); + return -EAGAIN; +} + +static int ff_layout_async_handle_error(struct rpc_task *task, + struct nfs4_state *state, + struct nfs_client *clp, + struct pnfs_layout_segment *lseg, + u32 idx) +{ + int vers = clp->cl_nfs_mod->rpc_vers->number; + + if (task->tk_status >= 0) { + ff_layout_mark_ds_reachable(lseg, idx); + return 0; + } + + /* Handle the case of an invalid layout segment */ + if (!pnfs_is_valid_lseg(lseg)) + return -NFS4ERR_RESET_TO_PNFS; + + switch (vers) { + case 3: + return ff_layout_async_handle_error_v3(task, lseg, idx); + case 4: + return ff_layout_async_handle_error_v4(task, state, clp, + lseg, idx); + default: + /* should never happen */ + WARN_ON_ONCE(1); + return 0; + } +} + +static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, + u32 idx, u64 offset, u64 length, + u32 *op_status, int opnum, int error) +{ + struct nfs4_ff_layout_mirror *mirror; + u32 status = *op_status; + int err; + + if (status == 0) { + switch (error) { + case -ETIMEDOUT: + case -EPFNOSUPPORT: + case -EPROTONOSUPPORT: + case -EOPNOTSUPP: + case -EINVAL: + case -ECONNREFUSED: + case -ECONNRESET: + case -EHOSTDOWN: + case -EHOSTUNREACH: + case -ENETUNREACH: + case -EADDRINUSE: + case -ENOBUFS: + case -EPIPE: + case -EPERM: + case -EPROTO: + case -ENODEV: + *op_status = status = NFS4ERR_NXIO; + break; + case -EACCES: + *op_status = status = NFS4ERR_ACCESS; + break; + default: + return; + } + } + + mirror = FF_LAYOUT_COMP(lseg, idx); + err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), + mirror, offset, length, status, opnum, + nfs_io_gfp_mask()); + + switch (status) { + case NFS4ERR_DELAY: + case NFS4ERR_GRACE: + break; + case NFS4ERR_NXIO: + ff_layout_mark_ds_unreachable(lseg, idx); + /* + * Don't return the layout if this is a read and we still + * have layouts to try + */ + if (opnum == OP_READ) + break; + fallthrough; + default: + pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, + lseg); + } + + dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status); +} + +/* NFS_PROTO call done callback routines */ +static int ff_layout_read_done_cb(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + int err; + + if (task->tk_status < 0) { + ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, + hdr->args.offset, hdr->args.count, + &hdr->res.op_status, OP_READ, + task->tk_status); + trace_ff_layout_read_error(hdr); + } + + err = ff_layout_async_handle_error(task, hdr->args.context->state, + hdr->ds_clp, hdr->lseg, + hdr->pgio_mirror_idx); + + trace_nfs4_pnfs_read(hdr, err); + clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); + clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); + switch (err) { + case -NFS4ERR_RESET_TO_PNFS: + set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); + return task->tk_status; + case -NFS4ERR_RESET_TO_MDS: + set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); + return task->tk_status; + case -EAGAIN: + goto out_eagain; + } + + return 0; 
+out_eagain: + rpc_restart_call_prepare(task); + return -EAGAIN; +} + +static bool +ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg) +{ + return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT); +} + +/* + * We reference the rpc_cred of the first WRITE that triggers the need for + * a LAYOUTCOMMIT, and use it to send the layoutcommit compound. + * rfc5661 is not clear about which credential should be used. + * + * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so + * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751 + * we always send layoutcommit after DS writes. + */ +static void +ff_layout_set_layoutcommit(struct inode *inode, + struct pnfs_layout_segment *lseg, + loff_t end_offset) +{ + if (!ff_layout_need_layoutcommit(lseg)) + return; + + pnfs_set_layoutcommit(inode, lseg, end_offset); + dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino, + (unsigned long long) NFS_I(inode)->layout->plh_lwb); +} + +static void ff_layout_read_record_layoutstats_start(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags)) + return; + nfs4_ff_layout_stat_io_start_read(hdr->inode, + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count, + task->tk_start); +} + +static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags)) + return; + nfs4_ff_layout_stat_io_end_read(task, + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count, + hdr->res.count); + set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags); +} + +static int ff_layout_read_prepare_common(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { + rpc_exit(task, -EIO); + return -EIO; + } + + if (!pnfs_is_valid_lseg(hdr->lseg)) { + rpc_exit(task, -EAGAIN); + return -EAGAIN; + } + + ff_layout_read_record_layoutstats_start(task, hdr); + return 0; +} + +/* + * Call ops for the async read/write cases + * In the case of dense layouts, the offset needs to be reset to its + * original value. 
+ */ +static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (ff_layout_read_prepare_common(task, hdr)) + return; + + rpc_call_start(task); +} + +static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (nfs4_setup_sequence(hdr->ds_clp, + &hdr->args.seq_args, + &hdr->res.seq_res, + task)) + return; + + ff_layout_read_prepare_common(task, hdr); +} + +static void ff_layout_read_call_done(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && + task->tk_status == 0) { + nfs4_sequence_done(task, &hdr->res.seq_res); + return; + } + + /* Note this may cause RPC to be resent */ + hdr->mds_ops->rpc_call_done(task, hdr); +} + +static void ff_layout_read_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + ff_layout_read_record_layoutstats_done(task, hdr); + rpc_count_iostats_metrics(task, + &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]); +} + +static void ff_layout_read_release(void *data) +{ + struct nfs_pgio_header *hdr = data; + + ff_layout_read_record_layoutstats_done(&hdr->task, hdr); + if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) + ff_layout_resend_pnfs_read(hdr); + else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) + ff_layout_reset_read(hdr); + pnfs_generic_rw_release(data); +} + + +static int ff_layout_write_done_cb(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + loff_t end_offs = 0; + int err; + + if (task->tk_status < 0) { + ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, + hdr->args.offset, hdr->args.count, + &hdr->res.op_status, OP_WRITE, + task->tk_status); + trace_ff_layout_write_error(hdr); + } + + err = ff_layout_async_handle_error(task, hdr->args.context->state, + hdr->ds_clp, hdr->lseg, + hdr->pgio_mirror_idx); + + trace_nfs4_pnfs_write(hdr, err); + clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); + clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); + switch (err) { + case -NFS4ERR_RESET_TO_PNFS: + set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); + return task->tk_status; + case -NFS4ERR_RESET_TO_MDS: + set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); + return task->tk_status; + case -EAGAIN: + return -EAGAIN; + } + + if (hdr->res.verf->committed == NFS_FILE_SYNC || + hdr->res.verf->committed == NFS_DATA_SYNC) + end_offs = hdr->mds_offset + (loff_t)hdr->res.count; + + /* Note: if the write is unstable, don't set end_offs until commit */ + ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs); + + /* zero out fattr since we don't care DS attr at all */ + hdr->fattr.valid = 0; + if (task->tk_status >= 0) + nfs_writeback_update_inode(hdr); + + return 0; +} + +static int ff_layout_commit_done_cb(struct rpc_task *task, + struct nfs_commit_data *data) +{ + int err; + + if (task->tk_status < 0) { + ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index, + data->args.offset, data->args.count, + &data->res.op_status, OP_COMMIT, + task->tk_status); + trace_ff_layout_commit_error(data); + } + + err = ff_layout_async_handle_error(task, NULL, data->ds_clp, + data->lseg, data->ds_commit_index); + + trace_nfs4_pnfs_commit_ds(data, err); + switch (err) { + case -NFS4ERR_RESET_TO_PNFS: + pnfs_generic_prepare_to_resend_writes(data); + return -EAGAIN; + case -NFS4ERR_RESET_TO_MDS: + pnfs_generic_prepare_to_resend_writes(data); + return -EAGAIN; + case -EAGAIN: + rpc_restart_call_prepare(task); + return -EAGAIN; + } + + 
ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb); + + return 0; +} + +static void ff_layout_write_record_layoutstats_start(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags)) + return; + nfs4_ff_layout_stat_io_start_write(hdr->inode, + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count, + task->tk_start); +} + +static void ff_layout_write_record_layoutstats_done(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags)) + return; + nfs4_ff_layout_stat_io_end_write(task, + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count, hdr->res.count, + hdr->res.verf->committed); + set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags); +} + +static int ff_layout_write_prepare_common(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { + rpc_exit(task, -EIO); + return -EIO; + } + + if (!pnfs_is_valid_lseg(hdr->lseg)) { + rpc_exit(task, -EAGAIN); + return -EAGAIN; + } + + ff_layout_write_record_layoutstats_start(task, hdr); + return 0; +} + +static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (ff_layout_write_prepare_common(task, hdr)) + return; + + rpc_call_start(task); +} + +static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (nfs4_setup_sequence(hdr->ds_clp, + &hdr->args.seq_args, + &hdr->res.seq_res, + task)) + return; + + ff_layout_write_prepare_common(task, hdr); +} + +static void ff_layout_write_call_done(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && + task->tk_status == 0) { + nfs4_sequence_done(task, &hdr->res.seq_res); + return; + } + + /* Note this may cause RPC to be resent */ + hdr->mds_ops->rpc_call_done(task, hdr); +} + +static void ff_layout_write_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + ff_layout_write_record_layoutstats_done(task, hdr); + rpc_count_iostats_metrics(task, + &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]); +} + +static void ff_layout_write_release(void *data) +{ + struct nfs_pgio_header *hdr = data; + + ff_layout_write_record_layoutstats_done(&hdr->task, hdr); + if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) { + ff_layout_send_layouterror(hdr->lseg); + ff_layout_reset_write(hdr, true); + } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) + ff_layout_reset_write(hdr, false); + pnfs_generic_rw_release(data); +} + +static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task, + struct nfs_commit_data *cdata) +{ + if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags)) + return; + nfs4_ff_layout_stat_io_start_write(cdata->inode, + FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), + 0, task->tk_start); +} + +static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task, + struct nfs_commit_data *cdata) +{ + struct nfs_page *req; + __u64 count = 0; + + if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags)) + return; + + if (task->tk_status == 0) { + list_for_each_entry(req, &cdata->pages, wb_list) + count += req->wb_bytes; + } + nfs4_ff_layout_stat_io_end_write(task, + FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), + count, count, NFS_FILE_SYNC); + set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags); +} + +static int 
ff_layout_commit_prepare_common(struct rpc_task *task, + struct nfs_commit_data *cdata) +{ + if (!pnfs_is_valid_lseg(cdata->lseg)) { + rpc_exit(task, -EAGAIN); + return -EAGAIN; + } + + ff_layout_commit_record_layoutstats_start(task, cdata); + return 0; +} + +static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) +{ + if (ff_layout_commit_prepare_common(task, data)) + return; + + rpc_call_start(task); +} + +static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) +{ + struct nfs_commit_data *wdata = data; + + if (nfs4_setup_sequence(wdata->ds_clp, + &wdata->args.seq_args, + &wdata->res.seq_res, + task)) + return; + ff_layout_commit_prepare_common(task, data); +} + +static void ff_layout_commit_done(struct rpc_task *task, void *data) +{ + pnfs_generic_write_commit_done(task, data); +} + +static void ff_layout_commit_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_commit_data *cdata = data; + + ff_layout_commit_record_layoutstats_done(task, cdata); + rpc_count_iostats_metrics(task, + &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]); +} + +static void ff_layout_commit_release(void *data) +{ + struct nfs_commit_data *cdata = data; + + ff_layout_commit_record_layoutstats_done(&cdata->task, cdata); + pnfs_generic_commit_release(data); +} + +static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { + .rpc_call_prepare = ff_layout_read_prepare_v3, + .rpc_call_done = ff_layout_read_call_done, + .rpc_count_stats = ff_layout_read_count_stats, + .rpc_release = ff_layout_read_release, +}; + +static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { + .rpc_call_prepare = ff_layout_read_prepare_v4, + .rpc_call_done = ff_layout_read_call_done, + .rpc_count_stats = ff_layout_read_count_stats, + .rpc_release = ff_layout_read_release, +}; + +static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { + .rpc_call_prepare = ff_layout_write_prepare_v3, + .rpc_call_done = ff_layout_write_call_done, + .rpc_count_stats = ff_layout_write_count_stats, + .rpc_release = ff_layout_write_release, +}; + +static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { + .rpc_call_prepare = ff_layout_write_prepare_v4, + .rpc_call_done = ff_layout_write_call_done, + .rpc_count_stats = ff_layout_write_count_stats, + .rpc_release = ff_layout_write_release, +}; + +static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = { + .rpc_call_prepare = ff_layout_commit_prepare_v3, + .rpc_call_done = ff_layout_commit_done, + .rpc_count_stats = ff_layout_commit_count_stats, + .rpc_release = ff_layout_commit_release, +}; + +static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = { + .rpc_call_prepare = ff_layout_commit_prepare_v4, + .rpc_call_done = ff_layout_commit_done, + .rpc_count_stats = ff_layout_commit_count_stats, + .rpc_release = ff_layout_commit_release, +}; + +static enum pnfs_try_status +ff_layout_read_pagelist(struct nfs_pgio_header *hdr) +{ + struct pnfs_layout_segment *lseg = hdr->lseg; + struct nfs4_pnfs_ds *ds; + struct rpc_clnt *ds_clnt; + struct nfs4_ff_layout_mirror *mirror; + const struct cred *ds_cred; + loff_t offset = hdr->args.offset; + u32 idx = hdr->pgio_mirror_idx; + int vers; + struct nfs_fh *fh; + + dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n", + __func__, hdr->inode->i_ino, + hdr->args.pgbase, (size_t)hdr->args.count, offset); + + mirror = FF_LAYOUT_COMP(lseg, idx); + ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false); + if (!ds) + goto out_failed; + + ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, 
ds->ds_clp, + hdr->inode); + if (IS_ERR(ds_clnt)) + goto out_failed; + + ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred); + if (!ds_cred) + goto out_failed; + + vers = nfs4_ff_layout_ds_version(mirror); + + dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__, + ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers); + + hdr->pgio_done_cb = ff_layout_read_done_cb; + refcount_inc(&ds->ds_clp->cl_count); + hdr->ds_clp = ds->ds_clp; + fh = nfs4_ff_layout_select_ds_fh(mirror); + if (fh) + hdr->args.fh = fh; + + nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid); + + /* + * Note that if we ever decide to split across DSes, + * then we may need to handle dense-like offsets. + */ + hdr->args.offset = offset; + hdr->mds_offset = offset; + + /* Perform an asynchronous read to ds */ + nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, + vers == 3 ? &ff_layout_read_call_ops_v3 : + &ff_layout_read_call_ops_v4, + 0, RPC_TASK_SOFTCONN); + put_cred(ds_cred); + return PNFS_ATTEMPTED; + +out_failed: + if (ff_layout_avoid_mds_available_ds(lseg)) + return PNFS_TRY_AGAIN; + trace_pnfs_mds_fallback_read_pagelist(hdr->inode, + hdr->args.offset, hdr->args.count, + IOMODE_READ, NFS_I(hdr->inode)->layout, lseg); + return PNFS_NOT_ATTEMPTED; +} + +/* Perform async writes. */ +static enum pnfs_try_status +ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) +{ + struct pnfs_layout_segment *lseg = hdr->lseg; + struct nfs4_pnfs_ds *ds; + struct rpc_clnt *ds_clnt; + struct nfs4_ff_layout_mirror *mirror; + const struct cred *ds_cred; + loff_t offset = hdr->args.offset; + int vers; + struct nfs_fh *fh; + u32 idx = hdr->pgio_mirror_idx; + + mirror = FF_LAYOUT_COMP(lseg, idx); + ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true); + if (!ds) + goto out_failed; + + ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, + hdr->inode); + if (IS_ERR(ds_clnt)) + goto out_failed; + + ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred); + if (!ds_cred) + goto out_failed; + + vers = nfs4_ff_layout_ds_version(mirror); + + dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n", + __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count, + offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), + vers); + + hdr->pgio_done_cb = ff_layout_write_done_cb; + refcount_inc(&ds->ds_clp->cl_count); + hdr->ds_clp = ds->ds_clp; + hdr->ds_commit_idx = idx; + fh = nfs4_ff_layout_select_ds_fh(mirror); + if (fh) + hdr->args.fh = fh; + + nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid); + + /* + * Note that if we ever decide to split across DSes, + * then we may need to handle dense-like offsets. + */ + hdr->args.offset = offset; + + /* Perform an asynchronous write */ + nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, + vers == 3 ? 
&ff_layout_write_call_ops_v3 : + &ff_layout_write_call_ops_v4, + sync, RPC_TASK_SOFTCONN); + put_cred(ds_cred); + return PNFS_ATTEMPTED; + +out_failed: + if (ff_layout_avoid_mds_available_ds(lseg)) + return PNFS_TRY_AGAIN; + trace_pnfs_mds_fallback_write_pagelist(hdr->inode, + hdr->args.offset, hdr->args.count, + IOMODE_RW, NFS_I(hdr->inode)->layout, lseg); + return PNFS_NOT_ATTEMPTED; +} + +static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) +{ + return i; +} + +static struct nfs_fh * +select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i) +{ + struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); + + /* FIXME: Assume that there is only one NFS version available + * for the DS. + */ + return &flseg->mirror_array[i]->fh_versions[0]; +} + +static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how) +{ + struct pnfs_layout_segment *lseg = data->lseg; + struct nfs4_pnfs_ds *ds; + struct rpc_clnt *ds_clnt; + struct nfs4_ff_layout_mirror *mirror; + const struct cred *ds_cred; + u32 idx; + int vers, ret; + struct nfs_fh *fh; + + if (!lseg || !(pnfs_is_valid_lseg(lseg) || + test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))) + goto out_err; + + idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); + mirror = FF_LAYOUT_COMP(lseg, idx); + ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true); + if (!ds) + goto out_err; + + ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, + data->inode); + if (IS_ERR(ds_clnt)) + goto out_err; + + ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred); + if (!ds_cred) + goto out_err; + + vers = nfs4_ff_layout_ds_version(mirror); + + dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__, + data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count), + vers); + data->commit_done_cb = ff_layout_commit_done_cb; + data->cred = ds_cred; + refcount_inc(&ds->ds_clp->cl_count); + data->ds_clp = ds->ds_clp; + fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); + if (fh) + data->args.fh = fh; + + ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops, + vers == 3 ? 
&ff_layout_commit_call_ops_v3 : + &ff_layout_commit_call_ops_v4, + how, RPC_TASK_SOFTCONN); + put_cred(ds_cred); + return ret; +out_err: + pnfs_generic_prepare_to_resend_writes(data); + pnfs_generic_commit_release(data); + return -EAGAIN; +} + +static int +ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, + int how, struct nfs_commit_info *cinfo) +{ + return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo, + ff_layout_initiate_commit); +} + +static bool ff_layout_match_rw(const struct rpc_task *task, + const struct nfs_pgio_header *hdr, + const struct pnfs_layout_segment *lseg) +{ + return hdr->lseg == lseg; +} + +static bool ff_layout_match_commit(const struct rpc_task *task, + const struct nfs_commit_data *cdata, + const struct pnfs_layout_segment *lseg) +{ + return cdata->lseg == lseg; +} + +static bool ff_layout_match_io(const struct rpc_task *task, const void *data) +{ + const struct rpc_call_ops *ops = task->tk_ops; + + if (ops == &ff_layout_read_call_ops_v3 || + ops == &ff_layout_read_call_ops_v4 || + ops == &ff_layout_write_call_ops_v3 || + ops == &ff_layout_write_call_ops_v4) + return ff_layout_match_rw(task, task->tk_calldata, data); + if (ops == &ff_layout_commit_call_ops_v3 || + ops == &ff_layout_commit_call_ops_v4) + return ff_layout_match_commit(task, task->tk_calldata, data); + return false; +} + +static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_ff_layout_ds *mirror_ds; + struct nfs4_pnfs_ds *ds; + struct nfs_client *ds_clp; + struct rpc_clnt *clnt; + u32 idx; + + for (idx = 0; idx < flseg->mirror_array_cnt; idx++) { + mirror = flseg->mirror_array[idx]; + mirror_ds = mirror->mirror_ds; + if (!mirror_ds) + continue; + ds = mirror->mirror_ds->ds; + if (!ds) + continue; + ds_clp = ds->ds_clp; + if (!ds_clp) + continue; + clnt = ds_clp->cl_rpcclient; + if (!clnt) + continue; + if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg)) + continue; + rpc_clnt_disconnect(clnt); + } +} + +static struct pnfs_ds_commit_info * +ff_layout_get_ds_info(struct inode *inode) +{ + struct pnfs_layout_hdr *layout = NFS_I(inode)->layout; + + if (layout == NULL) + return NULL; + + return &FF_LAYOUT_FROM_HDR(layout)->commit_info; +} + +static void +ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo, + struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); + struct inode *inode = lseg->pls_layout->plh_inode; + struct pnfs_commit_array *array, *new; + + new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, + nfs_io_gfp_mask()); + if (new) { + spin_lock(&inode->i_lock); + array = pnfs_add_commit_array(fl_cinfo, new, lseg); + spin_unlock(&inode->i_lock); + if (array != new) + pnfs_free_commit_array(new); + } +} + +static void +ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo, + struct inode *inode) +{ + spin_lock(&inode->i_lock); + pnfs_generic_ds_cinfo_destroy(fl_cinfo); + spin_unlock(&inode->i_lock); +} + +static void +ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d) +{ + nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds, + id_node)); +} + +static int ff_layout_encode_ioerr(struct xdr_stream *xdr, + const struct nfs4_layoutreturn_args *args, + const struct nfs4_flexfile_layoutreturn_args *ff_args) +{ + __be32 *start; + + start = xdr_reserve_space(xdr, 4); + if (unlikely(!start)) + return -E2BIG; + + *start = 
cpu_to_be32(ff_args->num_errors); + /* This assume we always return _ALL_ layouts */ + return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors); +} + +static void +encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len) +{ + WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0); +} + +static void +ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr, + const nfs4_stateid *stateid, + const struct nfs42_layoutstat_devinfo *devinfo) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 8 + 8); + p = xdr_encode_hyper(p, devinfo->offset); + p = xdr_encode_hyper(p, devinfo->length); + encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE); + p = xdr_reserve_space(xdr, 4*8); + p = xdr_encode_hyper(p, devinfo->read_count); + p = xdr_encode_hyper(p, devinfo->read_bytes); + p = xdr_encode_hyper(p, devinfo->write_count); + p = xdr_encode_hyper(p, devinfo->write_bytes); + encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE); +} + +static void +ff_layout_encode_ff_iostat(struct xdr_stream *xdr, + const nfs4_stateid *stateid, + const struct nfs42_layoutstat_devinfo *devinfo) +{ + ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo); + ff_layout_encode_ff_layoutupdate(xdr, devinfo, + devinfo->ld_private.data); +} + +/* report nothing for now */ +static void ff_layout_encode_iostats_array(struct xdr_stream *xdr, + const struct nfs4_layoutreturn_args *args, + struct nfs4_flexfile_layoutreturn_args *ff_args) +{ + __be32 *p; + int i; + + p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(ff_args->num_dev); + for (i = 0; i < ff_args->num_dev; i++) + ff_layout_encode_ff_iostat(xdr, + &args->layout->plh_stateid, + &ff_args->devinfo[i]); +} + +static void +ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo, + unsigned int num_entries) +{ + unsigned int i; + + for (i = 0; i < num_entries; i++) { + if (!devinfo[i].ld_private.ops) + continue; + if (!devinfo[i].ld_private.ops->free) + continue; + devinfo[i].ld_private.ops->free(&devinfo[i].ld_private); + } +} + +static struct nfs4_deviceid_node * +ff_layout_alloc_deviceid_node(struct nfs_server *server, + struct pnfs_device *pdev, gfp_t gfp_flags) +{ + struct nfs4_ff_layout_ds *dsaddr; + + dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags); + if (!dsaddr) + return NULL; + return &dsaddr->id_node; +} + +static void +ff_layout_encode_layoutreturn(struct xdr_stream *xdr, + const void *voidargs, + const struct nfs4_xdr_opaque_data *ff_opaque) +{ + const struct nfs4_layoutreturn_args *args = voidargs; + struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data; + struct xdr_buf tmp_buf = { + .head = { + [0] = { + .iov_base = page_address(ff_args->pages[0]), + }, + }, + .buflen = PAGE_SIZE, + }; + struct xdr_stream tmp_xdr; + __be32 *start; + + dprintk("%s: Begin\n", __func__); + + xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL); + + ff_layout_encode_ioerr(&tmp_xdr, args, ff_args); + ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args); + + start = xdr_reserve_space(xdr, 4); + *start = cpu_to_be32(tmp_buf.len); + xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len); + + dprintk("%s: Return\n", __func__); +} + +static void +ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args) +{ + struct nfs4_flexfile_layoutreturn_args *ff_args; + + if (!args->data) + return; + ff_args = args->data; + args->data = NULL; + + ff_layout_free_ds_ioerr(&ff_args->errors); + ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev); + + put_page(ff_args->pages[0]); + kfree(ff_args); +} + +static 
const struct nfs4_xdr_opaque_ops layoutreturn_ops = { + .encode = ff_layout_encode_layoutreturn, + .free = ff_layout_free_layoutreturn, +}; + +static int +ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args) +{ + struct nfs4_flexfile_layoutreturn_args *ff_args; + struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout); + + ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask()); + if (!ff_args) + goto out_nomem; + ff_args->pages[0] = alloc_page(nfs_io_gfp_mask()); + if (!ff_args->pages[0]) + goto out_nomem_free; + + INIT_LIST_HEAD(&ff_args->errors); + ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout, + &args->range, &ff_args->errors, + FF_LAYOUTRETURN_MAXERR); + + spin_lock(&args->inode->i_lock); + ff_args->num_dev = ff_layout_mirror_prepare_stats( + &ff_layout->generic_hdr, &ff_args->devinfo[0], + ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN); + spin_unlock(&args->inode->i_lock); + + args->ld_private->ops = &layoutreturn_ops; + args->ld_private->data = ff_args; + return 0; +out_nomem_free: + kfree(ff_args); +out_nomem: + return -ENOMEM; +} + +#ifdef CONFIG_NFS_V4_2 +void +ff_layout_send_layouterror(struct pnfs_layout_segment *lseg) +{ + struct pnfs_layout_hdr *lo = lseg->pls_layout; + struct nfs42_layout_error *errors; + LIST_HEAD(head); + + if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR)) + return; + ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1); + if (list_empty(&head)) + return; + + errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors), + nfs_io_gfp_mask()); + if (errors != NULL) { + const struct nfs4_ff_layout_ds_err *pos; + size_t n = 0; + + list_for_each_entry(pos, &head, list) { + errors[n].offset = pos->offset; + errors[n].length = pos->length; + nfs4_stateid_copy(&errors[n].stateid, &pos->stateid); + errors[n].errors[0].dev_id = pos->deviceid; + errors[n].errors[0].status = pos->status; + errors[n].errors[0].opnum = pos->opnum; + n++; + if (!list_is_last(&pos->list, &head) && + n < NFS42_LAYOUTERROR_MAX) + continue; + if (nfs42_proc_layouterror(lseg, errors, n) < 0) + break; + n = 0; + } + kfree(errors); + } + ff_layout_free_ds_ioerr(&head); +} +#else +void +ff_layout_send_layouterror(struct pnfs_layout_segment *lseg) +{ +} +#endif + +static int +ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen) +{ + const struct sockaddr_in *sin = (struct sockaddr_in *)sap; + + return snprintf(buf, buflen, "%pI4", &sin->sin_addr); +} + +static size_t +ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf, + const int buflen) +{ + const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; + const struct in6_addr *addr = &sin6->sin6_addr; + + /* + * RFC 4291, Section 2.2.2 + * + * Shorthanded ANY address + */ + if (ipv6_addr_any(addr)) + return snprintf(buf, buflen, "::"); + + /* + * RFC 4291, Section 2.2.2 + * + * Shorthanded loopback address + */ + if (ipv6_addr_loopback(addr)) + return snprintf(buf, buflen, "::1"); + + /* + * RFC 4291, Section 2.2.3 + * + * Special presentation address format for mapped v4 + * addresses. 
+ */ + if (ipv6_addr_v4mapped(addr)) + return snprintf(buf, buflen, "::ffff:%pI4", + &addr->s6_addr32[3]); + + /* + * RFC 4291, Section 2.2.1 + */ + return snprintf(buf, buflen, "%pI6c", addr); +} + +/* Derived from rpc_sockaddr2uaddr */ +static void +ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da) +{ + struct sockaddr *sap = (struct sockaddr *)&da->da_addr; + char portbuf[RPCBIND_MAXUADDRPLEN]; + char addrbuf[RPCBIND_MAXUADDRLEN]; + unsigned short port; + int len, netid_len; + __be32 *p; + + switch (sap->sa_family) { + case AF_INET: + if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0) + return; + port = ntohs(((struct sockaddr_in *)sap)->sin_port); + break; + case AF_INET6: + if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0) + return; + port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port); + break; + default: + WARN_ON_ONCE(1); + return; + } + + snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff); + len = strlcat(addrbuf, portbuf, sizeof(addrbuf)); + + netid_len = strlen(da->da_netid); + p = xdr_reserve_space(xdr, 4 + netid_len); + xdr_encode_opaque(p, da->da_netid, netid_len); + + p = xdr_reserve_space(xdr, 4 + len); + xdr_encode_opaque(p, addrbuf, len); +} + +static void +ff_layout_encode_nfstime(struct xdr_stream *xdr, + ktime_t t) +{ + struct timespec64 ts; + __be32 *p; + + p = xdr_reserve_space(xdr, 12); + ts = ktime_to_timespec64(t); + p = xdr_encode_hyper(p, ts.tv_sec); + *p++ = cpu_to_be32(ts.tv_nsec); +} + +static void +ff_layout_encode_io_latency(struct xdr_stream *xdr, + struct nfs4_ff_io_stat *stat) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 5 * 8); + p = xdr_encode_hyper(p, stat->ops_requested); + p = xdr_encode_hyper(p, stat->bytes_requested); + p = xdr_encode_hyper(p, stat->ops_completed); + p = xdr_encode_hyper(p, stat->bytes_completed); + p = xdr_encode_hyper(p, stat->bytes_not_delivered); + ff_layout_encode_nfstime(xdr, stat->total_busy_time); + ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time); +} + +static void +ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr, + const struct nfs42_layoutstat_devinfo *devinfo, + struct nfs4_ff_layout_mirror *mirror) +{ + struct nfs4_pnfs_ds_addr *da; + struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds; + struct nfs_fh *fh = &mirror->fh_versions[0]; + __be32 *p; + + da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node); + dprintk("%s: DS %s: encoding address %s\n", + __func__, ds->ds_remotestr, da->da_remotestr); + /* netaddr4 */ + ff_layout_encode_netaddr(xdr, da); + /* nfs_fh4 */ + p = xdr_reserve_space(xdr, 4 + fh->size); + xdr_encode_opaque(p, fh->data, fh->size); + /* ff_io_latency4 read */ + spin_lock(&mirror->lock); + ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat); + /* ff_io_latency4 write */ + ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat); + spin_unlock(&mirror->lock); + /* nfstime4 */ + ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time)); + /* bool */ + p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(false); +} + +static void +ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args, + const struct nfs4_xdr_opaque_data *opaque) +{ + struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque, + struct nfs42_layoutstat_devinfo, ld_private); + __be32 *start; + + /* layoutupdate length */ + start = xdr_reserve_space(xdr, 4); + ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data); + + *start = cpu_to_be32((xdr->p - start - 1) * 4); +} + +static 
void +ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque) +{ + struct nfs4_ff_layout_mirror *mirror = opaque->data; + + ff_layout_put_mirror(mirror); +} + +static const struct nfs4_xdr_opaque_ops layoutstat_ops = { + .encode = ff_layout_encode_layoutstats, + .free = ff_layout_free_layoutstats, +}; + +static int +ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, + struct nfs42_layoutstat_devinfo *devinfo, + int dev_limit, enum nfs4_ff_op_type type) +{ + struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo); + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_deviceid_node *dev; + int i = 0; + + list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) { + if (i >= dev_limit) + break; + if (IS_ERR_OR_NULL(mirror->mirror_ds)) + continue; + if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, + &mirror->flags) && + type != NFS4_FF_OP_LAYOUTRETURN) + continue; + /* mirror refcount put in cleanup_layoutstats */ + if (!refcount_inc_not_zero(&mirror->ref)) + continue; + dev = &mirror->mirror_ds->id_node; + memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE); + devinfo->offset = 0; + devinfo->length = NFS4_MAX_UINT64; + spin_lock(&mirror->lock); + devinfo->read_count = mirror->read_stat.io_stat.ops_completed; + devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed; + devinfo->write_count = mirror->write_stat.io_stat.ops_completed; + devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed; + spin_unlock(&mirror->lock); + devinfo->layout_type = LAYOUT_FLEX_FILES; + devinfo->ld_private.ops = &layoutstat_ops; + devinfo->ld_private.data = mirror; + + devinfo++; + i++; + } + return i; +} + +static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) +{ + struct pnfs_layout_hdr *lo; + struct nfs4_flexfile_layout *ff_layout; + const int dev_count = PNFS_LAYOUTSTATS_MAXDEV; + + /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */ + args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), + nfs_io_gfp_mask()); + if (!args->devinfo) + return -ENOMEM; + + spin_lock(&args->inode->i_lock); + lo = NFS_I(args->inode)->layout; + if (lo && pnfs_layout_is_valid(lo)) { + ff_layout = FF_LAYOUT_FROM_HDR(lo); + args->num_dev = ff_layout_mirror_prepare_stats( + &ff_layout->generic_hdr, &args->devinfo[0], dev_count, + NFS4_FF_OP_LAYOUTSTATS); + } else + args->num_dev = 0; + spin_unlock(&args->inode->i_lock); + if (!args->num_dev) { + kfree(args->devinfo); + args->devinfo = NULL; + return -ENOENT; + } + + return 0; +} + +static int +ff_layout_set_layoutdriver(struct nfs_server *server, + const struct nfs_fh *dummy) +{ +#if IS_ENABLED(CONFIG_NFS_V4_2) + server->caps |= NFS_CAP_LAYOUTSTATS; +#endif + return 0; +} + +static const struct pnfs_commit_ops ff_layout_commit_ops = { + .setup_ds_info = ff_layout_setup_ds_info, + .release_ds_info = ff_layout_release_ds_info, + .mark_request_commit = pnfs_layout_mark_request_commit, + .clear_request_commit = pnfs_generic_clear_request_commit, + .scan_commit_lists = pnfs_generic_scan_commit_lists, + .recover_commit_reqs = pnfs_generic_recover_commit_reqs, + .commit_pagelist = ff_layout_commit_pagelist, +}; + +static struct pnfs_layoutdriver_type flexfilelayout_type = { + .id = LAYOUT_FLEX_FILES, + .name = "LAYOUT_FLEX_FILES", + .owner = THIS_MODULE, + .flags = PNFS_LAYOUTGET_ON_OPEN, + .max_layoutget_response = 4096, /* 1 page or so... 
*/ + .set_layoutdriver = ff_layout_set_layoutdriver, + .alloc_layout_hdr = ff_layout_alloc_layout_hdr, + .free_layout_hdr = ff_layout_free_layout_hdr, + .alloc_lseg = ff_layout_alloc_lseg, + .free_lseg = ff_layout_free_lseg, + .add_lseg = ff_layout_add_lseg, + .pg_read_ops = &ff_layout_pg_read_ops, + .pg_write_ops = &ff_layout_pg_write_ops, + .get_ds_info = ff_layout_get_ds_info, + .free_deviceid_node = ff_layout_free_deviceid_node, + .read_pagelist = ff_layout_read_pagelist, + .write_pagelist = ff_layout_write_pagelist, + .alloc_deviceid_node = ff_layout_alloc_deviceid_node, + .prepare_layoutreturn = ff_layout_prepare_layoutreturn, + .sync = pnfs_nfs_generic_sync, + .prepare_layoutstats = ff_layout_prepare_layoutstats, + .cancel_io = ff_layout_cancel_io, +}; + +static int __init nfs4flexfilelayout_init(void) +{ + printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n", + __func__); + return pnfs_register_layoutdriver(&flexfilelayout_type); +} + +static void __exit nfs4flexfilelayout_exit(void) +{ + printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n", + __func__); + pnfs_unregister_layoutdriver(&flexfilelayout_type); +} + +MODULE_ALIAS("nfs-layouttype4-4"); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("The NFSv4 flexfile layout driver"); + +module_init(nfs4flexfilelayout_init); +module_exit(nfs4flexfilelayout_exit); + +module_param(io_maxretrans, ushort, 0644); +MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client " + "retries an I/O request before returning an error. "); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h new file mode 100644 index 0000000000..354a031c69 --- /dev/null +++ b/fs/nfs/flexfilelayout/flexfilelayout.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * NFSv4 flexfile layout driver data structures. + * + * Copyright (c) 2014, Primary Data, Inc. All rights reserved. + * + * Tao Peng <bergwolf@primarydata.com> + */ + +#ifndef FS_NFS_NFS4FLEXFILELAYOUT_H +#define FS_NFS_NFS4FLEXFILELAYOUT_H + +#define FF_FLAGS_NO_LAYOUTCOMMIT 1 +#define FF_FLAGS_NO_IO_THRU_MDS 2 +#define FF_FLAGS_NO_READ_IO 4 + +#include <linux/refcount.h> +#include "../pnfs.h" + +/* XXX: Let's filter out insanely large mirror count for now to avoid oom + * due to network error etc. 
*/ +#define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096 + +/* LAYOUTSTATS report interval in ms */ +#define FF_LAYOUTSTATS_REPORT_INTERVAL (60000L) +#define FF_LAYOUTSTATS_MAXDEV 4 + +struct nfs4_ff_ds_version { + u32 version; + u32 minor_version; + u32 rsize; + u32 wsize; + bool tightly_coupled; +}; + +/* chained in global deviceid hlist */ +struct nfs4_ff_layout_ds { + struct nfs4_deviceid_node id_node; + u32 ds_versions_cnt; + struct nfs4_ff_ds_version *ds_versions; + struct nfs4_pnfs_ds *ds; +}; + +struct nfs4_ff_layout_ds_err { + struct list_head list; /* linked in mirror error_list */ + u64 offset; + u64 length; + int status; + enum nfs_opnum4 opnum; + nfs4_stateid stateid; + struct nfs4_deviceid deviceid; +}; + +struct nfs4_ff_io_stat { + __u64 ops_requested; + __u64 bytes_requested; + __u64 ops_completed; + __u64 bytes_completed; + __u64 bytes_not_delivered; + ktime_t total_busy_time; + ktime_t aggregate_completion_time; +}; + +struct nfs4_ff_busy_timer { + ktime_t start_time; + atomic_t n_ops; +}; + +struct nfs4_ff_layoutstat { + struct nfs4_ff_io_stat io_stat; + struct nfs4_ff_busy_timer busy_timer; +}; + +struct nfs4_ff_layout_mirror { + struct pnfs_layout_hdr *layout; + struct list_head mirrors; + u32 ds_count; + u32 efficiency; + struct nfs4_deviceid devid; + struct nfs4_ff_layout_ds *mirror_ds; + u32 fh_versions_cnt; + struct nfs_fh *fh_versions; + nfs4_stateid stateid; + const struct cred __rcu *ro_cred; + const struct cred __rcu *rw_cred; + refcount_t ref; + spinlock_t lock; + unsigned long flags; + struct nfs4_ff_layoutstat read_stat; + struct nfs4_ff_layoutstat write_stat; + ktime_t start_time; + u32 report_interval; +}; + +#define NFS4_FF_MIRROR_STAT_AVAIL (0) + +struct nfs4_ff_layout_segment { + struct pnfs_layout_segment generic_hdr; + u64 stripe_unit; + u32 flags; + u32 mirror_array_cnt; + struct nfs4_ff_layout_mirror *mirror_array[]; +}; + +struct nfs4_flexfile_layout { + struct pnfs_layout_hdr generic_hdr; + struct pnfs_ds_commit_info commit_info; + struct list_head mirrors; + struct list_head error_list; /* nfs4_ff_layout_ds_err */ + ktime_t last_report_time; /* Layoutstat report times */ +}; + +struct nfs4_flexfile_layoutreturn_args { + struct list_head errors; + struct nfs42_layoutstat_devinfo devinfo[FF_LAYOUTSTATS_MAXDEV]; + unsigned int num_errors; + unsigned int num_dev; + struct page *pages[1]; +}; + +static inline struct nfs4_flexfile_layout * +FF_LAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo) +{ + return container_of(lo, struct nfs4_flexfile_layout, generic_hdr); +} + +static inline struct nfs4_ff_layout_segment * +FF_LAYOUT_LSEG(struct pnfs_layout_segment *lseg) +{ + return container_of(lseg, + struct nfs4_ff_layout_segment, + generic_hdr); +} + +static inline struct nfs4_ff_layout_ds * +FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node) +{ + return container_of(node, struct nfs4_ff_layout_ds, id_node); +} + +static inline struct nfs4_ff_layout_mirror * +FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx) +{ + struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); + + if (idx < fls->mirror_array_cnt) + return fls->mirror_array[idx]; + return NULL; +} + +static inline struct nfs4_deviceid_node * +FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx) +{ + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, idx); + + if (mirror != NULL) { + struct nfs4_ff_layout_ds *mirror_ds = mirror->mirror_ds; + + if (!IS_ERR_OR_NULL(mirror_ds)) + return &mirror_ds->id_node; + } + return NULL; +} + +static inline u32 +FF_LAYOUT_MIRROR_COUNT(struct 
pnfs_layout_segment *lseg) +{ + return FF_LAYOUT_LSEG(lseg)->mirror_array_cnt; +} + +static inline bool +ff_layout_no_fallback_to_mds(struct pnfs_layout_segment *lseg) +{ + return FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_IO_THRU_MDS; +} + +static inline bool +ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg) +{ + return FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_READ_IO; +} + +static inline int +nfs4_ff_layout_ds_version(const struct nfs4_ff_layout_mirror *mirror) +{ + return mirror->mirror_ds->ds_versions[0].version; +} + +struct nfs4_ff_layout_ds * +nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + gfp_t gfp_flags); +void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds); +void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds); +int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo, + struct nfs4_ff_layout_mirror *mirror, u64 offset, + u64 length, int status, enum nfs_opnum4 opnum, + gfp_t gfp_flags); +void ff_layout_send_layouterror(struct pnfs_layout_segment *lseg); +int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head); +void ff_layout_free_ds_ioerr(struct list_head *head); +unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo, + const struct pnfs_layout_range *range, + struct list_head *head, + unsigned int maxnum); +struct nfs_fh * +nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror); +void +nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror, + nfs4_stateid *stateid); + +struct nfs4_pnfs_ds * +nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, + struct nfs4_ff_layout_mirror *mirror, + bool fail_return); + +struct rpc_clnt * +nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror, + struct nfs_client *ds_clp, + struct inode *inode); +const struct cred *ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror, + const struct pnfs_layout_range *range, + const struct cred *mdscred); +bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg); +bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg); + +#endif /* FS_NFS_NFS4FLEXFILELAYOUT_H */ diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c new file mode 100644 index 0000000000..e028f5a0ef --- /dev/null +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device operations for the pnfs nfs4 file layout driver. + * + * Copyright (c) 2014, Primary Data, Inc. All rights reserved. 
+ * + * Tao Peng <bergwolf@primarydata.com> + */ + +#include <linux/nfs_fs.h> +#include <linux/vmalloc.h> +#include <linux/module.h> +#include <linux/sunrpc/addr.h> + +#include "../internal.h" +#include "../nfs4session.h" +#include "flexfilelayout.h" + +#define NFSDBG_FACILITY NFSDBG_PNFS_LD + +static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO; +static unsigned int dataserver_retrans; + +static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg); + +void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds) +{ + if (!IS_ERR_OR_NULL(mirror_ds)) + nfs4_put_deviceid_node(&mirror_ds->id_node); +} + +void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds) +{ + nfs4_print_deviceid(&mirror_ds->id_node.deviceid); + nfs4_pnfs_ds_put(mirror_ds->ds); + kfree(mirror_ds->ds_versions); + kfree_rcu(mirror_ds, id_node.rcu); +} + +/* Decode opaque device data and construct new_ds using it */ +struct nfs4_ff_layout_ds * +nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + gfp_t gfp_flags) +{ + struct xdr_stream stream; + struct xdr_buf buf; + struct page *scratch; + struct list_head dsaddrs; + struct nfs4_pnfs_ds_addr *da; + struct nfs4_ff_layout_ds *new_ds = NULL; + struct nfs4_ff_ds_version *ds_versions = NULL; + u32 mp_count; + u32 version_count; + __be32 *p; + int i, ret = -ENOMEM; + + /* set up xdr stream */ + scratch = alloc_page(gfp_flags); + if (!scratch) + goto out_err; + + new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags); + if (!new_ds) + goto out_scratch; + + nfs4_init_deviceid_node(&new_ds->id_node, + server, + &pdev->dev_id); + INIT_LIST_HEAD(&dsaddrs); + + xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen); + xdr_set_scratch_page(&stream, scratch); + + /* multipath count */ + p = xdr_inline_decode(&stream, 4); + if (unlikely(!p)) + goto out_err_drain_dsaddrs; + mp_count = be32_to_cpup(p); + dprintk("%s: multipath ds count %d\n", __func__, mp_count); + + for (i = 0; i < mp_count; i++) { + /* multipath ds */ + da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, + &stream, gfp_flags); + if (da) + list_add_tail(&da->da_node, &dsaddrs); + } + if (list_empty(&dsaddrs)) { + dprintk("%s: no suitable DS addresses found\n", + __func__); + ret = -ENOMEDIUM; + goto out_err_drain_dsaddrs; + } + + /* version count */ + p = xdr_inline_decode(&stream, 4); + if (unlikely(!p)) + goto out_err_drain_dsaddrs; + version_count = be32_to_cpup(p); + dprintk("%s: version count %d\n", __func__, version_count); + + ds_versions = kcalloc(version_count, + sizeof(struct nfs4_ff_ds_version), + gfp_flags); + if (!ds_versions) + goto out_scratch; + + for (i = 0; i < version_count; i++) { + /* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) + + * tightly_coupled(4) */ + p = xdr_inline_decode(&stream, 20); + if (unlikely(!p)) + goto out_err_drain_dsaddrs; + ds_versions[i].version = be32_to_cpup(p++); + ds_versions[i].minor_version = be32_to_cpup(p++); + ds_versions[i].rsize = nfs_io_size(be32_to_cpup(p++), + server->nfs_client->cl_proto); + ds_versions[i].wsize = nfs_io_size(be32_to_cpup(p++), + server->nfs_client->cl_proto); + ds_versions[i].tightly_coupled = be32_to_cpup(p); + + if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE) + ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE; + if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE) + ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE; + + /* + * check for valid major/minor combination. 
+ * currently we support dataserver which talk: + * v3, v4.0, v4.1, v4.2 + */ + if (!((ds_versions[i].version == 3 && ds_versions[i].minor_version == 0) || + (ds_versions[i].version == 4 && ds_versions[i].minor_version < 3))) { + dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__, + i, ds_versions[i].version, + ds_versions[i].minor_version); + ret = -EPROTONOSUPPORT; + goto out_err_drain_dsaddrs; + } + + dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n", + __func__, i, ds_versions[i].version, + ds_versions[i].minor_version, + ds_versions[i].rsize, + ds_versions[i].wsize, + ds_versions[i].tightly_coupled); + } + + new_ds->ds_versions = ds_versions; + new_ds->ds_versions_cnt = version_count; + + new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + if (!new_ds->ds) + goto out_err_drain_dsaddrs; + + /* If DS was already in cache, free ds addrs */ + while (!list_empty(&dsaddrs)) { + da = list_first_entry(&dsaddrs, + struct nfs4_pnfs_ds_addr, + da_node); + list_del_init(&da->da_node); + kfree(da->da_remotestr); + kfree(da); + } + + __free_page(scratch); + return new_ds; + +out_err_drain_dsaddrs: + while (!list_empty(&dsaddrs)) { + da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr, + da_node); + list_del_init(&da->da_node); + kfree(da->da_remotestr); + kfree(da); + } + + kfree(ds_versions); +out_scratch: + __free_page(scratch); +out_err: + kfree(new_ds); + + dprintk("%s ERROR: returning %d\n", __func__, ret); + return NULL; +} + +static void extend_ds_error(struct nfs4_ff_layout_ds_err *err, + u64 offset, u64 length) +{ + u64 end; + + end = max_t(u64, pnfs_end_offset(err->offset, err->length), + pnfs_end_offset(offset, length)); + err->offset = min_t(u64, err->offset, offset); + err->length = end - err->offset; +} + +static int +ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1, + const struct nfs4_ff_layout_ds_err *e2) +{ + int ret; + + if (e1->opnum != e2->opnum) + return e1->opnum < e2->opnum ? -1 : 1; + if (e1->status != e2->status) + return e1->status < e2->status ? 
-1 : 1; + ret = memcmp(e1->stateid.data, e2->stateid.data, + sizeof(e1->stateid.data)); + if (ret != 0) + return ret; + ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid)); + if (ret != 0) + return ret; + if (pnfs_end_offset(e1->offset, e1->length) < e2->offset) + return -1; + if (e1->offset > pnfs_end_offset(e2->offset, e2->length)) + return 1; + /* If ranges overlap or are contiguous, they are the same */ + return 0; +} + +static void +ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo, + struct nfs4_ff_layout_ds_err *dserr) +{ + struct nfs4_ff_layout_ds_err *err, *tmp; + struct list_head *head = &flo->error_list; + int match; + + /* Do insertion sort w/ merges */ + list_for_each_entry_safe(err, tmp, &flo->error_list, list) { + match = ff_ds_error_match(err, dserr); + if (match < 0) + continue; + if (match > 0) { + /* Add entry "dserr" _before_ entry "err" */ + head = &err->list; + break; + } + /* Entries match, so merge "err" into "dserr" */ + extend_ds_error(dserr, err->offset, err->length); + list_replace(&err->list, &dserr->list); + kfree(err); + return; + } + + list_add_tail(&dserr->list, head); +} + +int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo, + struct nfs4_ff_layout_mirror *mirror, u64 offset, + u64 length, int status, enum nfs_opnum4 opnum, + gfp_t gfp_flags) +{ + struct nfs4_ff_layout_ds_err *dserr; + + if (status == 0) + return 0; + + if (IS_ERR_OR_NULL(mirror->mirror_ds)) + return -EINVAL; + + dserr = kmalloc(sizeof(*dserr), gfp_flags); + if (!dserr) + return -ENOMEM; + + INIT_LIST_HEAD(&dserr->list); + dserr->offset = offset; + dserr->length = length; + dserr->status = status; + dserr->opnum = opnum; + nfs4_stateid_copy(&dserr->stateid, &mirror->stateid); + memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid, + NFS4_DEVICEID4_SIZE); + + spin_lock(&flo->generic_hdr.plh_inode->i_lock); + ff_layout_add_ds_error_locked(flo, dserr); + spin_unlock(&flo->generic_hdr.plh_inode->i_lock); + return 0; +} + +static const struct cred * +ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode) +{ + const struct cred *cred, __rcu **pcred; + + if (iomode == IOMODE_READ) + pcred = &mirror->ro_cred; + else + pcred = &mirror->rw_cred; + + rcu_read_lock(); + do { + cred = rcu_dereference(*pcred); + if (!cred) + break; + + cred = get_cred_rcu(cred); + } while(!cred); + rcu_read_unlock(); + return cred; +} + +struct nfs_fh * +nfs4_ff_layout_select_ds_fh(struct nfs4_ff_layout_mirror *mirror) +{ + /* FIXME: For now assume there is only 1 version available for the DS */ + return &mirror->fh_versions[0]; +} + +void +nfs4_ff_layout_select_ds_stateid(const struct nfs4_ff_layout_mirror *mirror, + nfs4_stateid *stateid) +{ + if (nfs4_ff_layout_ds_version(mirror) == 4) + nfs4_stateid_copy(stateid, &mirror->stateid); +} + +static bool +ff_layout_init_mirror_ds(struct pnfs_layout_hdr *lo, + struct nfs4_ff_layout_mirror *mirror) +{ + if (mirror == NULL) + goto outerr; + if (mirror->mirror_ds == NULL) { + struct nfs4_deviceid_node *node; + struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV); + + node = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), + &mirror->devid, lo->plh_lc_cred, + GFP_KERNEL); + if (node) + mirror_ds = FF_LAYOUT_MIRROR_DS(node); + + /* check for race with another call to this function */ + if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) && + mirror_ds != ERR_PTR(-ENODEV)) + nfs4_put_deviceid_node(node); + } + + if (IS_ERR(mirror->mirror_ds)) + goto outerr; + + return true; +outerr: + return false; +} + +/** + * 
nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
+ * @lseg: the layout segment we're operating on
+ * @mirror: layout mirror describing the DS to use
+ * @fail_return: return layout on connect failure?
+ *
+ * Try to prepare a DS connection to accept an RPC call. This involves
+ * selecting a mirror to use and connecting the client to it if it's not
+ * already connected.
+ *
+ * Since we only need a single functioning mirror to satisfy a read, we don't
+ * want to return the layout if there is one. For writes though, any down
+ * mirror should result in a LAYOUTRETURN. @fail_return is how we distinguish
+ * between the two cases.
+ *
+ * Returns a pointer to a connected DS object on success or NULL on failure.
+ */
+struct nfs4_pnfs_ds *
+nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+ struct nfs4_ff_layout_mirror *mirror,
+ bool fail_return)
+{
+ struct nfs4_pnfs_ds *ds = NULL;
+ struct inode *ino = lseg->pls_layout->plh_inode;
+ struct nfs_server *s = NFS_SERVER(ino);
+ unsigned int max_payload;
+ int status;
+
+ if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
+ goto noconnect;
+
+ ds = mirror->mirror_ds->ds;
+ if (READ_ONCE(ds->ds_clp))
+ goto out;
+ /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
+ smp_rmb();
+
+ /* FIXME: For now we assume the server sent only one version of NFS
+ * to use for the DS.
+ */
+ status = nfs4_pnfs_ds_connect(s, ds, &mirror->mirror_ds->id_node,
+ dataserver_timeo, dataserver_retrans,
+ mirror->mirror_ds->ds_versions[0].version,
+ mirror->mirror_ds->ds_versions[0].minor_version);
+
+ /* connect success, check rsize/wsize limit */
+ if (!status) {
+ max_payload =
+ nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
+ NULL);
+ if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
+ mirror->mirror_ds->ds_versions[0].rsize = max_payload;
+ if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
+ mirror->mirror_ds->ds_versions[0].wsize = max_payload;
+ goto out;
+ }
+noconnect:
+ ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
+ mirror, lseg->pls_range.offset,
+ lseg->pls_range.length, NFS4ERR_NXIO,
+ OP_ILLEGAL, GFP_NOIO);
+ ff_layout_send_layouterror(lseg);
+ if (fail_return || !ff_layout_has_available_ds(lseg))
+ pnfs_error_mark_layout_for_return(ino, lseg);
+ ds = NULL;
+out:
+ return ds;
+}
+
+const struct cred *
+ff_layout_get_ds_cred(struct nfs4_ff_layout_mirror *mirror,
+ const struct pnfs_layout_range *range,
+ const struct cred *mdscred)
+{
+ const struct cred *cred;
+
+ if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
+ cred = ff_layout_get_mirror_cred(mirror, range->iomode);
+ if (!cred)
+ cred = get_cred(mdscred);
+ } else {
+ cred = get_cred(mdscred);
+ }
+ return cred;
+}
+
+/**
+ * nfs4_ff_find_or_create_ds_client - Find or create a DS rpc client
+ * @mirror: pointer to the mirror
+ * @ds_clp: nfs_client for the DS
+ * @inode: pointer to inode
+ *
+ * Find or create a DS rpc client with the MDS server rpc client auth flavor
+ * in the nfs_client cl_ds_clients list.
+ */ +struct rpc_clnt * +nfs4_ff_find_or_create_ds_client(struct nfs4_ff_layout_mirror *mirror, + struct nfs_client *ds_clp, struct inode *inode) +{ + switch (mirror->mirror_ds->ds_versions[0].version) { + case 3: + /* For NFSv3 DS, flavor is set when creating DS connections */ + return ds_clp->cl_rpcclient; + case 4: + return nfs4_find_or_create_ds_client(ds_clp, inode); + default: + BUG(); + } +} + +void ff_layout_free_ds_ioerr(struct list_head *head) +{ + struct nfs4_ff_layout_ds_err *err; + + while (!list_empty(head)) { + err = list_first_entry(head, + struct nfs4_ff_layout_ds_err, + list); + list_del(&err->list); + kfree(err); + } +} + +/* called with inode i_lock held */ +int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head) +{ + struct nfs4_ff_layout_ds_err *err; + __be32 *p; + + list_for_each_entry(err, head, list) { + /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE) + * + array length + deviceid(NFS4_DEVICEID4_SIZE) + * + status(4) + opnum(4) + */ + p = xdr_reserve_space(xdr, + 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); + if (unlikely(!p)) + return -ENOBUFS; + p = xdr_encode_hyper(p, err->offset); + p = xdr_encode_hyper(p, err->length); + p = xdr_encode_opaque_fixed(p, &err->stateid, + NFS4_STATEID_SIZE); + /* Encode 1 error */ + *p++ = cpu_to_be32(1); + p = xdr_encode_opaque_fixed(p, &err->deviceid, + NFS4_DEVICEID4_SIZE); + *p++ = cpu_to_be32(err->status); + *p++ = cpu_to_be32(err->opnum); + dprintk("%s: offset %llu length %llu status %d op %d\n", + __func__, err->offset, err->length, err->status, + err->opnum); + } + + return 0; +} + +static +unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo, + const struct pnfs_layout_range *range, + struct list_head *head, + unsigned int maxnum) +{ + struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo); + struct inode *inode = lo->plh_inode; + struct nfs4_ff_layout_ds_err *err, *n; + unsigned int ret = 0; + + spin_lock(&inode->i_lock); + list_for_each_entry_safe(err, n, &flo->error_list, list) { + if (!pnfs_is_range_intersecting(err->offset, + pnfs_end_offset(err->offset, err->length), + range->offset, + pnfs_end_offset(range->offset, range->length))) + continue; + if (!maxnum) + break; + list_move(&err->list, head); + maxnum--; + ret++; + } + spin_unlock(&inode->i_lock); + return ret; +} + +unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo, + const struct pnfs_layout_range *range, + struct list_head *head, + unsigned int maxnum) +{ + unsigned int ret; + + ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum); + /* If we're over the max, discard all remaining entries */ + if (ret == maxnum) { + LIST_HEAD(discard); + do_layout_fetch_ds_ioerr(lo, range, &discard, -1); + ff_layout_free_ds_ioerr(&discard); + } + return ret; +} + +static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_deviceid_node *devid; + u32 idx; + + for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) { + mirror = FF_LAYOUT_COMP(lseg, idx); + if (mirror) { + if (!mirror->mirror_ds) + return true; + if (IS_ERR(mirror->mirror_ds)) + continue; + devid = &mirror->mirror_ds->id_node; + if (!nfs4_test_deviceid_unavailable(devid)) + return true; + } + } + + return false; +} + +static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_deviceid_node *devid; + u32 idx; + + for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) { + mirror = 
FF_LAYOUT_COMP(lseg, idx);
+ if (!mirror || IS_ERR(mirror->mirror_ds))
+ return false;
+ if (!mirror->mirror_ds)
+ continue;
+ devid = &mirror->mirror_ds->id_node;
+ if (nfs4_test_deviceid_unavailable(devid))
+ return false;
+ }
+
+ return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
+}
+
+static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
+{
+ if (lseg->pls_range.iomode == IOMODE_READ)
+ return ff_read_layout_has_available_ds(lseg);
+ /* Note: RW layout needs all mirrors available */
+ return ff_rw_layout_has_available_ds(lseg);
+}
+
+bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
+{
+ return ff_layout_no_fallback_to_mds(lseg) ||
+ ff_layout_has_available_ds(lseg);
+}
+
+bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
+{
+ return lseg->pls_range.iomode == IOMODE_RW &&
+ ff_layout_no_read_on_rw(lseg);
+}
+
+module_param(dataserver_retrans, uint, 0644);
+MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
+ "retries a request before it attempts further "
+ "recovery action.");
+module_param(dataserver_timeo, uint, 0644);
+MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
+ "NFSv4.1 client waits for a response from a "
+ "data server before it retries an NFS request.");
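
The tunables registered with module_param() above (io_maxretrans, dataserver_retrans, dataserver_timeo) all use mode 0644, so once the driver is loaded the kernel exposes them as root-writable files under /sys/module/<module>/parameters/. The sketch below is only an illustration of adjusting one of them at runtime, assuming the module is loaded under the name nfs_layout_flexfiles, that sysfs is mounted at /sys, and that the caller is privileged; the chosen value (900 tenths of a second) is an arbitrary example, not a recommendation from the driver.

/* Minimal sketch (not part of the driver): raise the flexfiles
 * data-server timeout via sysfs.  Assumes the module is loaded as
 * nfs_layout_flexfiles and the caller may write module parameters.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* module_param(dataserver_timeo, uint, 0644) exposes this file */
	const char *path =
		"/sys/module/nfs_layout_flexfiles/parameters/dataserver_timeo";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* value is in tenths of a second, per the MODULE_PARM_DESC above */
	if (fprintf(f, "900\n") < 0) {
		perror("fprintf");
		fclose(f);
		return EXIT_FAILURE;
	}
	if (fclose(f) != 0) {
		perror("fclose");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}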