Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/file.c      | 10
-rw-r--r--  fs/fuse/inode.c     | 24
-rw-r--r--  fs/fuse/ioctl.c     | 60
-rw-r--r--  fs/fuse/virtio_fs.c | 74
4 files changed, 148 insertions(+), 20 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b57ce41576..f39456c65e 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -935,14 +935,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
}
for (i = 0; i < ap->num_pages; i++) {
- struct page *page = ap->pages[i];
+ struct folio *folio = page_folio(ap->pages[i]);
- if (!err)
- SetPageUptodate(page);
- else
- SetPageError(page);
- unlock_page(page);
- put_page(page);
+ folio_end_read(folio, !err);
+ folio_put(folio);
}
if (ia->ff)
fuse_file_put(ia->ff, false);
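
The hunk above collapses the per-page uptodate/error/unlock/put sequence into folio_end_read(), which folds the uptodate update and the unlock into a single atomic operation on the folio flags (the error flag is simply no longer set; readers test !uptodate instead). A rough behavioural sketch of what is being replaced, for orientation only and not the actual folio_end_read() implementation:

static inline void fuse_end_read_legacy(struct folio *folio, bool success)
{
	if (success)
		folio_mark_uptodate(folio);
	/* no error flag is set anymore: !uptodate now signals the failure */
	folio_unlock(folio);
}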
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 99e44ea7d8..32fe6fa72f 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -755,6 +755,8 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
struct fs_parse_result result;
struct fuse_fs_context *ctx = fsc->fs_private;
int opt;
+ kuid_t kuid;
+ kgid_t kgid;
if (fsc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
/*
@@ -799,16 +801,30 @@ static int fuse_parse_param(struct fs_context *fsc, struct fs_parameter *param)
break;
case OPT_USER_ID:
- ctx->user_id = make_kuid(fsc->user_ns, result.uint_32);
- if (!uid_valid(ctx->user_id))
+ kuid = make_kuid(fsc->user_ns, result.uint_32);
+ if (!uid_valid(kuid))
return invalfc(fsc, "Invalid user_id");
+ /*
+ * The requested uid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kuid_has_mapping(fsc->user_ns, kuid))
+ return invalfc(fsc, "Invalid user_id");
+ ctx->user_id = kuid;
ctx->user_id_present = true;
break;
case OPT_GROUP_ID:
- ctx->group_id = make_kgid(fsc->user_ns, result.uint_32);
- if (!gid_valid(ctx->group_id))
+ kgid = make_kgid(fsc->user_ns, result.uint_32);
+ if (!gid_valid(kgid))
return invalfc(fsc, "Invalid group_id");
+ /*
+ * The requested gid must be representable in the
+ * filesystem's idmapping.
+ */
+ if (!kgid_has_mapping(fsc->user_ns, kgid))
+ return invalfc(fsc, "Invalid group_id");
+ ctx->group_id = kgid;
ctx->group_id_present = true;
break;
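
Both options now follow the same two-step validation. A condensed sketch of the uid side (check_uid_option() is a hypothetical helper, not part of the patch):

static int check_uid_option(struct user_namespace *ns, u32 uid, kuid_t *out)
{
	kuid_t kuid = make_kuid(ns, uid);

	if (!uid_valid(kuid))
		return -EINVAL;	/* uid does not translate at all */
	if (!kuid_has_mapping(ns, kuid))
		return -EINVAL;	/* kuid has no mapping back into ns */
	*out = kuid;
	return 0;
}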
diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c
index 726640fa43..572ce8a82c 100644
--- a/fs/fuse/ioctl.c
+++ b/fs/fuse/ioctl.c
@@ -8,6 +8,7 @@
#include <linux/uio.h>
#include <linux/compat.h>
#include <linux/fileattr.h>
+#include <linux/fsverity.h>
static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args,
struct fuse_ioctl_out *outarg)
@@ -117,6 +118,53 @@ static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
return 0;
}
+/* For fs-verity, determine iov lengths from input */
+static int fuse_setup_measure_verity(unsigned long arg, struct iovec *iov)
+{
+ __u16 digest_size;
+ struct fsverity_digest __user *uarg = (void __user *)arg;
+
+ if (copy_from_user(&digest_size, &uarg->digest_size, sizeof(digest_size)))
+ return -EFAULT;
+
+ if (digest_size > SIZE_MAX - sizeof(struct fsverity_digest))
+ return -EINVAL;
+
+ iov->iov_len = sizeof(struct fsverity_digest) + digest_size;
+
+ return 0;
+}
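
From userspace, FS_IOC_MEASURE_VERITY passes a struct fsverity_digest whose digest_size field is an in/out value: the caller states how much buffer space follows the header, and the result reports the actual digest length. That is why the helper above reads digest_size first and sizes the iovec as header plus payload. A minimal caller sketch (hypothetical example code, not part of the patch):

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>

static int measure_verity(int fd)
{
	/* header plus room for the largest supported digest (SHA-512) */
	struct fsverity_digest *d = malloc(sizeof(*d) + 64);

	if (!d)
		return -1;
	d->digest_size = 64;	/* in: buffer size; out: actual digest size */
	if (ioctl(fd, FS_IOC_MEASURE_VERITY, d)) {
		free(d);
		return -1;
	}
	printf("alg %u, %u digest bytes\n", d->digest_algorithm, d->digest_size);
	free(d);
	return 0;
}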
+
+static int fuse_setup_enable_verity(unsigned long arg, struct iovec *iov,
+ unsigned int *in_iovs)
+{
+ struct fsverity_enable_arg enable;
+ struct fsverity_enable_arg __user *uarg = (void __user *)arg;
+ const __u32 max_buffer_len = FUSE_MAX_MAX_PAGES * PAGE_SIZE;
+
+ if (copy_from_user(&enable, uarg, sizeof(enable)))
+ return -EFAULT;
+
+ if (enable.salt_size > max_buffer_len || enable.sig_size > max_buffer_len)
+ return -ENOMEM;
+
+ if (enable.salt_size > 0) {
+ iov++;
+ (*in_iovs)++;
+
+ iov->iov_base = u64_to_user_ptr(enable.salt_ptr);
+ iov->iov_len = enable.salt_size;
+ }
+
+ if (enable.sig_size > 0) {
+ iov++;
+ (*in_iovs)++;
+
+ iov->iov_base = u64_to_user_ptr(enable.sig_ptr);
+ iov->iov_len = enable.sig_size;
+ }
+ return 0;
+}
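
FS_IOC_ENABLE_VERITY likewise carries up to two variable-length buffers (salt and signature) behind user pointers, which cannot be sized from the ioctl number alone; the helper above appends one iovec per present buffer so the FUSE server receives them inline. A minimal caller sketch with neither buffer in use (hypothetical example code, not part of the patch):

#include <sys/ioctl.h>
#include <linux/fsverity.h>

static int enable_verity_sha256(int fd)
{
	struct fsverity_enable_arg arg = {
		.version = 1,
		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
		.block_size = 4096,
		/* salt_size/salt_ptr and sig_size/sig_ptr stay zero, so
		 * fuse_setup_enable_verity() adds no extra iovecs */
	};

	return ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);	/* fd must be O_RDONLY */
}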
/*
* For ioctls, there is no generic way to determine how much memory
@@ -227,6 +275,18 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
out_iov = iov;
out_iovs = 1;
}
+
+ err = 0;
+ switch (cmd) {
+ case FS_IOC_MEASURE_VERITY:
+ err = fuse_setup_measure_verity(arg, iov);
+ break;
+ case FS_IOC_ENABLE_VERITY:
+ err = fuse_setup_enable_verity(arg, iov, &in_iovs);
+ break;
+ }
+ if (err)
+ goto out;
}
retry:
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index bb3e941b95..1a52a51b6b 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -7,6 +7,8 @@
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/group_cpus.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/module.h>
@@ -67,6 +69,8 @@ struct virtio_fs {
unsigned int num_request_queues; /* number of request queues */
struct dax_device *dax_dev;
+ unsigned int *mq_map; /* index = cpu id, value = request vq id */
+
/* DAX memory window where file contents are mapped */
void *window_kaddr;
phys_addr_t window_phys_addr;
@@ -185,6 +189,7 @@ static void virtio_fs_ktype_release(struct kobject *kobj)
{
struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj);
+ kfree(vfs->mq_map);
kfree(vfs->vqs);
kfree(vfs);
}
@@ -706,6 +711,44 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
}
}
+static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
+{
+ const struct cpumask *mask, *masks;
+ unsigned int q, cpu;
+
+ /* First, attempt to map using existing transport-layer affinities,
+ * e.g. PCIe MSI-X
+ */
+ if (!vdev->config->get_vq_affinity)
+ goto fallback;
+
+ for (q = 0; q < fs->num_request_queues; q++) {
+ mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ fs->mq_map[cpu] = q;
+ }
+
+ return;
+fallback:
+ /* Attempt to map evenly in groups over the CPUs */
+ masks = group_cpus_evenly(fs->num_request_queues);
+ /* If even that fails, default to mapping all CPUs to queue zero */
+ if (!masks) {
+ for_each_possible_cpu(cpu)
+ fs->mq_map[cpu] = 0;
+ return;
+ }
+
+ for (q = 0; q < fs->num_request_queues; q++) {
+ for_each_cpu(cpu, &masks[q])
+ fs->mq_map[cpu] = q;
+ }
+ kfree(masks);
+}
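
Worked example (illustrative assumption, not taken from the patch): with nr_cpu_ids == 4 and num_request_queues == 2, group_cpus_evenly(2) would typically return masks[0] = {0,1} and masks[1] = {2,3}, so the fallback path fills:

	fs->mq_map[0] = 0;	/* CPUs 0 and 1 submit on request vq 0 */
	fs->mq_map[1] = 0;
	fs->mq_map[2] = 1;	/* CPUs 2 and 3 submit on request vq 1 */
	fs->mq_map[3] = 1;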
+
/* Virtqueue interrupt handler */
static void virtio_fs_vq_done(struct virtqueue *vq)
{
@@ -742,6 +785,11 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
{
struct virtqueue **vqs;
vq_callback_t **callbacks;
+ /* Specify pre_vectors to ensure that the queues before the
+ * request queues (e.g. hiprio) don't claim any of the CPUs in
+ * the multi-queue mapping and interrupt affinities
+ */
+ struct irq_affinity desc = { .pre_vectors = VQ_REQUEST };
const char **names;
unsigned int i;
int ret = 0;
@@ -751,6 +799,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
if (fs->num_request_queues == 0)
return -EINVAL;
+ /* Truncate nr of request queues to nr_cpu_ids */
+ fs->num_request_queues = min_t(unsigned int, fs->num_request_queues,
+ nr_cpu_ids);
fs->nvqs = VQ_REQUEST + fs->num_request_queues;
fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
if (!fs->vqs)
@@ -760,7 +811,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
GFP_KERNEL);
names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
- if (!vqs || !callbacks || !names) {
+ fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL,
+ dev_to_node(&vdev->dev));
+ if (!vqs || !callbacks || !names || !fs->mq_map) {
ret = -ENOMEM;
goto out;
}
@@ -780,7 +833,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
names[i] = fs->vqs[i].name;
}
- ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
+ ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc);
if (ret < 0)
goto out;
@@ -792,8 +845,10 @@ out:
kfree(names);
kfree(callbacks);
kfree(vqs);
- if (ret)
+ if (ret) {
kfree(fs->vqs);
+ kfree(fs->mq_map);
+ }
return ret;
}
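
For reference, the queue indices the pre_vectors comment relies on; this enum is assumed from how VQ_HIPRIO and VQ_REQUEST are used throughout the file, as it is not shown in this diff:

enum {
	VQ_HIPRIO,	/* index 0: its vector is reserved via .pre_vectors */
	VQ_REQUEST,	/* first request vq; vectors from here on are spread
			 * across CPUs and read back via get_vq_affinity() */
};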
@@ -939,7 +994,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
if (ret < 0)
goto out;
- /* TODO vq affinity */
+ virtio_fs_map_queues(vdev, fs);
ret = virtio_fs_setup_dax(vdev, fs);
if (ret < 0)
@@ -1023,7 +1078,6 @@ static const unsigned int feature_table[] = {};
static struct virtio_driver virtio_fs_driver = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.feature_table = feature_table,
.feature_table_size = ARRAY_SIZE(feature_table),
@@ -1288,7 +1342,7 @@ out:
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
- unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
+ unsigned int queue_id;
struct virtio_fs *fs;
struct fuse_req *req;
struct virtio_fs_vq *fsvq;
@@ -1302,11 +1356,13 @@ __releases(fiq->lock)
spin_unlock(&fiq->lock);
fs = fiq->priv;
+ queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()];
- pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
- __func__, req->in.h.opcode, req->in.h.unique,
+ pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n",
+ __func__, req->in.h.opcode, req->in.h.unique,
req->in.h.nodeid, req->in.h.len,
- fuse_len_args(req->args->out_numargs, req->args->out_args));
+ fuse_len_args(req->args->out_numargs, req->args->out_args),
+ queue_id);
fsvq = &fs->vqs[queue_id];
ret = virtio_fs_enqueue_req(fsvq, req, false);
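
A hypothetical accessor (an assumption for illustration, not in the patch) that captures the per-CPU lookup the hunk above open-codes:

static inline struct virtio_fs_vq *virtio_fs_this_cpu_vq(struct virtio_fs *fs)
{
	/* mq_map was sized nr_cpu_ids in virtio_fs_setup_vqs(), so any
	 * CPU id returned by raw_smp_processor_id() indexes safely */
	return &fs->vqs[VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]];
}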