path: root/fs/verity

Diffstat:
-rw-r--r--  fs/verity/Kconfig              60
-rw-r--r--  fs/verity/Makefile             11
-rw-r--r--  fs/verity/enable.c            419
-rw-r--r--  fs/verity/fsverity_private.h  154
-rw-r--r--  fs/verity/hash_algs.c         328
-rw-r--r--  fs/verity/init.c               61
-rw-r--r--  fs/verity/measure.c           102
-rw-r--r--  fs/verity/open.c              404
-rw-r--r--  fs/verity/read_metadata.c     194
-rw-r--r--  fs/verity/signature.c         168
-rw-r--r--  fs/verity/verify.c            291
11 files changed, 2192 insertions(+), 0 deletions(-)
diff --git a/fs/verity/Kconfig b/fs/verity/Kconfig
new file mode 100644
index 000000000..aad1f1d99
--- /dev/null
+++ b/fs/verity/Kconfig
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config FS_VERITY
+ bool "FS Verity (read-only file-based authenticity protection)"
+ select CRYPTO
+ select CRYPTO_HASH_INFO
+ # SHA-256 is implied as it's intended to be the default hash algorithm.
+ # To avoid bloat, other wanted algorithms must be selected explicitly.
+ # Note that CRYPTO_SHA256 denotes the generic C implementation, but
+ # some architectures provide optimized implementations of the same
+ # algorithm that may be used instead. In this case, CRYPTO_SHA256 may
+ # be omitted even if SHA-256 is being used.
+ imply CRYPTO_SHA256
+ help
+ This option enables fs-verity. fs-verity is the dm-verity
+ mechanism implemented at the file level. On supported
+ filesystems (currently ext4, f2fs, and btrfs), userspace can
+ use an ioctl to enable verity for a file, which causes the
+ filesystem to build a Merkle tree for the file. The filesystem
+ will then transparently verify any data read from the file
+ against the Merkle tree. The file is also made read-only.
+
+ This serves as an integrity check, but the availability of the
+ Merkle tree root hash also allows efficiently supporting
+ various use cases where normally the whole file would need to
+ be hashed at once, such as: (a) auditing (logging the file's
+ hash), or (b) authenticity verification (comparing the hash
+ against a known good value, e.g. from a digital signature).
+
+ fs-verity is especially useful on large files where not all
+ the contents may actually be needed. Also, fs-verity verifies
+ data each time it is paged back in, which provides better
+ protection against malicious disks vs. an ahead-of-time hash.
+
+ If unsure, say N.
+
+config FS_VERITY_DEBUG
+ bool "FS Verity debugging"
+ depends on FS_VERITY
+ help
+ Enable debugging messages related to fs-verity by default.
+
+ Say N unless you are an fs-verity developer.
+
+config FS_VERITY_BUILTIN_SIGNATURES
+ bool "FS Verity builtin signature support"
+ depends on FS_VERITY
+ select SYSTEM_DATA_VERIFICATION
+ help
+ Support verifying signatures of verity files against the X.509
+ certificates that have been loaded into the ".fs-verity"
+ kernel keyring.
+
+ This is meant as a relatively simple mechanism that can be
+ used to provide an authenticity guarantee for verity files, as
+ an alternative to IMA appraisal. Userspace programs still
+ need to check that the verity bit is set in order to get an
+ authenticity guarantee.
+
+ If unsure, say N.
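
To illustrate the enable flow described in the help text above, here is a minimal userspace sketch (illustrative only, not part of this patch) that asks the kernel to build the Merkle tree for a file. It assumes the UAPI definitions from <linux/fsverity.h> and a 4096-byte page size, and keeps error handling to a minimum.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct fsverity_enable_arg arg;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s FILE\n", argv[0]);
		return 1;
	}
	/* The fd must be read-only; writable fds are locked out by the kernel. */
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&arg, 0, sizeof(arg));
	arg.version = 1;
	arg.hash_algorithm = FS_VERITY_HASH_ALG_SHA256;
	arg.block_size = 4096;	/* assumed to match PAGE_SIZE */
	/* No salt and no signature: salt_size/salt_ptr/sig_size/sig_ptr stay 0 */
	if (ioctl(fd, FS_IOC_ENABLE_VERITY, &arg) != 0) {
		perror("FS_IOC_ENABLE_VERITY");
		return 1;
	}
	close(fd);
	return 0;
}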
diff --git a/fs/verity/Makefile b/fs/verity/Makefile
new file mode 100644
index 000000000..435559a4f
--- /dev/null
+++ b/fs/verity/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_FS_VERITY) += enable.o \
+ hash_algs.o \
+ init.o \
+ measure.o \
+ open.o \
+ read_metadata.o \
+ verify.o
+
+obj-$(CONFIG_FS_VERITY_BUILTIN_SIGNATURES) += signature.o
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
new file mode 100644
index 000000000..400c264bf
--- /dev/null
+++ b/fs/verity/enable.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ioctl to enable verity on a file
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <crypto/hash.h>
+#include <linux/backing-dev.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+/*
+ * Read a file data page for Merkle tree construction. Do aggressive readahead,
+ * since we're sequentially reading the entire file.
+ */
+static struct page *read_file_data_page(struct file *file, pgoff_t index,
+ struct file_ra_state *ra,
+ unsigned long remaining_pages)
+{
+ DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, index);
+ struct folio *folio;
+
+ folio = __filemap_get_folio(ractl.mapping, index, FGP_ACCESSED, 0);
+ if (!folio || !folio_test_uptodate(folio)) {
+ if (folio)
+ folio_put(folio);
+ else
+ page_cache_sync_ra(&ractl, remaining_pages);
+ folio = read_cache_folio(ractl.mapping, index, NULL, file);
+ if (IS_ERR(folio))
+ return &folio->page;
+ }
+ if (folio_test_readahead(folio))
+ page_cache_async_ra(&ractl, folio, remaining_pages);
+ return folio_file_page(folio, index);
+}
+
+static int build_merkle_tree_level(struct file *filp, unsigned int level,
+ u64 num_blocks_to_hash,
+ const struct merkle_tree_params *params,
+ u8 *pending_hashes,
+ struct ahash_request *req)
+{
+ struct inode *inode = file_inode(filp);
+ const struct fsverity_operations *vops = inode->i_sb->s_vop;
+ struct file_ra_state ra = { 0 };
+ unsigned int pending_size = 0;
+ u64 dst_block_num;
+ u64 i;
+ int err;
+
+ if (WARN_ON(params->block_size != PAGE_SIZE)) /* checked earlier too */
+ return -EINVAL;
+
+ if (level < params->num_levels) {
+ dst_block_num = params->level_start[level];
+ } else {
+ if (WARN_ON(num_blocks_to_hash != 1))
+ return -EINVAL;
+ dst_block_num = 0; /* unused */
+ }
+
+ file_ra_state_init(&ra, filp->f_mapping);
+
+ for (i = 0; i < num_blocks_to_hash; i++) {
+ struct page *src_page;
+
+ if ((pgoff_t)i % 10000 == 0 || i + 1 == num_blocks_to_hash)
+ pr_debug("Hashing block %llu of %llu for level %u\n",
+ i + 1, num_blocks_to_hash, level);
+
+ if (level == 0) {
+ /* Leaf: hashing a data block */
+ src_page = read_file_data_page(filp, i, &ra,
+ num_blocks_to_hash - i);
+ if (IS_ERR(src_page)) {
+ err = PTR_ERR(src_page);
+ fsverity_err(inode,
+ "Error %d reading data page %llu",
+ err, i);
+ return err;
+ }
+ } else {
+ unsigned long num_ra_pages =
+ min_t(unsigned long, num_blocks_to_hash - i,
+ inode->i_sb->s_bdi->io_pages);
+
+ /* Non-leaf: hashing hash block from level below */
+ src_page = vops->read_merkle_tree_page(inode,
+ params->level_start[level - 1] + i,
+ num_ra_pages);
+ if (IS_ERR(src_page)) {
+ err = PTR_ERR(src_page);
+ fsverity_err(inode,
+ "Error %d reading Merkle tree page %llu",
+ err, params->level_start[level - 1] + i);
+ return err;
+ }
+ }
+
+ err = fsverity_hash_page(params, inode, req, src_page,
+ &pending_hashes[pending_size]);
+ put_page(src_page);
+ if (err)
+ return err;
+ pending_size += params->digest_size;
+
+ if (level == params->num_levels) /* Root hash? */
+ return 0;
+
+ if (pending_size + params->digest_size > params->block_size ||
+ i + 1 == num_blocks_to_hash) {
+ /* Flush the pending hash block */
+ memset(&pending_hashes[pending_size], 0,
+ params->block_size - pending_size);
+ err = vops->write_merkle_tree_block(inode,
+ pending_hashes,
+ dst_block_num,
+ params->log_blocksize);
+ if (err) {
+ fsverity_err(inode,
+ "Error %d writing Merkle tree block %llu",
+ err, dst_block_num);
+ return err;
+ }
+ dst_block_num++;
+ pending_size = 0;
+ }
+
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ cond_resched();
+ }
+ return 0;
+}
+
+/*
+ * Build the Merkle tree for the given file using the given parameters, and
+ * return the root hash in @root_hash.
+ *
+ * The tree is written to a filesystem-specific location as determined by the
+ * ->write_merkle_tree_block() method. However, the blocks that comprise the
+ * tree are the same for all filesystems.
+ */
+static int build_merkle_tree(struct file *filp,
+ const struct merkle_tree_params *params,
+ u8 *root_hash)
+{
+ struct inode *inode = file_inode(filp);
+ u8 *pending_hashes;
+ struct ahash_request *req;
+ u64 blocks;
+ unsigned int level;
+ int err = -ENOMEM;
+
+ if (inode->i_size == 0) {
+ /* Empty file is a special case; root hash is all 0's */
+ memset(root_hash, 0, params->digest_size);
+ return 0;
+ }
+
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(params->hash_alg, GFP_KERNEL);
+
+ pending_hashes = kmalloc(params->block_size, GFP_KERNEL);
+ if (!pending_hashes)
+ goto out;
+
+ /*
+ * Build each level of the Merkle tree, starting at the leaf level
+ * (level 0) and ascending to the root node (level 'num_levels - 1').
+ * Then at the end (level 'num_levels'), calculate the root hash.
+ */
+ blocks = ((u64)inode->i_size + params->block_size - 1) >>
+ params->log_blocksize;
+ for (level = 0; level <= params->num_levels; level++) {
+ err = build_merkle_tree_level(filp, level, blocks, params,
+ pending_hashes, req);
+ if (err)
+ goto out;
+ blocks = (blocks + params->hashes_per_block - 1) >>
+ params->log_arity;
+ }
+ memcpy(root_hash, pending_hashes, params->digest_size);
+ err = 0;
+out:
+ kfree(pending_hashes);
+ fsverity_free_hash_request(params->hash_alg, req);
+ return err;
+}
+
+static int enable_verity(struct file *filp,
+ const struct fsverity_enable_arg *arg)
+{
+ struct inode *inode = file_inode(filp);
+ const struct fsverity_operations *vops = inode->i_sb->s_vop;
+ struct merkle_tree_params params = { };
+ struct fsverity_descriptor *desc;
+ size_t desc_size = struct_size(desc, signature, arg->sig_size);
+ struct fsverity_info *vi;
+ int err;
+
+ /* Start initializing the fsverity_descriptor */
+ desc = kzalloc(desc_size, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ desc->version = 1;
+ desc->hash_algorithm = arg->hash_algorithm;
+ desc->log_blocksize = ilog2(arg->block_size);
+
+ /* Get the salt if the user provided one */
+ if (arg->salt_size &&
+ copy_from_user(desc->salt, u64_to_user_ptr(arg->salt_ptr),
+ arg->salt_size)) {
+ err = -EFAULT;
+ goto out;
+ }
+ desc->salt_size = arg->salt_size;
+
+ /* Get the signature if the user provided one */
+ if (arg->sig_size &&
+ copy_from_user(desc->signature, u64_to_user_ptr(arg->sig_ptr),
+ arg->sig_size)) {
+ err = -EFAULT;
+ goto out;
+ }
+ desc->sig_size = cpu_to_le32(arg->sig_size);
+
+ desc->data_size = cpu_to_le64(inode->i_size);
+
+ /* Prepare the Merkle tree parameters */
+ err = fsverity_init_merkle_tree_params(&params, inode,
+ arg->hash_algorithm,
+ desc->log_blocksize,
+ desc->salt, desc->salt_size);
+ if (err)
+ goto out;
+
+ /*
+ * Start enabling verity on this file, serialized by the inode lock.
+ * Fail if verity is already enabled or is already being enabled.
+ */
+ inode_lock(inode);
+ if (IS_VERITY(inode))
+ err = -EEXIST;
+ else
+ err = vops->begin_enable_verity(filp);
+ inode_unlock(inode);
+ if (err)
+ goto out;
+
+ /*
+ * Build the Merkle tree. Don't hold the inode lock during this, since
+ * on huge files this may take a very long time and we don't want to
+ * force unrelated syscalls like chown() to block forever. We don't
+ * need the inode lock here because deny_write_access() already prevents
+ * the file from being written to or truncated, and we still serialize
+ * ->begin_enable_verity() and ->end_enable_verity() using the inode
+ * lock and only allow one process to be here at a time on a given file.
+ */
+ pr_debug("Building Merkle tree...\n");
+ BUILD_BUG_ON(sizeof(desc->root_hash) < FS_VERITY_MAX_DIGEST_SIZE);
+ err = build_merkle_tree(filp, &params, desc->root_hash);
+ if (err) {
+ fsverity_err(inode, "Error %d building Merkle tree", err);
+ goto rollback;
+ }
+ pr_debug("Done building Merkle tree. Root hash is %s:%*phN\n",
+ params.hash_alg->name, params.digest_size, desc->root_hash);
+
+ /*
+ * Create the fsverity_info. Don't bother trying to save work by
+ * reusing the merkle_tree_params from above. Instead, just create the
+ * fsverity_info from the fsverity_descriptor as if it were just loaded
+ * from disk. This is simpler, and it serves as an extra check that the
+ * metadata we're writing is valid before actually enabling verity.
+ */
+ vi = fsverity_create_info(inode, desc);
+ if (IS_ERR(vi)) {
+ err = PTR_ERR(vi);
+ goto rollback;
+ }
+
+ if (arg->sig_size)
+ pr_debug("Storing a %u-byte PKCS#7 signature alongside the file\n",
+ arg->sig_size);
+
+ /*
+ * Tell the filesystem to finish enabling verity on the file.
+ * Serialized with ->begin_enable_verity() by the inode lock.
+ */
+ inode_lock(inode);
+ err = vops->end_enable_verity(filp, desc, desc_size, params.tree_size);
+ inode_unlock(inode);
+ if (err) {
+ fsverity_err(inode, "%ps() failed with err %d",
+ vops->end_enable_verity, err);
+ fsverity_free_info(vi);
+ } else if (WARN_ON(!IS_VERITY(inode))) {
+ err = -EINVAL;
+ fsverity_free_info(vi);
+ } else {
+ /* Successfully enabled verity */
+
+ /*
+ * Readers can start using ->i_verity_info immediately, so it
+ * can't be rolled back once set. So don't set it until just
+ * after the filesystem has successfully enabled verity.
+ */
+ fsverity_set_info(inode, vi);
+ }
+out:
+ kfree(params.hashstate);
+ kfree(desc);
+ return err;
+
+rollback:
+ inode_lock(inode);
+ (void)vops->end_enable_verity(filp, NULL, 0, params.tree_size);
+ inode_unlock(inode);
+ goto out;
+}
+
+/**
+ * fsverity_ioctl_enable() - enable verity on a file
+ * @filp: file to enable verity on
+ * @uarg: user pointer to fsverity_enable_arg
+ *
+ * Enable fs-verity on a file. See the "FS_IOC_ENABLE_VERITY" section of
+ * Documentation/filesystems/fsverity.rst for the documentation.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_ioctl_enable(struct file *filp, const void __user *uarg)
+{
+ struct inode *inode = file_inode(filp);
+ struct fsverity_enable_arg arg;
+ int err;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.version != 1)
+ return -EINVAL;
+
+ if (arg.__reserved1 ||
+ memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
+ return -EINVAL;
+
+ if (arg.block_size != PAGE_SIZE)
+ return -EINVAL;
+
+ if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt))
+ return -EMSGSIZE;
+
+ if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
+ return -EMSGSIZE;
+
+ /*
+ * Require a regular file with write access. But the actual fd must
+ * still be readonly so that we can lock out all writers. This is
+ * needed to guarantee that no writable fds exist to the file once it
+ * has verity enabled, and to stabilize the data being hashed.
+ */
+
+ err = file_permission(filp, MAY_WRITE);
+ if (err)
+ return err;
+
+ if (IS_APPEND(inode))
+ return -EPERM;
+
+ if (S_ISDIR(inode->i_mode))
+ return -EISDIR;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ err = mnt_want_write_file(filp);
+ if (err) /* -EROFS */
+ return err;
+
+ err = deny_write_access(filp);
+ if (err) /* -ETXTBSY */
+ goto out_drop_write;
+
+ err = enable_verity(filp, &arg);
+
+ /*
+ * We no longer drop the inode's pagecache after enabling verity. This
+ * used to be done to try to avoid a race condition where pages could be
+ * evicted after being used in the Merkle tree construction, then
+ * re-instantiated by a concurrent read. Such pages are unverified, and
+ * the backing storage could have filled them with different content, so
+ * they shouldn't be used to fulfill reads once verity is enabled.
+ *
+ * But, dropping the pagecache has a big performance impact, and it
+ * doesn't fully solve the race condition anyway. So for those reasons,
+ * and also because this race condition isn't very important relatively
+ * speaking (especially for small-ish files, where the chance of a page
+ * being used, evicted, *and* re-instantiated all while enabling verity
+ * is quite small), we no longer drop the inode's pagecache.
+ */
+
+ /*
+ * allow_write_access() is needed to pair with deny_write_access().
+ * Regardless, the filesystem won't allow writing to verity files.
+ */
+ allow_write_access(filp);
+out_drop_write:
+ mnt_drop_write_file(filp);
+ return err;
+}
+EXPORT_SYMBOL_GPL(fsverity_ioctl_enable);
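
As a rough sanity check on the level arithmetic used in build_merkle_tree() above, the following standalone sketch walks the same "divide by hashes_per_block, rounding up" progression for a hypothetical 1 GiB file with 4096-byte blocks and SHA-256 (32-byte digests, 128 hashes per block). The numbers it prints (3 levels, 2065 hash blocks, roughly 8 MiB of tree, under 1% overhead) are illustrative and not taken from the patch.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t blocks = (1ULL << 30) >> 12;	/* 1 GiB / 4 KiB = 262144 data blocks */
	unsigned int log_arity = 12 - 5;	/* log2(4096 / 32) = 7, i.e. 128 hashes/block */
	uint64_t tree_blocks = 0;
	unsigned int levels = 0;

	while (blocks > 1) {
		blocks = (blocks + (1ULL << log_arity) - 1) >> log_arity;
		tree_blocks += blocks;
		levels++;
	}
	/* Prints "3 levels, 2065 hash blocks" for the parameters above */
	printf("%u levels, %llu hash blocks\n", levels,
	       (unsigned long long)tree_blocks);
	return 0;
}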
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h
new file mode 100644
index 000000000..dbe1ce5b4
--- /dev/null
+++ b/fs/verity/fsverity_private.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * fs-verity: read-only file-based authenticity protection
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef _FSVERITY_PRIVATE_H
+#define _FSVERITY_PRIVATE_H
+
+#ifdef CONFIG_FS_VERITY_DEBUG
+#define DEBUG
+#endif
+
+#define pr_fmt(fmt) "fs-verity: " fmt
+
+#include <linux/fsverity.h>
+#include <linux/mempool.h>
+
+struct ahash_request;
+
+/*
+ * Implementation limit: maximum depth of the Merkle tree. For now 8 is plenty;
+ * it's enough for over U64_MAX bytes of data using SHA-256 and 4K blocks.
+ */
+#define FS_VERITY_MAX_LEVELS 8
+
+/* A hash algorithm supported by fs-verity */
+struct fsverity_hash_alg {
+ struct crypto_ahash *tfm; /* hash tfm, allocated on demand */
+ const char *name; /* crypto API name, e.g. sha256 */
+ unsigned int digest_size; /* digest size in bytes, e.g. 32 for SHA-256 */
+ unsigned int block_size; /* block size in bytes, e.g. 64 for SHA-256 */
+ mempool_t req_pool; /* mempool with a preallocated hash request */
+};
+
+/* Merkle tree parameters: hash algorithm, initial hash state, and topology */
+struct merkle_tree_params {
+ struct fsverity_hash_alg *hash_alg; /* the hash algorithm */
+ const u8 *hashstate; /* initial hash state or NULL */
+ unsigned int digest_size; /* same as hash_alg->digest_size */
+ unsigned int block_size; /* size of data and tree blocks */
+ unsigned int hashes_per_block; /* number of hashes per tree block */
+ unsigned int log_blocksize; /* log2(block_size) */
+ unsigned int log_arity; /* log2(hashes_per_block) */
+ unsigned int num_levels; /* number of levels in Merkle tree */
+ u64 tree_size; /* Merkle tree size in bytes */
+ unsigned long level0_blocks; /* number of blocks in tree level 0 */
+
+ /*
+ * Starting block index for each tree level, ordered from leaf level (0)
+ * to root level ('num_levels - 1')
+ */
+ u64 level_start[FS_VERITY_MAX_LEVELS];
+};
+
+/*
+ * fsverity_info - cached verity metadata for an inode
+ *
+ * When a verity file is first opened, an instance of this struct is allocated
+ * and stored in ->i_verity_info; it remains until the inode is evicted. It
+ * caches information about the Merkle tree that's needed to efficiently verify
+ * data read from the file. It also caches the file digest. The Merkle tree
+ * pages themselves are not cached here, but the filesystem may cache them.
+ */
+struct fsverity_info {
+ struct merkle_tree_params tree_params;
+ u8 root_hash[FS_VERITY_MAX_DIGEST_SIZE];
+ u8 file_digest[FS_VERITY_MAX_DIGEST_SIZE];
+ const struct inode *inode;
+};
+
+
+#define FS_VERITY_MAX_SIGNATURE_SIZE (FS_VERITY_MAX_DESCRIPTOR_SIZE - \
+ sizeof(struct fsverity_descriptor))
+
+/* hash_algs.c */
+
+extern struct fsverity_hash_alg fsverity_hash_algs[];
+
+struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+ unsigned int num);
+struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
+ gfp_t gfp_flags);
+void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
+ struct ahash_request *req);
+const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
+ const u8 *salt, size_t salt_size);
+int fsverity_hash_page(const struct merkle_tree_params *params,
+ const struct inode *inode,
+ struct ahash_request *req, struct page *page, u8 *out);
+int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
+ const void *data, size_t size, u8 *out);
+void __init fsverity_check_hash_algs(void);
+
+/* init.c */
+
+void __printf(3, 4) __cold
+fsverity_msg(const struct inode *inode, const char *level,
+ const char *fmt, ...);
+
+#define fsverity_warn(inode, fmt, ...) \
+ fsverity_msg((inode), KERN_WARNING, fmt, ##__VA_ARGS__)
+#define fsverity_err(inode, fmt, ...) \
+ fsverity_msg((inode), KERN_ERR, fmt, ##__VA_ARGS__)
+
+/* open.c */
+
+int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
+ const struct inode *inode,
+ unsigned int hash_algorithm,
+ unsigned int log_blocksize,
+ const u8 *salt, size_t salt_size);
+
+struct fsverity_info *fsverity_create_info(const struct inode *inode,
+ struct fsverity_descriptor *desc);
+
+void fsverity_set_info(struct inode *inode, struct fsverity_info *vi);
+
+void fsverity_free_info(struct fsverity_info *vi);
+
+int fsverity_get_descriptor(struct inode *inode,
+ struct fsverity_descriptor **desc_ret);
+
+int __init fsverity_init_info_cache(void);
+void __init fsverity_exit_info_cache(void);
+
+/* signature.c */
+
+#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+int fsverity_verify_signature(const struct fsverity_info *vi,
+ const u8 *signature, size_t sig_size);
+
+int __init fsverity_init_signature(void);
+#else /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
+static inline int
+fsverity_verify_signature(const struct fsverity_info *vi,
+ const u8 *signature, size_t sig_size)
+{
+ return 0;
+}
+
+static inline int fsverity_init_signature(void)
+{
+ return 0;
+}
+#endif /* !CONFIG_FS_VERITY_BUILTIN_SIGNATURES */
+
+/* verify.c */
+
+int __init fsverity_init_workqueue(void);
+void __init fsverity_exit_workqueue(void);
+
+#endif /* _FSVERITY_PRIVATE_H */
diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c
new file mode 100644
index 000000000..71d0fccb6
--- /dev/null
+++ b/fs/verity/hash_algs.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs-verity hash algorithms
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <crypto/hash.h>
+#include <linux/scatterlist.h>
+
+/* The hash algorithms supported by fs-verity */
+struct fsverity_hash_alg fsverity_hash_algs[] = {
+ [FS_VERITY_HASH_ALG_SHA256] = {
+ .name = "sha256",
+ .digest_size = SHA256_DIGEST_SIZE,
+ .block_size = SHA256_BLOCK_SIZE,
+ },
+ [FS_VERITY_HASH_ALG_SHA512] = {
+ .name = "sha512",
+ .digest_size = SHA512_DIGEST_SIZE,
+ .block_size = SHA512_BLOCK_SIZE,
+ },
+};
+
+static DEFINE_MUTEX(fsverity_hash_alg_init_mutex);
+
+/**
+ * fsverity_get_hash_alg() - validate and prepare a hash algorithm
+ * @inode: optional inode for logging purposes
+ * @num: the hash algorithm number
+ *
+ * Get the struct fsverity_hash_alg for the given hash algorithm number, and
+ * ensure it has a hash transform ready to go. The hash transforms are
+ * allocated on-demand so that we don't waste resources unnecessarily, and
+ * because the crypto modules may be initialized later than fs/verity/.
+ *
+ * Return: pointer to the hash alg on success, else an ERR_PTR()
+ */
+struct fsverity_hash_alg *fsverity_get_hash_alg(const struct inode *inode,
+ unsigned int num)
+{
+ struct fsverity_hash_alg *alg;
+ struct crypto_ahash *tfm;
+ int err;
+
+ if (num >= ARRAY_SIZE(fsverity_hash_algs) ||
+ !fsverity_hash_algs[num].name) {
+ fsverity_warn(inode, "Unknown hash algorithm number: %u", num);
+ return ERR_PTR(-EINVAL);
+ }
+ alg = &fsverity_hash_algs[num];
+
+ /* pairs with smp_store_release() below */
+ if (likely(smp_load_acquire(&alg->tfm) != NULL))
+ return alg;
+
+ mutex_lock(&fsverity_hash_alg_init_mutex);
+
+ if (alg->tfm != NULL)
+ goto out_unlock;
+
+ /*
+ * Using the shash API would make things a bit simpler, but the ahash
+ * API is preferable as it allows the use of crypto accelerators.
+ */
+ tfm = crypto_alloc_ahash(alg->name, 0, 0);
+ if (IS_ERR(tfm)) {
+ if (PTR_ERR(tfm) == -ENOENT) {
+ fsverity_warn(inode,
+ "Missing crypto API support for hash algorithm \"%s\"",
+ alg->name);
+ alg = ERR_PTR(-ENOPKG);
+ goto out_unlock;
+ }
+ fsverity_err(inode,
+ "Error allocating hash algorithm \"%s\": %ld",
+ alg->name, PTR_ERR(tfm));
+ alg = ERR_CAST(tfm);
+ goto out_unlock;
+ }
+
+ err = -EINVAL;
+ if (WARN_ON(alg->digest_size != crypto_ahash_digestsize(tfm)))
+ goto err_free_tfm;
+ if (WARN_ON(alg->block_size != crypto_ahash_blocksize(tfm)))
+ goto err_free_tfm;
+
+ err = mempool_init_kmalloc_pool(&alg->req_pool, 1,
+ sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(tfm));
+ if (err)
+ goto err_free_tfm;
+
+ pr_info("%s using implementation \"%s\"\n",
+ alg->name, crypto_ahash_driver_name(tfm));
+
+ /* pairs with smp_load_acquire() above */
+ smp_store_release(&alg->tfm, tfm);
+ goto out_unlock;
+
+err_free_tfm:
+ crypto_free_ahash(tfm);
+ alg = ERR_PTR(err);
+out_unlock:
+ mutex_unlock(&fsverity_hash_alg_init_mutex);
+ return alg;
+}
+
+/**
+ * fsverity_alloc_hash_request() - allocate a hash request object
+ * @alg: the hash algorithm for which to allocate the request
+ * @gfp_flags: memory allocation flags
+ *
+ * This is mempool-backed, so this never fails if __GFP_DIRECT_RECLAIM is set in
+ * @gfp_flags. However, in that case this might need to wait for all
+ * previously-allocated requests to be freed. So to avoid deadlocks, callers
+ * must never need multiple requests at a time to make forward progress.
+ *
+ * Return: the request object on success; NULL on failure (but see above)
+ */
+struct ahash_request *fsverity_alloc_hash_request(struct fsverity_hash_alg *alg,
+ gfp_t gfp_flags)
+{
+ struct ahash_request *req = mempool_alloc(&alg->req_pool, gfp_flags);
+
+ if (req)
+ ahash_request_set_tfm(req, alg->tfm);
+ return req;
+}
+
+/**
+ * fsverity_free_hash_request() - free a hash request object
+ * @alg: the hash algorithm
+ * @req: the hash request object to free
+ */
+void fsverity_free_hash_request(struct fsverity_hash_alg *alg,
+ struct ahash_request *req)
+{
+ if (req) {
+ ahash_request_zero(req);
+ mempool_free(req, &alg->req_pool);
+ }
+}
+
+/**
+ * fsverity_prepare_hash_state() - precompute the initial hash state
+ * @alg: hash algorithm
+ * @salt: a salt which is to be prepended to all data to be hashed
+ * @salt_size: salt size in bytes, possibly 0
+ *
+ * Return: NULL if the salt is empty, otherwise the kmalloc()'ed precomputed
+ * initial hash state on success or an ERR_PTR() on failure.
+ */
+const u8 *fsverity_prepare_hash_state(struct fsverity_hash_alg *alg,
+ const u8 *salt, size_t salt_size)
+{
+ u8 *hashstate = NULL;
+ struct ahash_request *req = NULL;
+ u8 *padded_salt = NULL;
+ size_t padded_salt_size;
+ struct scatterlist sg;
+ DECLARE_CRYPTO_WAIT(wait);
+ int err;
+
+ if (salt_size == 0)
+ return NULL;
+
+ hashstate = kmalloc(crypto_ahash_statesize(alg->tfm), GFP_KERNEL);
+ if (!hashstate)
+ return ERR_PTR(-ENOMEM);
+
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
+
+ /*
+ * Zero-pad the salt to the next multiple of the input size of the hash
+ * algorithm's compression function, e.g. 64 bytes for SHA-256 or 128
+ * bytes for SHA-512. This ensures that the hash algorithm won't have
+ * any bytes buffered internally after processing the salt, thus making
+ * salted hashing just as fast as unsalted hashing.
+ */
+ padded_salt_size = round_up(salt_size, alg->block_size);
+ padded_salt = kzalloc(padded_salt_size, GFP_KERNEL);
+ if (!padded_salt) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+ memcpy(padded_salt, salt, salt_size);
+
+ sg_init_one(&sg, padded_salt, padded_salt_size);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ ahash_request_set_crypt(req, &sg, NULL, padded_salt_size);
+
+ err = crypto_wait_req(crypto_ahash_init(req), &wait);
+ if (err)
+ goto err_free;
+
+ err = crypto_wait_req(crypto_ahash_update(req), &wait);
+ if (err)
+ goto err_free;
+
+ err = crypto_ahash_export(req, hashstate);
+ if (err)
+ goto err_free;
+out:
+ fsverity_free_hash_request(alg, req);
+ kfree(padded_salt);
+ return hashstate;
+
+err_free:
+ kfree(hashstate);
+ hashstate = ERR_PTR(err);
+ goto out;
+}
+
+/**
+ * fsverity_hash_page() - hash a single data or hash page
+ * @params: the Merkle tree's parameters
+ * @inode: inode for which the hashing is being done
+ * @req: preallocated hash request
+ * @page: the page to hash
+ * @out: output digest, size 'params->digest_size' bytes
+ *
+ * Hash a single data or hash block, assuming block_size == PAGE_SIZE.
+ * The hash is salted if a salt is specified in the Merkle tree parameters.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_hash_page(const struct merkle_tree_params *params,
+ const struct inode *inode,
+ struct ahash_request *req, struct page *page, u8 *out)
+{
+ struct scatterlist sg;
+ DECLARE_CRYPTO_WAIT(wait);
+ int err;
+
+ if (WARN_ON(params->block_size != PAGE_SIZE))
+ return -EINVAL;
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, PAGE_SIZE, 0);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ ahash_request_set_crypt(req, &sg, out, PAGE_SIZE);
+
+ if (params->hashstate) {
+ err = crypto_ahash_import(req, params->hashstate);
+ if (err) {
+ fsverity_err(inode,
+ "Error %d importing hash state", err);
+ return err;
+ }
+ err = crypto_ahash_finup(req);
+ } else {
+ err = crypto_ahash_digest(req);
+ }
+
+ err = crypto_wait_req(err, &wait);
+ if (err)
+ fsverity_err(inode, "Error %d computing page hash", err);
+ return err;
+}
+
+/**
+ * fsverity_hash_buffer() - hash some data
+ * @alg: the hash algorithm to use
+ * @data: the data to hash
+ * @size: size of data to hash, in bytes
+ * @out: output digest, size 'alg->digest_size' bytes
+ *
+ * Hash some data which is located in physically contiguous memory (i.e. memory
+ * allocated by kmalloc(), not by vmalloc()). No salt is used.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_hash_buffer(struct fsverity_hash_alg *alg,
+ const void *data, size_t size, u8 *out)
+{
+ struct ahash_request *req;
+ struct scatterlist sg;
+ DECLARE_CRYPTO_WAIT(wait);
+ int err;
+
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(alg, GFP_KERNEL);
+
+ sg_init_one(&sg, data, size);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ ahash_request_set_crypt(req, &sg, out, size);
+
+ err = crypto_wait_req(crypto_ahash_digest(req), &wait);
+
+ fsverity_free_hash_request(alg, req);
+ return err;
+}
+
+void __init fsverity_check_hash_algs(void)
+{
+ size_t i;
+
+ /*
+ * Sanity check the hash algorithms (could be a build-time check, but
+ * they're in an array)
+ */
+ for (i = 0; i < ARRAY_SIZE(fsverity_hash_algs); i++) {
+ const struct fsverity_hash_alg *alg = &fsverity_hash_algs[i];
+
+ if (!alg->name)
+ continue;
+
+ BUG_ON(alg->digest_size > FS_VERITY_MAX_DIGEST_SIZE);
+
+ /*
+ * For efficiency, the implementation currently assumes the
+ * digest and block sizes are powers of 2. This limitation can
+ * be lifted if the code is updated to handle other values.
+ */
+ BUG_ON(!is_power_of_2(alg->digest_size));
+ BUG_ON(!is_power_of_2(alg->block_size));
+ }
+}
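
The zero-padding of the salt in fsverity_prepare_hash_state() is easier to see with concrete numbers. The snippet below reproduces the round_up() calculation with illustrative sizes; the helper name and the example values are only for demonstration.

#include <stddef.h>
#include <stdio.h>

/* Same rounding as round_up(salt_size, alg->block_size) in the kernel code */
static size_t padded_salt_size(size_t salt_size, size_t hash_block_size)
{
	return (salt_size + hash_block_size - 1) / hash_block_size * hash_block_size;
}

int main(void)
{
	/* SHA-256 compresses 64-byte blocks, SHA-512 compresses 128-byte blocks */
	printf("%zu\n", padded_salt_size(4, 64));	/* 64: salt fills exactly one block */
	printf("%zu\n", padded_salt_size(70, 64));	/* 128: salt spills into a second block */
	printf("%zu\n", padded_salt_size(32, 128));	/* 128 */
	return 0;
}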
diff --git a/fs/verity/init.c b/fs/verity/init.c
new file mode 100644
index 000000000..c98b7016f
--- /dev/null
+++ b/fs/verity/init.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fs-verity module initialization and logging
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/ratelimit.h>
+
+void fsverity_msg(const struct inode *inode, const char *level,
+ const char *fmt, ...)
+{
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ struct va_format vaf;
+ va_list args;
+
+ if (!__ratelimit(&rs))
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ if (inode)
+ printk("%sfs-verity (%s, inode %lu): %pV\n",
+ level, inode->i_sb->s_id, inode->i_ino, &vaf);
+ else
+ printk("%sfs-verity: %pV\n", level, &vaf);
+ va_end(args);
+}
+
+static int __init fsverity_init(void)
+{
+ int err;
+
+ fsverity_check_hash_algs();
+
+ err = fsverity_init_info_cache();
+ if (err)
+ return err;
+
+ err = fsverity_init_workqueue();
+ if (err)
+ goto err_exit_info_cache;
+
+ err = fsverity_init_signature();
+ if (err)
+ goto err_exit_workqueue;
+
+ pr_debug("Initialized fs-verity\n");
+ return 0;
+
+err_exit_workqueue:
+ fsverity_exit_workqueue();
+err_exit_info_cache:
+ fsverity_exit_info_cache();
+ return err;
+}
+late_initcall(fsverity_init)
diff --git a/fs/verity/measure.c b/fs/verity/measure.c
new file mode 100644
index 000000000..e99c00350
--- /dev/null
+++ b/fs/verity/measure.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ioctl to get a verity file's digest
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/uaccess.h>
+
+/**
+ * fsverity_ioctl_measure() - get a verity file's digest
+ * @filp: file to get digest of
+ * @_uarg: user pointer to fsverity_digest
+ *
+ * Retrieve the file digest that the kernel is enforcing for reads from a verity
+ * file. See the "FS_IOC_MEASURE_VERITY" section of
+ * Documentation/filesystems/fsverity.rst for the documentation.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_ioctl_measure(struct file *filp, void __user *_uarg)
+{
+ const struct inode *inode = file_inode(filp);
+ struct fsverity_digest __user *uarg = _uarg;
+ const struct fsverity_info *vi;
+ const struct fsverity_hash_alg *hash_alg;
+ struct fsverity_digest arg;
+
+ vi = fsverity_get_info(inode);
+ if (!vi)
+ return -ENODATA; /* not a verity file */
+ hash_alg = vi->tree_params.hash_alg;
+
+ /*
+ * The user specifies the digest_size their buffer has space for; we can
+ * return the digest if it fits in the available space. We write back
+ * the actual size, which may be shorter than the user-specified size.
+ */
+
+ if (get_user(arg.digest_size, &uarg->digest_size))
+ return -EFAULT;
+ if (arg.digest_size < hash_alg->digest_size)
+ return -EOVERFLOW;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.digest_algorithm = hash_alg - fsverity_hash_algs;
+ arg.digest_size = hash_alg->digest_size;
+
+ if (copy_to_user(uarg, &arg, sizeof(arg)))
+ return -EFAULT;
+
+ if (copy_to_user(uarg->digest, vi->file_digest, hash_alg->digest_size))
+ return -EFAULT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsverity_ioctl_measure);
+
+/**
+ * fsverity_get_digest() - get a verity file's digest
+ * @inode: inode to get digest of
+ * @digest: (out) pointer to the digest
+ * @alg: (out) pointer to the hash algorithm enumeration
+ *
+ * Return the file hash algorithm and digest of an fsverity protected file.
+ * Assumption: before calling fsverity_get_digest(), the file must have been
+ * opened.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_get_digest(struct inode *inode,
+ u8 digest[FS_VERITY_MAX_DIGEST_SIZE],
+ enum hash_algo *alg)
+{
+ const struct fsverity_info *vi;
+ const struct fsverity_hash_alg *hash_alg;
+ int i;
+
+ vi = fsverity_get_info(inode);
+ if (!vi)
+ return -ENODATA; /* not a verity file */
+
+ hash_alg = vi->tree_params.hash_alg;
+ memset(digest, 0, FS_VERITY_MAX_DIGEST_SIZE);
+
+ /* convert the verity hash algorithm name to a hash_algo_name enum */
+ i = match_string(hash_algo_name, HASH_ALGO__LAST, hash_alg->name);
+ if (i < 0)
+ return -EINVAL;
+ *alg = i;
+
+ if (WARN_ON_ONCE(hash_alg->digest_size != hash_digest_size[*alg]))
+ return -EINVAL;
+ memcpy(digest, vi->file_digest, hash_alg->digest_size);
+
+ pr_debug("file digest %s:%*phN\n", hash_algo_name[*alg],
+ hash_digest_size[*alg], digest);
+
+ return 0;
+}
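
A minimal userspace counterpart to fsverity_ioctl_measure(), assuming the UAPI struct fsverity_digest from <linux/fsverity.h>: it sizes the buffer for the largest digest fs-verity currently supports (SHA-512, 64 bytes) and prints whatever the kernel returns. Illustrative sketch only.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct fsverity_digest *d;
	int fd, i;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	d = malloc(sizeof(*d) + 64);
	if (!d)
		return 1;
	d->digest_size = 64;	/* buffer capacity; the kernel writes back the real size */
	if (ioctl(fd, FS_IOC_MEASURE_VERITY, d) != 0) {
		perror("FS_IOC_MEASURE_VERITY");
		return 1;
	}
	printf("digest (algorithm %u): ", d->digest_algorithm);
	for (i = 0; i < d->digest_size; i++)
		printf("%02x", d->digest[i]);
	printf("\n");
	free(d);
	close(fd);
	return 0;
}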
diff --git a/fs/verity/open.c b/fs/verity/open.c
new file mode 100644
index 000000000..81ff94442
--- /dev/null
+++ b/fs/verity/open.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Opening fs-verity files
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/slab.h>
+
+static struct kmem_cache *fsverity_info_cachep;
+
+/**
+ * fsverity_init_merkle_tree_params() - initialize Merkle tree parameters
+ * @params: the parameters struct to initialize
+ * @inode: the inode for which the Merkle tree is being built
+ * @hash_algorithm: number of hash algorithm to use
+ * @log_blocksize: log base 2 of block size to use
+ * @salt: pointer to salt (optional)
+ * @salt_size: size of salt, possibly 0
+ *
+ * Validate the hash algorithm and block size, then compute the tree topology
+ * (num levels, num blocks in each level, etc.) and initialize @params.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
+ const struct inode *inode,
+ unsigned int hash_algorithm,
+ unsigned int log_blocksize,
+ const u8 *salt, size_t salt_size)
+{
+ struct fsverity_hash_alg *hash_alg;
+ int err;
+ u64 blocks;
+ u64 offset;
+ int level;
+
+ memset(params, 0, sizeof(*params));
+
+ hash_alg = fsverity_get_hash_alg(inode, hash_algorithm);
+ if (IS_ERR(hash_alg))
+ return PTR_ERR(hash_alg);
+ params->hash_alg = hash_alg;
+ params->digest_size = hash_alg->digest_size;
+
+ params->hashstate = fsverity_prepare_hash_state(hash_alg, salt,
+ salt_size);
+ if (IS_ERR(params->hashstate)) {
+ err = PTR_ERR(params->hashstate);
+ params->hashstate = NULL;
+ fsverity_err(inode, "Error %d preparing hash state", err);
+ goto out_err;
+ }
+
+ if (log_blocksize != PAGE_SHIFT) {
+ fsverity_warn(inode, "Unsupported log_blocksize: %u",
+ log_blocksize);
+ err = -EINVAL;
+ goto out_err;
+ }
+ params->log_blocksize = log_blocksize;
+ params->block_size = 1 << log_blocksize;
+
+ if (WARN_ON(!is_power_of_2(params->digest_size))) {
+ err = -EINVAL;
+ goto out_err;
+ }
+ if (params->block_size < 2 * params->digest_size) {
+ fsverity_warn(inode,
+ "Merkle tree block size (%u) too small for hash algorithm \"%s\"",
+ params->block_size, hash_alg->name);
+ err = -EINVAL;
+ goto out_err;
+ }
+ params->log_arity = params->log_blocksize - ilog2(params->digest_size);
+ params->hashes_per_block = 1 << params->log_arity;
+
+ pr_debug("Merkle tree uses %s with %u-byte blocks (%u hashes/block), salt=%*phN\n",
+ hash_alg->name, params->block_size, params->hashes_per_block,
+ (int)salt_size, salt);
+
+ /*
+ * Compute the number of levels in the Merkle tree and create a map from
+ * level to the starting block of that level. Level 'num_levels - 1' is
+ * the root and is stored first. Level 0 is the level directly "above"
+ * the data blocks and is stored last.
+ */
+
+ /* Compute number of levels and the number of blocks in each level */
+ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
+ pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
+ while (blocks > 1) {
+ if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
+ fsverity_err(inode, "Too many levels in Merkle tree");
+ err = -EINVAL;
+ goto out_err;
+ }
+ blocks = (blocks + params->hashes_per_block - 1) >>
+ params->log_arity;
+ /* temporarily using level_start[] to store blocks in level */
+ params->level_start[params->num_levels++] = blocks;
+ }
+ params->level0_blocks = params->level_start[0];
+
+ /* Compute the starting block of each level */
+ offset = 0;
+ for (level = (int)params->num_levels - 1; level >= 0; level--) {
+ blocks = params->level_start[level];
+ params->level_start[level] = offset;
+ pr_debug("Level %d is %llu blocks starting at index %llu\n",
+ level, blocks, offset);
+ offset += blocks;
+ }
+
+ params->tree_size = offset << log_blocksize;
+ return 0;
+
+out_err:
+ kfree(params->hashstate);
+ memset(params, 0, sizeof(*params));
+ return err;
+}
+
+/*
+ * Compute the file digest by hashing the fsverity_descriptor excluding the
+ * signature and with the sig_size field set to 0.
+ */
+static int compute_file_digest(struct fsverity_hash_alg *hash_alg,
+ struct fsverity_descriptor *desc,
+ u8 *file_digest)
+{
+ __le32 sig_size = desc->sig_size;
+ int err;
+
+ desc->sig_size = 0;
+ err = fsverity_hash_buffer(hash_alg, desc, sizeof(*desc), file_digest);
+ desc->sig_size = sig_size;
+
+ return err;
+}
+
+/*
+ * Create a new fsverity_info from the given fsverity_descriptor (with optional
+ * appended signature), and check the signature if present. The
+ * fsverity_descriptor must have already undergone basic validation.
+ */
+struct fsverity_info *fsverity_create_info(const struct inode *inode,
+ struct fsverity_descriptor *desc)
+{
+ struct fsverity_info *vi;
+ int err;
+
+ vi = kmem_cache_zalloc(fsverity_info_cachep, GFP_KERNEL);
+ if (!vi)
+ return ERR_PTR(-ENOMEM);
+ vi->inode = inode;
+
+ err = fsverity_init_merkle_tree_params(&vi->tree_params, inode,
+ desc->hash_algorithm,
+ desc->log_blocksize,
+ desc->salt, desc->salt_size);
+ if (err) {
+ fsverity_err(inode,
+ "Error %d initializing Merkle tree parameters",
+ err);
+ goto out;
+ }
+
+ memcpy(vi->root_hash, desc->root_hash, vi->tree_params.digest_size);
+
+ err = compute_file_digest(vi->tree_params.hash_alg, desc,
+ vi->file_digest);
+ if (err) {
+ fsverity_err(inode, "Error %d computing file digest", err);
+ goto out;
+ }
+ pr_debug("Computed file digest: %s:%*phN\n",
+ vi->tree_params.hash_alg->name,
+ vi->tree_params.digest_size, vi->file_digest);
+
+ err = fsverity_verify_signature(vi, desc->signature,
+ le32_to_cpu(desc->sig_size));
+out:
+ if (err) {
+ fsverity_free_info(vi);
+ vi = ERR_PTR(err);
+ }
+ return vi;
+}
+
+void fsverity_set_info(struct inode *inode, struct fsverity_info *vi)
+{
+ /*
+ * Multiple tasks may race to set ->i_verity_info, so use
+ * cmpxchg_release(). This pairs with the smp_load_acquire() in
+ * fsverity_get_info(). I.e., here we publish ->i_verity_info with a
+ * RELEASE barrier so that other tasks can ACQUIRE it.
+ */
+ if (cmpxchg_release(&inode->i_verity_info, NULL, vi) != NULL) {
+ /* Lost the race, so free the fsverity_info we allocated. */
+ fsverity_free_info(vi);
+ /*
+ * Afterwards, the caller may access ->i_verity_info directly,
+ * so make sure to ACQUIRE the winning fsverity_info.
+ */
+ (void)fsverity_get_info(inode);
+ }
+}
+
+void fsverity_free_info(struct fsverity_info *vi)
+{
+ if (!vi)
+ return;
+ kfree(vi->tree_params.hashstate);
+ kmem_cache_free(fsverity_info_cachep, vi);
+}
+
+static bool validate_fsverity_descriptor(struct inode *inode,
+ const struct fsverity_descriptor *desc,
+ size_t desc_size)
+{
+ if (desc_size < sizeof(*desc)) {
+ fsverity_err(inode, "Unrecognized descriptor size: %zu bytes",
+ desc_size);
+ return false;
+ }
+
+ if (desc->version != 1) {
+ fsverity_err(inode, "Unrecognized descriptor version: %u",
+ desc->version);
+ return false;
+ }
+
+ if (memchr_inv(desc->__reserved, 0, sizeof(desc->__reserved))) {
+ fsverity_err(inode, "Reserved bits set in descriptor");
+ return false;
+ }
+
+ if (desc->salt_size > sizeof(desc->salt)) {
+ fsverity_err(inode, "Invalid salt_size: %u", desc->salt_size);
+ return false;
+ }
+
+ if (le64_to_cpu(desc->data_size) != inode->i_size) {
+ fsverity_err(inode,
+ "Wrong data_size: %llu (desc) != %lld (inode)",
+ le64_to_cpu(desc->data_size), inode->i_size);
+ return false;
+ }
+
+ if (le32_to_cpu(desc->sig_size) > desc_size - sizeof(*desc)) {
+ fsverity_err(inode, "Signature overflows verity descriptor");
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Read the inode's fsverity_descriptor (with optional appended signature) from
+ * the filesystem, and do basic validation of it.
+ */
+int fsverity_get_descriptor(struct inode *inode,
+ struct fsverity_descriptor **desc_ret)
+{
+ int res;
+ struct fsverity_descriptor *desc;
+
+ res = inode->i_sb->s_vop->get_verity_descriptor(inode, NULL, 0);
+ if (res < 0) {
+ fsverity_err(inode,
+ "Error %d getting verity descriptor size", res);
+ return res;
+ }
+ if (res > FS_VERITY_MAX_DESCRIPTOR_SIZE) {
+ fsverity_err(inode, "Verity descriptor is too large (%d bytes)",
+ res);
+ return -EMSGSIZE;
+ }
+ desc = kmalloc(res, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ res = inode->i_sb->s_vop->get_verity_descriptor(inode, desc, res);
+ if (res < 0) {
+ fsverity_err(inode, "Error %d reading verity descriptor", res);
+ kfree(desc);
+ return res;
+ }
+
+ if (!validate_fsverity_descriptor(inode, desc, res)) {
+ kfree(desc);
+ return -EINVAL;
+ }
+
+ *desc_ret = desc;
+ return 0;
+}
+
+/* Ensure the inode has an ->i_verity_info */
+static int ensure_verity_info(struct inode *inode)
+{
+ struct fsverity_info *vi = fsverity_get_info(inode);
+ struct fsverity_descriptor *desc;
+ int err;
+
+ if (vi)
+ return 0;
+
+ err = fsverity_get_descriptor(inode, &desc);
+ if (err)
+ return err;
+
+ vi = fsverity_create_info(inode, desc);
+ if (IS_ERR(vi)) {
+ err = PTR_ERR(vi);
+ goto out_free_desc;
+ }
+
+ fsverity_set_info(inode, vi);
+ err = 0;
+out_free_desc:
+ kfree(desc);
+ return err;
+}
+
+/**
+ * fsverity_file_open() - prepare to open a verity file
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, deny the open if it is for writing. Otherwise,
+ * set up the inode's ->i_verity_info if not already done.
+ *
+ * When combined with fscrypt, this must be called after fscrypt_file_open().
+ * Otherwise, we won't have the key set up to decrypt the verity metadata.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ if (!IS_VERITY(inode))
+ return 0;
+
+ if (filp->f_mode & FMODE_WRITE) {
+ pr_debug("Denying opening verity file (ino %lu) for write\n",
+ inode->i_ino);
+ return -EPERM;
+ }
+
+ return ensure_verity_info(inode);
+}
+EXPORT_SYMBOL_GPL(fsverity_file_open);
+
+/**
+ * fsverity_prepare_setattr() - prepare to change a verity inode's attributes
+ * @dentry: dentry through which the inode is being changed
+ * @attr: attributes to change
+ *
+ * Verity files are immutable, so deny truncates. This isn't covered by the
+ * open-time check because sys_truncate() takes a path, not a file descriptor.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ if (IS_VERITY(d_inode(dentry)) && (attr->ia_valid & ATTR_SIZE)) {
+ pr_debug("Denying truncate of verity file (ino %lu)\n",
+ d_inode(dentry)->i_ino);
+ return -EPERM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsverity_prepare_setattr);
+
+/**
+ * fsverity_cleanup_inode() - free the inode's verity info, if present
+ * @inode: an inode being evicted
+ *
+ * Filesystems must call this on inode eviction to free ->i_verity_info.
+ */
+void fsverity_cleanup_inode(struct inode *inode)
+{
+ fsverity_free_info(inode->i_verity_info);
+ inode->i_verity_info = NULL;
+}
+EXPORT_SYMBOL_GPL(fsverity_cleanup_inode);
+
+int __init fsverity_init_info_cache(void)
+{
+ fsverity_info_cachep = KMEM_CACHE_USERCOPY(fsverity_info,
+ SLAB_RECLAIM_ACCOUNT,
+ file_digest);
+ if (!fsverity_info_cachep)
+ return -ENOMEM;
+ return 0;
+}
+
+void __init fsverity_exit_info_cache(void)
+{
+ kmem_cache_destroy(fsverity_info_cachep);
+ fsverity_info_cachep = NULL;
+}
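
The hooks exported from open.c are meant to be called by the filesystem itself. The sketch below shows roughly where a hypothetical filesystem ("myfs", not a real one) would wire them in; the surrounding function signatures are simplified and vary by kernel version, and only the fsverity_*() calls are taken from this patch.

#include <linux/fs.h>
#include <linux/fsverity.h>

/* Hypothetical filesystem glue; only the fsverity_*() calls come from this patch. */
static int myfs_open(struct inode *inode, struct file *filp)
{
	int err = fsverity_file_open(inode, filp);	/* rejects writable opens */

	if (err)
		return err;
	return generic_file_open(inode, filp);
}

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err = fsverity_prepare_setattr(dentry, attr);	/* rejects truncation */

	if (err)
		return err;
	/* ... filesystem-specific attribute changes ... */
	return 0;
}

static void myfs_evict_inode(struct inode *inode)
{
	/* ... filesystem-specific teardown ... */
	fsverity_cleanup_inode(inode);		/* frees ->i_verity_info */
}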
diff --git a/fs/verity/read_metadata.c b/fs/verity/read_metadata.c
new file mode 100644
index 000000000..2aefc5565
--- /dev/null
+++ b/fs/verity/read_metadata.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Ioctl to read verity metadata
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/backing-dev.h>
+#include <linux/highmem.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+static int fsverity_read_merkle_tree(struct inode *inode,
+ const struct fsverity_info *vi,
+ void __user *buf, u64 offset, int length)
+{
+ const struct fsverity_operations *vops = inode->i_sb->s_vop;
+ u64 end_offset;
+ unsigned int offs_in_page;
+ pgoff_t index, last_index;
+ int retval = 0;
+ int err = 0;
+
+ end_offset = min(offset + length, vi->tree_params.tree_size);
+ if (offset >= end_offset)
+ return 0;
+ offs_in_page = offset_in_page(offset);
+ last_index = (end_offset - 1) >> PAGE_SHIFT;
+
+ /*
+ * Iterate through each Merkle tree page in the requested range and copy
+ * the requested portion to userspace. Note that the Merkle tree block
+ * size isn't important here, as we are returning a byte stream; i.e.,
+ * we can just work with pages even if the tree block size != PAGE_SIZE.
+ */
+ for (index = offset >> PAGE_SHIFT; index <= last_index; index++) {
+ unsigned long num_ra_pages =
+ min_t(unsigned long, last_index - index + 1,
+ inode->i_sb->s_bdi->io_pages);
+ unsigned int bytes_to_copy = min_t(u64, end_offset - offset,
+ PAGE_SIZE - offs_in_page);
+ struct page *page;
+ const void *virt;
+
+ page = vops->read_merkle_tree_page(inode, index, num_ra_pages);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ fsverity_err(inode,
+ "Error %d reading Merkle tree page %lu",
+ err, index);
+ break;
+ }
+
+ virt = kmap_local_page(page);
+ if (copy_to_user(buf, virt + offs_in_page, bytes_to_copy)) {
+ kunmap_local(virt);
+ put_page(page);
+ err = -EFAULT;
+ break;
+ }
+ kunmap_local(virt);
+ put_page(page);
+
+ retval += bytes_to_copy;
+ buf += bytes_to_copy;
+ offset += bytes_to_copy;
+
+ if (fatal_signal_pending(current)) {
+ err = -EINTR;
+ break;
+ }
+ cond_resched();
+ offs_in_page = 0;
+ }
+ return retval ? retval : err;
+}
+
+/* Copy the requested portion of the buffer to userspace. */
+static int fsverity_read_buffer(void __user *dst, u64 offset, int length,
+ const void *src, size_t src_length)
+{
+ if (offset >= src_length)
+ return 0;
+ src += offset;
+ src_length -= offset;
+
+ length = min_t(size_t, length, src_length);
+
+ if (copy_to_user(dst, src, length))
+ return -EFAULT;
+
+ return length;
+}
+
+static int fsverity_read_descriptor(struct inode *inode,
+ void __user *buf, u64 offset, int length)
+{
+ struct fsverity_descriptor *desc;
+ size_t desc_size;
+ int res;
+
+ res = fsverity_get_descriptor(inode, &desc);
+ if (res)
+ return res;
+
+ /* don't include the signature */
+ desc_size = offsetof(struct fsverity_descriptor, signature);
+ desc->sig_size = 0;
+
+ res = fsverity_read_buffer(buf, offset, length, desc, desc_size);
+
+ kfree(desc);
+ return res;
+}
+
+static int fsverity_read_signature(struct inode *inode,
+ void __user *buf, u64 offset, int length)
+{
+ struct fsverity_descriptor *desc;
+ int res;
+
+ res = fsverity_get_descriptor(inode, &desc);
+ if (res)
+ return res;
+
+ if (desc->sig_size == 0) {
+ res = -ENODATA;
+ goto out;
+ }
+
+ /*
+ * Include only the signature. Note that fsverity_get_descriptor()
+ * already verified that sig_size is in-bounds.
+ */
+ res = fsverity_read_buffer(buf, offset, length, desc->signature,
+ le32_to_cpu(desc->sig_size));
+out:
+ kfree(desc);
+ return res;
+}
+
+/**
+ * fsverity_ioctl_read_metadata() - read verity metadata from a file
+ * @filp: file to read the metadata from
+ * @uarg: user pointer to fsverity_read_metadata_arg
+ *
+ * Return: length read on success, 0 on EOF, -errno on failure
+ */
+int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg)
+{
+ struct inode *inode = file_inode(filp);
+ const struct fsverity_info *vi;
+ struct fsverity_read_metadata_arg arg;
+ int length;
+ void __user *buf;
+
+ vi = fsverity_get_info(inode);
+ if (!vi)
+ return -ENODATA; /* not a verity file */
+ /*
+ * Note that we don't have to explicitly check that the file is open for
+ * reading, since verity files can only be opened for reading.
+ */
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.__reserved)
+ return -EINVAL;
+
+ /* offset + length must not overflow. */
+ if (arg.offset + arg.length < arg.offset)
+ return -EINVAL;
+
+ /* Ensure that the return value will fit in INT_MAX. */
+ length = min_t(u64, arg.length, INT_MAX);
+
+ buf = u64_to_user_ptr(arg.buf_ptr);
+
+ switch (arg.metadata_type) {
+ case FS_VERITY_METADATA_TYPE_MERKLE_TREE:
+ return fsverity_read_merkle_tree(inode, vi, buf, arg.offset,
+ length);
+ case FS_VERITY_METADATA_TYPE_DESCRIPTOR:
+ return fsverity_read_descriptor(inode, buf, arg.offset, length);
+ case FS_VERITY_METADATA_TYPE_SIGNATURE:
+ return fsverity_read_signature(inode, buf, arg.offset, length);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(fsverity_ioctl_read_metadata);
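
For completeness, a small userspace sketch that exercises fsverity_ioctl_read_metadata() to dump the descriptor, assuming the UAPI definitions in <linux/fsverity.h>; reading the Merkle tree or the signature differs only in metadata_type and in looping until the ioctl returns 0.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fsverity.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct fsverity_read_metadata_arg arg = { 0 };
	char buf[4096];
	int fd, ret;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	arg.metadata_type = FS_VERITY_METADATA_TYPE_DESCRIPTOR;
	arg.offset = 0;
	arg.length = sizeof(buf);
	arg.buf_ptr = (uintptr_t)buf;

	/* Returns the number of bytes read, 0 at the end of the metadata, or -1 */
	ret = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);
	if (ret < 0)
		perror("FS_IOC_READ_VERITY_METADATA");
	else
		printf("read %d descriptor bytes\n", ret);
	close(fd);
	return 0;
}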
diff --git a/fs/verity/signature.c b/fs/verity/signature.c
new file mode 100644
index 000000000..b59de0305
--- /dev/null
+++ b/fs/verity/signature.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Verification of builtin signatures
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <linux/cred.h>
+#include <linux/key.h>
+#include <linux/slab.h>
+#include <linux/verification.h>
+
+/*
+ * /proc/sys/fs/verity/require_signatures
+ * If 1, all verity files must have a valid builtin signature.
+ */
+static int fsverity_require_signatures;
+
+/*
+ * Keyring that contains the trusted X.509 certificates.
+ *
+ * Only root (kuid=0) can modify this. Also, root may use
+ * keyctl_restrict_keyring() to prevent any more additions.
+ */
+static struct key *fsverity_keyring;
+
+/**
+ * fsverity_verify_signature() - check a verity file's signature
+ * @vi: the file's fsverity_info
+ * @signature: the file's built-in signature
+ * @sig_size: size of signature in bytes, or 0 if no signature
+ *
+ * If the file includes a signature of its fs-verity file digest, verify it
+ * against the certificates in the fs-verity keyring.
+ *
+ * Return: 0 on success (signature valid or not required); -errno on failure
+ */
+int fsverity_verify_signature(const struct fsverity_info *vi,
+ const u8 *signature, size_t sig_size)
+{
+ const struct inode *inode = vi->inode;
+ const struct fsverity_hash_alg *hash_alg = vi->tree_params.hash_alg;
+ struct fsverity_formatted_digest *d;
+ int err;
+
+ if (sig_size == 0) {
+ if (fsverity_require_signatures) {
+ fsverity_err(inode,
+ "require_signatures=1, rejecting unsigned file!");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ if (fsverity_keyring->keys.nr_leaves_on_tree == 0) {
+ /*
+ * The ".fs-verity" keyring is empty, due to builtin signatures
+ * being supported by the kernel but not actually being used.
+ * In this case, verify_pkcs7_signature() would always return an
+ * error, usually ENOKEY. It could also be EBADMSG if the
+ * PKCS#7 is malformed, but that isn't very important to
+ * distinguish. So, just skip to ENOKEY to avoid the attack
+ * surface of the PKCS#7 parser, which would otherwise be
+ * reachable by any task able to execute FS_IOC_ENABLE_VERITY.
+ */
+ fsverity_err(inode,
+ "fs-verity keyring is empty, rejecting signed file!");
+ return -ENOKEY;
+ }
+
+ d = kzalloc(sizeof(*d) + hash_alg->digest_size, GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ memcpy(d->magic, "FSVerity", 8);
+ d->digest_algorithm = cpu_to_le16(hash_alg - fsverity_hash_algs);
+ d->digest_size = cpu_to_le16(hash_alg->digest_size);
+ memcpy(d->digest, vi->file_digest, hash_alg->digest_size);
+
+ err = verify_pkcs7_signature(d, sizeof(*d) + hash_alg->digest_size,
+ signature, sig_size, fsverity_keyring,
+ VERIFYING_UNSPECIFIED_SIGNATURE,
+ NULL, NULL);
+ kfree(d);
+
+ if (err) {
+ if (err == -ENOKEY)
+ fsverity_err(inode,
+ "File's signing cert isn't in the fs-verity keyring");
+ else if (err == -EKEYREJECTED)
+ fsverity_err(inode, "Incorrect file signature");
+ else if (err == -EBADMSG)
+ fsverity_err(inode, "Malformed file signature");
+ else
+ fsverity_err(inode, "Error %d verifying file signature",
+ err);
+ return err;
+ }
+
+ pr_debug("Valid signature for file digest %s:%*phN\n",
+ hash_alg->name, hash_alg->digest_size, vi->file_digest);
+ return 0;
+}
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table_header *fsverity_sysctl_header;
+
+static const struct ctl_path fsverity_sysctl_path[] = {
+ { .procname = "fs", },
+ { .procname = "verity", },
+ { }
+};
+
+static struct ctl_table fsverity_sysctl_table[] = {
+ {
+ .procname = "require_signatures",
+ .data = &fsverity_require_signatures,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ { }
+};
+
+static int __init fsverity_sysctl_init(void)
+{
+ fsverity_sysctl_header = register_sysctl_paths(fsverity_sysctl_path,
+ fsverity_sysctl_table);
+ if (!fsverity_sysctl_header) {
+ pr_err("sysctl registration failed!\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+#else /* !CONFIG_SYSCTL */
+static inline int __init fsverity_sysctl_init(void)
+{
+ return 0;
+}
+#endif /* !CONFIG_SYSCTL */
+
+int __init fsverity_init_signature(void)
+{
+ struct key *ring;
+ int err;
+
+ ring = keyring_alloc(".fs-verity", KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(), KEY_POS_SEARCH |
+ KEY_USR_VIEW | KEY_USR_READ | KEY_USR_WRITE |
+ KEY_USR_SEARCH | KEY_USR_SETATTR,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+
+ err = fsverity_sysctl_init();
+ if (err)
+ goto err_put_ring;
+
+ fsverity_keyring = ring;
+ return 0;
+
+err_put_ring:
+ key_put(ring);
+ return err;
+}
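
What the builtin signature actually covers is the struct fsverity_formatted_digest blob constructed in fsverity_verify_signature() above, not the raw file contents. The userspace-side sketch below rebuilds that blob for a SHA-256 file digest so a signer can feed it to a PKCS#7 signing tool; the struct is redefined locally for illustration and the code assumes a little-endian host.

#include <stdint.h>
#include <string.h>

/* Mirrors struct fsverity_formatted_digest from the UAPI header */
struct formatted_digest {
	char magic[8];			/* "FSVerity" */
	uint16_t digest_algorithm;	/* little-endian on disk */
	uint16_t digest_size;		/* little-endian on disk */
	uint8_t digest[32];		/* SHA-256 file digest */
} __attribute__((packed));

static void format_digest(struct formatted_digest *d, const uint8_t file_digest[32])
{
	memcpy(d->magic, "FSVerity", 8);
	d->digest_algorithm = 1;	/* FS_VERITY_HASH_ALG_SHA256; assumes LE host */
	d->digest_size = 32;
	memcpy(d->digest, file_digest, 32);
	/* The PKCS#7 signature is then computed over these sizeof(*d) bytes. */
}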
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
new file mode 100644
index 000000000..e23d382fc
--- /dev/null
+++ b/fs/verity/verify.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data verification functions, i.e. hooks for ->readahead()
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include "fsverity_private.h"
+
+#include <crypto/hash.h>
+#include <linux/bio.h>
+#include <linux/ratelimit.h>
+
+static struct workqueue_struct *fsverity_read_workqueue;
+
+/**
+ * hash_at_level() - compute the location of the block's hash at the given level
+ *
+ * @params: (in) the Merkle tree parameters
+ * @dindex: (in) the index of the data block being verified
+ * @level: (in) the level of hash we want (0 is leaf level)
+ * @hindex: (out) the index of the hash block containing the wanted hash
+ * @hoffset: (out) the byte offset to the wanted hash within the hash block
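+ *
+ * For example, with 4096-byte blocks and SHA-256 (32-byte digests), each hash
+ * block holds 128 hashes (log_arity == 7).  For data block 1000 at level 0,
+ * position == 1000, so *hindex == level_start[0] + 7 and
+ * *hoffset == (1000 & 127) * 32 == 3328.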
+ */
+static void hash_at_level(const struct merkle_tree_params *params,
+ pgoff_t dindex, unsigned int level, pgoff_t *hindex,
+ unsigned int *hoffset)
+{
+ pgoff_t position;
+
+ /* Offset of the hash within the level's region, in hashes */
+ position = dindex >> (level * params->log_arity);
+
+ /* Index of the hash block in the tree overall */
+ *hindex = params->level_start[level] + (position >> params->log_arity);
+
+ /* Offset of the wanted hash (in bytes) within the hash block */
+ *hoffset = (position & ((1 << params->log_arity) - 1)) <<
+ (params->log_blocksize - params->log_arity);
+}
+
+static inline int cmp_hashes(const struct fsverity_info *vi,
+ const u8 *want_hash, const u8 *real_hash,
+ pgoff_t index, int level)
+{
+ const unsigned int hsize = vi->tree_params.digest_size;
+
+ if (memcmp(want_hash, real_hash, hsize) == 0)
+ return 0;
+
+ fsverity_err(vi->inode,
+ "FILE CORRUPTED! index=%lu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
+ index, level,
+ vi->tree_params.hash_alg->name, hsize, want_hash,
+ vi->tree_params.hash_alg->name, hsize, real_hash);
+ return -EBADMSG;
+}
+
+/*
+ * Verify a single data page against the file's Merkle tree.
+ *
+ * In principle, we need to verify the entire path to the root node. However,
+ * for efficiency the filesystem may cache the hash pages. Therefore we need
+ * only ascend the tree until an already-verified page is seen, as indicated by
+ * the PageChecked bit being set; then verify the path to that page.
+ *
+ * This code currently only supports the case where the verity block size is
+ * equal to PAGE_SIZE. Doing otherwise would be possible but tricky, since we
+ * wouldn't be able to use the PageChecked bit.
+ *
+ * Note that multiple processes may race to verify a hash page and mark it
+ * Checked, but it doesn't matter; the result will be the same either way.
+ *
+ * Return: true if the page is valid, else false.
+ */
+static bool verify_page(struct inode *inode, const struct fsverity_info *vi,
+ struct ahash_request *req, struct page *data_page,
+ unsigned long level0_ra_pages)
+{
+ const struct merkle_tree_params *params = &vi->tree_params;
+ const unsigned int hsize = params->digest_size;
+ const pgoff_t index = data_page->index;
+ int level;
+ u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
+ const u8 *want_hash;
+ u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
+ struct page *hpages[FS_VERITY_MAX_LEVELS];
+ unsigned int hoffsets[FS_VERITY_MAX_LEVELS];
+ int err;
+
+ if (WARN_ON_ONCE(!PageLocked(data_page) || PageUptodate(data_page)))
+ return false;
+
+ pr_debug_ratelimited("Verifying data page %lu...\n", index);
+
+ /*
+ * Starting at the leaf level, ascend the tree saving hash pages along
+ * the way until we find a verified hash page, indicated by PageChecked;
+ * or until we reach the root.
+ */
+ for (level = 0; level < params->num_levels; level++) {
+ pgoff_t hindex;
+ unsigned int hoffset;
+ struct page *hpage;
+
+ hash_at_level(params, index, level, &hindex, &hoffset);
+
+ pr_debug_ratelimited("Level %d: hindex=%lu, hoffset=%u\n",
+ level, hindex, hoffset);
+
+ hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode, hindex,
+ level == 0 ? level0_ra_pages : 0);
+ if (IS_ERR(hpage)) {
+ err = PTR_ERR(hpage);
+ fsverity_err(inode,
+ "Error %d reading Merkle tree page %lu",
+ err, hindex);
+ goto out;
+ }
+
+ if (PageChecked(hpage)) {
+ memcpy_from_page(_want_hash, hpage, hoffset, hsize);
+ want_hash = _want_hash;
+ put_page(hpage);
+ pr_debug_ratelimited("Hash page already checked, want %s:%*phN\n",
+ params->hash_alg->name,
+ hsize, want_hash);
+ goto descend;
+ }
+ pr_debug_ratelimited("Hash page not yet checked\n");
+ hpages[level] = hpage;
+ hoffsets[level] = hoffset;
+ }
+
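+ /*
+ * No already-verified hash page was found, so the path extends all the
+ * way up to the root hash, which was saved in the fsverity_info when
+ * the file was opened.
+ */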
+ want_hash = vi->root_hash;
+ pr_debug("Want root hash: %s:%*phN\n",
+ params->hash_alg->name, hsize, want_hash);
+descend:
+ /* Descend the tree verifying hash pages */
+ for (; level > 0; level--) {
+ struct page *hpage = hpages[level - 1];
+ unsigned int hoffset = hoffsets[level - 1];
+
+ err = fsverity_hash_page(params, inode, req, hpage, real_hash);
+ if (err)
+ goto out;
+ err = cmp_hashes(vi, want_hash, real_hash, index, level - 1);
+ if (err)
+ goto out;
+ SetPageChecked(hpage);
+ memcpy_from_page(_want_hash, hpage, hoffset, hsize);
+ want_hash = _want_hash;
+ put_page(hpage);
+ pr_debug("Verified hash page at level %d, now want %s:%*phN\n",
+ level - 1, params->hash_alg->name, hsize, want_hash);
+ }
+
+ /* Finally, verify the data page */
+ err = fsverity_hash_page(params, inode, req, data_page, real_hash);
+ if (err)
+ goto out;
+ err = cmp_hashes(vi, want_hash, real_hash, index, -1);
+out:
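+ /* Drop any saved hash pages that weren't consumed during the descent */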
+ for (; level > 0; level--)
+ put_page(hpages[level - 1]);
+
+ return err == 0;
+}
+
+/**
+ * fsverity_verify_page() - verify a data page
+ * @page: the page to verify
+ *
+ * Verify a page that has just been read from a verity file. The page must be a
+ * pagecache page that is still locked and not yet uptodate.
+ *
+ * Return: true if the page is valid, else false.
+ */
+bool fsverity_verify_page(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ const struct fsverity_info *vi = inode->i_verity_info;
+ struct ahash_request *req;
+ bool valid;
+
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);
+
+ valid = verify_page(inode, vi, req, page, 0);
+
+ fsverity_free_hash_request(vi->tree_params.hash_alg, req);
+
+ return valid;
+}
+EXPORT_SYMBOL_GPL(fsverity_verify_page);
+
+#ifdef CONFIG_BLOCK
+/**
+ * fsverity_verify_bio() - verify a 'read' bio that has just completed
+ * @bio: the bio to verify
+ *
+ * Verify a set of pages that have just been read from a verity file. The pages
+ * must be pagecache pages that are still locked and not yet uptodate. Pages
+ * that fail verification are set to the Error state. Verification is skipped
+ * for pages already in the Error state, e.g. due to fscrypt decryption failure.
+ *
+ * This is a helper function for use by the ->readahead() method of filesystems
+ * that issue bios to read data directly into the page cache. Filesystems that
+ * populate the page cache without issuing bios (e.g. non block-based
+ * filesystems) must instead call fsverity_verify_page() directly on each page.
+ * All filesystems must also call fsverity_verify_page() on holes.
+ */
+void fsverity_verify_bio(struct bio *bio)
+{
+ struct inode *inode = bio_first_page_all(bio)->mapping->host;
+ const struct fsverity_info *vi = inode->i_verity_info;
+ const struct merkle_tree_params *params = &vi->tree_params;
+ struct ahash_request *req;
+ struct bio_vec *bv;
+ struct bvec_iter_all iter_all;
+ unsigned long max_ra_pages = 0;
+
+ /* This allocation never fails, since it's mempool-backed. */
+ req = fsverity_alloc_hash_request(params->hash_alg, GFP_NOFS);
+
+ if (bio->bi_opf & REQ_RAHEAD) {
+ /*
+ * If this bio is for data readahead, then we also do readahead
+ * of the first (largest) level of the Merkle tree. Namely,
+ * when a Merkle tree page is read, we also try to piggy-back on
+ * some additional pages -- up to 1/4 the number of data pages.
+ *
+ * This improves sequential read performance, as it greatly
+ * reduces the number of I/O requests made to the Merkle tree.
+ */
+ bio_for_each_segment_all(bv, bio, iter_all)
+ max_ra_pages++;
+ max_ra_pages /= 4;
+ }
+
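+ /*
+ * Verify each data page, clamping the level-0 readahead amount so that
+ * it doesn't extend beyond the end of that level of the Merkle tree.
+ */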
+ bio_for_each_segment_all(bv, bio, iter_all) {
+ struct page *page = bv->bv_page;
+ unsigned long level0_index = page->index >> params->log_arity;
+ unsigned long level0_ra_pages =
+ min(max_ra_pages, params->level0_blocks - level0_index);
+
+ if (!PageError(page) &&
+ !verify_page(inode, vi, req, page, level0_ra_pages))
+ SetPageError(page);
+ }
+
+ fsverity_free_hash_request(params->hash_alg, req);
+}
+EXPORT_SYMBOL_GPL(fsverity_verify_bio);
+#endif /* CONFIG_BLOCK */
+
+/**
+ * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
+ * @work: the work to enqueue
+ *
+ * Enqueue verification work for asynchronous processing.
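+ *
+ * Filesystems typically call this from a bio completion handler, with a work
+ * function that calls fsverity_verify_bio() before marking the pages uptodate.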
+ */
+void fsverity_enqueue_verify_work(struct work_struct *work)
+{
+ queue_work(fsverity_read_workqueue, work);
+}
+EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
+
+int __init fsverity_init_workqueue(void)
+{
+ /*
+ * Use a high-priority workqueue to prioritize verification work, which
+ * blocks reads from completing, over regular application tasks.
+ *
+ * For performance reasons, don't use an unbound workqueue. Using an
+ * unbound workqueue for crypto operations causes excessive scheduler
+ * latency on ARM64.
+ */
+ fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
+ WQ_HIGHPRI,
+ num_online_cpus());
+ if (!fsverity_read_workqueue)
+ return -ENOMEM;
+ return 0;
+}
+
+void __init fsverity_exit_workqueue(void)
+{
+ destroy_workqueue(fsverity_read_workqueue);
+ fsverity_read_workqueue = NULL;
+}