Diffstat (limited to 'fs/verity')
-rw-r--r--  fs/verity/fsverity_private.h    1
-rw-r--r--  fs/verity/init.c                7
-rw-r--r--  fs/verity/measure.c             4
-rw-r--r--  fs/verity/open.c                1
-rw-r--r--  fs/verity/verify.c             48
5 files changed, 27 insertions, 34 deletions
diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h
index a6a6b27492..b3506f56e1 100644
--- a/fs/verity/fsverity_private.h
+++ b/fs/verity/fsverity_private.h
@@ -69,7 +69,6 @@ struct fsverity_info {
u8 file_digest[FS_VERITY_MAX_DIGEST_SIZE];
const struct inode *inode;
unsigned long *hash_block_verified;
- spinlock_t hash_page_init_lock;
};
#define FS_VERITY_MAX_SIGNATURE_SIZE (FS_VERITY_MAX_DESCRIPTOR_SIZE - \
diff --git a/fs/verity/init.c b/fs/verity/init.c
index cb2c9aac61..f440f0e61e 100644
--- a/fs/verity/init.c
+++ b/fs/verity/init.c
@@ -10,8 +10,6 @@
#include <linux/ratelimit.h>
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *fsverity_sysctl_header;
-
static struct ctl_table fsverity_sysctl_table[] = {
#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
{
@@ -28,10 +26,7 @@ static struct ctl_table fsverity_sysctl_table[] = {
static void __init fsverity_init_sysctl(void)
{
- fsverity_sysctl_header = register_sysctl("fs/verity",
- fsverity_sysctl_table);
- if (!fsverity_sysctl_header)
- panic("fsverity sysctl registration failed");
+ register_sysctl_init("fs/verity", fsverity_sysctl_table);
}
#else /* CONFIG_SYSCTL */
static inline void fsverity_init_sysctl(void)
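
The simplification works because register_sysctl_init() is meant for boot-time tables that are never unregistered: it sizes the table with ARRAY_SIZE() and logs a failure internally instead of returning a header, so neither the saved ctl_table_header nor the panic() on failure serves a purpose anymore. A minimal sketch of the resulting pattern (the table entry is an illustrative assumption modeled on a boolean knob, not copied from this hunk):

#include <linux/init.h>
#include <linux/sysctl.h>

static int example_require_signatures;	/* illustrative knob */

static struct ctl_table example_sysctl_table[] = {
	{
		.procname	= "require_signatures",
		.data		= &example_require_signatures,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static void __init example_init_sysctl(void)
{
	/*
	 * No header to store and no error to check: the table stays
	 * registered for the kernel's lifetime, and a registration
	 * failure is logged inside register_sysctl_init().
	 */
	register_sysctl_init("fs/verity", example_sysctl_table);
}
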
diff --git a/fs/verity/measure.c b/fs/verity/measure.c
index bf7a5f4ccc..3969d54158 100644
--- a/fs/verity/measure.c
+++ b/fs/verity/measure.c
@@ -159,9 +159,9 @@ __bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_ker
__bpf_kfunc_end_defs();
-BTF_SET8_START(fsverity_set_ids)
+BTF_KFUNCS_START(fsverity_set_ids)
BTF_ID_FLAGS(func, bpf_get_fsverity_digest, KF_TRUSTED_ARGS)
-BTF_SET8_END(fsverity_set_ids)
+BTF_KFUNCS_END(fsverity_set_ids)
static int bpf_get_fsverity_digest_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
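
For context, BTF_KFUNCS_START/BTF_KFUNCS_END supersede the raw BTF_SET8 macros for declaring a kfunc ID set; the set is then wrapped in a btf_kfunc_id_set and registered for a BPF program type. A hedged sketch of how a set like this is typically wired up, reusing the identifiers visible in the hunk above (the program type and the init-function name are assumptions, not shown in this diff):

#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>

BTF_KFUNCS_START(fsverity_set_ids)
BTF_ID_FLAGS(func, bpf_get_fsverity_digest, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(fsverity_set_ids)

static const struct btf_kfunc_id_set bpf_fsverity_set = {
	.owner	= THIS_MODULE,
	.set	= &fsverity_set_ids,
	/* Limit which programs may call the kfunc (filter declared above): */
	.filter	= bpf_get_fsverity_digest_filter,
};

void __init example_init_bpf(void)
{
	/* Expose the kfunc to LSM programs (assumed program type). */
	register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fsverity_set);
}

The rename is not purely cosmetic: BTF_KFUNCS_START additionally marks the set as a kfunc set, which the BTF core relies on when the set is registered.
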
diff --git a/fs/verity/open.c b/fs/verity/open.c
index 6c31a871b8..fdeb95eca3 100644
--- a/fs/verity/open.c
+++ b/fs/verity/open.c
@@ -239,7 +239,6 @@ struct fsverity_info *fsverity_create_info(const struct inode *inode,
err = -ENOMEM;
goto fail;
}
- spin_lock_init(&vi->hash_page_init_lock);
}
return vi;
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 904ccd7e8e..4fcad0825a 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -19,7 +19,6 @@ static struct workqueue_struct *fsverity_read_workqueue;
static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
unsigned long hblock_idx)
{
- bool verified;
unsigned int blocks_per_page;
unsigned int i;
@@ -43,12 +42,20 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
* re-instantiated from the backing storage are re-verified. To do
* this, we use PG_checked again, but now it doesn't really mean
* "checked". Instead, now it just serves as an indicator for whether
- * the hash page is newly instantiated or not.
+ * the hash page is newly instantiated or not. If the page is new, as
+ * indicated by PG_checked=0, we clear the bitmap bits for the page's
+ * blocks since they are untrustworthy, then set PG_checked=1.
+ * Otherwise we return the bitmap bit for the requested block.
*
- * The first thread that sees PG_checked=0 must clear the corresponding
- * bitmap bits, then set PG_checked=1. This requires a spinlock. To
- * avoid having to take this spinlock in the common case of
- * PG_checked=1, we start with an opportunistic lockless read.
+ * Multiple threads may execute this code concurrently on the same page.
+ * This is safe because we use memory barriers to ensure that if a
+ * thread sees PG_checked=1, then it also sees the associated bitmap
+ * clearing to have occurred. Also, all writes and their corresponding
+ * reads are atomic, and all writes are safe to repeat in the event that
+ * multiple threads get into the PG_checked=0 section. (Clearing a
+ * bitmap bit again at worst causes a hash block to be verified
+ * redundantly. That event should be very rare, so it's not worth using
+ * a lock to avoid. Setting PG_checked again has no effect.)
*/
if (PageChecked(hpage)) {
/*
@@ -58,24 +65,17 @@ static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
smp_rmb();
return test_bit(hblock_idx, vi->hash_block_verified);
}
- spin_lock(&vi->hash_page_init_lock);
- if (PageChecked(hpage)) {
- verified = test_bit(hblock_idx, vi->hash_block_verified);
- } else {
- blocks_per_page = vi->tree_params.blocks_per_page;
- hblock_idx = round_down(hblock_idx, blocks_per_page);
- for (i = 0; i < blocks_per_page; i++)
- clear_bit(hblock_idx + i, vi->hash_block_verified);
- /*
- * A write memory barrier is needed here to give RELEASE
- * semantics to the below SetPageChecked() operation.
- */
- smp_wmb();
- SetPageChecked(hpage);
- verified = false;
- }
- spin_unlock(&vi->hash_page_init_lock);
- return verified;
+ blocks_per_page = vi->tree_params.blocks_per_page;
+ hblock_idx = round_down(hblock_idx, blocks_per_page);
+ for (i = 0; i < blocks_per_page; i++)
+ clear_bit(hblock_idx + i, vi->hash_block_verified);
+ /*
+ * A write memory barrier is needed here to give RELEASE semantics to
+ * the below SetPageChecked() operation.
+ */
+ smp_wmb();
+ SetPageChecked(hpage);
+ return false;
}
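
Taken together with the fsverity_private.h and open.c hunks, this is why the spin lock can be deleted outright: the initialization path is idempotent, clear_bit() and SetPageChecked() are atomic, and the smp_wmb()/smp_rmb() pair gives SetPageChecked() release semantics against the acquire-side PageChecked() test. Stripped of the fs/verity specifics, the idiom looks roughly like this (a minimal sketch with illustrative names, not the patch's code):

#include <asm/barrier.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/types.h>

/*
 * Publisher: initialize the shared bitmap state, then set the
 * "published" flag.  Safe to run concurrently with itself because
 * every step is atomic and repeatable.
 */
static void publish(struct page *hpage, unsigned long *bitmap,
		    unsigned long first, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		clear_bit(first + i, bitmap);	/* atomic and idempotent */
	smp_wmb();	/* order bitmap writes before the flag (RELEASE) */
	SetPageChecked(hpage);
}

/*
 * Reader: only trust the bitmap after observing the flag, with a read
 * barrier so the bitmap load cannot be reordered before the flag test
 * (ACQUIRE).
 */
static bool published_bit(struct page *hpage, unsigned long *bitmap,
			  unsigned long bit)
{
	if (!PageChecked(hpage))
		return false;	/* caller must run publish() first */
	smp_rmb();		/* pairs with smp_wmb() in publish() */
	return test_bit(bit, bitmap);
}

If two racing threads both see PG_checked=0 and run the publish side, the loser merely re-clears already-clear bits, so the worst case is one redundant hash block verification, never a false "verified" answer; that rare event is all the deleted spinlock was preventing.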