author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit:    ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree:      b2d64bc10158fdd5497876388cd68142ca374ed3 /fs/crypto
parent:    Initial commit.
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fs/crypto')
-rw-r--r--   fs/crypto/Kconfig            |   46
-rw-r--r--   fs/crypto/Makefile           |   14
-rw-r--r--   fs/crypto/bio.c              |  193
-rw-r--r--   fs/crypto/crypto.c           |  411
-rw-r--r--   fs/crypto/fname.c            |  626
-rw-r--r--   fs/crypto/fscrypt_private.h  |  665
-rw-r--r--   fs/crypto/hkdf.c             |  182
-rw-r--r--   fs/crypto/hooks.c            |  456
-rw-r--r--   fs/crypto/inline_crypt.c     |  479
-rw-r--r--   fs/crypto/keyring.c          | 1204
-rw-r--r--   fs/crypto/keysetup.c         |  806
-rw-r--r--   fs/crypto/keysetup_v1.c      |  321
-rw-r--r--   fs/crypto/policy.c           |  867
13 files changed, 6270 insertions, 0 deletions
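The patch below adds only the kernel side of fscrypt; userspace drives it through the ioctls documented in Documentation/filesystems/fscrypt.rst, which the new Kconfig help text references. For orientation, here is a minimal illustrative userspace sketch — the directory path, program, and key bytes are placeholders for illustration, not part of this commit — that adds a v2 master key to the filesystem keyring and then applies an encryption policy to an empty directory:

```c
/* Illustrative only; build with: cc -o fscrypt-setup fscrypt-setup.c */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fscrypt.h>

int main(void)
{
	/* Hypothetical empty directory on an ext4, f2fs, or ubifs filesystem. */
	const char *dir_path = "/mnt/scratch/vault";
	struct fscrypt_add_key_arg *add;
	struct fscrypt_policy_v2 policy;
	int fd;

	fd = open(dir_path, O_RDONLY | O_DIRECTORY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Add a 512-bit master key to the filesystem-level keyring.
	 * Real key material must come from a KDF or hardware token,
	 * never from a constant pattern like this placeholder.
	 */
	add = calloc(1, sizeof(*add) + FSCRYPT_MAX_KEY_SIZE);
	if (!add)
		return 1;
	add->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
	add->raw_size = FSCRYPT_MAX_KEY_SIZE;
	memset(add->raw, 0xAB, FSCRYPT_MAX_KEY_SIZE);	/* placeholder key */
	if (ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, add) != 0) {
		perror("FS_IOC_ADD_ENCRYPTION_KEY");
		return 1;
	}

	/*
	 * On return, the kernel has filled in key_spec.u.identifier, a
	 * cryptographic hash of the key; a v2 policy refers to the key by
	 * that identifier.
	 */
	memset(&policy, 0, sizeof(policy));
	policy.version = FSCRYPT_POLICY_V2;
	policy.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
	policy.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
	policy.flags = FSCRYPT_POLICY_FLAGS_PAD_32;
	memcpy(policy.master_key_identifier, add->key_spec.u.identifier,
	       FSCRYPT_KEY_IDENTIFIER_SIZE);
	if (ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy) != 0) {
		perror("FS_IOC_SET_ENCRYPTION_POLICY");
		return 1;
	}

	free(add);
	close(fd);
	return 0;
}
```

Once the policy is set, files created under the directory are transparently encrypted by the fs/crypto code added in this commit, and removing the key later (FS_IOC_REMOVE_ENCRYPTION_KEY) causes the directory to present the encoded no-key names described in fname.c below.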
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig new file mode 100644 index 0000000000..2d0c8922f6 --- /dev/null +++ b/fs/crypto/Kconfig @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: GPL-2.0-only +config FS_ENCRYPTION + bool "FS Encryption (Per-file encryption)" + select CRYPTO + select CRYPTO_HASH + select CRYPTO_SKCIPHER + select CRYPTO_LIB_SHA256 + select KEYS + help + Enable encryption of files and directories. This + feature is similar to ecryptfs, but it is more memory + efficient since it avoids caching the encrypted and + decrypted pages in the page cache. Currently Ext4, + F2FS and UBIFS make use of this feature. + +# Filesystems supporting encryption must select this if FS_ENCRYPTION. This +# allows the algorithms to be built as modules when all the filesystems are, +# whereas selecting them from FS_ENCRYPTION would force them to be built-in. +# +# Note: this option only pulls in the algorithms that filesystem encryption +# needs "by default". If userspace will use "non-default" encryption modes such +# as Adiantum encryption, then those other modes need to be explicitly enabled +# in the crypto API; see Documentation/filesystems/fscrypt.rst for details. +# +# Also note that this option only pulls in the generic implementations of the +# algorithms, not any per-architecture optimized implementations. It is +# strongly recommended to enable optimized implementations too. It is safe to +# disable these generic implementations if corresponding optimized +# implementations will always be available too; for this reason, these are soft +# dependencies ('imply' rather than 'select'). Only disable these generic +# implementations if you're sure they will never be needed, though. +config FS_ENCRYPTION_ALGS + tristate + imply CRYPTO_AES + imply CRYPTO_CBC + imply CRYPTO_CTS + imply CRYPTO_ECB + imply CRYPTO_HMAC + imply CRYPTO_SHA512 + imply CRYPTO_XTS + +config FS_ENCRYPTION_INLINE_CRYPT + bool "Enable fscrypt to use inline crypto" + depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION + help + Enable fscrypt to use inline encryption hardware if available. diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile new file mode 100644 index 0000000000..652c7180ec --- /dev/null +++ b/fs/crypto/Makefile @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o + +fscrypto-y := crypto.o \ + fname.o \ + hkdf.o \ + hooks.o \ + keyring.o \ + keysetup.o \ + keysetup_v1.o \ + policy.o + +fscrypto-$(CONFIG_BLOCK) += bio.o +fscrypto-$(CONFIG_FS_ENCRYPTION_INLINE_CRYPT) += inline_crypt.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c new file mode 100644 index 0000000000..62e1a3dd83 --- /dev/null +++ b/fs/crypto/bio.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Utility functions for file contents encryption/decryption on + * block device-based filesystems. + * + * Copyright (C) 2015, Google, Inc. + * Copyright (C) 2015, Motorola Mobility + */ + +#include <linux/pagemap.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/namei.h> +#include "fscrypt_private.h" + +/** + * fscrypt_decrypt_bio() - decrypt the contents of a bio + * @bio: the bio to decrypt + * + * Decrypt the contents of a "read" bio following successful completion of the + * underlying disk read. The bio must be reading a whole number of blocks of an + * encrypted file directly into the page cache. 
If the bio is reading the + * ciphertext into bounce pages instead of the page cache (for example, because + * the file is also compressed, so decompression is required after decryption), + * then this function isn't applicable. This function may sleep, so it must be + * called from a workqueue rather than from the bio's bi_end_io callback. + * + * Return: %true on success; %false on failure. On failure, bio->bi_status is + * also set to an error status. + */ +bool fscrypt_decrypt_bio(struct bio *bio) +{ + struct folio_iter fi; + + bio_for_each_folio_all(fi, bio) { + int err = fscrypt_decrypt_pagecache_blocks(fi.folio, fi.length, + fi.offset); + + if (err) { + bio->bi_status = errno_to_blk_status(err); + return false; + } + } + return true; +} +EXPORT_SYMBOL(fscrypt_decrypt_bio); + +static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, + pgoff_t lblk, sector_t pblk, + unsigned int len) +{ + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits); + struct bio *bio; + int ret, err = 0; + int num_pages = 0; + + /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */ + bio = bio_alloc(inode->i_sb->s_bdev, BIO_MAX_VECS, REQ_OP_WRITE, + GFP_NOFS); + + while (len) { + unsigned int blocks_this_page = min(len, blocks_per_page); + unsigned int bytes_this_page = blocks_this_page << blockbits; + + if (num_pages == 0) { + fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); + bio->bi_iter.bi_sector = + pblk << (blockbits - SECTOR_SHIFT); + } + ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0); + if (WARN_ON_ONCE(ret != bytes_this_page)) { + err = -EIO; + goto out; + } + num_pages++; + len -= blocks_this_page; + lblk += blocks_this_page; + pblk += blocks_this_page; + if (num_pages == BIO_MAX_VECS || !len || + !fscrypt_mergeable_bio(bio, inode, lblk)) { + err = submit_bio_wait(bio); + if (err) + goto out; + bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE); + num_pages = 0; + } + } +out: + bio_put(bio); + return err; +} + +/** + * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file + * @inode: the file's inode + * @lblk: the first file logical block to zero out + * @pblk: the first filesystem physical block to zero out + * @len: number of blocks to zero out + * + * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write + * ciphertext blocks which decrypt to the all-zeroes block. The blocks must be + * both logically and physically contiguous. It's also assumed that the + * filesystem only uses a single block device, ->s_bdev. + * + * Note that since each block uses a different IV, this involves writing a + * different ciphertext to each block; we can't simply reuse the same one. + * + * Return: 0 on success; -errno on failure. 
+ */ +int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, + sector_t pblk, unsigned int len) +{ + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocksize = 1 << blockbits; + const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits; + const unsigned int blocks_per_page = 1 << blocks_per_page_bits; + struct page *pages[16]; /* write up to 16 pages at a time */ + unsigned int nr_pages; + unsigned int i; + unsigned int offset; + struct bio *bio; + int ret, err; + + if (len == 0) + return 0; + + if (fscrypt_inode_uses_inline_crypto(inode)) + return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk, + len); + + BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS); + nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), + (len + blocks_per_page - 1) >> blocks_per_page_bits); + + /* + * We need at least one page for ciphertext. Allocate the first one + * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail. + * + * Any additional page allocations are allowed to fail, as they only + * help performance, and waiting on the mempool for them could deadlock. + */ + for (i = 0; i < nr_pages; i++) { + pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS : + GFP_NOWAIT | __GFP_NOWARN); + if (!pages[i]) + break; + } + nr_pages = i; + if (WARN_ON_ONCE(nr_pages <= 0)) + return -EINVAL; + + /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */ + bio = bio_alloc(inode->i_sb->s_bdev, nr_pages, REQ_OP_WRITE, GFP_NOFS); + + do { + bio->bi_iter.bi_sector = pblk << (blockbits - 9); + + i = 0; + offset = 0; + do { + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk, + ZERO_PAGE(0), pages[i], + blocksize, offset, GFP_NOFS); + if (err) + goto out; + lblk++; + pblk++; + len--; + offset += blocksize; + if (offset == PAGE_SIZE || len == 0) { + ret = bio_add_page(bio, pages[i++], offset, 0); + if (WARN_ON_ONCE(ret != offset)) { + err = -EIO; + goto out; + } + offset = 0; + } + } while (i != nr_pages && len != 0); + + err = submit_bio_wait(bio); + if (err) + goto out; + bio_reset(bio, inode->i_sb->s_bdev, REQ_OP_WRITE); + } while (len != 0); + err = 0; +out: + bio_put(bio); + for (i = 0; i < nr_pages; i++) + fscrypt_free_bounce_page(pages[i]); + return err; +} +EXPORT_SYMBOL(fscrypt_zeroout_range); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c new file mode 100644 index 0000000000..6a837e4b80 --- /dev/null +++ b/fs/crypto/crypto.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This contains encryption functions for per-file encryption. + * + * Copyright (C) 2015, Google, Inc. + * Copyright (C) 2015, Motorola Mobility + * + * Written by Michael Halcrow, 2014. + * + * Filename encryption additions + * Uday Savagaonkar, 2014 + * Encryption policy handling additions + * Ildar Muslukhov, 2014 + * Add fscrypt_pullback_bio_page() + * Jaegeuk Kim, 2015. + * + * This has not yet undergone a rigorous security audit. + * + * The usage of AES-XTS should conform to recommendations in NIST + * Special Publication 800-38E and IEEE P1619/D16. 
+ */ + +#include <linux/pagemap.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/ratelimit.h> +#include <crypto/skcipher.h> +#include "fscrypt_private.h" + +static unsigned int num_prealloc_crypto_pages = 32; + +module_param(num_prealloc_crypto_pages, uint, 0444); +MODULE_PARM_DESC(num_prealloc_crypto_pages, + "Number of crypto pages to preallocate"); + +static mempool_t *fscrypt_bounce_page_pool = NULL; + +static struct workqueue_struct *fscrypt_read_workqueue; +static DEFINE_MUTEX(fscrypt_init_mutex); + +struct kmem_cache *fscrypt_info_cachep; + +void fscrypt_enqueue_decrypt_work(struct work_struct *work) +{ + queue_work(fscrypt_read_workqueue, work); +} +EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work); + +struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags) +{ + return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags); +} + +/** + * fscrypt_free_bounce_page() - free a ciphertext bounce page + * @bounce_page: the bounce page to free, or NULL + * + * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(), + * or by fscrypt_alloc_bounce_page() directly. + */ +void fscrypt_free_bounce_page(struct page *bounce_page) +{ + if (!bounce_page) + return; + set_page_private(bounce_page, (unsigned long)NULL); + ClearPagePrivate(bounce_page); + mempool_free(bounce_page, fscrypt_bounce_page_pool); +} +EXPORT_SYMBOL(fscrypt_free_bounce_page); + +/* + * Generate the IV for the given logical block number within the given file. + * For filenames encryption, lblk_num == 0. + * + * Keep this in sync with fscrypt_limit_io_blocks(). fscrypt_limit_io_blocks() + * needs to know about any IV generation methods where the low bits of IV don't + * simply contain the lblk_num (e.g., IV_INO_LBLK_32). 
+ */ +void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, + const struct fscrypt_info *ci) +{ + u8 flags = fscrypt_policy_flags(&ci->ci_policy); + + memset(iv, 0, ci->ci_mode->ivsize); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { + WARN_ON_ONCE(lblk_num > U32_MAX); + WARN_ON_ONCE(ci->ci_inode->i_ino > U32_MAX); + lblk_num |= (u64)ci->ci_inode->i_ino << 32; + } else if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { + WARN_ON_ONCE(lblk_num > U32_MAX); + lblk_num = (u32)(ci->ci_hashed_ino + lblk_num); + } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { + memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE); + } + iv->lblk_num = cpu_to_le64(lblk_num); +} + +/* Encrypt or decrypt a single filesystem block of file contents */ +int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, + u64 lblk_num, struct page *src_page, + struct page *dest_page, unsigned int len, + unsigned int offs, gfp_t gfp_flags) +{ + union fscrypt_iv iv; + struct skcipher_request *req = NULL; + DECLARE_CRYPTO_WAIT(wait); + struct scatterlist dst, src; + struct fscrypt_info *ci = inode->i_crypt_info; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; + int res = 0; + + if (WARN_ON_ONCE(len <= 0)) + return -EINVAL; + if (WARN_ON_ONCE(len % FSCRYPT_CONTENTS_ALIGNMENT != 0)) + return -EINVAL; + + fscrypt_generate_iv(&iv, lblk_num, ci); + + req = skcipher_request_alloc(tfm, gfp_flags); + if (!req) + return -ENOMEM; + + skcipher_request_set_callback( + req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &wait); + + sg_init_table(&dst, 1); + sg_set_page(&dst, dest_page, len, offs); + sg_init_table(&src, 1); + sg_set_page(&src, src_page, len, offs); + skcipher_request_set_crypt(req, &src, &dst, len, &iv); + if (rw == FS_DECRYPT) + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); + else + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + skcipher_request_free(req); + if (res) { + fscrypt_err(inode, "%scryption failed for block %llu: %d", + (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res); + return res; + } + return 0; +} + +/** + * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a + * pagecache page + * @page: The locked pagecache page containing the block(s) to encrypt + * @len: Total size of the block(s) to encrypt. Must be a nonzero + * multiple of the filesystem's block size. + * @offs: Byte offset within @page of the first block to encrypt. Must be + * a multiple of the filesystem's block size. + * @gfp_flags: Memory allocation flags. See details below. + * + * A new bounce page is allocated, and the specified block(s) are encrypted into + * it. In the bounce page, the ciphertext block(s) will be located at the same + * offsets at which the plaintext block(s) were located in the source page; any + * other parts of the bounce page will be left uninitialized. However, normally + * blocksize == PAGE_SIZE and the whole page is encrypted at once. + * + * This is for use by the filesystem's ->writepages() method. + * + * The bounce page allocation is mempool-backed, so it will always succeed when + * @gfp_flags includes __GFP_DIRECT_RECLAIM, e.g. when it's GFP_NOFS. However, + * only the first page of each bio can be allocated this way. To prevent + * deadlocks, for any additional pages a mask like GFP_NOWAIT must be used. 
+ * + * Return: the new encrypted bounce page on success; an ERR_PTR() on failure + */ +struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags) + +{ + const struct inode *inode = page->mapping->host; + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocksize = 1 << blockbits; + struct page *ciphertext_page; + u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) + + (offs >> blockbits); + unsigned int i; + int err; + + if (WARN_ON_ONCE(!PageLocked(page))) + return ERR_PTR(-EINVAL); + + if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize))) + return ERR_PTR(-EINVAL); + + ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags); + if (!ciphertext_page) + return ERR_PTR(-ENOMEM); + + for (i = offs; i < offs + len; i += blocksize, lblk_num++) { + err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, + page, ciphertext_page, + blocksize, i, gfp_flags); + if (err) { + fscrypt_free_bounce_page(ciphertext_page); + return ERR_PTR(err); + } + } + SetPagePrivate(ciphertext_page); + set_page_private(ciphertext_page, (unsigned long)page); + return ciphertext_page; +} +EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks); + +/** + * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place + * @inode: The inode to which this block belongs + * @page: The page containing the block to encrypt + * @len: Size of block to encrypt. This must be a multiple of + * FSCRYPT_CONTENTS_ALIGNMENT. + * @offs: Byte offset within @page at which the block to encrypt begins + * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based + * number of the block within the file + * @gfp_flags: Memory allocation flags + * + * Encrypt a possibly-compressed filesystem block that is located in an + * arbitrary page, not necessarily in the original pagecache page. The @inode + * and @lblk_num must be specified, as they can't be determined from @page. + * + * Return: 0 on success; -errno on failure + */ +int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num, gfp_t gfp_flags) +{ + return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page, + len, offs, gfp_flags); +} +EXPORT_SYMBOL(fscrypt_encrypt_block_inplace); + +/** + * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a + * pagecache folio + * @folio: The locked pagecache folio containing the block(s) to decrypt + * @len: Total size of the block(s) to decrypt. Must be a nonzero + * multiple of the filesystem's block size. + * @offs: Byte offset within @folio of the first block to decrypt. Must be + * a multiple of the filesystem's block size. + * + * The specified block(s) are decrypted in-place within the pagecache folio, + * which must still be locked and not uptodate. + * + * This is for use by the filesystem's ->readahead() method. 
+ * + * Return: 0 on success; -errno on failure + */ +int fscrypt_decrypt_pagecache_blocks(struct folio *folio, size_t len, + size_t offs) +{ + const struct inode *inode = folio->mapping->host; + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocksize = 1 << blockbits; + u64 lblk_num = ((u64)folio->index << (PAGE_SHIFT - blockbits)) + + (offs >> blockbits); + size_t i; + int err; + + if (WARN_ON_ONCE(!folio_test_locked(folio))) + return -EINVAL; + + if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize))) + return -EINVAL; + + for (i = offs; i < offs + len; i += blocksize, lblk_num++) { + struct page *page = folio_page(folio, i >> PAGE_SHIFT); + + err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, + page, blocksize, i & ~PAGE_MASK, + GFP_NOFS); + if (err) + return err; + } + return 0; +} +EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks); + +/** + * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place + * @inode: The inode to which this block belongs + * @page: The page containing the block to decrypt + * @len: Size of block to decrypt. This must be a multiple of + * FSCRYPT_CONTENTS_ALIGNMENT. + * @offs: Byte offset within @page at which the block to decrypt begins + * @lblk_num: Filesystem logical block number of the block, i.e. the 0-based + * number of the block within the file + * + * Decrypt a possibly-compressed filesystem block that is located in an + * arbitrary page, not necessarily in the original pagecache page. The @inode + * and @lblk_num must be specified, as they can't be determined from @page. + * + * Return: 0 on success; -errno on failure + */ +int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num) +{ + return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page, + len, offs, GFP_NOFS); +} +EXPORT_SYMBOL(fscrypt_decrypt_block_inplace); + +/** + * fscrypt_initialize() - allocate major buffers for fs encryption. + * @sb: the filesystem superblock + * + * We only call this when we start accessing encrypted files, since it + * results in memory getting allocated that wouldn't otherwise be used. + * + * Return: 0 on success; -errno on failure + */ +int fscrypt_initialize(struct super_block *sb) +{ + int err = 0; + mempool_t *pool; + + /* pairs with smp_store_release() below */ + if (likely(smp_load_acquire(&fscrypt_bounce_page_pool))) + return 0; + + /* No need to allocate a bounce page pool if this FS won't use it. */ + if (sb->s_cop->flags & FS_CFLG_OWN_PAGES) + return 0; + + mutex_lock(&fscrypt_init_mutex); + if (fscrypt_bounce_page_pool) + goto out_unlock; + + err = -ENOMEM; + pool = mempool_create_page_pool(num_prealloc_crypto_pages, 0); + if (!pool) + goto out_unlock; + /* pairs with smp_load_acquire() above */ + smp_store_release(&fscrypt_bounce_page_pool, pool); + err = 0; +out_unlock: + mutex_unlock(&fscrypt_init_mutex); + return err; +} + +void fscrypt_msg(const struct inode *inode, const char *level, + const char *fmt, ...) 
+{ + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + struct va_format vaf; + va_list args; + + if (!__ratelimit(&rs)) + return; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + if (inode && inode->i_ino) + printk("%sfscrypt (%s, inode %lu): %pV\n", + level, inode->i_sb->s_id, inode->i_ino, &vaf); + else if (inode) + printk("%sfscrypt (%s): %pV\n", level, inode->i_sb->s_id, &vaf); + else + printk("%sfscrypt: %pV\n", level, &vaf); + va_end(args); +} + +/** + * fscrypt_init() - Set up for fs encryption. + * + * Return: 0 on success; -errno on failure + */ +static int __init fscrypt_init(void) +{ + int err = -ENOMEM; + + /* + * Use an unbound workqueue to allow bios to be decrypted in parallel + * even when they happen to complete on the same CPU. This sacrifices + * locality, but it's worthwhile since decryption is CPU-intensive. + * + * Also use a high-priority workqueue to prioritize decryption work, + * which blocks reads from completing, over regular application tasks. + */ + fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue", + WQ_UNBOUND | WQ_HIGHPRI, + num_online_cpus()); + if (!fscrypt_read_workqueue) + goto fail; + + fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT); + if (!fscrypt_info_cachep) + goto fail_free_queue; + + err = fscrypt_init_keyring(); + if (err) + goto fail_free_info; + + return 0; + +fail_free_info: + kmem_cache_destroy(fscrypt_info_cachep); +fail_free_queue: + destroy_workqueue(fscrypt_read_workqueue); +fail: + return err; +} +late_initcall(fscrypt_init) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c new file mode 100644 index 0000000000..6eae3f12ad --- /dev/null +++ b/fs/crypto/fname.c @@ -0,0 +1,626 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This contains functions for filename crypto management + * + * Copyright (C) 2015, Google, Inc. + * Copyright (C) 2015, Motorola Mobility + * + * Written by Uday Savagaonkar, 2014. + * Modified by Jaegeuk Kim, 2015. + * + * This has not yet undergone a rigorous security audit. + */ + +#include <linux/namei.h> +#include <linux/scatterlist.h> +#include <crypto/hash.h> +#include <crypto/sha2.h> +#include <crypto/skcipher.h> +#include "fscrypt_private.h" + +/* + * The minimum message length (input and output length), in bytes, for all + * filenames encryption modes. Filenames shorter than this will be zero-padded + * before being encrypted. + */ +#define FSCRYPT_FNAME_MIN_MSG_LEN 16 + +/* + * struct fscrypt_nokey_name - identifier for directory entry when key is absent + * + * When userspace lists an encrypted directory without access to the key, the + * filesystem must present a unique "no-key name" for each filename that allows + * it to find the directory entry again if requested. Naively, that would just + * mean using the ciphertext filenames. However, since the ciphertext filenames + * can contain illegal characters ('\0' and '/'), they must be encoded in some + * way. We use base64url. But that can cause names to exceed NAME_MAX (255 + * bytes), so we also need to use a strong hash to abbreviate long names. + * + * The filesystem may also need another kind of hash, the "dirhash", to quickly + * find the directory entry. Since filesystems normally compute the dirhash + * over the on-disk filename (i.e. the ciphertext), it's not computable from + * no-key names that abbreviate the ciphertext using the strong hash to fit in + * NAME_MAX. 
It's also not computable if it's a keyed hash taken over the + * plaintext (but it may still be available in the on-disk directory entry); + * casefolded directories use this type of dirhash. At least in these cases, + * each no-key name must include the name's dirhash too. + * + * To meet all these requirements, we base64url-encode the following + * variable-length structure. It contains the dirhash, or 0's if the filesystem + * didn't provide one; up to 149 bytes of the ciphertext name; and for + * ciphertexts longer than 149 bytes, also the SHA-256 of the remaining bytes. + * + * This ensures that each no-key name contains everything needed to find the + * directory entry again, contains only legal characters, doesn't exceed + * NAME_MAX, is unambiguous unless there's a SHA-256 collision, and that we only + * take the performance hit of SHA-256 on very long filenames (which are rare). + */ +struct fscrypt_nokey_name { + u32 dirhash[2]; + u8 bytes[149]; + u8 sha256[SHA256_DIGEST_SIZE]; +}; /* 189 bytes => 252 bytes base64url-encoded, which is <= NAME_MAX (255) */ + +/* + * Decoded size of max-size no-key name, i.e. a name that was abbreviated using + * the strong hash and thus includes the 'sha256' field. This isn't simply + * sizeof(struct fscrypt_nokey_name), as the padding at the end isn't included. + */ +#define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256) + +/* Encoded size of max-size no-key name */ +#define FSCRYPT_NOKEY_NAME_MAX_ENCODED \ + FSCRYPT_BASE64URL_CHARS(FSCRYPT_NOKEY_NAME_MAX) + +static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) +{ + if (str->len == 1 && str->name[0] == '.') + return true; + + if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') + return true; + + return false; +} + +/** + * fscrypt_fname_encrypt() - encrypt a filename + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets). Key must already be + * set up. + * @iname: the filename to encrypt + * @out: (output) the encrypted filename + * @olen: size of the encrypted filename. It must be at least @iname->len. + * Any extra space is filled with NUL padding before encryption. + * + * Return: 0 on success, -errno on failure + */ +int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen) +{ + struct skcipher_request *req = NULL; + DECLARE_CRYPTO_WAIT(wait); + const struct fscrypt_info *ci = inode->i_crypt_info; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; + union fscrypt_iv iv; + struct scatterlist sg; + int res; + + /* + * Copy the filename to the output buffer for encrypting in-place and + * pad it with the needed number of NUL bytes. 
+ */ + if (WARN_ON_ONCE(olen < iname->len)) + return -ENOBUFS; + memcpy(out, iname->name, iname->len); + memset(out + iname->len, 0, olen - iname->len); + + /* Initialize the IV */ + fscrypt_generate_iv(&iv, 0, ci); + + /* Set up the encryption request */ + req = skcipher_request_alloc(tfm, GFP_NOFS); + if (!req) + return -ENOMEM; + skcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &wait); + sg_init_one(&sg, out, olen); + skcipher_request_set_crypt(req, &sg, &sg, olen, &iv); + + /* Do the encryption */ + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); + skcipher_request_free(req); + if (res < 0) { + fscrypt_err(inode, "Filename encryption failed: %d", res); + return res; + } + + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_fname_encrypt); + +/** + * fname_decrypt() - decrypt a filename + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @iname: the encrypted filename to decrypt + * @oname: (output) the decrypted filename. The caller must have allocated + * enough space for this, e.g. using fscrypt_fname_alloc_buffer(). + * + * Return: 0 on success, -errno on failure + */ +static int fname_decrypt(const struct inode *inode, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) +{ + struct skcipher_request *req = NULL; + DECLARE_CRYPTO_WAIT(wait); + struct scatterlist src_sg, dst_sg; + const struct fscrypt_info *ci = inode->i_crypt_info; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; + union fscrypt_iv iv; + int res; + + /* Allocate request */ + req = skcipher_request_alloc(tfm, GFP_NOFS); + if (!req) + return -ENOMEM; + skcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &wait); + + /* Initialize IV */ + fscrypt_generate_iv(&iv, 0, ci); + + /* Create decryption request */ + sg_init_one(&src_sg, iname->name, iname->len); + sg_init_one(&dst_sg, oname->name, oname->len); + skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv); + res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait); + skcipher_request_free(req); + if (res < 0) { + fscrypt_err(inode, "Filename decryption failed: %d", res); + return res; + } + + oname->len = strnlen(oname->name, iname->len); + return 0; +} + +static const char base64url_table[65] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_"; + +#define FSCRYPT_BASE64URL_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3) + +/** + * fscrypt_base64url_encode() - base64url-encode some binary data + * @src: the binary data to encode + * @srclen: the length of @src in bytes + * @dst: (output) the base64url-encoded string. Not NUL-terminated. + * + * Encodes data using base64url encoding, i.e. the "Base 64 Encoding with URL + * and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't used, + * as it's unneeded and not required by the RFC. base64url is used instead of + * base64 to avoid the '/' character, which isn't allowed in filenames. + * + * Return: the length of the resulting base64url-encoded string in bytes. + * This will be equal to FSCRYPT_BASE64URL_CHARS(srclen). 
+ */ +static int fscrypt_base64url_encode(const u8 *src, int srclen, char *dst) +{ + u32 ac = 0; + int bits = 0; + int i; + char *cp = dst; + + for (i = 0; i < srclen; i++) { + ac = (ac << 8) | src[i]; + bits += 8; + do { + bits -= 6; + *cp++ = base64url_table[(ac >> bits) & 0x3f]; + } while (bits >= 6); + } + if (bits) + *cp++ = base64url_table[(ac << (6 - bits)) & 0x3f]; + return cp - dst; +} + +/** + * fscrypt_base64url_decode() - base64url-decode a string + * @src: the string to decode. Doesn't need to be NUL-terminated. + * @srclen: the length of @src in bytes + * @dst: (output) the decoded binary data + * + * Decodes a string using base64url encoding, i.e. the "Base 64 Encoding with + * URL and Filename Safe Alphabet" specified by RFC 4648. '='-padding isn't + * accepted, nor are non-encoding characters such as whitespace. + * + * This implementation hasn't been optimized for performance. + * + * Return: the length of the resulting decoded binary data in bytes, + * or -1 if the string isn't a valid base64url string. + */ +static int fscrypt_base64url_decode(const char *src, int srclen, u8 *dst) +{ + u32 ac = 0; + int bits = 0; + int i; + u8 *bp = dst; + + for (i = 0; i < srclen; i++) { + const char *p = strchr(base64url_table, src[i]); + + if (p == NULL || src[i] == 0) + return -1; + ac = (ac << 6) | (p - base64url_table); + bits += 6; + if (bits >= 8) { + bits -= 8; + *bp++ = (u8)(ac >> bits); + } + } + if (ac & ((1 << bits) - 1)) + return -1; + return bp - dst; +} + +bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, + u32 orig_len, u32 max_len, + u32 *encrypted_len_ret) +{ + int padding = 4 << (fscrypt_policy_flags(policy) & + FSCRYPT_POLICY_FLAGS_PAD_MASK); + u32 encrypted_len; + + if (orig_len > max_len) + return false; + encrypted_len = max_t(u32, orig_len, FSCRYPT_FNAME_MIN_MSG_LEN); + encrypted_len = round_up(encrypted_len, padding); + *encrypted_len_ret = min(encrypted_len, max_len); + return true; +} + +/** + * fscrypt_fname_encrypted_size() - calculate length of encrypted filename + * @inode: parent inode of dentry name being encrypted. Key must + * already be set up. + * @orig_len: length of the original filename + * @max_len: maximum length to return + * @encrypted_len_ret: where calculated length should be returned (on success) + * + * Filenames that are shorter than the maximum length may have their lengths + * increased slightly by encryption, due to padding that is applied. + * + * Return: false if the orig_len is greater than max_len. Otherwise, true and + * fill out encrypted_len_ret with the length (up to max_len). + */ +bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, + u32 max_len, u32 *encrypted_len_ret) +{ + return __fscrypt_fname_encrypted_size(&inode->i_crypt_info->ci_policy, + orig_len, max_len, + encrypted_len_ret); +} +EXPORT_SYMBOL_GPL(fscrypt_fname_encrypted_size); + +/** + * fscrypt_fname_alloc_buffer() - allocate a buffer for presented filenames + * @max_encrypted_len: maximum length of encrypted filenames the buffer will be + * used to present + * @crypto_str: (output) buffer to allocate + * + * Allocate a buffer that is large enough to hold any decrypted or encoded + * filename (null-terminated), for the given maximum encrypted filename length. 
+ * + * Return: 0 on success, -errno on failure + */ +int fscrypt_fname_alloc_buffer(u32 max_encrypted_len, + struct fscrypt_str *crypto_str) +{ + u32 max_presented_len = max_t(u32, FSCRYPT_NOKEY_NAME_MAX_ENCODED, + max_encrypted_len); + + crypto_str->name = kmalloc(max_presented_len + 1, GFP_NOFS); + if (!crypto_str->name) + return -ENOMEM; + crypto_str->len = max_presented_len; + return 0; +} +EXPORT_SYMBOL(fscrypt_fname_alloc_buffer); + +/** + * fscrypt_fname_free_buffer() - free a buffer for presented filenames + * @crypto_str: the buffer to free + * + * Free a buffer that was allocated by fscrypt_fname_alloc_buffer(). + */ +void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) +{ + if (!crypto_str) + return; + kfree(crypto_str->name); + crypto_str->name = NULL; +} +EXPORT_SYMBOL(fscrypt_fname_free_buffer); + +/** + * fscrypt_fname_disk_to_usr() - convert an encrypted filename to + * user-presentable form + * @inode: inode of the parent directory (for regular filenames) + * or of the symlink (for symlink targets) + * @hash: first part of the name's dirhash, if applicable. This only needs to + * be provided if the filename is located in an indexed directory whose + * encryption key may be unavailable. Not needed for symlink targets. + * @minor_hash: second part of the name's dirhash, if applicable + * @iname: encrypted filename to convert. May also be "." or "..", which + * aren't actually encrypted. + * @oname: output buffer for the user-presentable filename. The caller must + * have allocated enough space for this, e.g. using + * fscrypt_fname_alloc_buffer(). + * + * If the key is available, we'll decrypt the disk name. Otherwise, we'll + * encode it for presentation in fscrypt_nokey_name format. + * See struct fscrypt_nokey_name for details. + * + * Return: 0 on success, -errno on failure + */ +int fscrypt_fname_disk_to_usr(const struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) +{ + const struct qstr qname = FSTR_TO_QSTR(iname); + struct fscrypt_nokey_name nokey_name; + u32 size; /* size of the unencoded no-key name */ + + if (fscrypt_is_dot_dotdot(&qname)) { + oname->name[0] = '.'; + oname->name[iname->len - 1] = '.'; + oname->len = iname->len; + return 0; + } + + if (iname->len < FSCRYPT_FNAME_MIN_MSG_LEN) + return -EUCLEAN; + + if (fscrypt_has_encryption_key(inode)) + return fname_decrypt(inode, iname, oname); + + /* + * Sanity check that struct fscrypt_nokey_name doesn't have padding + * between fields and that its encoded size never exceeds NAME_MAX. + */ + BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, dirhash) != + offsetof(struct fscrypt_nokey_name, bytes)); + BUILD_BUG_ON(offsetofend(struct fscrypt_nokey_name, bytes) != + offsetof(struct fscrypt_nokey_name, sha256)); + BUILD_BUG_ON(FSCRYPT_NOKEY_NAME_MAX_ENCODED > NAME_MAX); + + nokey_name.dirhash[0] = hash; + nokey_name.dirhash[1] = minor_hash; + + if (iname->len <= sizeof(nokey_name.bytes)) { + memcpy(nokey_name.bytes, iname->name, iname->len); + size = offsetof(struct fscrypt_nokey_name, bytes[iname->len]); + } else { + memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes)); + /* Compute strong hash of remaining part of name. 
*/ + sha256(&iname->name[sizeof(nokey_name.bytes)], + iname->len - sizeof(nokey_name.bytes), + nokey_name.sha256); + size = FSCRYPT_NOKEY_NAME_MAX; + } + oname->len = fscrypt_base64url_encode((const u8 *)&nokey_name, size, + oname->name); + return 0; +} +EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); + +/** + * fscrypt_setup_filename() - prepare to search a possibly encrypted directory + * @dir: the directory that will be searched + * @iname: the user-provided filename being searched for + * @lookup: 1 if we're allowed to proceed without the key because it's + * ->lookup() or we're finding the dir_entry for deletion; 0 if we cannot + * proceed without the key because we're going to create the dir_entry. + * @fname: the filename information to be filled in + * + * Given a user-provided filename @iname, this function sets @fname->disk_name + * to the name that would be stored in the on-disk directory entry, if possible. + * If the directory is unencrypted this is simply @iname. Else, if we have the + * directory's encryption key, then @iname is the plaintext, so we encrypt it to + * get the disk_name. + * + * Else, for keyless @lookup operations, @iname should be a no-key name, so we + * decode it to get the struct fscrypt_nokey_name. Non-@lookup operations will + * be impossible in this case, so we fail them with ENOKEY. + * + * If successful, fscrypt_free_filename() must be called later to clean up. + * + * Return: 0 on success, -errno on failure + */ +int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, + int lookup, struct fscrypt_name *fname) +{ + struct fscrypt_nokey_name *nokey_name; + int ret; + + memset(fname, 0, sizeof(struct fscrypt_name)); + fname->usr_fname = iname; + + if (!IS_ENCRYPTED(dir) || fscrypt_is_dot_dotdot(iname)) { + fname->disk_name.name = (unsigned char *)iname->name; + fname->disk_name.len = iname->len; + return 0; + } + ret = fscrypt_get_encryption_info(dir, lookup); + if (ret) + return ret; + + if (fscrypt_has_encryption_key(dir)) { + if (!fscrypt_fname_encrypted_size(dir, iname->len, NAME_MAX, + &fname->crypto_buf.len)) + return -ENAMETOOLONG; + fname->crypto_buf.name = kmalloc(fname->crypto_buf.len, + GFP_NOFS); + if (!fname->crypto_buf.name) + return -ENOMEM; + + ret = fscrypt_fname_encrypt(dir, iname, fname->crypto_buf.name, + fname->crypto_buf.len); + if (ret) + goto errout; + fname->disk_name.name = fname->crypto_buf.name; + fname->disk_name.len = fname->crypto_buf.len; + return 0; + } + if (!lookup) + return -ENOKEY; + fname->is_nokey_name = true; + + /* + * We don't have the key and we are doing a lookup; decode the + * user-supplied name + */ + + if (iname->len > FSCRYPT_NOKEY_NAME_MAX_ENCODED) + return -ENOENT; + + fname->crypto_buf.name = kmalloc(FSCRYPT_NOKEY_NAME_MAX, GFP_KERNEL); + if (fname->crypto_buf.name == NULL) + return -ENOMEM; + + ret = fscrypt_base64url_decode(iname->name, iname->len, + fname->crypto_buf.name); + if (ret < (int)offsetof(struct fscrypt_nokey_name, bytes[1]) || + (ret > offsetof(struct fscrypt_nokey_name, sha256) && + ret != FSCRYPT_NOKEY_NAME_MAX)) { + ret = -ENOENT; + goto errout; + } + fname->crypto_buf.len = ret; + + nokey_name = (void *)fname->crypto_buf.name; + fname->hash = nokey_name->dirhash[0]; + fname->minor_hash = nokey_name->dirhash[1]; + if (ret != FSCRYPT_NOKEY_NAME_MAX) { + /* The full ciphertext filename is available. 
*/ + fname->disk_name.name = nokey_name->bytes; + fname->disk_name.len = + ret - offsetof(struct fscrypt_nokey_name, bytes); + } + return 0; + +errout: + kfree(fname->crypto_buf.name); + return ret; +} +EXPORT_SYMBOL(fscrypt_setup_filename); + +/** + * fscrypt_match_name() - test whether the given name matches a directory entry + * @fname: the name being searched for + * @de_name: the name from the directory entry + * @de_name_len: the length of @de_name in bytes + * + * Normally @fname->disk_name will be set, and in that case we simply compare + * that to the name stored in the directory entry. The only exception is that + * if we don't have the key for an encrypted directory and the name we're + * looking for is very long, then we won't have the full disk_name and instead + * we'll need to match against a fscrypt_nokey_name that includes a strong hash. + * + * Return: %true if the name matches, otherwise %false. + */ +bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len) +{ + const struct fscrypt_nokey_name *nokey_name = + (const void *)fname->crypto_buf.name; + u8 digest[SHA256_DIGEST_SIZE]; + + if (likely(fname->disk_name.name)) { + if (de_name_len != fname->disk_name.len) + return false; + return !memcmp(de_name, fname->disk_name.name, de_name_len); + } + if (de_name_len <= sizeof(nokey_name->bytes)) + return false; + if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes))) + return false; + sha256(&de_name[sizeof(nokey_name->bytes)], + de_name_len - sizeof(nokey_name->bytes), digest); + return !memcmp(digest, nokey_name->sha256, sizeof(digest)); +} +EXPORT_SYMBOL_GPL(fscrypt_match_name); + +/** + * fscrypt_fname_siphash() - calculate the SipHash of a filename + * @dir: the parent directory + * @name: the filename to calculate the SipHash of + * + * Given a plaintext filename @name and a directory @dir which uses SipHash as + * its dirhash method and has had its fscrypt key set up, this function + * calculates the SipHash of that name using the directory's secret dirhash key. + * + * Return: the SipHash of @name using the hash key of @dir + */ +u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name) +{ + const struct fscrypt_info *ci = dir->i_crypt_info; + + WARN_ON_ONCE(!ci->ci_dirhash_key_initialized); + + return siphash(name->name, name->len, &ci->ci_dirhash_key); +} +EXPORT_SYMBOL_GPL(fscrypt_fname_siphash); + +/* + * Validate dentries in encrypted directories to make sure we aren't potentially + * caching stale dentries after a key has been added. + */ +int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct dentry *dir; + int err; + int valid; + + /* + * Plaintext names are always valid, since fscrypt doesn't support + * reverting to no-key names without evicting the directory's inode + * -- which implies eviction of the dentries in the directory. + */ + if (!(dentry->d_flags & DCACHE_NOKEY_NAME)) + return 1; + + /* + * No-key name; valid if the directory's key is still unavailable. + * + * Although fscrypt forbids rename() on no-key names, we still must use + * dget_parent() here rather than use ->d_parent directly. That's + * because a corrupted fs image may contain directory hard links, which + * the VFS handles by moving the directory's dentry tree in the dcache + * each time ->lookup() finds the directory and it already has a dentry + * elsewhere. Thus ->d_parent can be changing, and we must safely grab + * a reference to some ->d_parent to prevent it from being freed. 
+ */ + + if (flags & LOOKUP_RCU) + return -ECHILD; + + dir = dget_parent(dentry); + /* + * Pass allow_unsupported=true, so that files with an unsupported + * encryption policy can be deleted. + */ + err = fscrypt_get_encryption_info(d_inode(dir), true); + valid = !fscrypt_has_encryption_key(d_inode(dir)); + dput(dir); + + if (err < 0) + return err; + + return valid; +} +EXPORT_SYMBOL_GPL(fscrypt_d_revalidate); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h new file mode 100644 index 0000000000..2d63da4863 --- /dev/null +++ b/fs/crypto/fscrypt_private.h @@ -0,0 +1,665 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * fscrypt_private.h + * + * Copyright (C) 2015, Google, Inc. + * + * Originally written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar. + * Heavily modified since then. + */ + +#ifndef _FSCRYPT_PRIVATE_H +#define _FSCRYPT_PRIVATE_H + +#include <linux/fscrypt.h> +#include <linux/siphash.h> +#include <crypto/hash.h> +#include <linux/blk-crypto.h> + +#define CONST_STRLEN(str) (sizeof(str) - 1) + +#define FSCRYPT_FILE_NONCE_SIZE 16 + +/* + * Minimum size of an fscrypt master key. Note: a longer key will be required + * if ciphers with a 256-bit security strength are used. This is just the + * absolute minimum, which applies when only 128-bit encryption is used. + */ +#define FSCRYPT_MIN_KEY_SIZE 16 + +#define FSCRYPT_CONTEXT_V1 1 +#define FSCRYPT_CONTEXT_V2 2 + +/* Keep this in sync with include/uapi/linux/fscrypt.h */ +#define FSCRYPT_MODE_MAX FSCRYPT_MODE_AES_256_HCTR2 + +struct fscrypt_context_v1 { + u8 version; /* FSCRYPT_CONTEXT_V1 */ + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; +}; + +struct fscrypt_context_v2 { + u8 version; /* FSCRYPT_CONTEXT_V2 */ + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 __reserved[4]; + u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; +}; + +/* + * fscrypt_context - the encryption context of an inode + * + * This is the on-disk equivalent of an fscrypt_policy, stored alongside each + * encrypted file usually in a hidden extended attribute. It contains the + * fields from the fscrypt_policy, in order to identify the encryption algorithm + * and key with which the file is encrypted. It also contains a nonce that was + * randomly generated by fscrypt itself; this is used as KDF input or as a tweak + * to cause different files to be encrypted differently. + */ +union fscrypt_context { + u8 version; + struct fscrypt_context_v1 v1; + struct fscrypt_context_v2 v2; +}; + +/* + * Return the size expected for the given fscrypt_context based on its version + * number, or 0 if the context version is unrecognized. 
+ */ +static inline int fscrypt_context_size(const union fscrypt_context *ctx) +{ + switch (ctx->version) { + case FSCRYPT_CONTEXT_V1: + BUILD_BUG_ON(sizeof(ctx->v1) != 28); + return sizeof(ctx->v1); + case FSCRYPT_CONTEXT_V2: + BUILD_BUG_ON(sizeof(ctx->v2) != 40); + return sizeof(ctx->v2); + } + return 0; +} + +/* Check whether an fscrypt_context has a recognized version number and size */ +static inline bool fscrypt_context_is_valid(const union fscrypt_context *ctx, + int ctx_size) +{ + return ctx_size >= 1 && ctx_size == fscrypt_context_size(ctx); +} + +/* Retrieve the context's nonce, assuming the context was already validated */ +static inline const u8 *fscrypt_context_nonce(const union fscrypt_context *ctx) +{ + switch (ctx->version) { + case FSCRYPT_CONTEXT_V1: + return ctx->v1.nonce; + case FSCRYPT_CONTEXT_V2: + return ctx->v2.nonce; + } + WARN_ON_ONCE(1); + return NULL; +} + +union fscrypt_policy { + u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; +}; + +/* + * Return the size expected for the given fscrypt_policy based on its version + * number, or 0 if the policy version is unrecognized. + */ +static inline int fscrypt_policy_size(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return sizeof(policy->v1); + case FSCRYPT_POLICY_V2: + return sizeof(policy->v2); + } + return 0; +} + +/* Return the contents encryption mode of a valid encryption policy */ +static inline u8 +fscrypt_policy_contents_mode(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return policy->v1.contents_encryption_mode; + case FSCRYPT_POLICY_V2: + return policy->v2.contents_encryption_mode; + } + BUG(); +} + +/* Return the filenames encryption mode of a valid encryption policy */ +static inline u8 +fscrypt_policy_fnames_mode(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return policy->v1.filenames_encryption_mode; + case FSCRYPT_POLICY_V2: + return policy->v2.filenames_encryption_mode; + } + BUG(); +} + +/* Return the flags (FSCRYPT_POLICY_FLAG*) of a valid encryption policy */ +static inline u8 +fscrypt_policy_flags(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return policy->v1.flags; + case FSCRYPT_POLICY_V2: + return policy->v2.flags; + } + BUG(); +} + +/* + * For encrypted symlinks, the ciphertext length is stored at the beginning + * of the string in little-endian format. + */ +struct fscrypt_symlink_data { + __le16 len; + char encrypted_path[]; +} __packed; + +/** + * struct fscrypt_prepared_key - a key prepared for actual encryption/decryption + * @tfm: crypto API transform object + * @blk_key: key for blk-crypto + * + * Normally only one of the fields will be non-NULL. + */ +struct fscrypt_prepared_key { + struct crypto_skcipher *tfm; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + struct blk_crypto_key *blk_key; +#endif +}; + +/* + * fscrypt_info - the "encryption key" for an inode + * + * When an encrypted file's key is made available, an instance of this struct is + * allocated and stored in ->i_crypt_info. Once created, it remains until the + * inode is evicted. 
+ */ +struct fscrypt_info { + + /* The key in a form prepared for actual encryption/decryption */ + struct fscrypt_prepared_key ci_enc_key; + + /* True if ci_enc_key should be freed when this fscrypt_info is freed */ + bool ci_owns_key; + +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + /* + * True if this inode will use inline encryption (blk-crypto) instead of + * the traditional filesystem-layer encryption. + */ + bool ci_inlinecrypt; +#endif + + /* + * Encryption mode used for this inode. It corresponds to either the + * contents or filenames encryption mode, depending on the inode type. + */ + struct fscrypt_mode *ci_mode; + + /* Back-pointer to the inode */ + struct inode *ci_inode; + + /* + * The master key with which this inode was unlocked (decrypted). This + * will be NULL if the master key was found in a process-subscribed + * keyring rather than in the filesystem-level keyring. + */ + struct fscrypt_master_key *ci_master_key; + + /* + * Link in list of inodes that were unlocked with the master key. + * Only used when ->ci_master_key is set. + */ + struct list_head ci_master_key_link; + + /* + * If non-NULL, then encryption is done using the master key directly + * and ci_enc_key will equal ci_direct_key->dk_key. + */ + struct fscrypt_direct_key *ci_direct_key; + + /* + * This inode's hash key for filenames. This is a 128-bit SipHash-2-4 + * key. This is only set for directories that use a keyed dirhash over + * the plaintext filenames -- currently just casefolded directories. + */ + siphash_key_t ci_dirhash_key; + bool ci_dirhash_key_initialized; + + /* The encryption policy used by this inode */ + union fscrypt_policy ci_policy; + + /* This inode's nonce, copied from the fscrypt_context */ + u8 ci_nonce[FSCRYPT_FILE_NONCE_SIZE]; + + /* Hashed inode number. Only set for IV_INO_LBLK_32 */ + u32 ci_hashed_ino; +}; + +typedef enum { + FS_DECRYPT = 0, + FS_ENCRYPT, +} fscrypt_direction_t; + +/* crypto.c */ +extern struct kmem_cache *fscrypt_info_cachep; +int fscrypt_initialize(struct super_block *sb); +int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, + u64 lblk_num, struct page *src_page, + struct page *dest_page, unsigned int len, + unsigned int offs, gfp_t gfp_flags); +struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags); + +void __printf(3, 4) __cold +fscrypt_msg(const struct inode *inode, const char *level, const char *fmt, ...); + +#define fscrypt_warn(inode, fmt, ...) \ + fscrypt_msg((inode), KERN_WARNING, fmt, ##__VA_ARGS__) +#define fscrypt_err(inode, fmt, ...) \ + fscrypt_msg((inode), KERN_ERR, fmt, ##__VA_ARGS__) + +#define FSCRYPT_MAX_IV_SIZE 32 + +union fscrypt_iv { + struct { + /* logical block number within the file */ + __le64 lblk_num; + + /* per-file nonce; only set in DIRECT_KEY mode */ + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; + }; + u8 raw[FSCRYPT_MAX_IV_SIZE]; + __le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)]; +}; + +void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, + const struct fscrypt_info *ci); + +/* fname.c */ +bool __fscrypt_fname_encrypted_size(const union fscrypt_policy *policy, + u32 orig_len, u32 max_len, + u32 *encrypted_len_ret); + +/* hkdf.c */ +struct fscrypt_hkdf { + struct crypto_shash *hmac_tfm; +}; + +int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, + unsigned int master_key_size); + +/* + * The list of contexts in which fscrypt uses HKDF. 
These values are used as + * the first byte of the HKDF application-specific info string to guarantee that + * info strings are never repeated between contexts. This ensures that all HKDF + * outputs are unique and cryptographically isolated, i.e. knowledge of one + * output doesn't reveal another. + */ +#define HKDF_CONTEXT_KEY_IDENTIFIER 1 /* info=<empty> */ +#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 /* info=file_nonce */ +#define HKDF_CONTEXT_DIRECT_KEY 3 /* info=mode_num */ +#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 /* info=mode_num||fs_uuid */ +#define HKDF_CONTEXT_DIRHASH_KEY 5 /* info=file_nonce */ +#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 /* info=mode_num||fs_uuid */ +#define HKDF_CONTEXT_INODE_HASH_KEY 7 /* info=<empty> */ + +int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, + const u8 *info, unsigned int infolen, + u8 *okm, unsigned int okmlen); + +void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); + +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT +int fscrypt_select_encryption_impl(struct fscrypt_info *ci); + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return ci->ci_inlinecrypt; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci); + +void fscrypt_destroy_inline_crypt_key(struct super_block *sb, + struct fscrypt_prepared_key *prep_key); + +/* + * Check whether the crypto transform or blk-crypto key has been allocated in + * @prep_key, depending on which encryption implementation the file will use. + */ +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + /* + * The two smp_load_acquire()'s here pair with the smp_store_release()'s + * in fscrypt_prepare_inline_crypt_key() and fscrypt_prepare_key(). + * I.e., in some cases (namely, if this prep_key is a per-mode + * encryption key) another task can publish blk_key or tfm concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. + */ + if (fscrypt_using_inline_encryption(ci)) + return smp_load_acquire(&prep_key->blk_key) != NULL; + return smp_load_acquire(&prep_key->tfm) != NULL; +} + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + return 0; +} + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return false; +} + +static inline int +fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + WARN_ON_ONCE(1); + return -EOPNOTSUPP; +} + +static inline void +fscrypt_destroy_inline_crypt_key(struct super_block *sb, + struct fscrypt_prepared_key *prep_key) +{ +} + +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + return smp_load_acquire(&prep_key->tfm) != NULL; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +/* keyring.c */ + +/* + * fscrypt_master_key_secret - secret key material of an in-use master key + */ +struct fscrypt_master_key_secret { + + /* + * For v2 policy keys: HKDF context keyed by this master key. + * For v1 policy keys: not set (hkdf.hmac_tfm == NULL). + */ + struct fscrypt_hkdf hkdf; + + /* + * Size of the raw key in bytes. This remains set even if ->raw was + * zeroized due to no longer being needed. I.e. 
we still remember the + * size of the key even if we don't need to remember the key itself. + */ + u32 size; + + /* For v1 policy keys: the raw key. Wiped for v2 policy keys. */ + u8 raw[FSCRYPT_MAX_KEY_SIZE]; + +} __randomize_layout; + +/* + * fscrypt_master_key - an in-use master key + * + * This represents a master encryption key which has been added to the + * filesystem and can be used to "unlock" the encrypted files which were + * encrypted with it. + */ +struct fscrypt_master_key { + + /* + * Link in ->s_master_keys->key_hashtable. + * Only valid if ->mk_active_refs > 0. + */ + struct hlist_node mk_node; + + /* Semaphore that protects ->mk_secret and ->mk_users */ + struct rw_semaphore mk_sem; + + /* + * Active and structural reference counts. An active ref guarantees + * that the struct continues to exist, continues to be in the keyring + * ->s_master_keys, and that any embedded subkeys (e.g. + * ->mk_direct_keys) that have been prepared continue to exist. + * A structural ref only guarantees that the struct continues to exist. + * + * There is one active ref associated with ->mk_secret being present, + * and one active ref for each inode in ->mk_decrypted_inodes. + * + * There is one structural ref associated with the active refcount being + * nonzero. Finding a key in the keyring also takes a structural ref, + * which is then held temporarily while the key is operated on. + */ + refcount_t mk_active_refs; + refcount_t mk_struct_refs; + + struct rcu_head mk_rcu_head; + + /* + * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is + * executed, this is wiped and no new inodes can be unlocked with this + * key; however, there may still be inodes in ->mk_decrypted_inodes + * which could not be evicted. As long as some inodes still remain, + * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or + * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again. + * + * While ->mk_secret is present, one ref in ->mk_active_refs is held. + * + * Locking: protected by ->mk_sem. The manipulation of ->mk_active_refs + * associated with this field is protected by ->mk_sem as well. + */ + struct fscrypt_master_key_secret mk_secret; + + /* + * For v1 policy keys: an arbitrary key descriptor which was assigned by + * userspace (->descriptor). + * + * For v2 policy keys: a cryptographic hash of this key (->identifier). + */ + struct fscrypt_key_specifier mk_spec; + + /* + * Keyring which contains a key of type 'key_type_fscrypt_user' for each + * user who has added this key. Normally each key will be added by just + * one user, but it's possible that multiple users share a key, and in + * that case we need to keep track of those users so that one user can't + * remove the key before the others want it removed too. + * + * This is NULL for v1 policy keys; those can only be added by root. + * + * Locking: protected by ->mk_sem. (We don't just rely on the keyrings + * subsystem semaphore ->mk_users->sem, as we need support for atomic + * search+insert along with proper synchronization with ->mk_secret.) + */ + struct key *mk_users; + + /* + * List of inodes that were unlocked using this key. This allows the + * inodes to be evicted efficiently if the key is removed. + */ + struct list_head mk_decrypted_inodes; + spinlock_t mk_decrypted_inodes_lock; + + /* + * Per-mode encryption keys for the various types of encryption policies + * that use them. Allocated and derived on-demand. 
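+ *
+ * (As a rough illustration of how these arrays are used: the key-setup code
+ * elsewhere in fs/crypto indexes them by mode number, along the lines of
+ *
+ *        unsigned int mode_num = ci->ci_mode - fscrypt_modes;
+ *        struct fscrypt_prepared_key *prep_key = &mk->mk_direct_keys[mode_num];
+ *
+ * with the other two arrays chosen instead for the IV_INO_LBLK_64 and
+ * IV_INO_LBLK_32 policy flags, respectively.)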
+ */ + struct fscrypt_prepared_key mk_direct_keys[FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[FSCRYPT_MODE_MAX + 1]; + + /* Hash key for inode numbers. Initialized only when needed. */ + siphash_key_t mk_ino_hash_key; + bool mk_ino_hash_key_initialized; + +} __randomize_layout; + +static inline bool +is_master_key_secret_present(const struct fscrypt_master_key_secret *secret) +{ + /* + * The READ_ONCE() is only necessary for fscrypt_drop_inode(). + * fscrypt_drop_inode() runs in atomic context, so it can't take the key + * semaphore and thus 'secret' can change concurrently which would be a + * data race. But fscrypt_drop_inode() only need to know whether the + * secret *was* present at the time of check, so READ_ONCE() suffices. + */ + return READ_ONCE(secret->size) != 0; +} + +static inline const char *master_key_spec_type( + const struct fscrypt_key_specifier *spec) +{ + switch (spec->type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + return "descriptor"; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: + return "identifier"; + } + return "[unknown]"; +} + +static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec) +{ + switch (spec->type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + return FSCRYPT_KEY_DESCRIPTOR_SIZE; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: + return FSCRYPT_KEY_IDENTIFIER_SIZE; + } + return 0; +} + +void fscrypt_put_master_key(struct fscrypt_master_key *mk); + +void fscrypt_put_master_key_activeref(struct super_block *sb, + struct fscrypt_master_key *mk); + +struct fscrypt_master_key * +fscrypt_find_master_key(struct super_block *sb, + const struct fscrypt_key_specifier *mk_spec); + +int fscrypt_get_test_dummy_key_identifier( + u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]); + +int fscrypt_add_test_dummy_key(struct super_block *sb, + struct fscrypt_key_specifier *key_spec); + +int fscrypt_verify_key_added(struct super_block *sb, + const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]); + +int __init fscrypt_init_keyring(void); + +/* keysetup.c */ + +struct fscrypt_mode { + const char *friendly_name; + const char *cipher_str; + int keysize; /* key size in bytes */ + int security_strength; /* security strength in bytes */ + int ivsize; /* IV size in bytes */ + int logged_cryptoapi_impl; + int logged_blk_crypto_native; + int logged_blk_crypto_fallback; + enum blk_crypto_mode_num blk_crypto_mode; +}; + +extern struct fscrypt_mode fscrypt_modes[]; + +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci); + +void fscrypt_destroy_prepared_key(struct super_block *sb, + struct fscrypt_prepared_key *prep_key); + +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key); + +int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk); + +void fscrypt_hash_inode_number(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk); + +int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported); + +/** + * fscrypt_require_key() - require an inode's encryption key + * @inode: the inode we need the key for + * + * If the inode is encrypted, set up its encryption key if not already done. + * Then require that the key be present and return -ENOKEY otherwise. + * + * No locks are needed, and the key will live as long as the struct inode --- so + * it won't go away from under you. 
+ * + * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code + * if a problem occurred while setting up the encryption key. + */ +static inline int fscrypt_require_key(struct inode *inode) +{ + if (IS_ENCRYPTED(inode)) { + int err = fscrypt_get_encryption_info(inode, false); + + if (err) + return err; + if (!fscrypt_has_encryption_key(inode)) + return -ENOKEY; + } + return 0; +} + +/* keysetup_v1.c */ + +void fscrypt_put_direct_key(struct fscrypt_direct_key *dk); + +int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, + const u8 *raw_master_key); + +int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci); + +/* policy.c */ + +bool fscrypt_policies_equal(const union fscrypt_policy *policy1, + const union fscrypt_policy *policy2); +int fscrypt_policy_to_key_spec(const union fscrypt_policy *policy, + struct fscrypt_key_specifier *key_spec); +const union fscrypt_policy *fscrypt_get_dummy_policy(struct super_block *sb); +bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, + const struct inode *inode); +int fscrypt_policy_from_context(union fscrypt_policy *policy_u, + const union fscrypt_context *ctx_u, + int ctx_size); +const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir); + +#endif /* _FSCRYPT_PRIVATE_H */ diff --git a/fs/crypto/hkdf.c b/fs/crypto/hkdf.c new file mode 100644 index 0000000000..5a384dad2c --- /dev/null +++ b/fs/crypto/hkdf.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Implementation of HKDF ("HMAC-based Extract-and-Expand Key Derivation + * Function"), aka RFC 5869. See also the original paper (Krawczyk 2010): + * "Cryptographic Extraction and Key Derivation: The HKDF Scheme". + * + * This is used to derive keys from the fscrypt master keys. + * + * Copyright 2019 Google LLC + */ + +#include <crypto/hash.h> +#include <crypto/sha2.h> + +#include "fscrypt_private.h" + +/* + * HKDF supports any unkeyed cryptographic hash algorithm, but fscrypt uses + * SHA-512 because it is well-established, secure, and reasonably efficient. + * + * HKDF-SHA256 was also considered, as its 256-bit security strength would be + * sufficient here. A 512-bit security strength is "nice to have", though. + * Also, on 64-bit CPUs, SHA-512 is usually just as fast as SHA-256. In the + * common case of deriving an AES-256-XTS key (512 bits), that can result in + * HKDF-SHA512 being much faster than HKDF-SHA256, as the longer digest size of + * SHA-512 causes HKDF-Expand to only need to do one iteration rather than two. + */ +#define HKDF_HMAC_ALG "hmac(sha512)" +#define HKDF_HASHLEN SHA512_DIGEST_SIZE + +/* + * HKDF consists of two steps: + * + * 1. HKDF-Extract: extract a pseudorandom key of length HKDF_HASHLEN bytes from + * the input keying material and optional salt. + * 2. HKDF-Expand: expand the pseudorandom key into output keying material of + * any length, parameterized by an application-specific info string. + * + * HKDF-Extract can be skipped if the input is already a pseudorandom key of + * length HKDF_HASHLEN bytes. However, cipher modes other than AES-256-XTS take + * shorter keys, and we don't want to force users of those modes to provide + * unnecessarily long master keys. Thus fscrypt still does HKDF-Extract. No + * salt is used, since fscrypt master keys should already be pseudorandom and + * there's no way to persist a random salt per master key from kernel mode. 
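+ *
+ * In terms of this file's API, the two steps look roughly like the following
+ * sketch (error handling elided; master_key and nonce stand for the caller's
+ * buffers, and the 32-byte subkey with HKDF_CONTEXT_PER_FILE_ENC_KEY is just
+ * one example of a derived key):
+ *
+ *        struct fscrypt_hkdf hkdf;
+ *        u8 subkey[32];
+ *
+ *        fscrypt_init_hkdf(&hkdf, master_key, master_key_size);
+ *        fscrypt_hkdf_expand(&hkdf, HKDF_CONTEXT_PER_FILE_ENC_KEY,
+ *                            nonce, FSCRYPT_FILE_NONCE_SIZE,
+ *                            subkey, sizeof(subkey));
+ *        fscrypt_destroy_hkdf(&hkdf);
+ *
+ * fscrypt_init_hkdf() performs HKDF-Extract once; each later
+ * fscrypt_hkdf_expand() call is an HKDF-Expand with a different context/info
+ * pair.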
+ */ + +/* HKDF-Extract (RFC 5869 section 2.2), unsalted */ +static int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm, + unsigned int ikmlen, u8 prk[HKDF_HASHLEN]) +{ + static const u8 default_salt[HKDF_HASHLEN]; + int err; + + err = crypto_shash_setkey(hmac_tfm, default_salt, HKDF_HASHLEN); + if (err) + return err; + + return crypto_shash_tfm_digest(hmac_tfm, ikm, ikmlen, prk); +} + +/* + * Compute HKDF-Extract using the given master key as the input keying material, + * and prepare an HMAC transform object keyed by the resulting pseudorandom key. + * + * Afterwards, the keyed HMAC transform object can be used for HKDF-Expand many + * times without having to recompute HKDF-Extract each time. + */ +int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, + unsigned int master_key_size) +{ + struct crypto_shash *hmac_tfm; + u8 prk[HKDF_HASHLEN]; + int err; + + hmac_tfm = crypto_alloc_shash(HKDF_HMAC_ALG, 0, 0); + if (IS_ERR(hmac_tfm)) { + fscrypt_err(NULL, "Error allocating " HKDF_HMAC_ALG ": %ld", + PTR_ERR(hmac_tfm)); + return PTR_ERR(hmac_tfm); + } + + if (WARN_ON_ONCE(crypto_shash_digestsize(hmac_tfm) != sizeof(prk))) { + err = -EINVAL; + goto err_free_tfm; + } + + err = hkdf_extract(hmac_tfm, master_key, master_key_size, prk); + if (err) + goto err_free_tfm; + + err = crypto_shash_setkey(hmac_tfm, prk, sizeof(prk)); + if (err) + goto err_free_tfm; + + hkdf->hmac_tfm = hmac_tfm; + goto out; + +err_free_tfm: + crypto_free_shash(hmac_tfm); +out: + memzero_explicit(prk, sizeof(prk)); + return err; +} + +/* + * HKDF-Expand (RFC 5869 section 2.3). This expands the pseudorandom key, which + * was already keyed into 'hkdf->hmac_tfm' by fscrypt_init_hkdf(), into 'okmlen' + * bytes of output keying material parameterized by the application-specific + * 'info' of length 'infolen' bytes, prefixed by "fscrypt\0" and the 'context' + * byte. This is thread-safe and may be called by multiple threads in parallel. + * + * ('context' isn't part of the HKDF specification; it's just a prefix fscrypt + * adds to its application-specific info strings to guarantee that it doesn't + * accidentally repeat an info string when using HKDF for different purposes.) 
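+ *
+ * Concretely, the application-specific info string fed into each HMAC
+ * invocation below is laid out as:
+ *
+ *        "fscrypt\0" || context byte || caller-supplied info
+ *          8 bytes       1 byte          infolen bytes
+ *
+ * (plus the previous output block and the counter byte, per RFC 5869).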
+ */ +int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, + const u8 *info, unsigned int infolen, + u8 *okm, unsigned int okmlen) +{ + SHASH_DESC_ON_STACK(desc, hkdf->hmac_tfm); + u8 prefix[9]; + unsigned int i; + int err; + const u8 *prev = NULL; + u8 counter = 1; + u8 tmp[HKDF_HASHLEN]; + + if (WARN_ON_ONCE(okmlen > 255 * HKDF_HASHLEN)) + return -EINVAL; + + desc->tfm = hkdf->hmac_tfm; + + memcpy(prefix, "fscrypt\0", 8); + prefix[8] = context; + + for (i = 0; i < okmlen; i += HKDF_HASHLEN) { + + err = crypto_shash_init(desc); + if (err) + goto out; + + if (prev) { + err = crypto_shash_update(desc, prev, HKDF_HASHLEN); + if (err) + goto out; + } + + err = crypto_shash_update(desc, prefix, sizeof(prefix)); + if (err) + goto out; + + err = crypto_shash_update(desc, info, infolen); + if (err) + goto out; + + BUILD_BUG_ON(sizeof(counter) != 1); + if (okmlen - i < HKDF_HASHLEN) { + err = crypto_shash_finup(desc, &counter, 1, tmp); + if (err) + goto out; + memcpy(&okm[i], tmp, okmlen - i); + memzero_explicit(tmp, sizeof(tmp)); + } else { + err = crypto_shash_finup(desc, &counter, 1, &okm[i]); + if (err) + goto out; + } + counter++; + prev = &okm[i]; + } + err = 0; +out: + if (unlikely(err)) + memzero_explicit(okm, okmlen); /* so caller doesn't need to */ + shash_desc_zero(desc); + return err; +} + +void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf) +{ + crypto_free_shash(hkdf->hmac_tfm); +} diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c new file mode 100644 index 0000000000..6238dbcadc --- /dev/null +++ b/fs/crypto/hooks.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * fs/crypto/hooks.c + * + * Encryption hooks for higher-level filesystem operations. + */ + +#include "fscrypt_private.h" + +/** + * fscrypt_file_open() - prepare to open a possibly-encrypted regular file + * @inode: the inode being opened + * @filp: the struct file being set up + * + * Currently, an encrypted regular file can only be opened if its encryption key + * is available; access to the raw encrypted contents is not supported. + * Therefore, we first set up the inode's encryption key (if not already done) + * and return an error if it's unavailable. + * + * We also verify that if the parent directory (from the path via which the file + * is being opened) is encrypted, then the inode being opened uses the same + * encryption policy. This is needed as part of the enforcement that all files + * in an encrypted directory tree use the same encryption policy, as a + * protection against certain types of offline attacks. Note that this check is + * needed even when opening an *unencrypted* file, since it's forbidden to have + * an unencrypted file in an encrypted directory. 
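+ *
+ * As an illustrative sketch (for a hypothetical filesystem "myfs"), this is
+ * typically called near the start of the filesystem's ->open() method:
+ *
+ *        static int myfs_file_open(struct inode *inode, struct file *filp)
+ *        {
+ *                int err = fscrypt_file_open(inode, filp);
+ *
+ *                if (err)
+ *                        return err;
+ *                return generic_file_open(inode, filp);
+ *        }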
+ * + * Return: 0 on success, -ENOKEY if the key is missing, or another -errno code + */ +int fscrypt_file_open(struct inode *inode, struct file *filp) +{ + int err; + struct dentry *dir; + + err = fscrypt_require_key(inode); + if (err) + return err; + + dir = dget_parent(file_dentry(filp)); + if (IS_ENCRYPTED(d_inode(dir)) && + !fscrypt_has_permitted_context(d_inode(dir), inode)) { + fscrypt_warn(inode, + "Inconsistent encryption context (parent directory: %lu)", + d_inode(dir)->i_ino); + err = -EPERM; + } + dput(dir); + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_file_open); + +int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, + struct dentry *dentry) +{ + if (fscrypt_is_nokey_name(dentry)) + return -ENOKEY; + /* + * We don't need to separately check that the directory inode's key is + * available, as it's implied by the dentry not being a no-key name. + */ + + if (!fscrypt_has_permitted_context(dir, inode)) + return -EXDEV; + + return 0; +} +EXPORT_SYMBOL_GPL(__fscrypt_prepare_link); + +int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) +{ + if (fscrypt_is_nokey_name(old_dentry) || + fscrypt_is_nokey_name(new_dentry)) + return -ENOKEY; + /* + * We don't need to separately check that the directory inodes' keys are + * available, as it's implied by the dentries not being no-key names. + */ + + if (old_dir != new_dir) { + if (IS_ENCRYPTED(new_dir) && + !fscrypt_has_permitted_context(new_dir, + d_inode(old_dentry))) + return -EXDEV; + + if ((flags & RENAME_EXCHANGE) && + IS_ENCRYPTED(old_dir) && + !fscrypt_has_permitted_context(old_dir, + d_inode(new_dentry))) + return -EXDEV; + } + return 0; +} +EXPORT_SYMBOL_GPL(__fscrypt_prepare_rename); + +int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct fscrypt_name *fname) +{ + int err = fscrypt_setup_filename(dir, &dentry->d_name, 1, fname); + + if (err && err != -ENOENT) + return err; + + if (fname->is_nokey_name) { + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_NOKEY_NAME; + spin_unlock(&dentry->d_lock); + } + return err; +} +EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup); + +/** + * fscrypt_prepare_lookup_partial() - prepare lookup without filename setup + * @dir: the encrypted directory being searched + * @dentry: the dentry being looked up in @dir + * + * This function should be used by the ->lookup and ->atomic_open methods of + * filesystems that handle filename encryption and no-key name encoding + * themselves and thus can't use fscrypt_prepare_lookup(). Like + * fscrypt_prepare_lookup(), this will try to set up the directory's encryption + * key and will set DCACHE_NOKEY_NAME on the dentry if the key is unavailable. + * However, this function doesn't set up a struct fscrypt_name for the filename. + * + * Return: 0 on success; -errno on error. Note that the encryption key being + * unavailable is not considered an error. It is also not an error if + * the encryption policy is unsupported by this kernel; that is treated + * like the key being unavailable, so that files can still be deleted. 
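+ *
+ * A rough sketch of the intended call site, for a hypothetical filesystem
+ * that encodes no-key names itself:
+ *
+ *        static struct dentry *myfs_lookup(struct inode *dir,
+ *                                          struct dentry *dentry,
+ *                                          unsigned int flags)
+ *        {
+ *                if (IS_ENCRYPTED(dir)) {
+ *                        int err = fscrypt_prepare_lookup_partial(dir, dentry);
+ *
+ *                        if (err)
+ *                                return ERR_PTR(err);
+ *                }
+ *                ... do the filesystem's own lookup, decoding the name
+ *                    itself if DCACHE_NOKEY_NAME is set ...
+ *        }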
+ */ +int fscrypt_prepare_lookup_partial(struct inode *dir, struct dentry *dentry) +{ + int err = fscrypt_get_encryption_info(dir, true); + + if (!err && !fscrypt_has_encryption_key(dir)) { + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_NOKEY_NAME; + spin_unlock(&dentry->d_lock); + } + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_prepare_lookup_partial); + +int __fscrypt_prepare_readdir(struct inode *dir) +{ + return fscrypt_get_encryption_info(dir, true); +} +EXPORT_SYMBOL_GPL(__fscrypt_prepare_readdir); + +int __fscrypt_prepare_setattr(struct dentry *dentry, struct iattr *attr) +{ + if (attr->ia_valid & ATTR_SIZE) + return fscrypt_require_key(d_inode(dentry)); + return 0; +} +EXPORT_SYMBOL_GPL(__fscrypt_prepare_setattr); + +/** + * fscrypt_prepare_setflags() - prepare to change flags with FS_IOC_SETFLAGS + * @inode: the inode on which flags are being changed + * @oldflags: the old flags + * @flags: the new flags + * + * The caller should be holding i_rwsem for write. + * + * Return: 0 on success; -errno if the flags change isn't allowed or if + * another error occurs. + */ +int fscrypt_prepare_setflags(struct inode *inode, + unsigned int oldflags, unsigned int flags) +{ + struct fscrypt_info *ci; + struct fscrypt_master_key *mk; + int err; + + /* + * When the CASEFOLD flag is set on an encrypted directory, we must + * derive the secret key needed for the dirhash. This is only possible + * if the directory uses a v2 encryption policy. + */ + if (IS_ENCRYPTED(inode) && (flags & ~oldflags & FS_CASEFOLD_FL)) { + err = fscrypt_require_key(inode); + if (err) + return err; + ci = inode->i_crypt_info; + if (ci->ci_policy.version != FSCRYPT_POLICY_V2) + return -EINVAL; + mk = ci->ci_master_key; + down_read(&mk->mk_sem); + if (is_master_key_secret_present(&mk->mk_secret)) + err = fscrypt_derive_dirhash_key(ci, mk); + else + err = -ENOKEY; + up_read(&mk->mk_sem); + return err; + } + return 0; +} + +/** + * fscrypt_prepare_symlink() - prepare to create a possibly-encrypted symlink + * @dir: directory in which the symlink is being created + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @max_len: space the filesystem has available to store the symlink target + * @disk_link: (out) the on-disk symlink target being prepared + * + * This function computes the size the symlink target will require on-disk, + * stores it in @disk_link->len, and validates it against @max_len. An + * encrypted symlink may be longer than the original. + * + * Additionally, @disk_link->name is set to @target if the symlink will be + * unencrypted, but left NULL if the symlink will be encrypted. For encrypted + * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the + * on-disk target later. (The reason for the two-step process is that some + * filesystems need to know the size of the symlink target before creating the + * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.) + * + * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long, + * -ENOKEY if the encryption key is missing, or another -errno code if a problem + * occurred while setting up the encryption key. 
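+ *
+ * A sketch of the overall two-step flow in a filesystem's ->symlink() method
+ * (hypothetical and simplified; inode allocation and I/O details elided):
+ *
+ *        struct fscrypt_str disk_link;
+ *
+ *        err = fscrypt_prepare_symlink(dir, target, len, max_len, &disk_link);
+ *        if (err)
+ *                return err;
+ *        ... allocate and set up the symlink inode ...
+ *        if (IS_ENCRYPTED(inode)) {
+ *                err = __fscrypt_encrypt_symlink(inode, target, len,
+ *                                                &disk_link);
+ *                if (err)
+ *                        return err;
+ *        }
+ *        ... write disk_link.name (disk_link.len bytes) as the on-disk target ...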
+ */ +int fscrypt_prepare_symlink(struct inode *dir, const char *target, + unsigned int len, unsigned int max_len, + struct fscrypt_str *disk_link) +{ + const union fscrypt_policy *policy; + + /* + * To calculate the size of the encrypted symlink target we need to know + * the amount of NUL padding, which is determined by the flags set in + * the encryption policy which will be inherited from the directory. + */ + policy = fscrypt_policy_to_inherit(dir); + if (policy == NULL) { + /* Not encrypted */ + disk_link->name = (unsigned char *)target; + disk_link->len = len + 1; + if (disk_link->len > max_len) + return -ENAMETOOLONG; + return 0; + } + if (IS_ERR(policy)) + return PTR_ERR(policy); + + /* + * Calculate the size of the encrypted symlink and verify it won't + * exceed max_len. Note that for historical reasons, encrypted symlink + * targets are prefixed with the ciphertext length, despite this + * actually being redundant with i_size. This decreases by 2 bytes the + * longest symlink target we can accept. + * + * We could recover 1 byte by not counting a null terminator, but + * counting it (even though it is meaningless for ciphertext) is simpler + * for now since filesystems will assume it is there and subtract it. + */ + if (!__fscrypt_fname_encrypted_size(policy, len, + max_len - sizeof(struct fscrypt_symlink_data) - 1, + &disk_link->len)) + return -ENAMETOOLONG; + disk_link->len += sizeof(struct fscrypt_symlink_data) + 1; + + disk_link->name = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_prepare_symlink); + +int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, struct fscrypt_str *disk_link) +{ + int err; + struct qstr iname = QSTR_INIT(target, len); + struct fscrypt_symlink_data *sd; + unsigned int ciphertext_len; + + /* + * fscrypt_prepare_new_inode() should have already set up the new + * symlink inode's encryption key. We don't wait until now to do it, + * since we may be in a filesystem transaction now. + */ + if (WARN_ON_ONCE(!fscrypt_has_encryption_key(inode))) + return -ENOKEY; + + if (disk_link->name) { + /* filesystem-provided buffer */ + sd = (struct fscrypt_symlink_data *)disk_link->name; + } else { + sd = kmalloc(disk_link->len, GFP_NOFS); + if (!sd) + return -ENOMEM; + } + ciphertext_len = disk_link->len - sizeof(*sd) - 1; + sd->len = cpu_to_le16(ciphertext_len); + + err = fscrypt_fname_encrypt(inode, &iname, sd->encrypted_path, + ciphertext_len); + if (err) + goto err_free_sd; + + /* + * Null-terminating the ciphertext doesn't make sense, but we still + * count the null terminator in the length, so we might as well + * initialize it just in case the filesystem writes it out. + */ + sd->encrypted_path[ciphertext_len] = '\0'; + + /* Cache the plaintext symlink target for later use by get_link() */ + err = -ENOMEM; + inode->i_link = kmemdup(target, len + 1, GFP_NOFS); + if (!inode->i_link) + goto err_free_sd; + + if (!disk_link->name) + disk_link->name = (unsigned char *)sd; + return 0; + +err_free_sd: + if (!disk_link->name) + kfree(sd); + return err; +} +EXPORT_SYMBOL_GPL(__fscrypt_encrypt_symlink); + +/** + * fscrypt_get_symlink() - get the target of an encrypted symlink + * @inode: the symlink inode + * @caddr: the on-disk contents of the symlink + * @max_size: size of @caddr buffer + * @done: if successful, will be set up to free the returned target if needed + * + * If the symlink's encryption key is available, we decrypt its target. + * Otherwise, we encode its target for presentation. 
+ * + * This may sleep, so the filesystem must have dropped out of RCU mode already. + * + * Return: the presentable symlink target or an ERR_PTR() + */ +const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done) +{ + const struct fscrypt_symlink_data *sd; + struct fscrypt_str cstr, pstr; + bool has_key; + int err; + + /* This is for encrypted symlinks only */ + if (WARN_ON_ONCE(!IS_ENCRYPTED(inode))) + return ERR_PTR(-EINVAL); + + /* If the decrypted target is already cached, just return it. */ + pstr.name = READ_ONCE(inode->i_link); + if (pstr.name) + return pstr.name; + + /* + * Try to set up the symlink's encryption key, but we can continue + * regardless of whether the key is available or not. + */ + err = fscrypt_get_encryption_info(inode, false); + if (err) + return ERR_PTR(err); + has_key = fscrypt_has_encryption_key(inode); + + /* + * For historical reasons, encrypted symlink targets are prefixed with + * the ciphertext length, even though this is redundant with i_size. + */ + + if (max_size < sizeof(*sd) + 1) + return ERR_PTR(-EUCLEAN); + sd = caddr; + cstr.name = (unsigned char *)sd->encrypted_path; + cstr.len = le16_to_cpu(sd->len); + + if (cstr.len == 0) + return ERR_PTR(-EUCLEAN); + + if (cstr.len + sizeof(*sd) > max_size) + return ERR_PTR(-EUCLEAN); + + err = fscrypt_fname_alloc_buffer(cstr.len, &pstr); + if (err) + return ERR_PTR(err); + + err = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr); + if (err) + goto err_kfree; + + err = -EUCLEAN; + if (pstr.name[0] == '\0') + goto err_kfree; + + pstr.name[pstr.len] = '\0'; + + /* + * Cache decrypted symlink targets in i_link for later use. Don't cache + * symlink targets encoded without the key, since those become outdated + * once the key is added. This pairs with the READ_ONCE() above and in + * the VFS path lookup code. + */ + if (!has_key || + cmpxchg_release(&inode->i_link, NULL, pstr.name) != NULL) + set_delayed_call(done, kfree_link, pstr.name); + + return pstr.name; + +err_kfree: + kfree(pstr.name); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(fscrypt_get_symlink); + +/** + * fscrypt_symlink_getattr() - set the correct st_size for encrypted symlinks + * @path: the path for the encrypted symlink being queried + * @stat: the struct being filled with the symlink's attributes + * + * Override st_size of encrypted symlinks to be the length of the decrypted + * symlink target (or the no-key encoded symlink target, if the key is + * unavailable) rather than the length of the encrypted symlink target. This is + * necessary for st_size to match the symlink target that userspace actually + * sees. POSIX requires this, and some userspace programs depend on it. + * + * This requires reading the symlink target from disk if needed, setting up the + * inode's encryption key if possible, and then decrypting or encoding the + * symlink target. This makes lstat() more heavyweight than is normally the + * case. However, decrypted symlink targets will be cached in ->i_link, so + * usually the symlink won't have to be read and decrypted again later if/when + * it is actually followed, readlink() is called, or lstat() is called again. 
+ * + * Return: 0 on success, -errno on failure + */ +int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat) +{ + struct dentry *dentry = path->dentry; + struct inode *inode = d_inode(dentry); + const char *link; + DEFINE_DELAYED_CALL(done); + + /* + * To get the symlink target that userspace will see (whether it's the + * decrypted target or the no-key encoded target), we can just get it in + * the same way the VFS does during path resolution and readlink(). + */ + link = READ_ONCE(inode->i_link); + if (!link) { + link = inode->i_op->get_link(dentry, inode, &done); + if (IS_ERR(link)) + return PTR_ERR(link); + } + stat->size = strlen(link); + do_delayed_call(&done); + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_symlink_getattr); diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c new file mode 100644 index 0000000000..8bfb3ce864 --- /dev/null +++ b/fs/crypto/inline_crypt.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Inline encryption support for fscrypt + * + * Copyright 2019 Google LLC + */ + +/* + * With "inline encryption", the block layer handles the decryption/encryption + * as part of the bio, instead of the filesystem doing the crypto itself via + * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still + * provides the key and IV to use. + */ + +#include <linux/blk-crypto.h> +#include <linux/blkdev.h> +#include <linux/buffer_head.h> +#include <linux/sched/mm.h> +#include <linux/slab.h> +#include <linux/uio.h> + +#include "fscrypt_private.h" + +static struct block_device **fscrypt_get_devices(struct super_block *sb, + unsigned int *num_devs) +{ + struct block_device **devs; + + if (sb->s_cop->get_devices) { + devs = sb->s_cop->get_devices(sb, num_devs); + if (devs) + return devs; + } + devs = kmalloc(sizeof(*devs), GFP_KERNEL); + if (!devs) + return ERR_PTR(-ENOMEM); + devs[0] = sb->s_bdev; + *num_devs = 1; + return devs; +} + +static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) +{ + struct super_block *sb = ci->ci_inode->i_sb; + unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); + int ino_bits = 64, lblk_bits = 64; + + if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + return offsetofend(union fscrypt_iv, nonce); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) + return sizeof(__le64); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + return sizeof(__le32); + + /* Default case: IVs are just the file logical block number */ + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + return DIV_ROUND_UP(lblk_bits, 8); +} + +/* + * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback + * for an encryption mode for the first time. This is the blk-crypto + * counterpart to the message logged when starting to use the crypto API for the + * first time. A limitation is that these messages don't convey which specific + * filesystems or files are using each implementation. However, *usually* + * systems use just one implementation per mode, which makes these messages + * helpful for debugging problems where the "wrong" implementation is used. 
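+ *
+ * For example, a filesystem using AES-256-XTS contents encryption on a device
+ * with real inline encryption hardware would typically produce a one-time log
+ * line like:
+ *
+ *        fscrypt: AES-256-XTS using blk-crypto (native)
+ *
+ * while the same mode on a device without such hardware (with the fallback
+ * enabled) would log "using blk-crypto-fallback" instead.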
+ */ +static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode, + struct block_device **devs, + unsigned int num_devs, + const struct blk_crypto_config *cfg) +{ + unsigned int i; + + for (i = 0; i < num_devs; i++) { + if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) || + blk_crypto_config_supported_natively(devs[i], cfg)) { + if (!xchg(&mode->logged_blk_crypto_native, 1)) + pr_info("fscrypt: %s using blk-crypto (native)\n", + mode->friendly_name); + } else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) { + pr_info("fscrypt: %s using blk-crypto-fallback\n", + mode->friendly_name); + } + } +} + +/* Enable inline encryption for this file if supported. */ +int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + struct blk_crypto_config crypto_cfg; + struct block_device **devs; + unsigned int num_devs; + unsigned int i; + + /* The file must need contents encryption, not filenames encryption */ + if (!S_ISREG(inode->i_mode)) + return 0; + + /* The crypto mode must have a blk-crypto counterpart */ + if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + return 0; + + /* The filesystem must be mounted with -o inlinecrypt */ + if (!(sb->s_flags & SB_INLINECRYPT)) + return 0; + + /* + * When a page contains multiple logically contiguous filesystem blocks, + * some filesystem code only calls fscrypt_mergeable_bio() for the first + * block in the page. This is fine for most of fscrypt's IV generation + * strategies, where contiguous blocks imply contiguous IVs. But it + * doesn't work with IV_INO_LBLK_32. For now, simply exclude + * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption. + */ + if ((fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && + sb->s_blocksize != PAGE_SIZE) + return 0; + + /* + * On all the filesystem's block devices, blk-crypto must support the + * crypto configuration that the file would use. + */ + crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode; + crypto_cfg.data_unit_size = sb->s_blocksize; + crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci); + + devs = fscrypt_get_devices(sb, &num_devs); + if (IS_ERR(devs)) + return PTR_ERR(devs); + + for (i = 0; i < num_devs; i++) { + if (!blk_crypto_config_supported(devs[i], &crypto_cfg)) + goto out_free_devs; + } + + fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg); + + ci->ci_inlinecrypt = true; +out_free_devs: + kfree(devs); + + return 0; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + struct blk_crypto_key *blk_key; + struct block_device **devs; + unsigned int num_devs; + unsigned int i; + int err; + + blk_key = kmalloc(sizeof(*blk_key), GFP_KERNEL); + if (!blk_key) + return -ENOMEM; + + err = blk_crypto_init_key(blk_key, raw_key, crypto_mode, + fscrypt_get_dun_bytes(ci), sb->s_blocksize); + if (err) { + fscrypt_err(inode, "error %d initializing blk-crypto key", err); + goto fail; + } + + /* Start using blk-crypto on all the filesystem's block devices. 
*/ + devs = fscrypt_get_devices(sb, &num_devs); + if (IS_ERR(devs)) { + err = PTR_ERR(devs); + goto fail; + } + for (i = 0; i < num_devs; i++) { + err = blk_crypto_start_using_key(devs[i], blk_key); + if (err) + break; + } + kfree(devs); + if (err) { + fscrypt_err(inode, "error %d starting to use blk-crypto", err); + goto fail; + } + + /* + * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared(). + * I.e., here we publish ->blk_key with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. Note that this concurrency is only + * possible for per-mode keys, not for per-file keys. + */ + smp_store_release(&prep_key->blk_key, blk_key); + return 0; + +fail: + kfree_sensitive(blk_key); + return err; +} + +void fscrypt_destroy_inline_crypt_key(struct super_block *sb, + struct fscrypt_prepared_key *prep_key) +{ + struct blk_crypto_key *blk_key = prep_key->blk_key; + struct block_device **devs; + unsigned int num_devs; + unsigned int i; + + if (!blk_key) + return; + + /* Evict the key from all the filesystem's block devices. */ + devs = fscrypt_get_devices(sb, &num_devs); + if (!IS_ERR(devs)) { + for (i = 0; i < num_devs; i++) + blk_crypto_evict_key(devs[i], blk_key); + kfree(devs); + } + kfree_sensitive(blk_key); +} + +bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return inode->i_crypt_info->ci_inlinecrypt; +} +EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto); + +static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num, + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) +{ + union fscrypt_iv iv; + int i; + + fscrypt_generate_iv(&iv, lblk_num, ci); + + BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE); + memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); + for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++) + dun[i] = le64_to_cpu(iv.dun[i]); +} + +/** + * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto + * @bio: a bio which will eventually be submitted to the file + * @inode: the file's inode + * @first_lblk: the first file logical block number in the I/O + * @gfp_mask: memory allocation flags - these must be a waiting mask so that + * bio_crypt_set_ctx can't fail. + * + * If the contents of the file should be encrypted (or decrypted) with inline + * encryption, then assign the appropriate encryption context to the bio. + * + * Normally the bio should be newly allocated (i.e. no pages added yet), as + * otherwise fscrypt_mergeable_bio() won't work as intended. + * + * The encryption context will be freed automatically when the bio is freed. + */ +void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) +{ + const struct fscrypt_info *ci; + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return; + ci = inode->i_crypt_info; + + fscrypt_generate_dun(ci, first_lblk, dun); + bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); + +/* Extract the inode and logical block number from a buffer_head. */ +static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, + const struct inode **inode_ret, + u64 *lblk_num_ret) +{ + struct page *page = bh->b_page; + const struct address_space *mapping; + const struct inode *inode; + + /* + * The ext4 journal (jbd2) can submit a buffer_head it directly created + * for a non-pagecache page. fscrypt doesn't care about these. 
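+ *
+ * As a worked example of the calculation below: with 4K pages and a 1K block
+ * size (i_blkbits == 10), a buffer_head at offset 2048 within the page at
+ * index 3 maps to logical block (3 << 2) + (2048 >> 10) = 14.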
+ */ + mapping = page_mapping(page); + if (!mapping) + return false; + inode = mapping->host; + + *inode_ret = inode; + *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + + (bh_offset(bh) >> inode->i_blkbits); + return true; +} + +/** + * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline + * crypto + * @bio: a bio which will eventually be submitted to the file + * @first_bh: the first buffer_head for which I/O will be submitted + * @gfp_mask: memory allocation flags + * + * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead + * of an inode and block number directly. + */ +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) +{ + const struct inode *inode; + u64 first_lblk; + + if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk)) + fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); + +/** + * fscrypt_mergeable_bio() - test whether data can be added to a bio + * @bio: the bio being built up + * @inode: the inode for the next part of the I/O + * @next_lblk: the next file logical block number in the I/O + * + * When building a bio which may contain data which should undergo inline + * encryption (or decryption) via fscrypt, filesystems should call this function + * to ensure that the resulting bio contains only contiguous data unit numbers. + * This will return false if the next part of the I/O cannot be merged with the + * bio because either the encryption key would be different or the encryption + * data unit numbers would be discontiguous. + * + * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. + * + * This function isn't required in cases where crypto-mergeability is ensured in + * another way, such as I/O targeting only a single file (and thus a single key) + * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity. + * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk) +{ + const struct bio_crypt_ctx *bc = bio->bi_crypt_context; + u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) + return false; + if (!bc) + return true; + + /* + * Comparing the key pointers is good enough, as all I/O for each key + * uses the same pointer. I.e., there's currently no need to support + * merging requests where the keys are the same but the pointers differ. + */ + if (bc->bc_key != inode->i_crypt_info->ci_enc_key.blk_key) + return false; + + fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); + return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); + +/** + * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio + * @bio: the bio being built up + * @next_bh: the next buffer_head for which I/O will be submitted + * + * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of + * an inode and block number directly. 
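+ *
+ * A rough sketch of how a buffer_head-based write path might use it
+ * (hypothetical and simplified; "no_more_room" stands for whatever capacity
+ * check the filesystem already does before adding to the bio):
+ *
+ *        if (bio && (no_more_room || !fscrypt_mergeable_bio_bh(bio, bh))) {
+ *                submit_bio(bio);
+ *                bio = NULL;
+ *        }
+ *        if (!bio) {
+ *                bio = bio_alloc(bdev, nr_vecs, opf, GFP_NOIO);
+ *                fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
+ *                ... set up bi_iter.bi_sector etc. ...
+ *        }
+ *        bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));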
+ * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + const struct inode *inode; + u64 next_lblk; + + if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) + return !bio->bi_crypt_context; + + return fscrypt_mergeable_bio(bio, inode, next_lblk); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); + +/** + * fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an + * inode, as far as encryption is concerned + * @inode: the inode in question + * + * Return: %true if there are no encryption constraints that prevent DIO from + * being supported; %false if DIO is unsupported. (Note that in the + * %true case, the filesystem might have other, non-encryption-related + * constraints that prevent DIO from actually being supported. Also, on + * encrypted files the filesystem is still responsible for only allowing + * DIO when requests are filesystem-block-aligned.) + */ +bool fscrypt_dio_supported(struct inode *inode) +{ + int err; + + /* If the file is unencrypted, no veto from us. */ + if (!fscrypt_needs_contents_encryption(inode)) + return true; + + /* + * We only support DIO with inline crypto, not fs-layer crypto. + * + * To determine whether the inode is using inline crypto, we have to set + * up the key if it wasn't already done. This is because in the current + * design of fscrypt, the decision of whether to use inline crypto or + * not isn't made until the inode's encryption key is being set up. In + * the DIO read/write case, the key will always be set up already, since + * the file will be open. But in the case of statx(), the key might not + * be set up yet, as the file might not have been opened yet. + */ + err = fscrypt_require_key(inode); + if (err) { + /* + * Key unavailable or couldn't be set up. This edge case isn't + * worth worrying about; just report that DIO is unsupported. + */ + return false; + } + return fscrypt_inode_uses_inline_crypto(inode); +} +EXPORT_SYMBOL_GPL(fscrypt_dio_supported); + +/** + * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs + * @inode: the file on which I/O is being done + * @lblk: the block at which the I/O is being started from + * @nr_blocks: the number of blocks we want to submit starting at @lblk + * + * Determine the limit to the number of blocks that can be submitted in a bio + * targeting @lblk without causing a data unit number (DUN) discontiguity. + * + * This is normally just @nr_blocks, as normally the DUNs just increment along + * with the logical blocks. (Or the file is not encrypted.) + * + * In rare cases, fscrypt can be using an IV generation method that allows the + * DUN to wrap around within logically contiguous blocks, and that wraparound + * will occur. If this happens, a value less than @nr_blocks will be returned + * so that the wraparound doesn't occur in the middle of a bio, which would + * cause encryption/decryption to produce wrong results. + * + * Return: the actual number of blocks that can be submitted + */ +u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks) +{ + const struct fscrypt_info *ci; + u32 dun; + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return nr_blocks; + + if (nr_blocks <= 1) + return nr_blocks; + + ci = inode->i_crypt_info; + if (!(fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) + return nr_blocks; + + /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. 
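+ *
+ * For example, if ci_hashed_ino + lblk works out to a 32-bit DUN of
+ * 0xFFFFFFFE, only two more data units (0xFFFFFFFE and 0xFFFFFFFF) fit before
+ * the wraparound, so at most 2 is returned regardless of nr_blocks.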
*/ + + dun = ci->ci_hashed_ino + lblk; + + return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun); +} +EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c new file mode 100644 index 0000000000..7cbb1fd872 --- /dev/null +++ b/fs/crypto/keyring.c @@ -0,0 +1,1204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Filesystem-level keyring for fscrypt + * + * Copyright 2019 Google LLC + */ + +/* + * This file implements management of fscrypt master keys in the + * filesystem-level keyring, including the ioctls: + * + * - FS_IOC_ADD_ENCRYPTION_KEY + * - FS_IOC_REMOVE_ENCRYPTION_KEY + * - FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS + * - FS_IOC_GET_ENCRYPTION_KEY_STATUS + * + * See the "User API" section of Documentation/filesystems/fscrypt.rst for more + * information about these ioctls. + */ + +#include <asm/unaligned.h> +#include <crypto/skcipher.h> +#include <linux/key-type.h> +#include <linux/random.h> +#include <linux/seq_file.h> + +#include "fscrypt_private.h" + +/* The master encryption keys for a filesystem (->s_master_keys) */ +struct fscrypt_keyring { + /* + * Lock that protects ->key_hashtable. It does *not* protect the + * fscrypt_master_key structs themselves. + */ + spinlock_t lock; + + /* Hash table that maps fscrypt_key_specifier to fscrypt_master_key */ + struct hlist_head key_hashtable[128]; +}; + +static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret) +{ + fscrypt_destroy_hkdf(&secret->hkdf); + memzero_explicit(secret, sizeof(*secret)); +} + +static void move_master_key_secret(struct fscrypt_master_key_secret *dst, + struct fscrypt_master_key_secret *src) +{ + memcpy(dst, src, sizeof(*dst)); + memzero_explicit(src, sizeof(*src)); +} + +static void fscrypt_free_master_key(struct rcu_head *head) +{ + struct fscrypt_master_key *mk = + container_of(head, struct fscrypt_master_key, mk_rcu_head); + /* + * The master key secret and any embedded subkeys should have already + * been wiped when the last active reference to the fscrypt_master_key + * struct was dropped; doing it here would be unnecessarily late. + * Nevertheless, use kfree_sensitive() in case anything was missed. + */ + kfree_sensitive(mk); +} + +void fscrypt_put_master_key(struct fscrypt_master_key *mk) +{ + if (!refcount_dec_and_test(&mk->mk_struct_refs)) + return; + /* + * No structural references left, so free ->mk_users, and also free the + * fscrypt_master_key struct itself after an RCU grace period ensures + * that concurrent keyring lookups can no longer find it. + */ + WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 0); + key_put(mk->mk_users); + mk->mk_users = NULL; + call_rcu(&mk->mk_rcu_head, fscrypt_free_master_key); +} + +void fscrypt_put_master_key_activeref(struct super_block *sb, + struct fscrypt_master_key *mk) +{ + size_t i; + + if (!refcount_dec_and_test(&mk->mk_active_refs)) + return; + /* + * No active references left, so complete the full removal of this + * fscrypt_master_key struct by removing it from the keyring and + * destroying any subkeys embedded in it. + */ + + if (WARN_ON_ONCE(!sb->s_master_keys)) + return; + spin_lock(&sb->s_master_keys->lock); + hlist_del_rcu(&mk->mk_node); + spin_unlock(&sb->s_master_keys->lock); + + /* + * ->mk_active_refs == 0 implies that ->mk_secret is not present and + * that ->mk_decrypted_inodes is empty. 
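+ *
+ * (For example, a key whose secret is still present and which has unlocked
+ * two inodes holds mk_active_refs == 3: one for ->mk_secret plus one per
+ * inode on ->mk_decrypted_inodes. Only once all of those are dropped do we
+ * get here.)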
+ */ + WARN_ON_ONCE(is_master_key_secret_present(&mk->mk_secret)); + WARN_ON_ONCE(!list_empty(&mk->mk_decrypted_inodes)); + + for (i = 0; i <= FSCRYPT_MODE_MAX; i++) { + fscrypt_destroy_prepared_key( + sb, &mk->mk_direct_keys[i]); + fscrypt_destroy_prepared_key( + sb, &mk->mk_iv_ino_lblk_64_keys[i]); + fscrypt_destroy_prepared_key( + sb, &mk->mk_iv_ino_lblk_32_keys[i]); + } + memzero_explicit(&mk->mk_ino_hash_key, + sizeof(mk->mk_ino_hash_key)); + mk->mk_ino_hash_key_initialized = false; + + /* Drop the structural ref associated with the active refs. */ + fscrypt_put_master_key(mk); +} + +static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) +{ + if (spec->__reserved) + return false; + return master_key_spec_len(spec) != 0; +} + +static int fscrypt_user_key_instantiate(struct key *key, + struct key_preparsed_payload *prep) +{ + /* + * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for + * each key, regardless of the exact key size. The amount of memory + * actually used is greater than the size of the raw key anyway. + */ + return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE); +} + +static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m) +{ + seq_puts(m, key->description); +} + +/* + * Type of key in ->mk_users. Each key of this type represents a particular + * user who has added a particular master key. + * + * Note that the name of this key type really should be something like + * ".fscrypt-user" instead of simply ".fscrypt". But the shorter name is chosen + * mainly for simplicity of presentation in /proc/keys when read by a non-root + * user. And it is expected to be rare that a key is actually added by multiple + * users, since users should keep their encryption keys confidential. + */ +static struct key_type key_type_fscrypt_user = { + .name = ".fscrypt", + .instantiate = fscrypt_user_key_instantiate, + .describe = fscrypt_user_key_describe, +}; + +#define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \ + (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \ + CONST_STRLEN("-users") + 1) + +#define FSCRYPT_MK_USER_DESCRIPTION_SIZE \ + (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1) + +static void format_mk_users_keyring_description( + char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE], + const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) +{ + sprintf(description, "fscrypt-%*phN-users", + FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier); +} + +static void format_mk_user_description( + char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE], + const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) +{ + + sprintf(description, "%*phN.uid.%u", FSCRYPT_KEY_IDENTIFIER_SIZE, + mk_identifier, __kuid_val(current_fsuid())); +} + +/* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */ +static int allocate_filesystem_keyring(struct super_block *sb) +{ + struct fscrypt_keyring *keyring; + + if (sb->s_master_keys) + return 0; + + keyring = kzalloc(sizeof(*keyring), GFP_KERNEL); + if (!keyring) + return -ENOMEM; + spin_lock_init(&keyring->lock); + /* + * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). + * I.e., here we publish ->s_master_keys with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. + */ + smp_store_release(&sb->s_master_keys, keyring); + return 0; +} + +/* + * Release all encryption keys that have been added to the filesystem, along + * with the keyring that contains them. 
+ * + * This is called at unmount time, after all potentially-encrypted inodes have + * been evicted. The filesystem's underlying block device(s) are still + * available at this time; this is important because after user file accesses + * have been allowed, this function may need to evict keys from the keyslots of + * an inline crypto engine, which requires the block device(s). + */ +void fscrypt_destroy_keyring(struct super_block *sb) +{ + struct fscrypt_keyring *keyring = sb->s_master_keys; + size_t i; + + if (!keyring) + return; + + for (i = 0; i < ARRAY_SIZE(keyring->key_hashtable); i++) { + struct hlist_head *bucket = &keyring->key_hashtable[i]; + struct fscrypt_master_key *mk; + struct hlist_node *tmp; + + hlist_for_each_entry_safe(mk, tmp, bucket, mk_node) { + /* + * Since all potentially-encrypted inodes were already + * evicted, every key remaining in the keyring should + * have an empty inode list, and should only still be in + * the keyring due to the single active ref associated + * with ->mk_secret. There should be no structural refs + * beyond the one associated with the active ref. + */ + WARN_ON_ONCE(refcount_read(&mk->mk_active_refs) != 1); + WARN_ON_ONCE(refcount_read(&mk->mk_struct_refs) != 1); + WARN_ON_ONCE(!is_master_key_secret_present(&mk->mk_secret)); + wipe_master_key_secret(&mk->mk_secret); + fscrypt_put_master_key_activeref(sb, mk); + } + } + kfree_sensitive(keyring); + sb->s_master_keys = NULL; +} + +static struct hlist_head * +fscrypt_mk_hash_bucket(struct fscrypt_keyring *keyring, + const struct fscrypt_key_specifier *mk_spec) +{ + /* + * Since key specifiers should be "random" values, it is sufficient to + * use a trivial hash function that just takes the first several bits of + * the key specifier. + */ + unsigned long i = get_unaligned((unsigned long *)&mk_spec->u); + + return &keyring->key_hashtable[i % ARRAY_SIZE(keyring->key_hashtable)]; +} + +/* + * Find the specified master key struct in ->s_master_keys and take a structural + * ref to it. The structural ref guarantees that the key struct continues to + * exist, but it does *not* guarantee that ->s_master_keys continues to contain + * the key struct. The structural ref needs to be dropped by + * fscrypt_put_master_key(). Returns NULL if the key struct is not found. + */ +struct fscrypt_master_key * +fscrypt_find_master_key(struct super_block *sb, + const struct fscrypt_key_specifier *mk_spec) +{ + struct fscrypt_keyring *keyring; + struct hlist_head *bucket; + struct fscrypt_master_key *mk; + + /* + * Pairs with the smp_store_release() in allocate_filesystem_keyring(). + * I.e., another task can publish ->s_master_keys concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. + */ + keyring = smp_load_acquire(&sb->s_master_keys); + if (keyring == NULL) + return NULL; /* No keyring yet, so no keys yet. 
*/ + + bucket = fscrypt_mk_hash_bucket(keyring, mk_spec); + rcu_read_lock(); + switch (mk_spec->type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + hlist_for_each_entry_rcu(mk, bucket, mk_node) { + if (mk->mk_spec.type == + FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + memcmp(mk->mk_spec.u.descriptor, + mk_spec->u.descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && + refcount_inc_not_zero(&mk->mk_struct_refs)) + goto out; + } + break; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: + hlist_for_each_entry_rcu(mk, bucket, mk_node) { + if (mk->mk_spec.type == + FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && + memcmp(mk->mk_spec.u.identifier, + mk_spec->u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE) == 0 && + refcount_inc_not_zero(&mk->mk_struct_refs)) + goto out; + } + break; + } + mk = NULL; +out: + rcu_read_unlock(); + return mk; +} + +static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) +{ + char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE]; + struct key *keyring; + + format_mk_users_keyring_description(description, + mk->mk_spec.u.identifier); + keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + current_cred(), KEY_POS_SEARCH | + KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, + KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); + if (IS_ERR(keyring)) + return PTR_ERR(keyring); + + mk->mk_users = keyring; + return 0; +} + +/* + * Find the current user's "key" in the master key's ->mk_users. + * Returns ERR_PTR(-ENOKEY) if not found. + */ +static struct key *find_master_key_user(struct fscrypt_master_key *mk) +{ + char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; + key_ref_t keyref; + + format_mk_user_description(description, mk->mk_spec.u.identifier); + + /* + * We need to mark the keyring reference as "possessed" so that we + * acquire permission to search it, via the KEY_POS_SEARCH permission. + */ + keyref = keyring_search(make_key_ref(mk->mk_users, true /*possessed*/), + &key_type_fscrypt_user, description, false); + if (IS_ERR(keyref)) { + if (PTR_ERR(keyref) == -EAGAIN || /* not found */ + PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ + keyref = ERR_PTR(-ENOKEY); + return ERR_CAST(keyref); + } + return key_ref_to_ptr(keyref); +} + +/* + * Give the current user a "key" in ->mk_users. This charges the user's quota + * and marks the master key as added by the current user, so that it cannot be + * removed by another user with the key. Either ->mk_sem must be held for + * write, or the master key must be still undergoing initialization. + */ +static int add_master_key_user(struct fscrypt_master_key *mk) +{ + char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; + struct key *mk_user; + int err; + + format_mk_user_description(description, mk->mk_spec.u.identifier); + mk_user = key_alloc(&key_type_fscrypt_user, description, + current_fsuid(), current_gid(), current_cred(), + KEY_POS_SEARCH | KEY_USR_VIEW, 0, NULL); + if (IS_ERR(mk_user)) + return PTR_ERR(mk_user); + + err = key_instantiate_and_link(mk_user, NULL, 0, mk->mk_users, NULL); + key_put(mk_user); + return err; +} + +/* + * Remove the current user's "key" from ->mk_users. + * ->mk_sem must be held for write. + * + * Returns 0 if removed, -ENOKEY if not found, or another -errno code. 
+ */ +static int remove_master_key_user(struct fscrypt_master_key *mk) +{ + struct key *mk_user; + int err; + + mk_user = find_master_key_user(mk); + if (IS_ERR(mk_user)) + return PTR_ERR(mk_user); + err = key_unlink(mk->mk_users, mk_user); + key_put(mk_user); + return err; +} + +/* + * Allocate a new fscrypt_master_key, transfer the given secret over to it, and + * insert it into sb->s_master_keys. + */ +static int add_new_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec) +{ + struct fscrypt_keyring *keyring = sb->s_master_keys; + struct fscrypt_master_key *mk; + int err; + + mk = kzalloc(sizeof(*mk), GFP_KERNEL); + if (!mk) + return -ENOMEM; + + init_rwsem(&mk->mk_sem); + refcount_set(&mk->mk_struct_refs, 1); + mk->mk_spec = *mk_spec; + + INIT_LIST_HEAD(&mk->mk_decrypted_inodes); + spin_lock_init(&mk->mk_decrypted_inodes_lock); + + if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { + err = allocate_master_key_users_keyring(mk); + if (err) + goto out_put; + err = add_master_key_user(mk); + if (err) + goto out_put; + } + + move_master_key_secret(&mk->mk_secret, secret); + refcount_set(&mk->mk_active_refs, 1); /* ->mk_secret is present */ + + spin_lock(&keyring->lock); + hlist_add_head_rcu(&mk->mk_node, + fscrypt_mk_hash_bucket(keyring, mk_spec)); + spin_unlock(&keyring->lock); + return 0; + +out_put: + fscrypt_put_master_key(mk); + return err; +} + +#define KEY_DEAD 1 + +static int add_existing_master_key(struct fscrypt_master_key *mk, + struct fscrypt_master_key_secret *secret) +{ + int err; + + /* + * If the current user is already in ->mk_users, then there's nothing to + * do. Otherwise, we need to add the user to ->mk_users. (Neither is + * applicable for v1 policy keys, which have NULL ->mk_users.) + */ + if (mk->mk_users) { + struct key *mk_user = find_master_key_user(mk); + + if (mk_user != ERR_PTR(-ENOKEY)) { + if (IS_ERR(mk_user)) + return PTR_ERR(mk_user); + key_put(mk_user); + return 0; + } + err = add_master_key_user(mk); + if (err) + return err; + } + + /* Re-add the secret if needed. */ + if (!is_master_key_secret_present(&mk->mk_secret)) { + if (!refcount_inc_not_zero(&mk->mk_active_refs)) + return KEY_DEAD; + move_master_key_secret(&mk->mk_secret, secret); + } + + return 0; +} + +static int do_add_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec) +{ + static DEFINE_MUTEX(fscrypt_add_key_mutex); + struct fscrypt_master_key *mk; + int err; + + mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ + + mk = fscrypt_find_master_key(sb, mk_spec); + if (!mk) { + /* Didn't find the key in ->s_master_keys. Add it. */ + err = allocate_filesystem_keyring(sb); + if (!err) + err = add_new_master_key(sb, secret, mk_spec); + } else { + /* + * Found the key in ->s_master_keys. Re-add the secret if + * needed, and add the user to ->mk_users if needed. + */ + down_write(&mk->mk_sem); + err = add_existing_master_key(mk, secret); + up_write(&mk->mk_sem); + if (err == KEY_DEAD) { + /* + * We found a key struct, but it's already been fully + * removed. Ignore the old struct and add a new one. + * fscrypt_add_key_mutex means we don't need to worry + * about concurrent adds. 
+ */ + err = add_new_master_key(sb, secret, mk_spec); + } + fscrypt_put_master_key(mk); + } + mutex_unlock(&fscrypt_add_key_mutex); + return err; +} + +static int add_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + struct fscrypt_key_specifier *key_spec) +{ + int err; + + if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { + err = fscrypt_init_hkdf(&secret->hkdf, secret->raw, + secret->size); + if (err) + return err; + + /* + * Now that the HKDF context is initialized, the raw key is no + * longer needed. + */ + memzero_explicit(secret->raw, secret->size); + + /* Calculate the key identifier */ + err = fscrypt_hkdf_expand(&secret->hkdf, + HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0, + key_spec->u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + if (err) + return err; + } + return do_add_master_key(sb, secret, key_spec); +} + +static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) +{ + const struct fscrypt_provisioning_key_payload *payload = prep->data; + + if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE || + prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE) + return -EINVAL; + + if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + payload->type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) + return -EINVAL; + + if (payload->__reserved) + return -EINVAL; + + prep->payload.data[0] = kmemdup(payload, prep->datalen, GFP_KERNEL); + if (!prep->payload.data[0]) + return -ENOMEM; + + prep->quotalen = prep->datalen; + return 0; +} + +static void fscrypt_provisioning_key_free_preparse( + struct key_preparsed_payload *prep) +{ + kfree_sensitive(prep->payload.data[0]); +} + +static void fscrypt_provisioning_key_describe(const struct key *key, + struct seq_file *m) +{ + seq_puts(m, key->description); + if (key_is_positive(key)) { + const struct fscrypt_provisioning_key_payload *payload = + key->payload.data[0]; + + seq_printf(m, ": %u [%u]", key->datalen, payload->type); + } +} + +static void fscrypt_provisioning_key_destroy(struct key *key) +{ + kfree_sensitive(key->payload.data[0]); +} + +static struct key_type key_type_fscrypt_provisioning = { + .name = "fscrypt-provisioning", + .preparse = fscrypt_provisioning_key_preparse, + .free_preparse = fscrypt_provisioning_key_free_preparse, + .instantiate = generic_key_instantiate, + .describe = fscrypt_provisioning_key_describe, + .destroy = fscrypt_provisioning_key_destroy, +}; + +/* + * Retrieve the raw key from the Linux keyring key specified by 'key_id', and + * store it into 'secret'. + * + * The key must be of type "fscrypt-provisioning" and must have the field + * fscrypt_provisioning_key_payload::type set to 'type', indicating that it's + * only usable with fscrypt with the particular KDF version identified by + * 'type'. We don't use the "logon" key type because there's no way to + * completely restrict the use of such keys; they can be used by any kernel API + * that accepts "logon" keys and doesn't require a specific service prefix. + * + * The ability to specify the key via Linux keyring key is intended for cases + * where userspace needs to re-add keys after the filesystem is unmounted and + * re-mounted. Most users should just provide the raw key directly instead. 
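 + *
 + * As a rough illustration of that common case, userspace might pass the raw
 + * key bytes directly in the ioctl argument, along the lines of the following
 + * sketch (error handling omitted; 'fd' is any file descriptor on the target
 + * filesystem and 'key_bytes' is a 64-byte key supplied by the caller):
 + *
 + *	struct fscrypt_add_key_arg *arg = calloc(1, sizeof(*arg) + 64);
 + *
 + *	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
 + *	arg->raw_size = 64;
 + *	memcpy(arg->raw, key_bytes, 64);
 + *	ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
 + *
 + * On success the kernel fills in arg->key_spec.u.identifier with the derived
 + * key identifier, which userspace can then use when setting v2 encryption
 + * policies.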
+ */ +static int get_keyring_key(u32 key_id, u32 type, + struct fscrypt_master_key_secret *secret) +{ + key_ref_t ref; + struct key *key; + const struct fscrypt_provisioning_key_payload *payload; + int err; + + ref = lookup_user_key(key_id, 0, KEY_NEED_SEARCH); + if (IS_ERR(ref)) + return PTR_ERR(ref); + key = key_ref_to_ptr(ref); + + if (key->type != &key_type_fscrypt_provisioning) + goto bad_key; + payload = key->payload.data[0]; + + /* Don't allow fscrypt v1 keys to be used as v2 keys and vice versa. */ + if (payload->type != type) + goto bad_key; + + secret->size = key->datalen - sizeof(*payload); + memcpy(secret->raw, payload->raw, secret->size); + err = 0; + goto out_put; + +bad_key: + err = -EKEYREJECTED; +out_put: + key_ref_put(ref); + return err; +} + +/* + * Add a master encryption key to the filesystem, causing all files which were + * encrypted with it to appear "unlocked" (decrypted) when accessed. + * + * When adding a key for use by v1 encryption policies, this ioctl is + * privileged, and userspace must provide the 'key_descriptor'. + * + * When adding a key for use by v2+ encryption policies, this ioctl is + * unprivileged. This is needed, in general, to allow non-root users to use + * encryption without encountering the visibility problems of process-subscribed + * keyrings and the inability to properly remove keys. This works by having + * each key identified by its cryptographically secure hash --- the + * 'key_identifier'. The cryptographic hash ensures that a malicious user + * cannot add the wrong key for a given identifier. Furthermore, each added key + * is charged to the appropriate user's quota for the keyrings service, which + * prevents a malicious user from adding too many keys. Finally, we forbid a + * user from removing a key while other users have added it too, which prevents + * a user who knows another user's key from causing a denial-of-service by + * removing it at an inopportune time. (We tolerate that a user who knows a key + * can prevent other users from removing it.) + * + * For more details, see the "FS_IOC_ADD_ENCRYPTION_KEY" section of + * Documentation/filesystems/fscrypt.rst. + */ +int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) +{ + struct super_block *sb = file_inode(filp)->i_sb; + struct fscrypt_add_key_arg __user *uarg = _uarg; + struct fscrypt_add_key_arg arg; + struct fscrypt_master_key_secret secret; + int err; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (!valid_key_spec(&arg.key_spec)) + return -EINVAL; + + if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) + return -EINVAL; + + /* + * Only root can add keys that are identified by an arbitrary descriptor + * rather than by a cryptographic hash --- since otherwise a malicious + * user could add the wrong key. 
+ */ + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + !capable(CAP_SYS_ADMIN)) + return -EACCES; + + memset(&secret, 0, sizeof(secret)); + if (arg.key_id) { + if (arg.raw_size != 0) + return -EINVAL; + err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret); + if (err) + goto out_wipe_secret; + } else { + if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || + arg.raw_size > FSCRYPT_MAX_KEY_SIZE) + return -EINVAL; + secret.size = arg.raw_size; + err = -EFAULT; + if (copy_from_user(secret.raw, uarg->raw, secret.size)) + goto out_wipe_secret; + } + + err = add_master_key(sb, &secret, &arg.key_spec); + if (err) + goto out_wipe_secret; + + /* Return the key identifier to userspace, if applicable */ + err = -EFAULT; + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER && + copy_to_user(uarg->key_spec.u.identifier, arg.key_spec.u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE)) + goto out_wipe_secret; + err = 0; +out_wipe_secret: + wipe_master_key_secret(&secret); + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); + +static void +fscrypt_get_test_dummy_secret(struct fscrypt_master_key_secret *secret) +{ + static u8 test_key[FSCRYPT_MAX_KEY_SIZE]; + + get_random_once(test_key, FSCRYPT_MAX_KEY_SIZE); + + memset(secret, 0, sizeof(*secret)); + secret->size = FSCRYPT_MAX_KEY_SIZE; + memcpy(secret->raw, test_key, FSCRYPT_MAX_KEY_SIZE); +} + +int fscrypt_get_test_dummy_key_identifier( + u8 key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) +{ + struct fscrypt_master_key_secret secret; + int err; + + fscrypt_get_test_dummy_secret(&secret); + + err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); + if (err) + goto out; + err = fscrypt_hkdf_expand(&secret.hkdf, HKDF_CONTEXT_KEY_IDENTIFIER, + NULL, 0, key_identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); +out: + wipe_master_key_secret(&secret); + return err; +} + +/** + * fscrypt_add_test_dummy_key() - add the test dummy encryption key + * @sb: the filesystem instance to add the key to + * @key_spec: the key specifier of the test dummy encryption key + * + * Add the key for the test_dummy_encryption mount option to the filesystem. To + * prevent misuse of this mount option, a per-boot random key is used instead of + * a hardcoded one. This makes it so that any encrypted files created using + * this option won't be accessible after a reboot. + * + * Return: 0 on success, -errno on failure + */ +int fscrypt_add_test_dummy_key(struct super_block *sb, + struct fscrypt_key_specifier *key_spec) +{ + struct fscrypt_master_key_secret secret; + int err; + + fscrypt_get_test_dummy_secret(&secret); + err = add_master_key(sb, &secret, key_spec); + wipe_master_key_secret(&secret); + return err; +} + +/* + * Verify that the current user has added a master key with the given identifier + * (returns -ENOKEY if not). This is needed to prevent a user from encrypting + * their files using some other user's key which they don't actually know. + * Cryptographically this isn't much of a problem, but the semantics of this + * would be a bit weird, so it's best to just forbid it. + * + * The system administrator (CAP_FOWNER) can override this, which should be + * enough for any use cases where encryption policies are being set using keys + * that were chosen ahead of time but aren't available at the moment. + * + * Note that the key may have already removed by the time this returns, but + * that's okay; we just care whether the key was there at some point. 
+ * + * Return: 0 if the key is added, -ENOKEY if it isn't, or another -errno code + */ +int fscrypt_verify_key_added(struct super_block *sb, + const u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) +{ + struct fscrypt_key_specifier mk_spec; + struct fscrypt_master_key *mk; + struct key *mk_user; + int err; + + mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; + memcpy(mk_spec.u.identifier, identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); + + mk = fscrypt_find_master_key(sb, &mk_spec); + if (!mk) { + err = -ENOKEY; + goto out; + } + down_read(&mk->mk_sem); + mk_user = find_master_key_user(mk); + if (IS_ERR(mk_user)) { + err = PTR_ERR(mk_user); + } else { + key_put(mk_user); + err = 0; + } + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); +out: + if (err == -ENOKEY && capable(CAP_FOWNER)) + err = 0; + return err; +} + +/* + * Try to evict the inode's dentries from the dentry cache. If the inode is a + * directory, then it can have at most one dentry; however, that dentry may be + * pinned by child dentries, so first try to evict the children too. + */ +static void shrink_dcache_inode(struct inode *inode) +{ + struct dentry *dentry; + + if (S_ISDIR(inode->i_mode)) { + dentry = d_find_any_alias(inode); + if (dentry) { + shrink_dcache_parent(dentry); + dput(dentry); + } + } + d_prune_aliases(inode); +} + +static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk) +{ + struct fscrypt_info *ci; + struct inode *inode; + struct inode *toput_inode = NULL; + + spin_lock(&mk->mk_decrypted_inodes_lock); + + list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) { + inode = ci->ci_inode; + spin_lock(&inode->i_lock); + if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { + spin_unlock(&inode->i_lock); + continue; + } + __iget(inode); + spin_unlock(&inode->i_lock); + spin_unlock(&mk->mk_decrypted_inodes_lock); + + shrink_dcache_inode(inode); + iput(toput_inode); + toput_inode = inode; + + spin_lock(&mk->mk_decrypted_inodes_lock); + } + + spin_unlock(&mk->mk_decrypted_inodes_lock); + iput(toput_inode); +} + +static int check_for_busy_inodes(struct super_block *sb, + struct fscrypt_master_key *mk) +{ + struct list_head *pos; + size_t busy_count = 0; + unsigned long ino; + char ino_str[50] = ""; + + spin_lock(&mk->mk_decrypted_inodes_lock); + + list_for_each(pos, &mk->mk_decrypted_inodes) + busy_count++; + + if (busy_count == 0) { + spin_unlock(&mk->mk_decrypted_inodes_lock); + return 0; + } + + { + /* select an example file to show for debugging purposes */ + struct inode *inode = + list_first_entry(&mk->mk_decrypted_inodes, + struct fscrypt_info, + ci_master_key_link)->ci_inode; + ino = inode->i_ino; + } + spin_unlock(&mk->mk_decrypted_inodes_lock); + + /* If the inode is currently being created, ino may still be 0. */ + if (ino) + snprintf(ino_str, sizeof(ino_str), ", including ino %lu", ino); + + fscrypt_warn(NULL, + "%s: %zu inode(s) still busy after removing key with %s %*phN%s", + sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), + master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, + ino_str); + return -EBUSY; +} + +static int try_to_lock_encrypted_files(struct super_block *sb, + struct fscrypt_master_key *mk) +{ + int err1; + int err2; + + /* + * An inode can't be evicted while it is dirty or has dirty pages. + * Thus, we first have to clean the inodes in ->mk_decrypted_inodes. + * + * Just do it the easy way: call sync_filesystem(). 
It's overkill, but + * it works, and it's more important to minimize the amount of caches we + * drop than the amount of data we sync. Also, unprivileged users can + * already call sync_filesystem() via sys_syncfs() or sys_sync(). + */ + down_read(&sb->s_umount); + err1 = sync_filesystem(sb); + up_read(&sb->s_umount); + /* If a sync error occurs, still try to evict as much as possible. */ + + /* + * Inodes are pinned by their dentries, so we have to evict their + * dentries. shrink_dcache_sb() would suffice, but would be overkill + * and inappropriate for use by unprivileged users. So instead go + * through the inodes' alias lists and try to evict each dentry. + */ + evict_dentries_for_decrypted_inodes(mk); + + /* + * evict_dentries_for_decrypted_inodes() already iput() each inode in + * the list; any inodes for which that dropped the last reference will + * have been evicted due to fscrypt_drop_inode() detecting the key + * removal and telling the VFS to evict the inode. So to finish, we + * just need to check whether any inodes couldn't be evicted. + */ + err2 = check_for_busy_inodes(sb, mk); + + return err1 ?: err2; +} + +/* + * Try to remove an fscrypt master encryption key. + * + * FS_IOC_REMOVE_ENCRYPTION_KEY (all_users=false) removes the current user's + * claim to the key, then removes the key itself if no other users have claims. + * FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS (all_users=true) always removes the + * key itself. + * + * To "remove the key itself", first we wipe the actual master key secret, so + * that no more inodes can be unlocked with it. Then we try to evict all cached + * inodes that had been unlocked with the key. + * + * If all inodes were evicted, then we unlink the fscrypt_master_key from the + * keyring. Otherwise it remains in the keyring in the "incompletely removed" + * state (without the actual secret key) where it tracks the list of remaining + * inodes. Userspace can execute the ioctl again later to retry eviction, or + * alternatively can re-add the secret key again. + * + * For more details, see the "Removing keys" section of + * Documentation/filesystems/fscrypt.rst. + */ +static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) +{ + struct super_block *sb = file_inode(filp)->i_sb; + struct fscrypt_remove_key_arg __user *uarg = _uarg; + struct fscrypt_remove_key_arg arg; + struct fscrypt_master_key *mk; + u32 status_flags = 0; + int err; + bool inodes_remain; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (!valid_key_spec(&arg.key_spec)) + return -EINVAL; + + if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) + return -EINVAL; + + /* + * Only root can add and remove keys that are identified by an arbitrary + * descriptor rather than by a cryptographic hash. + */ + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + !capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* Find the key being removed. */ + mk = fscrypt_find_master_key(sb, &arg.key_spec); + if (!mk) + return -ENOKEY; + down_write(&mk->mk_sem); + + /* If relevant, remove current user's (or all users) claim to the key */ + if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { + if (all_users) + err = keyring_clear(mk->mk_users); + else + err = remove_master_key_user(mk); + if (err) { + up_write(&mk->mk_sem); + goto out_put_key; + } + if (mk->mk_users->keys.nr_leaves_on_tree != 0) { + /* + * Other users have still added the key too. 
We removed + * the current user's claim to the key, but we still + * can't remove the key itself. + */ + status_flags |= + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS; + err = 0; + up_write(&mk->mk_sem); + goto out_put_key; + } + } + + /* No user claims remaining. Go ahead and wipe the secret. */ + err = -ENOKEY; + if (is_master_key_secret_present(&mk->mk_secret)) { + wipe_master_key_secret(&mk->mk_secret); + fscrypt_put_master_key_activeref(sb, mk); + err = 0; + } + inodes_remain = refcount_read(&mk->mk_active_refs) > 0; + up_write(&mk->mk_sem); + + if (inodes_remain) { + /* Some inodes still reference this key; try to evict them. */ + err = try_to_lock_encrypted_files(sb, mk); + if (err == -EBUSY) { + status_flags |= + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY; + err = 0; + } + } + /* + * We return 0 if we successfully did something: removed a claim to the + * key, wiped the secret, or tried locking the files again. Users need + * to check the informational status flags if they care whether the key + * has been fully removed including all files locked. + */ +out_put_key: + fscrypt_put_master_key(mk); + if (err == 0) + err = put_user(status_flags, &uarg->removal_status_flags); + return err; +} + +int fscrypt_ioctl_remove_key(struct file *filp, void __user *uarg) +{ + return do_remove_key(filp, uarg, false); +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); + +int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *uarg) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return do_remove_key(filp, uarg, true); +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users); + +/* + * Retrieve the status of an fscrypt master encryption key. + * + * We set ->status to indicate whether the key is absent, present, or + * incompletely removed. "Incompletely removed" means that the master key + * secret has been removed, but some files which had been unlocked with it are + * still in use. This field allows applications to easily determine the state + * of an encrypted directory without using a hack such as trying to open a + * regular file in it (which can confuse the "incompletely removed" state with + * absent or present). + * + * In addition, for v2 policy keys we allow applications to determine, via + * ->status_flags and ->user_count, whether the key has been added by the + * current user, by other users, or by both. Most applications should not need + * this, since ordinarily only one user should know a given key. However, if a + * secret key is shared by multiple users, applications may wish to add an + * already-present key to prevent other users from removing it. This ioctl can + * be used to check whether that really is the case before the work is done to + * add the key --- which might e.g. require prompting the user for a passphrase. + * + * For more details, see the "FS_IOC_GET_ENCRYPTION_KEY_STATUS" section of + * Documentation/filesystems/fscrypt.rst. 
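 + *
 + * For instance, after issuing FS_IOC_REMOVE_ENCRYPTION_KEY, a program could
 + * check whether any files protected by the key are still in use with a sketch
 + * like the following (error handling omitted; 'fd' and 'identifier' are
 + * assumed to be provided by the caller):
 + *
 + *	struct fscrypt_get_key_status_arg arg = { };
 + *
 + *	arg.key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
 + *	memcpy(arg.key_spec.u.identifier, identifier,
 + *	       FSCRYPT_KEY_IDENTIFIER_SIZE);
 + *	ioctl(fd, FS_IOC_GET_ENCRYPTION_KEY_STATUS, &arg);
 + *	if (arg.status == FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED)
 + *		... some files protected by the key are still open ...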
+ */ +int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) +{ + struct super_block *sb = file_inode(filp)->i_sb; + struct fscrypt_get_key_status_arg arg; + struct fscrypt_master_key *mk; + int err; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (!valid_key_spec(&arg.key_spec)) + return -EINVAL; + + if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) + return -EINVAL; + + arg.status_flags = 0; + arg.user_count = 0; + memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); + + mk = fscrypt_find_master_key(sb, &arg.key_spec); + if (!mk) { + arg.status = FSCRYPT_KEY_STATUS_ABSENT; + err = 0; + goto out; + } + down_read(&mk->mk_sem); + + if (!is_master_key_secret_present(&mk->mk_secret)) { + arg.status = refcount_read(&mk->mk_active_refs) > 0 ? + FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED : + FSCRYPT_KEY_STATUS_ABSENT /* raced with full removal */; + err = 0; + goto out_release_key; + } + + arg.status = FSCRYPT_KEY_STATUS_PRESENT; + if (mk->mk_users) { + struct key *mk_user; + + arg.user_count = mk->mk_users->keys.nr_leaves_on_tree; + mk_user = find_master_key_user(mk); + if (!IS_ERR(mk_user)) { + arg.status_flags |= + FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF; + key_put(mk_user); + } else if (mk_user != ERR_PTR(-ENOKEY)) { + err = PTR_ERR(mk_user); + goto out_release_key; + } + } + err = 0; +out_release_key: + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); +out: + if (!err && copy_to_user(uarg, &arg, sizeof(arg))) + err = -EFAULT; + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_key_status); + +int __init fscrypt_init_keyring(void) +{ + int err; + + err = register_key_type(&key_type_fscrypt_user); + if (err) + return err; + + err = register_key_type(&key_type_fscrypt_provisioning); + if (err) + goto err_unregister_fscrypt_user; + + return 0; + +err_unregister_fscrypt_user: + unregister_key_type(&key_type_fscrypt_user); + return err; +} diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c new file mode 100644 index 0000000000..361f41ef46 --- /dev/null +++ b/fs/crypto/keysetup.c @@ -0,0 +1,806 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Key setup facility for FS encryption support. + * + * Copyright (C) 2015, Google, Inc. + * + * Originally written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar. + * Heavily modified since then. 
+ */ + +#include <crypto/skcipher.h> +#include <linux/random.h> + +#include "fscrypt_private.h" + +struct fscrypt_mode fscrypt_modes[] = { + [FSCRYPT_MODE_AES_256_XTS] = { + .friendly_name = "AES-256-XTS", + .cipher_str = "xts(aes)", + .keysize = 64, + .security_strength = 32, + .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, + }, + [FSCRYPT_MODE_AES_256_CTS] = { + .friendly_name = "AES-256-CTS-CBC", + .cipher_str = "cts(cbc(aes))", + .keysize = 32, + .security_strength = 32, + .ivsize = 16, + }, + [FSCRYPT_MODE_AES_128_CBC] = { + .friendly_name = "AES-128-CBC-ESSIV", + .cipher_str = "essiv(cbc(aes),sha256)", + .keysize = 16, + .security_strength = 16, + .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, + }, + [FSCRYPT_MODE_AES_128_CTS] = { + .friendly_name = "AES-128-CTS-CBC", + .cipher_str = "cts(cbc(aes))", + .keysize = 16, + .security_strength = 16, + .ivsize = 16, + }, + [FSCRYPT_MODE_SM4_XTS] = { + .friendly_name = "SM4-XTS", + .cipher_str = "xts(sm4)", + .keysize = 32, + .security_strength = 16, + .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_SM4_XTS, + }, + [FSCRYPT_MODE_SM4_CTS] = { + .friendly_name = "SM4-CTS-CBC", + .cipher_str = "cts(cbc(sm4))", + .keysize = 16, + .security_strength = 16, + .ivsize = 16, + }, + [FSCRYPT_MODE_ADIANTUM] = { + .friendly_name = "Adiantum", + .cipher_str = "adiantum(xchacha12,aes)", + .keysize = 32, + .security_strength = 32, + .ivsize = 32, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, + }, + [FSCRYPT_MODE_AES_256_HCTR2] = { + .friendly_name = "AES-256-HCTR2", + .cipher_str = "hctr2(aes)", + .keysize = 32, + .security_strength = 32, + .ivsize = 32, + }, +}; + +static DEFINE_MUTEX(fscrypt_mode_key_setup_mutex); + +static struct fscrypt_mode * +select_encryption_mode(const union fscrypt_policy *policy, + const struct inode *inode) +{ + BUILD_BUG_ON(ARRAY_SIZE(fscrypt_modes) != FSCRYPT_MODE_MAX + 1); + + if (S_ISREG(inode->i_mode)) + return &fscrypt_modes[fscrypt_policy_contents_mode(policy)]; + + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + return &fscrypt_modes[fscrypt_policy_fnames_mode(policy)]; + + WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n", + inode->i_ino, (inode->i_mode & S_IFMT)); + return ERR_PTR(-EINVAL); +} + +/* Create a symmetric cipher object for the given encryption mode and key */ +static struct crypto_skcipher * +fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, + const struct inode *inode) +{ + struct crypto_skcipher *tfm; + int err; + + tfm = crypto_alloc_skcipher(mode->cipher_str, 0, 0); + if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) { + fscrypt_warn(inode, + "Missing crypto API support for %s (API name: \"%s\")", + mode->friendly_name, mode->cipher_str); + return ERR_PTR(-ENOPKG); + } + fscrypt_err(inode, "Error allocating '%s' transform: %ld", + mode->cipher_str, PTR_ERR(tfm)); + return tfm; + } + if (!xchg(&mode->logged_cryptoapi_impl, 1)) { + /* + * fscrypt performance can vary greatly depending on which + * crypto algorithm implementation is used. Help people debug + * performance problems by logging the ->cra_driver_name the + * first time a mode is used. 
+ */ + pr_info("fscrypt: %s using implementation \"%s\"\n", + mode->friendly_name, crypto_skcipher_driver_name(tfm)); + } + if (WARN_ON_ONCE(crypto_skcipher_ivsize(tfm) != mode->ivsize)) { + err = -EINVAL; + goto err_free_tfm; + } + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + err = crypto_skcipher_setkey(tfm, raw_key, mode->keysize); + if (err) + goto err_free_tfm; + + return tfm; + +err_free_tfm: + crypto_free_skcipher(tfm); + return ERR_PTR(err); +} + +/* + * Prepare the crypto transform object or blk-crypto key in @prep_key, given the + * raw key, encryption mode (@ci->ci_mode), flag indicating which encryption + * implementation (fs-layer or blk-crypto) will be used (@ci->ci_inlinecrypt), + * and IV generation method (@ci->ci_policy.flags). + */ +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci) +{ + struct crypto_skcipher *tfm; + + if (fscrypt_using_inline_encryption(ci)) + return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci); + + tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + /* + * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared(). + * I.e., here we publish ->tfm with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. Note that this concurrency is only + * possible for per-mode keys, not for per-file keys. + */ + smp_store_release(&prep_key->tfm, tfm); + return 0; +} + +/* Destroy a crypto transform object and/or blk-crypto key. */ +void fscrypt_destroy_prepared_key(struct super_block *sb, + struct fscrypt_prepared_key *prep_key) +{ + crypto_free_skcipher(prep_key->tfm); + fscrypt_destroy_inline_crypt_key(sb, prep_key); + memzero_explicit(prep_key, sizeof(*prep_key)); +} + +/* Given a per-file encryption key, set up the file's crypto transform object */ +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +{ + ci->ci_owns_key = true; + return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci); +} + +static int setup_per_mode_enc_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk, + struct fscrypt_prepared_key *keys, + u8 hkdf_context, bool include_fs_uuid) +{ + const struct inode *inode = ci->ci_inode; + const struct super_block *sb = inode->i_sb; + struct fscrypt_mode *mode = ci->ci_mode; + const u8 mode_num = mode - fscrypt_modes; + struct fscrypt_prepared_key *prep_key; + u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; + u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; + unsigned int hkdf_infolen = 0; + int err; + + if (WARN_ON_ONCE(mode_num > FSCRYPT_MODE_MAX)) + return -EINVAL; + + prep_key = &keys[mode_num]; + if (fscrypt_is_key_prepared(prep_key, ci)) { + ci->ci_enc_key = *prep_key; + return 0; + } + + mutex_lock(&fscrypt_mode_key_setup_mutex); + + if (fscrypt_is_key_prepared(prep_key, ci)) + goto done_unlock; + + BUILD_BUG_ON(sizeof(mode_num) != 1); + BUILD_BUG_ON(sizeof(sb->s_uuid) != 16); + BUILD_BUG_ON(sizeof(hkdf_info) != 17); + hkdf_info[hkdf_infolen++] = mode_num; + if (include_fs_uuid) { + memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid, + sizeof(sb->s_uuid)); + hkdf_infolen += sizeof(sb->s_uuid); + } + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + hkdf_context, hkdf_info, hkdf_infolen, + mode_key, mode->keysize); + if (err) + goto out_unlock; + err = fscrypt_prepare_key(prep_key, mode_key, ci); + memzero_explicit(mode_key, mode->keysize); + if (err) + goto out_unlock; +done_unlock: + ci->ci_enc_key = *prep_key; + err = 0; +out_unlock: + 
mutex_unlock(&fscrypt_mode_key_setup_mutex); + return err; +} + +/* + * Derive a SipHash key from the given fscrypt master key and the given + * application-specific information string. + * + * Note that the KDF produces a byte array, but the SipHash APIs expect the key + * as a pair of 64-bit words. Therefore, on big endian CPUs we have to do an + * endianness swap in order to get the same results as on little endian CPUs. + */ +static int fscrypt_derive_siphash_key(const struct fscrypt_master_key *mk, + u8 context, const u8 *info, + unsigned int infolen, siphash_key_t *key) +{ + int err; + + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, context, info, infolen, + (u8 *)key, sizeof(*key)); + if (err) + return err; + + BUILD_BUG_ON(sizeof(*key) != 16); + BUILD_BUG_ON(ARRAY_SIZE(key->key) != 2); + le64_to_cpus(&key->key[0]); + le64_to_cpus(&key->key[1]); + return 0; +} + +int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk) +{ + int err; + + err = fscrypt_derive_siphash_key(mk, HKDF_CONTEXT_DIRHASH_KEY, + ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE, + &ci->ci_dirhash_key); + if (err) + return err; + ci->ci_dirhash_key_initialized = true; + return 0; +} + +void fscrypt_hash_inode_number(struct fscrypt_info *ci, + const struct fscrypt_master_key *mk) +{ + WARN_ON_ONCE(ci->ci_inode->i_ino == 0); + WARN_ON_ONCE(!mk->mk_ino_hash_key_initialized); + + ci->ci_hashed_ino = (u32)siphash_1u64(ci->ci_inode->i_ino, + &mk->mk_ino_hash_key); +} + +static int fscrypt_setup_iv_ino_lblk_32_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk) +{ + int err; + + err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_32_keys, + HKDF_CONTEXT_IV_INO_LBLK_32_KEY, true); + if (err) + return err; + + /* pairs with smp_store_release() below */ + if (!smp_load_acquire(&mk->mk_ino_hash_key_initialized)) { + + mutex_lock(&fscrypt_mode_key_setup_mutex); + + if (mk->mk_ino_hash_key_initialized) + goto unlock; + + err = fscrypt_derive_siphash_key(mk, + HKDF_CONTEXT_INODE_HASH_KEY, + NULL, 0, &mk->mk_ino_hash_key); + if (err) + goto unlock; + /* pairs with smp_load_acquire() above */ + smp_store_release(&mk->mk_ino_hash_key_initialized, true); +unlock: + mutex_unlock(&fscrypt_mode_key_setup_mutex); + if (err) + return err; + } + + /* + * New inodes may not have an inode number assigned yet. + * Hashing their inode number is delayed until later. + */ + if (ci->ci_inode->i_ino) + fscrypt_hash_inode_number(ci, mk); + return 0; +} + +static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk, + bool need_dirhash_key) +{ + int err; + + if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { + /* + * DIRECT_KEY: instead of deriving per-file encryption keys, the + * per-file nonce will be included in all the IVs. But unlike + * v1 policies, for v2 policies in this case we don't encrypt + * with the master key directly but rather derive a per-mode + * encryption key. This ensures that the master key is + * consistently used only for HKDF, avoiding key reuse issues. + */ + err = setup_per_mode_enc_key(ci, mk, mk->mk_direct_keys, + HKDF_CONTEXT_DIRECT_KEY, false); + } else if (ci->ci_policy.v2.flags & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) { + /* + * IV_INO_LBLK_64: encryption keys are derived from (master_key, + * mode_num, filesystem_uuid), and inode number is included in + * the IVs. This format is optimized for use with inline + * encryption hardware compliant with the UFS standard. 
+ */ + err = setup_per_mode_enc_key(ci, mk, mk->mk_iv_ino_lblk_64_keys, + HKDF_CONTEXT_IV_INO_LBLK_64_KEY, + true); + } else if (ci->ci_policy.v2.flags & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) { + err = fscrypt_setup_iv_ino_lblk_32_key(ci, mk); + } else { + u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; + + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + HKDF_CONTEXT_PER_FILE_ENC_KEY, + ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE, + derived_key, ci->ci_mode->keysize); + if (err) + return err; + + err = fscrypt_set_per_file_enc_key(ci, derived_key); + memzero_explicit(derived_key, ci->ci_mode->keysize); + } + if (err) + return err; + + /* Derive a secret dirhash key for directories that need it. */ + if (need_dirhash_key) { + err = fscrypt_derive_dirhash_key(ci, mk); + if (err) + return err; + } + + return 0; +} + +/* + * Check whether the size of the given master key (@mk) is appropriate for the + * encryption settings which a particular file will use (@ci). + * + * If the file uses a v1 encryption policy, then the master key must be at least + * as long as the derived key, as this is a requirement of the v1 KDF. + * + * Otherwise, the KDF can accept any size key, so we enforce a slightly looser + * requirement: we require that the size of the master key be at least the + * maximum security strength of any algorithm whose key will be derived from it + * (but in practice we only need to consider @ci->ci_mode, since any other + * possible subkeys such as DIRHASH and INODE_HASH will never increase the + * required key size over @ci->ci_mode). This allows AES-256-XTS keys to be + * derived from a 256-bit master key, which is cryptographically sufficient, + * rather than requiring a 512-bit master key which is unnecessarily long. (We + * still allow 512-bit master keys if the user chooses to use them, though.) + */ +static bool fscrypt_valid_master_key_size(const struct fscrypt_master_key *mk, + const struct fscrypt_info *ci) +{ + unsigned int min_keysize; + + if (ci->ci_policy.version == FSCRYPT_POLICY_V1) + min_keysize = ci->ci_mode->keysize; + else + min_keysize = ci->ci_mode->security_strength; + + if (mk->mk_secret.size < min_keysize) { + fscrypt_warn(NULL, + "key with %s %*phN is too short (got %u bytes, need %u+ bytes)", + master_key_spec_type(&mk->mk_spec), + master_key_spec_len(&mk->mk_spec), + (u8 *)&mk->mk_spec.u, + mk->mk_secret.size, min_keysize); + return false; + } + return true; +} + +/* + * Find the master key, then set up the inode's actual encryption key. + * + * If the master key is found in the filesystem-level keyring, then it is + * returned in *mk_ret with its semaphore read-locked. This is needed to ensure + * that only one task links the fscrypt_info into ->mk_decrypted_inodes (as + * multiple tasks may race to create an fscrypt_info for the same inode), and to + * synchronize the master key being removed with a new inode starting to use it. + */ +static int setup_file_encryption_key(struct fscrypt_info *ci, + bool need_dirhash_key, + struct fscrypt_master_key **mk_ret) +{ + struct super_block *sb = ci->ci_inode->i_sb; + struct fscrypt_key_specifier mk_spec; + struct fscrypt_master_key *mk; + int err; + + err = fscrypt_select_encryption_impl(ci); + if (err) + return err; + + err = fscrypt_policy_to_key_spec(&ci->ci_policy, &mk_spec); + if (err) + return err; + + mk = fscrypt_find_master_key(sb, &mk_spec); + if (unlikely(!mk)) { + const union fscrypt_policy *dummy_policy = + fscrypt_get_dummy_policy(sb); + + /* + * Add the test_dummy_encryption key on-demand. 
In principle, + * it should be added at mount time. Do it here instead so that + * the individual filesystems don't need to worry about adding + * this key at mount time and cleaning up on mount failure. + */ + if (dummy_policy && + fscrypt_policies_equal(dummy_policy, &ci->ci_policy)) { + err = fscrypt_add_test_dummy_key(sb, &mk_spec); + if (err) + return err; + mk = fscrypt_find_master_key(sb, &mk_spec); + } + } + if (unlikely(!mk)) { + if (ci->ci_policy.version != FSCRYPT_POLICY_V1) + return -ENOKEY; + + /* + * As a legacy fallback for v1 policies, search for the key in + * the current task's subscribed keyrings too. Don't move this + * to before the search of ->s_master_keys, since users + * shouldn't be able to override filesystem-level keys. + */ + return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci); + } + down_read(&mk->mk_sem); + + /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */ + if (!is_master_key_secret_present(&mk->mk_secret)) { + err = -ENOKEY; + goto out_release_key; + } + + if (!fscrypt_valid_master_key_size(mk, ci)) { + err = -ENOKEY; + goto out_release_key; + } + + switch (ci->ci_policy.version) { + case FSCRYPT_POLICY_V1: + err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); + break; + case FSCRYPT_POLICY_V2: + err = fscrypt_setup_v2_file_key(ci, mk, need_dirhash_key); + break; + default: + WARN_ON_ONCE(1); + err = -EINVAL; + break; + } + if (err) + goto out_release_key; + + *mk_ret = mk; + return 0; + +out_release_key: + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); + return err; +} + +static void put_crypt_info(struct fscrypt_info *ci) +{ + struct fscrypt_master_key *mk; + + if (!ci) + return; + + if (ci->ci_direct_key) + fscrypt_put_direct_key(ci->ci_direct_key); + else if (ci->ci_owns_key) + fscrypt_destroy_prepared_key(ci->ci_inode->i_sb, + &ci->ci_enc_key); + + mk = ci->ci_master_key; + if (mk) { + /* + * Remove this inode from the list of inodes that were unlocked + * with the master key. In addition, if we're removing the last + * inode from a master key struct that already had its secret + * removed, then complete the full removal of the struct. + */ + spin_lock(&mk->mk_decrypted_inodes_lock); + list_del(&ci->ci_master_key_link); + spin_unlock(&mk->mk_decrypted_inodes_lock); + fscrypt_put_master_key_activeref(ci->ci_inode->i_sb, mk); + } + memzero_explicit(ci, sizeof(*ci)); + kmem_cache_free(fscrypt_info_cachep, ci); +} + +static int +fscrypt_setup_encryption_info(struct inode *inode, + const union fscrypt_policy *policy, + const u8 nonce[FSCRYPT_FILE_NONCE_SIZE], + bool need_dirhash_key) +{ + struct fscrypt_info *crypt_info; + struct fscrypt_mode *mode; + struct fscrypt_master_key *mk = NULL; + int res; + + res = fscrypt_initialize(inode->i_sb); + if (res) + return res; + + crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_KERNEL); + if (!crypt_info) + return -ENOMEM; + + crypt_info->ci_inode = inode; + crypt_info->ci_policy = *policy; + memcpy(crypt_info->ci_nonce, nonce, FSCRYPT_FILE_NONCE_SIZE); + + mode = select_encryption_mode(&crypt_info->ci_policy, inode); + if (IS_ERR(mode)) { + res = PTR_ERR(mode); + goto out; + } + WARN_ON_ONCE(mode->ivsize > FSCRYPT_MAX_IV_SIZE); + crypt_info->ci_mode = mode; + + res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk); + if (res) + goto out; + + /* + * For existing inodes, multiple tasks may race to set ->i_crypt_info. + * So use cmpxchg_release(). This pairs with the smp_load_acquire() in + * fscrypt_get_info(). 
I.e., here we publish ->i_crypt_info with a + * RELEASE barrier so that other tasks can ACQUIRE it. + */ + if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) { + /* + * We won the race and set ->i_crypt_info to our crypt_info. + * Now link it into the master key's inode list. + */ + if (mk) { + crypt_info->ci_master_key = mk; + refcount_inc(&mk->mk_active_refs); + spin_lock(&mk->mk_decrypted_inodes_lock); + list_add(&crypt_info->ci_master_key_link, + &mk->mk_decrypted_inodes); + spin_unlock(&mk->mk_decrypted_inodes_lock); + } + crypt_info = NULL; + } + res = 0; +out: + if (mk) { + up_read(&mk->mk_sem); + fscrypt_put_master_key(mk); + } + put_crypt_info(crypt_info); + return res; +} + +/** + * fscrypt_get_encryption_info() - set up an inode's encryption key + * @inode: the inode to set up the key for. Must be encrypted. + * @allow_unsupported: if %true, treat an unsupported encryption policy (or + * unrecognized encryption context) the same way as the key + * being unavailable, instead of returning an error. Use + * %false unless the operation being performed is needed in + * order for files (or directories) to be deleted. + * + * Set up ->i_crypt_info, if it hasn't already been done. + * + * Note: unless ->i_crypt_info is already set, this isn't %GFP_NOFS-safe. So + * generally this shouldn't be called from within a filesystem transaction. + * + * Return: 0 if ->i_crypt_info was set or was already set, *or* if the + * encryption key is unavailable. (Use fscrypt_has_encryption_key() to + * distinguish these cases.) Also can return another -errno code. + */ +int fscrypt_get_encryption_info(struct inode *inode, bool allow_unsupported) +{ + int res; + union fscrypt_context ctx; + union fscrypt_policy policy; + + if (fscrypt_has_encryption_key(inode)) + return 0; + + res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (res < 0) { + if (res == -ERANGE && allow_unsupported) + return 0; + fscrypt_warn(inode, "Error %d getting encryption context", res); + return res; + } + + res = fscrypt_policy_from_context(&policy, &ctx, res); + if (res) { + if (allow_unsupported) + return 0; + fscrypt_warn(inode, + "Unrecognized or corrupt encryption context"); + return res; + } + + if (!fscrypt_supported_policy(&policy, inode)) { + if (allow_unsupported) + return 0; + return -EINVAL; + } + + res = fscrypt_setup_encryption_info(inode, &policy, + fscrypt_context_nonce(&ctx), + IS_CASEFOLDED(inode) && + S_ISDIR(inode->i_mode)); + + if (res == -ENOPKG && allow_unsupported) /* Algorithm unavailable? */ + res = 0; + if (res == -ENOKEY) + res = 0; + return res; +} + +/** + * fscrypt_prepare_new_inode() - prepare to create a new inode in a directory + * @dir: a possibly-encrypted directory + * @inode: the new inode. ->i_mode must be set already. + * ->i_ino doesn't need to be set yet. + * @encrypt_ret: (output) set to %true if the new inode will be encrypted + * + * If the directory is encrypted, set up its ->i_crypt_info in preparation for + * encrypting the name of the new file. Also, if the new inode will be + * encrypted, set up its ->i_crypt_info and set *encrypt_ret=true. + * + * This isn't %GFP_NOFS-safe, and therefore it should be called before starting + * any filesystem transaction to create the inode. For this reason, ->i_ino + * isn't required to be set yet, as the filesystem may not have set it yet. + * + * This doesn't persist the new inode's encryption context. That still needs to + * be done later by calling fscrypt_set_context(). 
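 + *
 + * A filesystem's inode-creation path might use it roughly like this (sketch
 + * only; the surrounding allocation and error handling are the filesystem's
 + * own):
 + *
 + *	bool encrypt = false;
 + *	int err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
 + *
 + *	if (err)
 + *		return err;
 + *	if (encrypt)
 + *		... mark the in-core inode as encrypted, and call
 + *		    fscrypt_set_context() once the on-disk inode exists ...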
+ * + * Return: 0 on success, -ENOKEY if the encryption key is missing, or another + * -errno code + */ +int fscrypt_prepare_new_inode(struct inode *dir, struct inode *inode, + bool *encrypt_ret) +{ + const union fscrypt_policy *policy; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; + + policy = fscrypt_policy_to_inherit(dir); + if (policy == NULL) + return 0; + if (IS_ERR(policy)) + return PTR_ERR(policy); + + if (WARN_ON_ONCE(inode->i_mode == 0)) + return -EINVAL; + + /* + * Only regular files, directories, and symlinks are encrypted. + * Special files like device nodes and named pipes aren't. + */ + if (!S_ISREG(inode->i_mode) && + !S_ISDIR(inode->i_mode) && + !S_ISLNK(inode->i_mode)) + return 0; + + *encrypt_ret = true; + + get_random_bytes(nonce, FSCRYPT_FILE_NONCE_SIZE); + return fscrypt_setup_encryption_info(inode, policy, nonce, + IS_CASEFOLDED(dir) && + S_ISDIR(inode->i_mode)); +} +EXPORT_SYMBOL_GPL(fscrypt_prepare_new_inode); + +/** + * fscrypt_put_encryption_info() - free most of an inode's fscrypt data + * @inode: an inode being evicted + * + * Free the inode's fscrypt_info. Filesystems must call this when the inode is + * being evicted. An RCU grace period need not have elapsed yet. + */ +void fscrypt_put_encryption_info(struct inode *inode) +{ + put_crypt_info(inode->i_crypt_info); + inode->i_crypt_info = NULL; +} +EXPORT_SYMBOL(fscrypt_put_encryption_info); + +/** + * fscrypt_free_inode() - free an inode's fscrypt data requiring RCU delay + * @inode: an inode being freed + * + * Free the inode's cached decrypted symlink target, if any. Filesystems must + * call this after an RCU grace period, just before they free the inode. + */ +void fscrypt_free_inode(struct inode *inode) +{ + if (IS_ENCRYPTED(inode) && S_ISLNK(inode->i_mode)) { + kfree(inode->i_link); + inode->i_link = NULL; + } +} +EXPORT_SYMBOL(fscrypt_free_inode); + +/** + * fscrypt_drop_inode() - check whether the inode's master key has been removed + * @inode: an inode being considered for eviction + * + * Filesystems supporting fscrypt must call this from their ->drop_inode() + * method so that encrypted inodes are evicted as soon as they're no longer in + * use and their master key has been removed. + * + * Return: 1 if fscrypt wants the inode to be evicted now, otherwise 0 + */ +int fscrypt_drop_inode(struct inode *inode) +{ + const struct fscrypt_info *ci = fscrypt_get_info(inode); + + /* + * If ci is NULL, then the inode doesn't have an encryption key set up + * so it's irrelevant. If ci_master_key is NULL, then the master key + * was provided via the legacy mechanism of the process-subscribed + * keyrings, so we don't know whether it's been removed or not. + */ + if (!ci || !ci->ci_master_key) + return 0; + + /* + * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes + * protected by the key were cleaned by sync_filesystem(). But if + * userspace is still using the files, inodes can be dirtied between + * then and now. We mustn't lose any writes, so skip dirty inodes here. + */ + if (inode->i_state & I_DIRTY_ALL) + return 0; + + /* + * Note: since we aren't holding the key semaphore, the result here can + * immediately become outdated. But there's no correctness problem with + * unnecessarily evicting. Nor is there a correctness problem with not + * evicting while iput() is racing with the key being removed, since + * then the thread removing the key will either evict the inode itself + * or will correctly detect that it wasn't evicted due to the race. 
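 + *
 + * For reference, a filesystem typically wires this into its ->drop_inode()
 + * method together with generic_drop_inode(), roughly as ext4 does (sketch
 + * only, with the hypothetical name example_drop_inode):
 + *
 + *	static int example_drop_inode(struct inode *inode)
 + *	{
 + *		int drop = generic_drop_inode(inode);
 + *
 + *		if (!drop)
 + *			drop = fscrypt_drop_inode(inode);
 + *		return drop;
 + *	}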
+ */ + return !is_master_key_secret_present(&ci->ci_master_key->mk_secret); +} +EXPORT_SYMBOL_GPL(fscrypt_drop_inode); diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c new file mode 100644 index 0000000000..75dabd9b27 --- /dev/null +++ b/fs/crypto/keysetup_v1.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Key setup for v1 encryption policies + * + * Copyright 2015, 2019 Google LLC + */ + +/* + * This file implements compatibility functions for the original encryption + * policy version ("v1"), including: + * + * - Deriving per-file encryption keys using the AES-128-ECB based KDF + * (rather than the new method of using HKDF-SHA512) + * + * - Retrieving fscrypt master keys from process-subscribed keyrings + * (rather than the new method of using a filesystem-level keyring) + * + * - Handling policies with the DIRECT_KEY flag set using a master key table + * (rather than the new method of implementing DIRECT_KEY with per-mode keys + * managed alongside the master keys in the filesystem-level keyring) + */ + +#include <crypto/algapi.h> +#include <crypto/skcipher.h> +#include <keys/user-type.h> +#include <linux/hashtable.h> +#include <linux/scatterlist.h> + +#include "fscrypt_private.h" + +/* Table of keys referenced by DIRECT_KEY policies */ +static DEFINE_HASHTABLE(fscrypt_direct_keys, 6); /* 6 bits = 64 buckets */ +static DEFINE_SPINLOCK(fscrypt_direct_keys_lock); + +/* + * v1 key derivation function. This generates the derived key by encrypting the + * master key with AES-128-ECB using the nonce as the AES key. This provides a + * unique derived key with sufficient entropy for each inode. However, it's + * nonstandard, non-extensible, doesn't evenly distribute the entropy from the + * master key, and is trivially reversible: an attacker who compromises a + * derived key can "decrypt" it to get back to the master key, then derive any + * other key. For all new code, use HKDF instead. + * + * The master key must be at least as long as the derived key. If the master + * key is longer, then only the first 'derived_keysize' bytes are used. + */ +static int derive_key_aes(const u8 *master_key, + const u8 nonce[FSCRYPT_FILE_NONCE_SIZE], + u8 *derived_key, unsigned int derived_keysize) +{ + int res = 0; + struct skcipher_request *req = NULL; + DECLARE_CRYPTO_WAIT(wait); + struct scatterlist src_sg, dst_sg; + struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); + + if (IS_ERR(tfm)) { + res = PTR_ERR(tfm); + tfm = NULL; + goto out; + } + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + res = -ENOMEM; + goto out; + } + skcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &wait); + res = crypto_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE); + if (res < 0) + goto out; + + sg_init_one(&src_sg, master_key, derived_keysize); + sg_init_one(&dst_sg, derived_key, derived_keysize); + skcipher_request_set_crypt(req, &src_sg, &dst_sg, derived_keysize, + NULL); + res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); +out: + skcipher_request_free(req); + crypto_free_skcipher(tfm); + return res; +} + +/* + * Search the current task's subscribed keyrings for a "logon" key with + * description prefix:descriptor, and if found acquire a read lock on it and + * return a pointer to its validated payload in *payload_ret. 
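 + *
 + * For context, such "logon" keys are created by userspace with add_key(2)
 + * before the encrypted files are accessed, roughly like the following sketch
 + * (error handling omitted; 'key_bytes' is supplied by the caller and the
 + * descriptor shown is just a placeholder):
 + *
 + *	struct fscrypt_key payload = { .size = 64 };
 + *
 + *	memcpy(payload.raw, key_bytes, 64);
 + *	add_key("logon", "fscrypt:0123456789abcdef", &payload,
 + *		sizeof(payload), KEY_SPEC_SESSION_KEYRING);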
+ */ +static struct key * +find_and_lock_process_key(const char *prefix, + const u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE], + unsigned int min_keysize, + const struct fscrypt_key **payload_ret) +{ + char *description; + struct key *key; + const struct user_key_payload *ukp; + const struct fscrypt_key *payload; + + description = kasprintf(GFP_KERNEL, "%s%*phN", prefix, + FSCRYPT_KEY_DESCRIPTOR_SIZE, descriptor); + if (!description) + return ERR_PTR(-ENOMEM); + + key = request_key(&key_type_logon, description, NULL); + kfree(description); + if (IS_ERR(key)) + return key; + + down_read(&key->sem); + ukp = user_key_payload_locked(key); + + if (!ukp) /* was the key revoked before we acquired its semaphore? */ + goto invalid; + + payload = (const struct fscrypt_key *)ukp->data; + + if (ukp->datalen != sizeof(struct fscrypt_key) || + payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) { + fscrypt_warn(NULL, + "key with description '%s' has invalid payload", + key->description); + goto invalid; + } + + if (payload->size < min_keysize) { + fscrypt_warn(NULL, + "key with description '%s' is too short (got %u bytes, need %u+ bytes)", + key->description, payload->size, min_keysize); + goto invalid; + } + + *payload_ret = payload; + return key; + +invalid: + up_read(&key->sem); + key_put(key); + return ERR_PTR(-ENOKEY); +} + +/* Master key referenced by DIRECT_KEY policy */ +struct fscrypt_direct_key { + struct super_block *dk_sb; + struct hlist_node dk_node; + refcount_t dk_refcount; + const struct fscrypt_mode *dk_mode; + struct fscrypt_prepared_key dk_key; + u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; +}; + +static void free_direct_key(struct fscrypt_direct_key *dk) +{ + if (dk) { + fscrypt_destroy_prepared_key(dk->dk_sb, &dk->dk_key); + kfree_sensitive(dk); + } +} + +void fscrypt_put_direct_key(struct fscrypt_direct_key *dk) +{ + if (!refcount_dec_and_lock(&dk->dk_refcount, &fscrypt_direct_keys_lock)) + return; + hash_del(&dk->dk_node); + spin_unlock(&fscrypt_direct_keys_lock); + + free_direct_key(dk); +} + +/* + * Find/insert the given key into the fscrypt_direct_keys table. If found, it + * is returned with elevated refcount, and 'to_insert' is freed if non-NULL. If + * not found, 'to_insert' is inserted and returned if it's non-NULL; otherwise + * NULL is returned. + */ +static struct fscrypt_direct_key * +find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, + const u8 *raw_key, const struct fscrypt_info *ci) +{ + unsigned long hash_key; + struct fscrypt_direct_key *dk; + + /* + * Careful: to avoid potentially leaking secret key bytes via timing + * information, we must key the hash table by descriptor rather than by + * raw key, and use crypto_memneq() when comparing raw keys. 
+ */ + + BUILD_BUG_ON(sizeof(hash_key) > FSCRYPT_KEY_DESCRIPTOR_SIZE); + memcpy(&hash_key, ci->ci_policy.v1.master_key_descriptor, + sizeof(hash_key)); + + spin_lock(&fscrypt_direct_keys_lock); + hash_for_each_possible(fscrypt_direct_keys, dk, dk_node, hash_key) { + if (memcmp(ci->ci_policy.v1.master_key_descriptor, + dk->dk_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE) != 0) + continue; + if (ci->ci_mode != dk->dk_mode) + continue; + if (!fscrypt_is_key_prepared(&dk->dk_key, ci)) + continue; + if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize)) + continue; + /* using existing tfm with same (descriptor, mode, raw_key) */ + refcount_inc(&dk->dk_refcount); + spin_unlock(&fscrypt_direct_keys_lock); + free_direct_key(to_insert); + return dk; + } + if (to_insert) + hash_add(fscrypt_direct_keys, &to_insert->dk_node, hash_key); + spin_unlock(&fscrypt_direct_keys_lock); + return to_insert; +} + +/* Prepare to encrypt directly using the master key in the given mode */ +static struct fscrypt_direct_key * +fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) +{ + struct fscrypt_direct_key *dk; + int err; + + /* Is there already a tfm for this key? */ + dk = find_or_insert_direct_key(NULL, raw_key, ci); + if (dk) + return dk; + + /* Nope, allocate one. */ + dk = kzalloc(sizeof(*dk), GFP_KERNEL); + if (!dk) + return ERR_PTR(-ENOMEM); + dk->dk_sb = ci->ci_inode->i_sb; + refcount_set(&dk->dk_refcount, 1); + dk->dk_mode = ci->ci_mode; + err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci); + if (err) + goto err_free_dk; + memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); + + return find_or_insert_direct_key(dk, raw_key, ci); + +err_free_dk: + free_direct_key(dk); + return ERR_PTR(err); +} + +/* v1 policy, DIRECT_KEY: use the master key directly */ +static int setup_v1_file_key_direct(struct fscrypt_info *ci, + const u8 *raw_master_key) +{ + struct fscrypt_direct_key *dk; + + dk = fscrypt_get_direct_key(ci, raw_master_key); + if (IS_ERR(dk)) + return PTR_ERR(dk); + ci->ci_direct_key = dk; + ci->ci_enc_key = dk->dk_key; + return 0; +} + +/* v1 policy, !DIRECT_KEY: derive the file's encryption key */ +static int setup_v1_file_key_derived(struct fscrypt_info *ci, + const u8 *raw_master_key) +{ + u8 *derived_key; + int err; + + /* + * This cannot be a stack buffer because it will be passed to the + * scatterlist crypto API during derive_key_aes(). 
+ */ + derived_key = kmalloc(ci->ci_mode->keysize, GFP_KERNEL); + if (!derived_key) + return -ENOMEM; + + err = derive_key_aes(raw_master_key, ci->ci_nonce, + derived_key, ci->ci_mode->keysize); + if (err) + goto out; + + err = fscrypt_set_per_file_enc_key(ci, derived_key); +out: + kfree_sensitive(derived_key); + return err; +} + +int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key) +{ + if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + return setup_v1_file_key_direct(ci, raw_master_key); + else + return setup_v1_file_key_derived(ci, raw_master_key); +} + +int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci) +{ + struct key *key; + const struct fscrypt_key *payload; + int err; + + key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX, + ci->ci_policy.v1.master_key_descriptor, + ci->ci_mode->keysize, &payload); + if (key == ERR_PTR(-ENOKEY) && ci->ci_inode->i_sb->s_cop->key_prefix) { + key = find_and_lock_process_key(ci->ci_inode->i_sb->s_cop->key_prefix, + ci->ci_policy.v1.master_key_descriptor, + ci->ci_mode->keysize, &payload); + } + if (IS_ERR(key)) + return PTR_ERR(key); + + err = fscrypt_setup_v1_file_key(ci, payload->raw); + up_read(&key->sem); + key_put(key); + return err; +} diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c new file mode 100644 index 0000000000..f4456ecb3f --- /dev/null +++ b/fs/crypto/policy.c @@ -0,0 +1,867 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Encryption policy functions for per-file encryption support. + * + * Copyright (C) 2015, Google, Inc. + * Copyright (C) 2015, Motorola Mobility. + * + * Originally written by Michael Halcrow, 2015. + * Modified by Jaegeuk Kim, 2015. + * Modified by Eric Biggers, 2019 for v2 policy support. + */ + +#include <linux/fs_context.h> +#include <linux/random.h> +#include <linux/seq_file.h> +#include <linux/string.h> +#include <linux/mount.h> +#include "fscrypt_private.h" + +/** + * fscrypt_policies_equal() - check whether two encryption policies are the same + * @policy1: the first policy + * @policy2: the second policy + * + * Return: %true if equal, else %false + */ +bool fscrypt_policies_equal(const union fscrypt_policy *policy1, + const union fscrypt_policy *policy2) +{ + if (policy1->version != policy2->version) + return false; + + return !memcmp(policy1, policy2, fscrypt_policy_size(policy1)); +} + +int fscrypt_policy_to_key_spec(const union fscrypt_policy *policy, + struct fscrypt_key_specifier *key_spec) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + key_spec->type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; + memcpy(key_spec->u.descriptor, policy->v1.master_key_descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + return 0; + case FSCRYPT_POLICY_V2: + key_spec->type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; + memcpy(key_spec->u.identifier, policy->v2.master_key_identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + return 0; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } +} + +const union fscrypt_policy *fscrypt_get_dummy_policy(struct super_block *sb) +{ + if (!sb->s_cop->get_dummy_policy) + return NULL; + return sb->s_cop->get_dummy_policy(sb); +} + +/* + * Return %true if the given combination of encryption modes is supported for v1 + * (and later) encryption policies. + * + * Do *not* add anything new here, since v1 encryption policies are deprecated. + * New combinations of modes should go in fscrypt_valid_enc_modes_v2() only. 
+ */ +static bool fscrypt_valid_enc_modes_v1(u32 contents_mode, u32 filenames_mode) +{ + if (contents_mode == FSCRYPT_MODE_AES_256_XTS && + filenames_mode == FSCRYPT_MODE_AES_256_CTS) + return true; + + if (contents_mode == FSCRYPT_MODE_AES_128_CBC && + filenames_mode == FSCRYPT_MODE_AES_128_CTS) + return true; + + if (contents_mode == FSCRYPT_MODE_ADIANTUM && + filenames_mode == FSCRYPT_MODE_ADIANTUM) + return true; + + return false; +} + +static bool fscrypt_valid_enc_modes_v2(u32 contents_mode, u32 filenames_mode) +{ + if (contents_mode == FSCRYPT_MODE_AES_256_XTS && + filenames_mode == FSCRYPT_MODE_AES_256_HCTR2) + return true; + + if (contents_mode == FSCRYPT_MODE_SM4_XTS && + filenames_mode == FSCRYPT_MODE_SM4_CTS) + return true; + + return fscrypt_valid_enc_modes_v1(contents_mode, filenames_mode); +} + +static bool supported_direct_key_modes(const struct inode *inode, + u32 contents_mode, u32 filenames_mode) +{ + const struct fscrypt_mode *mode; + + if (contents_mode != filenames_mode) { + fscrypt_warn(inode, + "Direct key flag not allowed with different contents and filenames modes"); + return false; + } + mode = &fscrypt_modes[contents_mode]; + + if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) { + fscrypt_warn(inode, "Direct key flag not allowed with %s", + mode->friendly_name); + return false; + } + return true; +} + +static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, + const struct inode *inode, + const char *type, + int max_ino_bits, int max_lblk_bits) +{ + struct super_block *sb = inode->i_sb; + int ino_bits = 64, lblk_bits = 64; + + /* + * IV_INO_LBLK_* exist only because of hardware limitations, and + * currently the only known use case for them involves AES-256-XTS. + * That's also all we test currently. For these reasons, for now only + * allow AES-256-XTS here. This can be relaxed later if a use case for + * IV_INO_LBLK_* with other encryption modes arises. + */ + if (policy->contents_encryption_mode != FSCRYPT_MODE_AES_256_XTS) { + fscrypt_warn(inode, + "Can't use %s policy with contents mode other than AES-256-XTS", + type); + return false; + } + + /* + * It's unsafe to include inode numbers in the IVs if the filesystem can + * potentially renumber inodes, e.g. via filesystem shrinking. 
+ */ + if (!sb->s_cop->has_stable_inodes || + !sb->s_cop->has_stable_inodes(sb)) { + fscrypt_warn(inode, + "Can't use %s policy on filesystem '%s' because it doesn't have stable inode numbers", + type, sb->s_id); + return false; + } + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + if (ino_bits > max_ino_bits) { + fscrypt_warn(inode, + "Can't use %s policy on filesystem '%s' because its inode numbers are too long", + type, sb->s_id); + return false; + } + if (lblk_bits > max_lblk_bits) { + fscrypt_warn(inode, + "Can't use %s policy on filesystem '%s' because its block numbers are too long", + type, sb->s_id); + return false; + } + return true; +} + +static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, + const struct inode *inode) +{ + if (!fscrypt_valid_enc_modes_v1(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | + FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { + fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && + !supported_direct_key_modes(inode, policy->contents_encryption_mode, + policy->filenames_encryption_mode)) + return false; + + if (IS_CASEFOLDED(inode)) { + /* With v1, there's no way to derive dirhash keys. */ + fscrypt_warn(inode, + "v1 policies can't be used on casefolded directories"); + return false; + } + + return true; +} + +static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, + const struct inode *inode) +{ + int count = 0; + + if (!fscrypt_valid_enc_modes_v2(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | + FSCRYPT_POLICY_FLAG_DIRECT_KEY | + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 | + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) { + fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY); + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64); + count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32); + if (count > 1) { + fscrypt_warn(inode, "Mutually exclusive encryption flags (0x%02x)", + policy->flags); + return false; + } + + if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && + !supported_direct_key_modes(inode, policy->contents_encryption_mode, + policy->filenames_encryption_mode)) + return false; + + if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) && + !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_64", + 32, 32)) + return false; + + /* + * IV_INO_LBLK_32 hashes the inode number, so in principle it can + * support any ino_bits. However, currently the inode number is gotten + * from inode::i_ino which is 'unsigned long'. So for now the + * implementation limit is 32 bits. 
+ */
+	if ((policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
+	    !supported_iv_ino_lblk_policy(policy, inode, "IV_INO_LBLK_32",
+					  32, 32))
+		return false;
+
+	if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) {
+		fscrypt_warn(inode, "Reserved bits set in encryption policy");
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * fscrypt_supported_policy() - check whether an encryption policy is supported
+ * @policy_u: the encryption policy
+ * @inode: the inode on which the policy will be used
+ *
+ * Given an encryption policy, check whether all its encryption modes and other
+ * settings are supported by this kernel on the given inode. (But we don't
+ * currently check for crypto API support here, so attempting to use an
+ * algorithm not configured into the crypto API will still fail later.)
+ *
+ * Return: %true if supported, else %false
+ */
+bool fscrypt_supported_policy(const union fscrypt_policy *policy_u,
+			      const struct inode *inode)
+{
+	switch (policy_u->version) {
+	case FSCRYPT_POLICY_V1:
+		return fscrypt_supported_v1_policy(&policy_u->v1, inode);
+	case FSCRYPT_POLICY_V2:
+		return fscrypt_supported_v2_policy(&policy_u->v2, inode);
+	}
+	return false;
+}
+
+/**
+ * fscrypt_new_context() - create a new fscrypt_context
+ * @ctx_u: output context
+ * @policy_u: input policy
+ * @nonce: nonce to use
+ *
+ * Create an fscrypt_context for an inode that is being assigned the given
+ * encryption policy. @nonce must be a new random nonce.
+ *
+ * Return: the size of the new context in bytes.
+ */
+static int fscrypt_new_context(union fscrypt_context *ctx_u,
+			       const union fscrypt_policy *policy_u,
+			       const u8 nonce[FSCRYPT_FILE_NONCE_SIZE])
+{
+	memset(ctx_u, 0, sizeof(*ctx_u));
+
+	switch (policy_u->version) {
+	case FSCRYPT_POLICY_V1: {
+		const struct fscrypt_policy_v1 *policy = &policy_u->v1;
+		struct fscrypt_context_v1 *ctx = &ctx_u->v1;
+
+		ctx->version = FSCRYPT_CONTEXT_V1;
+		ctx->contents_encryption_mode =
+			policy->contents_encryption_mode;
+		ctx->filenames_encryption_mode =
+			policy->filenames_encryption_mode;
+		ctx->flags = policy->flags;
+		memcpy(ctx->master_key_descriptor,
+		       policy->master_key_descriptor,
+		       sizeof(ctx->master_key_descriptor));
+		memcpy(ctx->nonce, nonce, FSCRYPT_FILE_NONCE_SIZE);
+		return sizeof(*ctx);
+	}
+	case FSCRYPT_POLICY_V2: {
+		const struct fscrypt_policy_v2 *policy = &policy_u->v2;
+		struct fscrypt_context_v2 *ctx = &ctx_u->v2;
+
+		ctx->version = FSCRYPT_CONTEXT_V2;
+		ctx->contents_encryption_mode =
+			policy->contents_encryption_mode;
+		ctx->filenames_encryption_mode =
+			policy->filenames_encryption_mode;
+		ctx->flags = policy->flags;
+		memcpy(ctx->master_key_identifier,
+		       policy->master_key_identifier,
+		       sizeof(ctx->master_key_identifier));
+		memcpy(ctx->nonce, nonce, FSCRYPT_FILE_NONCE_SIZE);
+		return sizeof(*ctx);
+	}
+	}
+	BUG();
+}
+
+/**
+ * fscrypt_policy_from_context() - convert an fscrypt_context to
+ *				   an fscrypt_policy
+ * @policy_u: output policy
+ * @ctx_u: input context
+ * @ctx_size: size of input context in bytes
+ *
+ * Given an fscrypt_context, build the corresponding fscrypt_policy.
+ *
+ * Return: 0 on success, or -EINVAL if the fscrypt_context has an unrecognized
+ * version number or size.
+ *
+ * This does *not* validate the settings within the policy itself, e.g. the
+ * modes, flags, and reserved bits. Use fscrypt_policy_from_context() together
+ * with fscrypt_supported_policy() for that.
+ */ +int fscrypt_policy_from_context(union fscrypt_policy *policy_u, + const union fscrypt_context *ctx_u, + int ctx_size) +{ + memset(policy_u, 0, sizeof(*policy_u)); + + if (!fscrypt_context_is_valid(ctx_u, ctx_size)) + return -EINVAL; + + switch (ctx_u->version) { + case FSCRYPT_CONTEXT_V1: { + const struct fscrypt_context_v1 *ctx = &ctx_u->v1; + struct fscrypt_policy_v1 *policy = &policy_u->v1; + + policy->version = FSCRYPT_POLICY_V1; + policy->contents_encryption_mode = + ctx->contents_encryption_mode; + policy->filenames_encryption_mode = + ctx->filenames_encryption_mode; + policy->flags = ctx->flags; + memcpy(policy->master_key_descriptor, + ctx->master_key_descriptor, + sizeof(policy->master_key_descriptor)); + return 0; + } + case FSCRYPT_CONTEXT_V2: { + const struct fscrypt_context_v2 *ctx = &ctx_u->v2; + struct fscrypt_policy_v2 *policy = &policy_u->v2; + + policy->version = FSCRYPT_POLICY_V2; + policy->contents_encryption_mode = + ctx->contents_encryption_mode; + policy->filenames_encryption_mode = + ctx->filenames_encryption_mode; + policy->flags = ctx->flags; + memcpy(policy->__reserved, ctx->__reserved, + sizeof(policy->__reserved)); + memcpy(policy->master_key_identifier, + ctx->master_key_identifier, + sizeof(policy->master_key_identifier)); + return 0; + } + } + /* unreachable */ + return -EINVAL; +} + +/* Retrieve an inode's encryption policy */ +static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy) +{ + const struct fscrypt_info *ci; + union fscrypt_context ctx; + int ret; + + ci = fscrypt_get_info(inode); + if (ci) { + /* key available, use the cached policy */ + *policy = ci->ci_policy; + return 0; + } + + if (!IS_ENCRYPTED(inode)) + return -ENODATA; + + ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (ret < 0) + return (ret == -ERANGE) ? -EINVAL : ret; + + return fscrypt_policy_from_context(policy, &ctx, ret); +} + +static int set_encryption_policy(struct inode *inode, + const union fscrypt_policy *policy) +{ + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; + union fscrypt_context ctx; + int ctxsize; + int err; + + if (!fscrypt_supported_policy(policy, inode)) + return -EINVAL; + + switch (policy->version) { + case FSCRYPT_POLICY_V1: + /* + * The original encryption policy version provided no way of + * verifying that the correct master key was supplied, which was + * insecure in scenarios where multiple users have access to the + * same encrypted files (even just read-only access). The new + * encryption policy version fixes this and also implies use of + * an improved key derivation function and allows non-root users + * to securely remove keys. So as long as compatibility with + * old kernels isn't required, it is recommended to use the new + * policy version for all new encrypted directories. + */ + pr_warn_once("%s (pid %d) is setting deprecated v1 encryption policy; recommend upgrading to v2.\n", + current->comm, current->pid); + break; + case FSCRYPT_POLICY_V2: + err = fscrypt_verify_key_added(inode->i_sb, + policy->v2.master_key_identifier); + if (err) + return err; + if (policy->v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + pr_warn_once("%s (pid %d) is setting an IV_INO_LBLK_32 encryption policy. 
This should only be used if there are certain hardware limitations.\n", + current->comm, current->pid); + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + get_random_bytes(nonce, FSCRYPT_FILE_NONCE_SIZE); + ctxsize = fscrypt_new_context(&ctx, policy, nonce); + + return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, NULL); +} + +int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) +{ + union fscrypt_policy policy; + union fscrypt_policy existing_policy; + struct inode *inode = file_inode(filp); + u8 version; + int size; + int ret; + + if (get_user(policy.version, (const u8 __user *)arg)) + return -EFAULT; + + size = fscrypt_policy_size(&policy); + if (size <= 0) + return -EINVAL; + + /* + * We should just copy the remaining 'size - 1' bytes here, but a + * bizarre bug in gcc 7 and earlier (fixed by gcc r255731) causes gcc to + * think that size can be 0 here (despite the check above!) *and* that + * it's a compile-time constant. Thus it would think copy_from_user() + * is passed compile-time constant ULONG_MAX, causing the compile-time + * buffer overflow check to fail, breaking the build. This only occurred + * when building an i386 kernel with -Os and branch profiling enabled. + * + * Work around it by just copying the first byte again... + */ + version = policy.version; + if (copy_from_user(&policy, arg, size)) + return -EFAULT; + policy.version = version; + + if (!inode_owner_or_capable(&nop_mnt_idmap, inode)) + return -EACCES; + + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + inode_lock(inode); + + ret = fscrypt_get_policy(inode, &existing_policy); + if (ret == -ENODATA) { + if (!S_ISDIR(inode->i_mode)) + ret = -ENOTDIR; + else if (IS_DEADDIR(inode)) + ret = -ENOENT; + else if (!inode->i_sb->s_cop->empty_dir(inode)) + ret = -ENOTEMPTY; + else + ret = set_encryption_policy(inode, &policy); + } else if (ret == -EINVAL || + (ret == 0 && !fscrypt_policies_equal(&policy, + &existing_policy))) { + /* The file already uses a different encryption policy. 
*/ + ret = -EEXIST; + } + + inode_unlock(inode); + + mnt_drop_write_file(filp); + return ret; +} +EXPORT_SYMBOL(fscrypt_ioctl_set_policy); + +/* Original ioctl version; can only get the original policy version */ +int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) +{ + union fscrypt_policy policy; + int err; + + err = fscrypt_get_policy(file_inode(filp), &policy); + if (err) + return err; + + if (policy.version != FSCRYPT_POLICY_V1) + return -EINVAL; + + if (copy_to_user(arg, &policy, sizeof(policy.v1))) + return -EFAULT; + return 0; +} +EXPORT_SYMBOL(fscrypt_ioctl_get_policy); + +/* Extended ioctl version; can get policies of any version */ +int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg) +{ + struct fscrypt_get_policy_ex_arg arg; + union fscrypt_policy *policy = (union fscrypt_policy *)&arg.policy; + size_t policy_size; + int err; + + /* arg is policy_size, then policy */ + BUILD_BUG_ON(offsetof(typeof(arg), policy_size) != 0); + BUILD_BUG_ON(offsetofend(typeof(arg), policy_size) != + offsetof(typeof(arg), policy)); + BUILD_BUG_ON(sizeof(arg.policy) != sizeof(*policy)); + + err = fscrypt_get_policy(file_inode(filp), policy); + if (err) + return err; + policy_size = fscrypt_policy_size(policy); + + if (copy_from_user(&arg, uarg, sizeof(arg.policy_size))) + return -EFAULT; + + if (policy_size > arg.policy_size) + return -EOVERFLOW; + arg.policy_size = policy_size; + + if (copy_to_user(uarg, &arg, sizeof(arg.policy_size) + policy_size)) + return -EFAULT; + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex); + +/* FS_IOC_GET_ENCRYPTION_NONCE: retrieve file's encryption nonce for testing */ +int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) +{ + struct inode *inode = file_inode(filp); + union fscrypt_context ctx; + int ret; + + ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (ret < 0) + return ret; + if (!fscrypt_context_is_valid(&ctx, ret)) + return -EINVAL; + if (copy_to_user(arg, fscrypt_context_nonce(&ctx), + FSCRYPT_FILE_NONCE_SIZE)) + return -EFAULT; + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_nonce); + +/** + * fscrypt_has_permitted_context() - is a file's encryption policy permitted + * within its directory? + * + * @parent: inode for parent directory + * @child: inode for file being looked up, opened, or linked into @parent + * + * Filesystems must call this before permitting access to an inode in a + * situation where the parent directory is encrypted (either before allowing + * ->lookup() to succeed, or for a regular file before allowing it to be opened) + * and before any operation that involves linking an inode into an encrypted + * directory, including link, rename, and cross rename. It enforces the + * constraint that within a given encrypted directory tree, all files use the + * same encryption policy. The pre-access check is needed to detect potentially + * malicious offline violations of this constraint, while the link and rename + * checks are needed to prevent online violations of this constraint. + * + * Return: 1 if permitted, 0 if forbidden. 
+ */ +int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) +{ + union fscrypt_policy parent_policy, child_policy; + int err, err1, err2; + + /* No restrictions on file types which are never encrypted */ + if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && + !S_ISLNK(child->i_mode)) + return 1; + + /* No restrictions if the parent directory is unencrypted */ + if (!IS_ENCRYPTED(parent)) + return 1; + + /* Encrypted directories must not contain unencrypted files */ + if (!IS_ENCRYPTED(child)) + return 0; + + /* + * Both parent and child are encrypted, so verify they use the same + * encryption policy. Compare the fscrypt_info structs if the keys are + * available, otherwise retrieve and compare the fscrypt_contexts. + * + * Note that the fscrypt_context retrieval will be required frequently + * when accessing an encrypted directory tree without the key. + * Performance-wise this is not a big deal because we already don't + * really optimize for file access without the key (to the extent that + * such access is even possible), given that any attempted access + * already causes a fscrypt_context retrieval and keyring search. + * + * In any case, if an unexpected error occurs, fall back to "forbidden". + */ + + err = fscrypt_get_encryption_info(parent, true); + if (err) + return 0; + err = fscrypt_get_encryption_info(child, true); + if (err) + return 0; + + err1 = fscrypt_get_policy(parent, &parent_policy); + err2 = fscrypt_get_policy(child, &child_policy); + + /* + * Allow the case where the parent and child both have an unrecognized + * encryption policy, so that files with an unrecognized encryption + * policy can be deleted. + */ + if (err1 == -EINVAL && err2 == -EINVAL) + return 1; + + if (err1 || err2) + return 0; + + return fscrypt_policies_equal(&parent_policy, &child_policy); +} +EXPORT_SYMBOL(fscrypt_has_permitted_context); + +/* + * Return the encryption policy that new files in the directory will inherit, or + * NULL if none, or an ERR_PTR() on error. If the directory is encrypted, also + * ensure that its key is set up, so that the new filename can be encrypted. + */ +const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir) +{ + int err; + + if (IS_ENCRYPTED(dir)) { + err = fscrypt_require_key(dir); + if (err) + return ERR_PTR(err); + return &dir->i_crypt_info->ci_policy; + } + + return fscrypt_get_dummy_policy(dir->i_sb); +} + +/** + * fscrypt_context_for_new_inode() - create an encryption context for a new inode + * @ctx: where context should be written + * @inode: inode from which to fetch policy and nonce + * + * Given an in-core "prepared" (via fscrypt_prepare_new_inode) inode, + * generate a new context and write it to ctx. ctx _must_ be at least + * FSCRYPT_SET_CONTEXT_MAX_SIZE bytes. + * + * Return: size of the resulting context or a negative error code. + */ +int fscrypt_context_for_new_inode(void *ctx, struct inode *inode) +{ + struct fscrypt_info *ci = inode->i_crypt_info; + + BUILD_BUG_ON(sizeof(union fscrypt_context) != + FSCRYPT_SET_CONTEXT_MAX_SIZE); + + /* fscrypt_prepare_new_inode() should have set up the key already. 
*/ + if (WARN_ON_ONCE(!ci)) + return -ENOKEY; + + return fscrypt_new_context(ctx, &ci->ci_policy, ci->ci_nonce); +} +EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode); + +/** + * fscrypt_set_context() - Set the fscrypt context of a new inode + * @inode: a new inode + * @fs_data: private data given by FS and passed to ->set_context() + * + * This should be called after fscrypt_prepare_new_inode(), generally during a + * filesystem transaction. Everything here must be %GFP_NOFS-safe. + * + * Return: 0 on success, -errno on failure + */ +int fscrypt_set_context(struct inode *inode, void *fs_data) +{ + struct fscrypt_info *ci = inode->i_crypt_info; + union fscrypt_context ctx; + int ctxsize; + + ctxsize = fscrypt_context_for_new_inode(&ctx, inode); + if (ctxsize < 0) + return ctxsize; + + /* + * This may be the first time the inode number is available, so do any + * delayed key setup that requires the inode number. + */ + if (ci->ci_policy.version == FSCRYPT_POLICY_V2 && + (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) + fscrypt_hash_inode_number(ci, ci->ci_master_key); + + return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data); +} +EXPORT_SYMBOL_GPL(fscrypt_set_context); + +/** + * fscrypt_parse_test_dummy_encryption() - parse the test_dummy_encryption mount option + * @param: the mount option + * @dummy_policy: (input/output) the place to write the dummy policy that will + * result from parsing the option. Zero-initialize this. If a policy is + * already set here (due to test_dummy_encryption being given multiple + * times), then this function will verify that the policies are the same. + * + * Return: 0 on success; -EINVAL if the argument is invalid; -EEXIST if the + * argument conflicts with one already specified; or -ENOMEM. + */ +int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param, + struct fscrypt_dummy_policy *dummy_policy) +{ + const char *arg = "v2"; + union fscrypt_policy *policy; + int err; + + if (param->type == fs_value_is_string && *param->string) + arg = param->string; + + policy = kzalloc(sizeof(*policy), GFP_KERNEL); + if (!policy) + return -ENOMEM; + + if (!strcmp(arg, "v1")) { + policy->version = FSCRYPT_POLICY_V1; + policy->v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + policy->v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + memset(policy->v1.master_key_descriptor, 0x42, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + } else if (!strcmp(arg, "v2")) { + policy->version = FSCRYPT_POLICY_V2; + policy->v2.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + policy->v2.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + err = fscrypt_get_test_dummy_key_identifier( + policy->v2.master_key_identifier); + if (err) + goto out; + } else { + err = -EINVAL; + goto out; + } + + if (dummy_policy->policy) { + if (fscrypt_policies_equal(policy, dummy_policy->policy)) + err = 0; + else + err = -EEXIST; + goto out; + } + dummy_policy->policy = policy; + policy = NULL; + err = 0; +out: + kfree(policy); + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_parse_test_dummy_encryption); + +/** + * fscrypt_dummy_policies_equal() - check whether two dummy policies are equal + * @p1: the first test dummy policy (may be unset) + * @p2: the second test dummy policy (may be unset) + * + * Return: %true if the dummy policies are both set and equal, or both unset. 
+ */ +bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1, + const struct fscrypt_dummy_policy *p2) +{ + if (!p1->policy && !p2->policy) + return true; + if (!p1->policy || !p2->policy) + return false; + return fscrypt_policies_equal(p1->policy, p2->policy); +} +EXPORT_SYMBOL_GPL(fscrypt_dummy_policies_equal); + +/** + * fscrypt_show_test_dummy_encryption() - show '-o test_dummy_encryption' + * @seq: the seq_file to print the option to + * @sep: the separator character to use + * @sb: the filesystem whose options are being shown + * + * Show the test_dummy_encryption mount option, if it was specified. + * This is mainly used for /proc/mounts. + */ +void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, + struct super_block *sb) +{ + const union fscrypt_policy *policy = fscrypt_get_dummy_policy(sb); + int vers; + + if (!policy) + return; + + vers = policy->version; + if (vers == FSCRYPT_POLICY_V1) /* Handle numbering quirk */ + vers = 1; + + seq_printf(seq, "%ctest_dummy_encryption=v%d", sep, vers); +} +EXPORT_SYMBOL_GPL(fscrypt_show_test_dummy_encryption); |
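For reference, the main user-visible entry point added in policy.c above is the FS_IOC_SET_ENCRYPTION_POLICY ioctl, handled by fscrypt_ioctl_set_policy(). The sketch below is illustrative only and is not part of this commit: it shows how a userspace program might apply a v2 policy to an empty directory using the UAPI definitions from <linux/fscrypt.h>. The key identifier argument is a placeholder (in practice it comes from a prior FS_IOC_ADD_ENCRYPTION_KEY call), and the chosen modes and padding flag are just one common configuration.

/*
 * Illustrative userspace sketch (not part of this commit): set a v2
 * encryption policy on an empty directory.  'key_id' is a placeholder that
 * a real caller would obtain from FS_IOC_ADD_ENCRYPTION_KEY.
 */
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fscrypt.h>

int set_v2_policy(const char *dir,
		  const __u8 key_id[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
	struct fscrypt_policy_v2 policy = {
		.version = FSCRYPT_POLICY_V2,
		.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS,
		.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS,
		.flags = FSCRYPT_POLICY_FLAGS_PAD_32,
	};
	int fd, ret = 0;

	memcpy(policy.master_key_identifier, key_id,
	       FSCRYPT_KEY_IDENTIFIER_SIZE);

	/* The target must be a directory owned by the caller. */
	fd = open(dir, O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return -errno;

	/* Kernel side: fscrypt_ioctl_set_policy() above. */
	if (ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy) != 0)
		ret = -errno;

	close(fd);
	return ret;
}

Per set_encryption_policy() above, a v2 policy can typically only be set after the corresponding master key has been added (fscrypt_verify_key_added() otherwise fails, commonly with ENOKEY), and fscrypt_ioctl_set_policy() returns EEXIST if the directory already has a different policy, ENOTEMPTY if it is not empty, and ENOTDIR if the target is not a directory.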