author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /security/keys
parent     Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'security/keys')
-rw-r--r--  security/keys/Kconfig                             |  125
-rw-r--r--  security/keys/Makefile                            |   32
-rw-r--r--  security/keys/big_key.c                           |  294
-rw-r--r--  security/keys/compat.c                            |  132
-rw-r--r--  security/keys/compat_dh.c                         |   36
-rw-r--r--  security/keys/dh.c                                |  423
-rw-r--r--  security/keys/encrypted-keys/Makefile             |   11
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.c    |   77
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.h    |   27
-rw-r--r--  security/keys/encrypted-keys/encrypted.c          | 1010
-rw-r--r--  security/keys/encrypted-keys/encrypted.h          |   67
-rw-r--r--  security/keys/encrypted-keys/masterkey_trusted.c  |   43
-rw-r--r--  security/keys/gc.c                                |  380
-rw-r--r--  security/keys/internal.h                          |  385
-rw-r--r--  security/keys/key.c                               | 1213
-rw-r--r--  security/keys/keyctl.c                            | 2026
-rw-r--r--  security/keys/keyctl_pkey.c                       |  329
-rw-r--r--  security/keys/keyring.c                           | 1794
-rw-r--r--  security/keys/permission.c                        |  123
-rw-r--r--  security/keys/persistent.c                        |  167
-rw-r--r--  security/keys/proc.c                              |  323
-rw-r--r--  security/keys/process_keys.c                      |  956
-rw-r--r--  security/keys/request_key.c                       |  821
-rw-r--r--  security/keys/request_key_auth.c                  |  283
-rw-r--r--  security/keys/sysctl.c                            |   70
-rw-r--r--  security/keys/trusted-keys/Makefile               |    8
-rw-r--r--  security/keys/trusted-keys/trusted_tpm1.c         | 1299
-rw-r--r--  security/keys/trusted-keys/trusted_tpm2.c         |  331
-rw-r--r--  security/keys/user_defined.c                      |  207
29 files changed, 12992 insertions(+), 0 deletions(-)
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
new file mode 100644
index 000000000..c161642a8
--- /dev/null
+++ b/security/keys/Kconfig
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Key management configuration
+#
+
+config KEYS
+ bool "Enable access key retention support"
+ select ASSOCIATIVE_ARRAY
+ help
+ This option provides support for retaining authentication tokens and
+ access keys in the kernel.
+
+ It also includes provision of methods by which such keys might be
+ associated with a process so that network filesystems, encryption
+ support and the like can find them.
+
+ Furthermore, a special type of key is available that acts as a
+ keyring: a searchable sequence of keys. Each process is equipped with
+ access to five standard keyrings: UID-specific, GID-specific, session,
+ process and thread.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEYS_REQUEST_CACHE
+ bool "Enable temporary caching of the last request_key() result"
+ depends on KEYS
+ help
+ This option causes the result of the last successful request_key()
+ call that didn't upcall to the kernel to be cached temporarily in the
+ task_struct. The cache is cleared by exit and just prior to the
+ resumption of userspace.
+
+ This allows multi-step processes, where each step wants to request a
+ key that is likely the same as the one requested by the previous step,
+ to avoid repeating the search.
+
+ An example of such a process is a pathwalk through a network
+ filesystem in which each method needs to request an authentication
+ key. Pathwalk will call multiple methods for each dentry traversed
+ (permission, d_revalidate, lookup, getxattr, getacl, ...).
+
+config PERSISTENT_KEYRINGS
+ bool "Enable register of persistent per-UID keyrings"
+ depends on KEYS
+ help
+ This option provides a register of persistent per-UID keyrings,
+ primarily aimed at Kerberos key storage. The keyrings are persistent
+ in the sense that they stay around after all processes of that UID
+ have exited, not that they survive the machine being rebooted.
+
+ A particular keyring may be accessed either by the user whose keyring
+ it is or by a process with administrative privileges. The active
+ LSMs get to rule on which admin-level processes get to access the
+ cache.
+
+ Keyrings are created and added into the register upon demand and get
+ removed if they expire (a default timeout is set upon creation).
+
+config BIG_KEYS
+ bool "Large payload keys"
+ depends on KEYS
+ depends on TMPFS
+ depends on CRYPTO_LIB_CHACHA20POLY1305 = y
+ help
+ This option provides support for holding large keys within the kernel
+ (for example Kerberos ticket caches). The data may be stored out to
+ swapspace by tmpfs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config TRUSTED_KEYS
+ tristate "TRUSTED KEYS"
+ depends on KEYS && TCG_TPM
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+ help
+ This option provides support for creating, sealing, and unsealing
+ keys in the kernel. Trusted keys are random-number symmetric keys
+ generated and RSA-sealed by the TPM. The TPM only unseals the keys
+ if the boot PCRs and other criteria match. Userspace will only ever
+ see encrypted blobs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config ENCRYPTED_KEYS
+ tristate "ENCRYPTED KEYS"
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_SHA256
+ select CRYPTO_RNG
+ help
+ This option provides support for creating, encrypting, and decrypting
+ keys in the kernel. Encrypted keys are kernel-generated random
+ numbers, which are encrypted/decrypted with a 'master' symmetric key.
+ The 'master' key can be either a trusted-key or user-key type.
+ Userspace only ever sees/stores encrypted blobs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEY_DH_OPERATIONS
+ bool "Diffie-Hellman operations on retained keys"
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_DH
+ help
+ This option provides support for calculating Diffie-Hellman
+ public keys and shared secrets using values stored as keys
+ in the kernel.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEY_NOTIFICATIONS
+ bool "Provide key/keyring change notifications"
+ depends on KEYS && WATCH_QUEUE
+ help
+ This option provides support for getting change notifications
+ on keys and keyrings on which the caller has View permission.
+ This makes use of pipes to handle the notification buffer and
+ provides KEYCTL_WATCH_KEY to enable/disable watches.
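
For orientation only (not part of this patch): a minimal userspace sketch of what CONFIG_KEYS exposes, using the add_key(2) and keyctl_read_alloc(3) wrappers from libkeyutils. The key description and payload here are arbitrary examples.

/* Illustrative sketch only -- not part of the patch. Build with -lkeyutils. */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Stash a small secret in the session keyring as a "user" key. */
	key_serial_t id = add_key("user", "example:token", "s3cret", 6,
				  KEY_SPEC_SESSION_KEYRING);
	if (id < 0) {
		perror("add_key");
		return 1;
	}

	/* Read it back; keyctl_read_alloc() sizes the buffer for us. */
	void *payload;
	long len = keyctl_read_alloc(id, &payload);
	if (len < 0) {
		perror("keyctl_read_alloc");
		return 1;
	}
	printf("key %d holds %ld bytes\n", id, len);
	free(payload);
	return 0;
}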
diff --git a/security/keys/Makefile b/security/keys/Makefile
new file mode 100644
index 000000000..5f40807f0
--- /dev/null
+++ b/security/keys/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for key management
+#
+
+#
+# Core
+#
+obj-y := \
+ gc.o \
+ key.o \
+ keyring.o \
+ keyctl.o \
+ permission.o \
+ process_keys.o \
+ request_key.o \
+ request_key_auth.o \
+ user_defined.o
+compat-obj-$(CONFIG_KEY_DH_OPERATIONS) += compat_dh.o
+obj-$(CONFIG_COMPAT) += compat.o $(compat-obj-y)
+obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
+obj-$(CONFIG_KEY_DH_OPERATIONS) += dh.o
+obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o
+
+#
+# Key types
+#
+obj-$(CONFIG_BIG_KEYS) += big_key.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted-keys/
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 000000000..691347dea
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Large capacity key type
+ *
+ * Copyright (C) 2017-2020 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) "big_key: "fmt
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/shmem_fs.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <keys/user-type.h>
+#include <keys/big_key-type.h>
+#include <crypto/chacha20poly1305.h>
+
+/*
+ * Layout of key payload words.
+ */
+enum {
+ big_key_data,
+ big_key_path,
+ big_key_path_2nd_part,
+ big_key_len,
+};
+
+/*
+ * If the data is under this limit, there's no point creating a shm file to
+ * hold it as the permanently resident metadata for the shmem fs will be at
+ * least as large as the data.
+ */
+#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
+
+/*
+ * big_key defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_big_key = {
+ .name = "big_key",
+ .preparse = big_key_preparse,
+ .free_preparse = big_key_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .revoke = big_key_revoke,
+ .destroy = big_key_destroy,
+ .describe = big_key_describe,
+ .read = big_key_read,
+ .update = big_key_update,
+};
+
+/*
+ * Preparse a big key
+ */
+int big_key_preparse(struct key_preparsed_payload *prep)
+{
+ struct path *path = (struct path *)&prep->payload.data[big_key_path];
+ struct file *file;
+ u8 *buf, *enckey;
+ ssize_t written;
+ size_t datalen = prep->datalen;
+ size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
+ int ret;
+
+ if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
+ return -EINVAL;
+
+ /* Set an arbitrary quota */
+ prep->quotalen = 16;
+
+ prep->payload.data[big_key_len] = (void *)(unsigned long)datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ /* Create a shmem file to store the data in. This will permit the data
+ * to be swapped out if needed.
+ *
+ * File content is stored encrypted with a randomly generated key.
+ * Since the key is random for each file, we can set the nonce
+ * to zero, provided we never define a ->update() call.
+ */
+ loff_t pos = 0;
+
+ buf = kvmalloc(enclen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* generate random key */
+ enckey = kmalloc(CHACHA20POLY1305_KEY_SIZE, GFP_KERNEL);
+ if (!enckey) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = get_random_bytes_wait(enckey, CHACHA20POLY1305_KEY_SIZE);
+ if (unlikely(ret))
+ goto err_enckey;
+
+ /* encrypt data */
+ chacha20poly1305_encrypt(buf, prep->data, datalen, NULL, 0,
+ 0, enckey);
+
+ /* save aligned data to file */
+ file = shmem_kernel_file_setup("", enclen, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_enckey;
+ }
+
+ written = kernel_write(file, buf, enclen, &pos);
+ if (written != enclen) {
+ ret = written;
+ if (written >= 0)
+ ret = -EIO;
+ goto err_fput;
+ }
+
+ /* Pin the mount and dentry to the key so that we can open it again
+ * later
+ */
+ prep->payload.data[big_key_data] = enckey;
+ *path = file->f_path;
+ path_get(path);
+ fput(file);
+ memzero_explicit(buf, enclen);
+ kvfree(buf);
+ } else {
+ /* Just store the data in a buffer */
+ void *data = kmalloc(datalen, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ prep->payload.data[big_key_data] = data;
+ memcpy(data, prep->data, prep->datalen);
+ }
+ return 0;
+
+err_fput:
+ fput(file);
+err_enckey:
+ kfree_sensitive(enckey);
+error:
+ memzero_explicit(buf, enclen);
+ kvfree(buf);
+ return ret;
+}
+
+/*
+ * Clear the preparsed payload.
+ */
+void big_key_free_preparse(struct key_preparsed_payload *prep)
+{
+ if (prep->datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&prep->payload.data[big_key_path];
+
+ path_put(path);
+ }
+ kfree_sensitive(prep->payload.data[big_key_data]);
+}
+
+/*
+ * dispose of the links from a revoked keyring
+ * - called with the key sem write-locked
+ */
+void big_key_revoke(struct key *key)
+{
+ struct path *path = (struct path *)&key->payload.data[big_key_path];
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+ if (key_is_positive(key) &&
+ (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
+ vfs_truncate(path, 0);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a big_key key
+ */
+void big_key_destroy(struct key *key)
+{
+ size_t datalen = (size_t)key->payload.data[big_key_len];
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data[big_key_path];
+
+ path_put(path);
+ path->mnt = NULL;
+ path->dentry = NULL;
+ }
+ kfree_sensitive(key->payload.data[big_key_data]);
+ key->payload.data[big_key_data] = NULL;
+}
+
+/*
+ * Update a big key
+ */
+int big_key_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ int ret;
+
+ ret = key_payload_reserve(key, prep->datalen);
+ if (ret < 0)
+ return ret;
+
+ if (key_is_positive(key))
+ big_key_destroy(key);
+
+ return generic_key_instantiate(key, prep);
+}
+
+/*
+ * describe the big_key key
+ */
+void big_key_describe(const struct key *key, struct seq_file *m)
+{
+ size_t datalen = (size_t)key->payload.data[big_key_len];
+
+ seq_puts(m, key->description);
+
+ if (key_is_positive(key))
+ seq_printf(m, ": %zu [%s]",
+ datalen,
+ datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+}
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long big_key_read(const struct key *key, char *buffer, size_t buflen)
+{
+ size_t datalen = (size_t)key->payload.data[big_key_len];
+ long ret;
+
+ if (!buffer || buflen < datalen)
+ return datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data[big_key_path];
+ struct file *file;
+ u8 *buf, *enckey = (u8 *)key->payload.data[big_key_data];
+ size_t enclen = datalen + CHACHA20POLY1305_AUTHTAG_SIZE;
+ loff_t pos = 0;
+
+ buf = kvmalloc(enclen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ file = dentry_open(path, O_RDONLY, current_cred());
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto error;
+ }
+
+ /* read file to kernel and decrypt */
+ ret = kernel_read(file, buf, enclen, &pos);
+ if (ret != enclen) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto err_fput;
+ }
+
+ ret = chacha20poly1305_decrypt(buf, buf, enclen, NULL, 0, 0,
+ enckey) ? 0 : -EBADMSG;
+ if (unlikely(ret))
+ goto err_fput;
+
+ ret = datalen;
+
+ /* copy out decrypted data */
+ memcpy(buffer, buf, datalen);
+
+err_fput:
+ fput(file);
+error:
+ memzero_explicit(buf, enclen);
+ kvfree(buf);
+ } else {
+ ret = datalen;
+ memcpy(buffer, key->payload.data[big_key_data], datalen);
+ }
+
+ return ret;
+}
+
+/*
+ * Register key type
+ */
+static int __init big_key_init(void)
+{
+ return register_key_type(&key_type_big_key);
+}
+
+late_initcall(big_key_init);
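
As a hedged usage sketch (not part of this patch), this is how a payload large enough to cross BIG_KEY_FILE_THRESHOLD would be stored through the type above; the description and size are arbitrary, and CONFIG_BIG_KEYS plus libkeyutils are assumed.

/* Illustrative sketch only -- not part of the patch. Build with -lkeyutils. */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* A payload well above sizeof(inode) + sizeof(dentry), so big_key_preparse()
	 * encrypts it with a per-key ChaCha20-Poly1305 key and parks it in shmem. */
	size_t len = 64 * 1024;
	char *blob = malloc(len);
	if (!blob)
		return 1;
	memset(blob, 'A', len);

	key_serial_t id = add_key("big_key", "example:ticket-cache", blob, len,
				  KEY_SPEC_USER_KEYRING);
	free(blob);
	if (id < 0) {
		perror("add_key(big_key)");
		return 1;
	}
	printf("big_key %d created\n", id);
	return 0;
}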
diff --git a/security/keys/compat.c b/security/keys/compat.c
new file mode 100644
index 000000000..1545efdca
--- /dev/null
+++ b/security/keys/compat.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/syscalls.h>
+#include <linux/keyctl.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+/*
+ * The key control system call, 32-bit compatibility version for 64-bit archs
+ */
+COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
+ u32, arg2, u32, arg3, u32, arg4, u32, arg5)
+{
+ switch (option) {
+ case KEYCTL_GET_KEYRING_ID:
+ return keyctl_get_keyring_ID(arg2, arg3);
+
+ case KEYCTL_JOIN_SESSION_KEYRING:
+ return keyctl_join_session_keyring(compat_ptr(arg2));
+
+ case KEYCTL_UPDATE:
+ return keyctl_update_key(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_REVOKE:
+ return keyctl_revoke_key(arg2);
+
+ case KEYCTL_DESCRIBE:
+ return keyctl_describe_key(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_CLEAR:
+ return keyctl_keyring_clear(arg2);
+
+ case KEYCTL_LINK:
+ return keyctl_keyring_link(arg2, arg3);
+
+ case KEYCTL_UNLINK:
+ return keyctl_keyring_unlink(arg2, arg3);
+
+ case KEYCTL_SEARCH:
+ return keyctl_keyring_search(arg2, compat_ptr(arg3),
+ compat_ptr(arg4), arg5);
+
+ case KEYCTL_READ:
+ return keyctl_read_key(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_CHOWN:
+ return keyctl_chown_key(arg2, arg3, arg4);
+
+ case KEYCTL_SETPERM:
+ return keyctl_setperm_key(arg2, arg3);
+
+ case KEYCTL_INSTANTIATE:
+ return keyctl_instantiate_key(arg2, compat_ptr(arg3), arg4,
+ arg5);
+
+ case KEYCTL_NEGATE:
+ return keyctl_negate_key(arg2, arg3, arg4);
+
+ case KEYCTL_SET_REQKEY_KEYRING:
+ return keyctl_set_reqkey_keyring(arg2);
+
+ case KEYCTL_SET_TIMEOUT:
+ return keyctl_set_timeout(arg2, arg3);
+
+ case KEYCTL_ASSUME_AUTHORITY:
+ return keyctl_assume_authority(arg2);
+
+ case KEYCTL_GET_SECURITY:
+ return keyctl_get_security(arg2, compat_ptr(arg3), arg4);
+
+ case KEYCTL_SESSION_TO_PARENT:
+ return keyctl_session_to_parent();
+
+ case KEYCTL_REJECT:
+ return keyctl_reject_key(arg2, arg3, arg4, arg5);
+
+ case KEYCTL_INSTANTIATE_IOV:
+ return keyctl_instantiate_key_iov(arg2, compat_ptr(arg3), arg4,
+ arg5);
+
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key(arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent(arg2, arg3);
+
+ case KEYCTL_DH_COMPUTE:
+ return compat_keyctl_dh_compute(compat_ptr(arg2),
+ compat_ptr(arg3),
+ arg4, compat_ptr(arg5));
+
+ case KEYCTL_RESTRICT_KEYRING:
+ return keyctl_restrict_keyring(arg2, compat_ptr(arg3),
+ compat_ptr(arg4));
+
+ case KEYCTL_PKEY_QUERY:
+ if (arg3 != 0)
+ return -EINVAL;
+ return keyctl_pkey_query(arg2,
+ compat_ptr(arg4),
+ compat_ptr(arg5));
+
+ case KEYCTL_PKEY_ENCRYPT:
+ case KEYCTL_PKEY_DECRYPT:
+ case KEYCTL_PKEY_SIGN:
+ return keyctl_pkey_e_d_s(option,
+ compat_ptr(arg2), compat_ptr(arg3),
+ compat_ptr(arg4), compat_ptr(arg5));
+
+ case KEYCTL_PKEY_VERIFY:
+ return keyctl_pkey_verify(compat_ptr(arg2), compat_ptr(arg3),
+ compat_ptr(arg4), compat_ptr(arg5));
+
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move(arg2, arg3, arg4, arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities(compat_ptr(arg2), arg3);
+
+ case KEYCTL_WATCH_KEY:
+ return keyctl_watch_key(arg2, arg3, arg4);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/security/keys/compat_dh.c b/security/keys/compat_dh.c
new file mode 100644
index 000000000..19384e7e9
--- /dev/null
+++ b/security/keys/compat_dh.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* 32-bit compatibility syscall for 64-bit systems for DH operations
+ *
+ * Copyright (C) 2016 Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <linux/uaccess.h>
+
+#include "internal.h"
+
+/*
+ * Perform the DH computation or DH based key derivation.
+ *
+ * If successful, 0 will be returned.
+ */
+long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct compat_keyctl_kdf_params __user *kdf)
+{
+ struct keyctl_kdf_params kdfcopy;
+ struct compat_keyctl_kdf_params compat_kdfcopy;
+
+ if (!kdf)
+ return __keyctl_dh_compute(params, buffer, buflen, NULL);
+
+ if (copy_from_user(&compat_kdfcopy, kdf, sizeof(compat_kdfcopy)) != 0)
+ return -EFAULT;
+
+ kdfcopy.hashname = compat_ptr(compat_kdfcopy.hashname);
+ kdfcopy.otherinfo = compat_ptr(compat_kdfcopy.otherinfo);
+ kdfcopy.otherinfolen = compat_kdfcopy.otherinfolen;
+ memcpy(kdfcopy.__spare, compat_kdfcopy.__spare,
+ sizeof(kdfcopy.__spare));
+
+ return __keyctl_dh_compute(params, buffer, buflen, &kdfcopy);
+}
diff --git a/security/keys/dh.c b/security/keys/dh.c
new file mode 100644
index 000000000..1abfa70ed
--- /dev/null
+++ b/security/keys/dh.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Crypto operations using stored keys
+ *
+ * Copyright (c) 2016, Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include <crypto/dh.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static ssize_t dh_data_from_key(key_serial_t keyid, void **data)
+{
+ struct key *key;
+ key_ref_t key_ref;
+ long status;
+ ssize_t ret;
+
+ key_ref = lookup_user_key(keyid, 0, KEY_NEED_READ);
+ if (IS_ERR(key_ref)) {
+ ret = -ENOKEY;
+ goto error;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ ret = -EOPNOTSUPP;
+ if (key->type == &key_type_user) {
+ down_read(&key->sem);
+ status = key_validate(key);
+ if (status == 0) {
+ const struct user_key_payload *payload;
+ uint8_t *duplicate;
+
+ payload = user_key_payload_locked(key);
+
+ duplicate = kmemdup(payload->data, payload->datalen,
+ GFP_KERNEL);
+ if (duplicate) {
+ *data = duplicate;
+ ret = payload->datalen;
+ } else {
+ ret = -ENOMEM;
+ }
+ }
+ up_read(&key->sem);
+ }
+
+ key_put(key);
+error:
+ return ret;
+}
+
+static void dh_free_data(struct dh *dh)
+{
+ kfree_sensitive(dh->key);
+ kfree_sensitive(dh->p);
+ kfree_sensitive(dh->g);
+}
+
+struct dh_completion {
+ struct completion completion;
+ int err;
+};
+
+static void dh_crypto_done(struct crypto_async_request *req, int err)
+{
+ struct dh_completion *compl = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ compl->err = err;
+ complete(&compl->completion);
+}
+
+struct kdf_sdesc {
+ struct shash_desc shash;
+ char ctx[];
+};
+
+static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
+{
+ struct crypto_shash *tfm;
+ struct kdf_sdesc *sdesc;
+ int size;
+ int err;
+
+ /* allocate synchronous hash */
+ tfm = crypto_alloc_shash(hashname, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_info("could not allocate digest TFM handle %s\n", hashname);
+ return PTR_ERR(tfm);
+ }
+
+ err = -EINVAL;
+ if (crypto_shash_digestsize(tfm) == 0)
+ goto out_free_tfm;
+
+ err = -ENOMEM;
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ goto out_free_tfm;
+ sdesc->shash.tfm = tfm;
+
+ *sdesc_ret = sdesc;
+
+ return 0;
+
+out_free_tfm:
+ crypto_free_shash(tfm);
+ return err;
+}
+
+static void kdf_dealloc(struct kdf_sdesc *sdesc)
+{
+ if (!sdesc)
+ return;
+
+ if (sdesc->shash.tfm)
+ crypto_free_shash(sdesc->shash.tfm);
+
+ kfree_sensitive(sdesc);
+}
+
+/*
+ * Implementation of the KDF in counter mode according to SP800-108 section 5.1
+ * as well as SP800-56A section 5.8.1 (Single-step KDF).
+ *
+ * SP800-56A:
+ * The src pointer is defined as Z || other info where Z is the shared secret
+ * from DH and other info is an arbitrary string (see SP800-56A section
+ * 5.8.1.2).
+ *
+ * 'dlen' must be a multiple of the digest size.
+ */
+static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int dlen, unsigned int zlen)
+{
+ struct shash_desc *desc = &sdesc->shash;
+ unsigned int h = crypto_shash_digestsize(desc->tfm);
+ int err = 0;
+ u8 *dst_orig = dst;
+ __be32 counter = cpu_to_be32(1);
+
+ while (dlen) {
+ err = crypto_shash_init(desc);
+ if (err)
+ goto err;
+
+ err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
+ if (err)
+ goto err;
+
+ if (zlen && h) {
+ u8 tmpbuffer[32];
+ size_t chunk = min_t(size_t, zlen, sizeof(tmpbuffer));
+ memset(tmpbuffer, 0, chunk);
+
+ do {
+ err = crypto_shash_update(desc, tmpbuffer,
+ chunk);
+ if (err)
+ goto err;
+
+ zlen -= chunk;
+ chunk = min_t(size_t, zlen, sizeof(tmpbuffer));
+ } while (zlen);
+ }
+
+ if (src && slen) {
+ err = crypto_shash_update(desc, src, slen);
+ if (err)
+ goto err;
+ }
+
+ err = crypto_shash_final(desc, dst);
+ if (err)
+ goto err;
+
+ dlen -= h;
+ dst += h;
+ counter = cpu_to_be32(be32_to_cpu(counter) + 1);
+ }
+
+ return 0;
+
+err:
+ memzero_explicit(dst_orig, dlen);
+ return err;
+}
+
+static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
+ char __user *buffer, size_t buflen,
+ uint8_t *kbuf, size_t kbuflen, size_t lzero)
+{
+ uint8_t *outbuf = NULL;
+ int ret;
+ size_t outbuf_len = roundup(buflen,
+ crypto_shash_digestsize(sdesc->shash.tfm));
+
+ outbuf = kmalloc(outbuf_len, GFP_KERNEL);
+ if (!outbuf) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, outbuf_len, lzero);
+ if (ret)
+ goto err;
+
+ ret = buflen;
+ if (copy_to_user(buffer, outbuf, buflen) != 0)
+ ret = -EFAULT;
+
+err:
+ kfree_sensitive(outbuf);
+ return ret;
+}
+
+long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params *kdfcopy)
+{
+ long ret;
+ ssize_t dlen;
+ int secretlen;
+ int outlen;
+ struct keyctl_dh_params pcopy;
+ struct dh dh_inputs;
+ struct scatterlist outsg;
+ struct dh_completion compl;
+ struct crypto_kpp *tfm;
+ struct kpp_request *req;
+ uint8_t *secret;
+ uint8_t *outbuf;
+ struct kdf_sdesc *sdesc = NULL;
+
+ if (!params || (!buffer && buflen)) {
+ ret = -EINVAL;
+ goto out1;
+ }
+ if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
+ ret = -EFAULT;
+ goto out1;
+ }
+
+ if (kdfcopy) {
+ char *hashname;
+
+ if (memchr_inv(kdfcopy->__spare, 0, sizeof(kdfcopy->__spare))) {
+ ret = -EINVAL;
+ goto out1;
+ }
+
+ if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
+ kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
+ ret = -EMSGSIZE;
+ goto out1;
+ }
+
+ /* get KDF name string */
+ hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME);
+ if (IS_ERR(hashname)) {
+ ret = PTR_ERR(hashname);
+ goto out1;
+ }
+
+ /* allocate KDF from the kernel crypto API */
+ ret = kdf_alloc(&sdesc, hashname);
+ kfree(hashname);
+ if (ret)
+ goto out1;
+ }
+
+ memset(&dh_inputs, 0, sizeof(dh_inputs));
+
+ dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out1;
+ }
+ dh_inputs.p_size = dlen;
+
+ dlen = dh_data_from_key(pcopy.base, &dh_inputs.g);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out2;
+ }
+ dh_inputs.g_size = dlen;
+
+ dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+ if (dlen < 0) {
+ ret = dlen;
+ goto out2;
+ }
+ dh_inputs.key_size = dlen;
+
+ secretlen = crypto_dh_key_len(&dh_inputs);
+ secret = kmalloc(secretlen, GFP_KERNEL);
+ if (!secret) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+ ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs);
+ if (ret)
+ goto out3;
+
+ tfm = crypto_alloc_kpp("dh", 0, 0);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto out3;
+ }
+
+ ret = crypto_kpp_set_secret(tfm, secret, secretlen);
+ if (ret)
+ goto out4;
+
+ outlen = crypto_kpp_maxsize(tfm);
+
+ if (!kdfcopy) {
+ /*
+ * When not using a KDF, buflen 0 is used to read the
+ * required buffer length
+ */
+ if (buflen == 0) {
+ ret = outlen;
+ goto out4;
+ } else if (outlen > buflen) {
+ ret = -EOVERFLOW;
+ goto out4;
+ }
+ }
+
+ outbuf = kzalloc(kdfcopy ? (outlen + kdfcopy->otherinfolen) : outlen,
+ GFP_KERNEL);
+ if (!outbuf) {
+ ret = -ENOMEM;
+ goto out4;
+ }
+
+ sg_init_one(&outsg, outbuf, outlen);
+
+ req = kpp_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out5;
+ }
+
+ kpp_request_set_input(req, NULL, 0);
+ kpp_request_set_output(req, &outsg, outlen);
+ init_completion(&compl.completion);
+ kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ dh_crypto_done, &compl);
+
+ /*
+ * For DH, generate_public_key and generate_shared_secret are
+ * the same calculation
+ */
+ ret = crypto_kpp_generate_public_key(req);
+ if (ret == -EINPROGRESS) {
+ wait_for_completion(&compl.completion);
+ ret = compl.err;
+ if (ret)
+ goto out6;
+ }
+
+ if (kdfcopy) {
+ /*
+ * Concatenate SP800-56A otherinfo past DH shared secret -- the
+ * input to the KDF is (DH shared secret || otherinfo)
+ */
+ if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
+ kdfcopy->otherinfolen) != 0) {
+ ret = -EFAULT;
+ goto out6;
+ }
+
+ ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, outbuf,
+ req->dst_len + kdfcopy->otherinfolen,
+ outlen - req->dst_len);
+ } else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
+ ret = req->dst_len;
+ } else {
+ ret = -EFAULT;
+ }
+
+out6:
+ kpp_request_free(req);
+out5:
+ kfree_sensitive(outbuf);
+out4:
+ crypto_free_kpp(tfm);
+out3:
+ kfree_sensitive(secret);
+out2:
+ dh_free_data(&dh_inputs);
+out1:
+ kdf_dealloc(sdesc);
+ return ret;
+}
+
+long keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params __user *kdf)
+{
+ struct keyctl_kdf_params kdfcopy;
+
+ if (!kdf)
+ return __keyctl_dh_compute(params, buffer, buflen, NULL);
+
+ if (copy_from_user(&kdfcopy, kdf, sizeof(kdfcopy)) != 0)
+ return -EFAULT;
+
+ return __keyctl_dh_compute(params, buffer, buflen, &kdfcopy);
+}
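
To show how __keyctl_dh_compute() above is reached from userspace, a hedged sketch using libkeyutils (assuming a version that provides keyctl_dh_compute(3)); the key IDs are placeholders for previously loaded "user" keys holding the prime, base and private value.

/* Illustrative sketch only -- not part of the patch. Build with -lkeyutils.
 * 'priv', 'prime' and 'base' must be IDs of "user" keys already loaded with
 * the DH parameters, e.g. via add_key("user", ...). */
#include <keyutils.h>
#include <stddef.h>

long dh_shared_secret(key_serial_t priv, key_serial_t prime, key_serial_t base,
		      char *out, size_t outlen)
{
	/* With a NULL buffer and zero length the kernel reports the required size. */
	long need = keyctl_dh_compute(priv, prime, base, NULL, 0);
	if (need < 0 || (size_t)need > outlen)
		return -1;

	/* Second call performs the actual base^priv mod prime computation in dh.c. */
	return keyctl_dh_compute(priv, prime, base, out, outlen);
}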
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
new file mode 100644
index 000000000..7a44dce6f
--- /dev/null
+++ b/security/keys/encrypted-keys/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for encrypted keys
+#
+
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o
+
+encrypted-keys-y := encrypted.o ecryptfs_format.o
+masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
+masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
+encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
diff --git a/security/keys/encrypted-keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
new file mode 100644
index 000000000..8fdd76105
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ecryptfs_format.c: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ */
+
+#include <linux/export.h>
+#include <linux/string.h>
+#include "ecryptfs_format.h"
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok)
+{
+ return auth_tok->token.password.session_key_encryption_key;
+}
+EXPORT_SYMBOL(ecryptfs_get_auth_tok_key);
+
+/*
+ * ecryptfs_get_versions()
+ *
+ * Source code taken from the software 'ecryptfs-utils' version 83.
+ *
+ */
+void ecryptfs_get_versions(int *major, int *minor, int *file_version)
+{
+ *major = ECRYPTFS_VERSION_MAJOR;
+ *minor = ECRYPTFS_VERSION_MINOR;
+ if (file_version)
+ *file_version = ECRYPTFS_SUPPORTED_FILE_VERSION;
+}
+EXPORT_SYMBOL(ecryptfs_get_versions);
+
+/*
+ * ecryptfs_fill_auth_tok - fill the ecryptfs_auth_tok structure
+ *
+ * Fill the ecryptfs_auth_tok structure with required ecryptfs data.
+ * The source code is inspired by the original function generate_payload()
+ * shipped with the software 'ecryptfs-utils' version 83.
+ *
+ */
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+ const char *key_desc)
+{
+ int major, minor;
+
+ ecryptfs_get_versions(&major, &minor, NULL);
+ auth_tok->version = (((uint16_t)(major << 8) & 0xFF00)
+ | ((uint16_t)minor & 0x00FF));
+ auth_tok->token_type = ECRYPTFS_PASSWORD;
+ strncpy((char *)auth_tok->token.password.signature, key_desc,
+ ECRYPTFS_PASSWORD_SIG_SIZE);
+ auth_tok->token.password.session_key_encryption_key_bytes =
+ ECRYPTFS_MAX_KEY_BYTES;
+ /*
+ * Removed auth_tok->token.password.salt and
+ * auth_tok->token.password.session_key_encryption_key
+ * initialization from the original code
+ */
+ /* TODO: Make the hash parameterizable via policy */
+ auth_tok->token.password.flags |=
+ ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET;
+ /* The kernel code will encrypt the session key. */
+ auth_tok->session_key.encrypted_key[0] = 0;
+ auth_tok->session_key.encrypted_key_size = 0;
+ /* Default; subject to change by kernel eCryptfs */
+ auth_tok->token.password.hash_algo = PGP_DIGEST_ALGO_SHA512;
+ auth_tok->token.password.flags &= ~(ECRYPTFS_PERSISTENT_PASSWORD);
+ return 0;
+}
+EXPORT_SYMBOL(ecryptfs_fill_auth_tok);
diff --git a/security/keys/encrypted-keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h
new file mode 100644
index 000000000..ed8466578
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ecryptfs_format.h: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ */
+
+#ifndef __KEYS_ECRYPTFS_H
+#define __KEYS_ECRYPTFS_H
+
+#include <linux/ecryptfs.h>
+
+#define PGP_DIGEST_ALGO_SHA512 10
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok);
+void ecryptfs_get_versions(int *major, int *minor, int *file_version);
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+ const char *key_desc);
+
+#endif /* __KEYS_ECRYPTFS_H */
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
new file mode 100644
index 000000000..192e531c1
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -0,0 +1,1010 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/key-type.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <linux/scatterlist.h>
+#include <linux/ctype.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+
+#include "encrypted.h"
+#include "ecryptfs_format.h"
+
+static const char KEY_TRUSTED_PREFIX[] = "trusted:";
+static const char KEY_USER_PREFIX[] = "user:";
+static const char hash_alg[] = "sha256";
+static const char hmac_alg[] = "hmac(sha256)";
+static const char blkcipher_alg[] = "cbc(aes)";
+static const char key_format_default[] = "default";
+static const char key_format_ecryptfs[] = "ecryptfs";
+static const char key_format_enc32[] = "enc32";
+static unsigned int ivsize;
+static int blksize;
+
+#define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1)
+#define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1)
+#define KEY_ECRYPTFS_DESC_LEN 16
+#define HASH_SIZE SHA256_DIGEST_SIZE
+#define MAX_DATA_SIZE 4096
+#define MIN_DATA_SIZE 20
+#define KEY_ENC32_PAYLOAD_LEN 32
+
+static struct crypto_shash *hash_tfm;
+
+enum {
+ Opt_new, Opt_load, Opt_update, Opt_err
+};
+
+enum {
+ Opt_default, Opt_ecryptfs, Opt_enc32, Opt_error
+};
+
+static const match_table_t key_format_tokens = {
+ {Opt_default, "default"},
+ {Opt_ecryptfs, "ecryptfs"},
+ {Opt_enc32, "enc32"},
+ {Opt_error, NULL}
+};
+
+static const match_table_t key_tokens = {
+ {Opt_new, "new"},
+ {Opt_load, "load"},
+ {Opt_update, "update"},
+ {Opt_err, NULL}
+};
+
+static int aes_get_sizes(void)
+{
+ struct crypto_skcipher *tfm;
+
+ tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ ivsize = crypto_skcipher_ivsize(tfm);
+ blksize = crypto_skcipher_blocksize(tfm);
+ crypto_free_skcipher(tfm);
+ return 0;
+}
+
+/*
+ * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key
+ *
+ * The description of an encrypted key with format 'ecryptfs' must contain
+ * exactly 16 hexadecimal characters.
+ *
+ */
+static int valid_ecryptfs_desc(const char *ecryptfs_desc)
+{
+ int i;
+
+ if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) {
+ pr_err("encrypted_key: key description must be %d hexadecimal "
+ "characters long\n", KEY_ECRYPTFS_DESC_LEN);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) {
+ if (!isxdigit(ecryptfs_desc[i])) {
+ pr_err("encrypted_key: key description must contain "
+ "only hexadecimal characters\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key
+ *
+ * key-type:= "trusted:" | "user:"
+ * desc:= master-key description
+ *
+ * Verify that 'key-type' is valid and that 'desc' exists. On key update,
+ * only the master key description is permitted to change; the key-type
+ * must remain the same.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int valid_master_desc(const char *new_desc, const char *orig_desc)
+{
+ int prefix_len;
+
+ if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
+ prefix_len = KEY_TRUSTED_PREFIX_LEN;
+ else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
+ prefix_len = KEY_USER_PREFIX_LEN;
+ else
+ return -EINVAL;
+
+ if (!new_desc[prefix_len])
+ return -EINVAL;
+
+ if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * datablob_parse - parse the keyctl data
+ *
+ * datablob format:
+ * new [<format>] <master-key name> <decrypted data length>
+ * load [<format>] <master-key name> <decrypted data length>
+ * <encrypted iv + data>
+ * update <new-master-key name>
+ *
+ * Tokenizes a copy of the keyctl data, returning a pointer to each token,
+ * which is null terminated.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int datablob_parse(char *datablob, const char **format,
+ char **master_desc, char **decrypted_datalen,
+ char **hex_encoded_iv)
+{
+ substring_t args[MAX_OPT_ARGS];
+ int ret = -EINVAL;
+ int key_cmd;
+ int key_format;
+ char *p, *keyword;
+
+ keyword = strsep(&datablob, " \t");
+ if (!keyword) {
+ pr_info("encrypted_key: insufficient parameters specified\n");
+ return ret;
+ }
+ key_cmd = match_token(keyword, key_tokens, args);
+
+ /* Get optional format: default | ecryptfs */
+ p = strsep(&datablob, " \t");
+ if (!p) {
+ pr_err("encrypted_key: insufficient parameters specified\n");
+ return ret;
+ }
+
+ key_format = match_token(p, key_format_tokens, args);
+ switch (key_format) {
+ case Opt_ecryptfs:
+ case Opt_enc32:
+ case Opt_default:
+ *format = p;
+ *master_desc = strsep(&datablob, " \t");
+ break;
+ case Opt_error:
+ *master_desc = p;
+ break;
+ }
+
+ if (!*master_desc) {
+ pr_info("encrypted_key: master key parameter is missing\n");
+ goto out;
+ }
+
+ if (valid_master_desc(*master_desc, NULL) < 0) {
+ pr_info("encrypted_key: master key parameter \'%s\' "
+ "is invalid\n", *master_desc);
+ goto out;
+ }
+
+ if (decrypted_datalen) {
+ *decrypted_datalen = strsep(&datablob, " \t");
+ if (!*decrypted_datalen) {
+ pr_info("encrypted_key: keylen parameter is missing\n");
+ goto out;
+ }
+ }
+
+ switch (key_cmd) {
+ case Opt_new:
+ if (!decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .update method\n", keyword);
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_load:
+ if (!decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .update method\n", keyword);
+ break;
+ }
+ *hex_encoded_iv = strsep(&datablob, " \t");
+ if (!*hex_encoded_iv) {
+ pr_info("encrypted_key: hex blob is missing\n");
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_update:
+ if (decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .instantiate method\n",
+ keyword);
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_err:
+ pr_info("encrypted_key: keyword \'%s\' not recognized\n",
+ keyword);
+ break;
+ }
+out:
+ return ret;
+}
+
+/*
+ * datablob_format - format as an ascii string, before copying to userspace
+ */
+static char *datablob_format(struct encrypted_key_payload *epayload,
+ size_t asciiblob_len)
+{
+ char *ascii_buf, *bufp;
+ u8 *iv = epayload->iv;
+ int len;
+ int i;
+
+ ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL);
+ if (!ascii_buf)
+ goto out;
+
+ ascii_buf[asciiblob_len] = '\0';
+
+ /* copy datablob master_desc and datalen strings */
+ len = sprintf(ascii_buf, "%s %s %s ", epayload->format,
+ epayload->master_desc, epayload->datalen);
+
+ /* convert the hex encoded iv, encrypted-data and HMAC to ascii */
+ bufp = &ascii_buf[len];
+ for (i = 0; i < (asciiblob_len - len) / 2; i++)
+ bufp = hex_byte_pack(bufp, iv[i]);
+out:
+ return ascii_buf;
+}
+
+/*
+ * request_user_key - request the user key
+ *
+ * Use a user provided key to encrypt/decrypt an encrypted-key.
+ */
+static struct key *request_user_key(const char *master_desc, const u8 **master_key,
+ size_t *master_keylen)
+{
+ const struct user_key_payload *upayload;
+ struct key *ukey;
+
+ ukey = request_key(&key_type_user, master_desc, NULL);
+ if (IS_ERR(ukey))
+ goto error;
+
+ down_read(&ukey->sem);
+ upayload = user_key_payload_locked(ukey);
+ if (!upayload) {
+ /* key was revoked before we acquired its semaphore */
+ up_read(&ukey->sem);
+ key_put(ukey);
+ ukey = ERR_PTR(-EKEYREVOKED);
+ goto error;
+ }
+ *master_key = upayload->data;
+ *master_keylen = upayload->datalen;
+error:
+ return ukey;
+}
+
+static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+ const u8 *buf, unsigned int buflen)
+{
+ struct crypto_shash *tfm;
+ int err;
+
+ tfm = crypto_alloc_shash(hmac_alg, 0, 0);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: can't alloc %s transform: %ld\n",
+ hmac_alg, PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+
+ err = crypto_shash_setkey(tfm, key, keylen);
+ if (!err)
+ err = crypto_shash_tfm_digest(tfm, buf, buflen, digest);
+ crypto_free_shash(tfm);
+ return err;
+}
+
+enum derived_key_type { ENC_KEY, AUTH_KEY };
+
+/* Derive authentication/encryption key from trusted key */
+static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
+ const u8 *master_key, size_t master_keylen)
+{
+ u8 *derived_buf;
+ unsigned int derived_buf_len;
+ int ret;
+
+ derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen;
+ if (derived_buf_len < HASH_SIZE)
+ derived_buf_len = HASH_SIZE;
+
+ derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
+ if (!derived_buf)
+ return -ENOMEM;
+
+ if (key_type)
+ strcpy(derived_buf, "AUTH_KEY");
+ else
+ strcpy(derived_buf, "ENC_KEY");
+
+ memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
+ master_keylen);
+ ret = crypto_shash_tfm_digest(hash_tfm, derived_buf, derived_buf_len,
+ derived_key);
+ kfree_sensitive(derived_buf);
+ return ret;
+}
+
+static struct skcipher_request *init_skcipher_req(const u8 *key,
+ unsigned int key_len)
+{
+ struct skcipher_request *req;
+ struct crypto_skcipher *tfm;
+ int ret;
+
+ tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: failed to load %s transform (%ld)\n",
+ blkcipher_alg, PTR_ERR(tfm));
+ return ERR_CAST(tfm);
+ }
+
+ ret = crypto_skcipher_setkey(tfm, key, key_len);
+ if (ret < 0) {
+ pr_err("encrypted_key: failed to setkey (%d)\n", ret);
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(ret);
+ }
+
+ req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ if (!req) {
+ pr_err("encrypted_key: failed to allocate request for %s\n",
+ blkcipher_alg);
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ skcipher_request_set_callback(req, 0, NULL, NULL);
+ return req;
+}
+
+static struct key *request_master_key(struct encrypted_key_payload *epayload,
+ const u8 **master_key, size_t *master_keylen)
+{
+ struct key *mkey = ERR_PTR(-EINVAL);
+
+ if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
+ KEY_TRUSTED_PREFIX_LEN)) {
+ mkey = request_trusted_key(epayload->master_desc +
+ KEY_TRUSTED_PREFIX_LEN,
+ master_key, master_keylen);
+ } else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX,
+ KEY_USER_PREFIX_LEN)) {
+ mkey = request_user_key(epayload->master_desc +
+ KEY_USER_PREFIX_LEN,
+ master_key, master_keylen);
+ } else
+ goto out;
+
+ if (IS_ERR(mkey)) {
+ int ret = PTR_ERR(mkey);
+
+ if (ret == -ENOTSUPP)
+ pr_info("encrypted_key: key %s not supported",
+ epayload->master_desc);
+ else
+ pr_info("encrypted_key: key %s not found",
+ epayload->master_desc);
+ goto out;
+ }
+
+ dump_master_key(*master_key, *master_keylen);
+out:
+ return mkey;
+}
+
+/* Before returning data to userspace, encrypt decrypted data. */
+static int derived_key_encrypt(struct encrypted_key_payload *epayload,
+ const u8 *derived_key,
+ unsigned int derived_keylen)
+{
+ struct scatterlist sg_in[2];
+ struct scatterlist sg_out[1];
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ unsigned int encrypted_datalen;
+ u8 iv[AES_BLOCK_SIZE];
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+
+ req = init_skcipher_req(derived_key, derived_keylen);
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+ dump_decrypted_data(epayload);
+
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
+
+ sg_init_table(sg_out, 1);
+ sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
+
+ memcpy(iv, epayload->iv, sizeof(iv));
+ skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
+ ret = crypto_skcipher_encrypt(req);
+ tfm = crypto_skcipher_reqtfm(req);
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ if (ret < 0)
+ pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
+ else
+ dump_encrypted_data(epayload, encrypted_datalen);
+out:
+ return ret;
+}
+
+static int datablob_hmac_append(struct encrypted_key_payload *epayload,
+ const u8 *master_key, size_t master_keylen)
+{
+ u8 derived_key[HASH_SIZE];
+ u8 *digest;
+ int ret;
+
+ ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ digest = epayload->format + epayload->datablob_len;
+ ret = calc_hmac(digest, derived_key, sizeof derived_key,
+ epayload->format, epayload->datablob_len);
+ if (!ret)
+ dump_hmac(NULL, digest, HASH_SIZE);
+out:
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
+
+/* verify HMAC before decrypting encrypted key */
+static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
+ const u8 *format, const u8 *master_key,
+ size_t master_keylen)
+{
+ u8 derived_key[HASH_SIZE];
+ u8 digest[HASH_SIZE];
+ int ret;
+ char *p;
+ unsigned short len;
+
+ ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ len = epayload->datablob_len;
+ if (!format) {
+ p = epayload->master_desc;
+ len -= strlen(epayload->format) + 1;
+ } else
+ p = epayload->format;
+
+ ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
+ if (ret < 0)
+ goto out;
+ ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
+ sizeof(digest));
+ if (ret) {
+ ret = -EINVAL;
+ dump_hmac("datablob",
+ epayload->format + epayload->datablob_len,
+ HASH_SIZE);
+ dump_hmac("calc", digest, HASH_SIZE);
+ }
+out:
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
+
+static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+ const u8 *derived_key,
+ unsigned int derived_keylen)
+{
+ struct scatterlist sg_in[1];
+ struct scatterlist sg_out[2];
+ struct crypto_skcipher *tfm;
+ struct skcipher_request *req;
+ unsigned int encrypted_datalen;
+ u8 iv[AES_BLOCK_SIZE];
+ u8 *pad;
+ int ret;
+
+ /* Throwaway buffer to hold the unused zero padding at the end */
+ pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+ if (!pad)
+ return -ENOMEM;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ req = init_skcipher_req(derived_key, derived_keylen);
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+ dump_encrypted_data(epayload, encrypted_datalen);
+
+ sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+ sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
+ sg_set_buf(&sg_out[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
+
+ memcpy(iv, epayload->iv, sizeof(iv));
+ skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
+ ret = crypto_skcipher_decrypt(req);
+ tfm = crypto_skcipher_reqtfm(req);
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ if (ret < 0)
+ goto out;
+ dump_decrypted_data(epayload);
+out:
+ kfree(pad);
+ return ret;
+}
+
+/* Allocate memory for decrypted key and datablob. */
+static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
+ const char *format,
+ const char *master_desc,
+ const char *datalen)
+{
+ struct encrypted_key_payload *epayload = NULL;
+ unsigned short datablob_len;
+ unsigned short decrypted_datalen;
+ unsigned short payload_datalen;
+ unsigned int encrypted_datalen;
+ unsigned int format_len;
+ long dlen;
+ int ret;
+
+ ret = kstrtol(datalen, 10, &dlen);
+ if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ format_len = (!format) ? strlen(key_format_default) : strlen(format);
+ decrypted_datalen = dlen;
+ payload_datalen = decrypted_datalen;
+ if (format) {
+ if (!strcmp(format, key_format_ecryptfs)) {
+ if (dlen != ECRYPTFS_MAX_KEY_BYTES) {
+ pr_err("encrypted_key: keylen for the ecryptfs format must be equal to %d bytes\n",
+ ECRYPTFS_MAX_KEY_BYTES);
+ return ERR_PTR(-EINVAL);
+ }
+ decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES;
+ payload_datalen = sizeof(struct ecryptfs_auth_tok);
+ } else if (!strcmp(format, key_format_enc32)) {
+ if (decrypted_datalen != KEY_ENC32_PAYLOAD_LEN) {
+ pr_err("encrypted_key: enc32 key payload incorrect length: %d\n",
+ decrypted_datalen);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ }
+
+ encrypted_datalen = roundup(decrypted_datalen, blksize);
+
+ datablob_len = format_len + 1 + strlen(master_desc) + 1
+ + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen;
+
+ ret = key_payload_reserve(key, payload_datalen + datablob_len
+ + HASH_SIZE + 1);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ epayload = kzalloc(sizeof(*epayload) + payload_datalen +
+ datablob_len + HASH_SIZE + 1, GFP_KERNEL);
+ if (!epayload)
+ return ERR_PTR(-ENOMEM);
+
+ epayload->payload_datalen = payload_datalen;
+ epayload->decrypted_datalen = decrypted_datalen;
+ epayload->datablob_len = datablob_len;
+ return epayload;
+}
+
+static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
+ const char *format, const char *hex_encoded_iv)
+{
+ struct key *mkey;
+ u8 derived_key[HASH_SIZE];
+ const u8 *master_key;
+ u8 *hmac;
+ const char *hex_encoded_data;
+ unsigned int encrypted_datalen;
+ size_t master_keylen;
+ size_t asciilen;
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2;
+ if (strlen(hex_encoded_iv) != asciilen)
+ return -EINVAL;
+
+ hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
+ ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+ if (ret < 0)
+ return -EINVAL;
+ ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
+ encrypted_datalen);
+ if (ret < 0)
+ return -EINVAL;
+
+ hmac = epayload->format + epayload->datablob_len;
+ ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
+ HASH_SIZE);
+ if (ret < 0)
+ return -EINVAL;
+
+ mkey = request_master_key(epayload, &master_key, &master_keylen);
+ if (IS_ERR(mkey))
+ return PTR_ERR(mkey);
+
+ ret = datablob_hmac_verify(epayload, format, master_key, master_keylen);
+ if (ret < 0) {
+ pr_err("encrypted_key: bad hmac (%d)\n", ret);
+ goto out;
+ }
+
+ ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key);
+ if (ret < 0)
+ pr_err("encrypted_key: failed to decrypt key (%d)\n", ret);
+out:
+ up_read(&mkey->sem);
+ key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
+
+static void __ekey_init(struct encrypted_key_payload *epayload,
+ const char *format, const char *master_desc,
+ const char *datalen)
+{
+ unsigned int format_len;
+
+ format_len = (!format) ? strlen(key_format_default) : strlen(format);
+ epayload->format = epayload->payload_data + epayload->payload_datalen;
+ epayload->master_desc = epayload->format + format_len + 1;
+ epayload->datalen = epayload->master_desc + strlen(master_desc) + 1;
+ epayload->iv = epayload->datalen + strlen(datalen) + 1;
+ epayload->encrypted_data = epayload->iv + ivsize + 1;
+ epayload->decrypted_data = epayload->payload_data;
+
+ if (!format)
+ memcpy(epayload->format, key_format_default, format_len);
+ else {
+ if (!strcmp(format, key_format_ecryptfs))
+ epayload->decrypted_data =
+ ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data);
+
+ memcpy(epayload->format, format, format_len);
+ }
+
+ memcpy(epayload->master_desc, master_desc, strlen(master_desc));
+ memcpy(epayload->datalen, datalen, strlen(datalen));
+}
+
+/*
+ * encrypted_init - initialize an encrypted key
+ *
+ * For a new key, use a random number for both the iv and data
+ * itself. For an old key, decrypt the hex encoded data.
+ */
+static int encrypted_init(struct encrypted_key_payload *epayload,
+ const char *key_desc, const char *format,
+ const char *master_desc, const char *datalen,
+ const char *hex_encoded_iv)
+{
+ int ret = 0;
+
+ if (format && !strcmp(format, key_format_ecryptfs)) {
+ ret = valid_ecryptfs_desc(key_desc);
+ if (ret < 0)
+ return ret;
+
+ ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data,
+ key_desc);
+ }
+
+ __ekey_init(epayload, format, master_desc, datalen);
+ if (!hex_encoded_iv) {
+ get_random_bytes(epayload->iv, ivsize);
+
+ get_random_bytes(epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ } else
+ ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv);
+ return ret;
+}
+
+/*
+ * encrypted_instantiate - instantiate an encrypted key
+ *
+ * Decrypt an existing encrypted datablob or create a new encrypted key
+ * based on a kernel random number.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ struct encrypted_key_payload *epayload = NULL;
+ char *datablob = NULL;
+ const char *format = NULL;
+ char *master_desc = NULL;
+ char *decrypted_datalen = NULL;
+ char *hex_encoded_iv = NULL;
+ size_t datalen = prep->datalen;
+ int ret;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ datablob[datalen] = 0;
+ memcpy(datablob, prep->data, datalen);
+ ret = datablob_parse(datablob, &format, &master_desc,
+ &decrypted_datalen, &hex_encoded_iv);
+ if (ret < 0)
+ goto out;
+
+ epayload = encrypted_key_alloc(key, format, master_desc,
+ decrypted_datalen);
+ if (IS_ERR(epayload)) {
+ ret = PTR_ERR(epayload);
+ goto out;
+ }
+ ret = encrypted_init(epayload, key->description, format, master_desc,
+ decrypted_datalen, hex_encoded_iv);
+ if (ret < 0) {
+ kfree_sensitive(epayload);
+ goto out;
+ }
+
+ rcu_assign_keypointer(key, epayload);
+out:
+ kfree_sensitive(datablob);
+ return ret;
+}
+
+static void encrypted_rcu_free(struct rcu_head *rcu)
+{
+ struct encrypted_key_payload *epayload;
+
+ epayload = container_of(rcu, struct encrypted_key_payload, rcu);
+ kfree_sensitive(epayload);
+}
+
+/*
+ * encrypted_update - update the master key description
+ *
+ * Change the master key description for an existing encrypted key.
+ * The next read will return an encrypted datablob using the new
+ * master key description.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct encrypted_key_payload *epayload = key->payload.data[0];
+ struct encrypted_key_payload *new_epayload;
+ char *buf;
+ char *new_master_desc = NULL;
+ const char *format = NULL;
+ size_t datalen = prep->datalen;
+ int ret = 0;
+
+ if (key_is_negative(key))
+ return -ENOKEY;
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ buf = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[datalen] = 0;
+ memcpy(buf, prep->data, datalen);
+ ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = valid_master_desc(new_master_desc, epayload->master_desc);
+ if (ret < 0)
+ goto out;
+
+ new_epayload = encrypted_key_alloc(key, epayload->format,
+ new_master_desc, epayload->datalen);
+ if (IS_ERR(new_epayload)) {
+ ret = PTR_ERR(new_epayload);
+ goto out;
+ }
+
+ __ekey_init(new_epayload, epayload->format, new_master_desc,
+ epayload->datalen);
+
+ memcpy(new_epayload->iv, epayload->iv, ivsize);
+ memcpy(new_epayload->payload_data, epayload->payload_data,
+ epayload->payload_datalen);
+
+ rcu_assign_keypointer(key, new_epayload);
+ call_rcu(&epayload->rcu, encrypted_rcu_free);
+out:
+ kfree_sensitive(buf);
+ return ret;
+}
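/*
 * Illustrative userspace example (editor's sketch, not part of this patch):
 * encrypted_update() only changes which master key seals the payload.  The
 * update blob is "update <new master-key name>"; the key id argument and the
 * master key name below are assumptions for the example.
 */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
	const char *payload = "update user:kmk-new";
	key_serial_t key;

	if (argc < 2)
		return 1;
	key = (key_serial_t)atoi(argv[1]);
	if (keyctl_update(key, payload, strlen(payload)) < 0) {
		perror("keyctl_update");
		return 1;
	}
	return 0;
}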
+
+/*
+ * encrypted_read - format and copy out the encrypted data
+ *
+ * The resulting datablob format is:
+ * <master-key name> <decrypted data length> <hex-encoded iv, encrypted data and HMAC>
+ *
+ * On success, return to userspace the encrypted key datablob size.
+ */
+static long encrypted_read(const struct key *key, char *buffer,
+ size_t buflen)
+{
+ struct encrypted_key_payload *epayload;
+ struct key *mkey;
+ const u8 *master_key;
+ size_t master_keylen;
+ char derived_key[HASH_SIZE];
+ char *ascii_buf;
+ size_t asciiblob_len;
+ int ret;
+
+ epayload = dereference_key_locked(key);
+
+ /* returns the hex encoded iv, encrypted-data, and hmac as ascii */
+ asciiblob_len = epayload->datablob_len + ivsize + 1
+ + roundup(epayload->decrypted_datalen, blksize)
+ + (HASH_SIZE * 2);
+
+ if (!buffer || buflen < asciiblob_len)
+ return asciiblob_len;
+
+ mkey = request_master_key(epayload, &master_key, &master_keylen);
+ if (IS_ERR(mkey))
+ return PTR_ERR(mkey);
+
+ ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key);
+ if (ret < 0)
+ goto out;
+
+ ret = datablob_hmac_append(epayload, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ascii_buf = datablob_format(epayload, asciiblob_len);
+ if (!ascii_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ up_read(&mkey->sem);
+ key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
+
+ memcpy(buffer, ascii_buf, asciiblob_len);
+ kfree_sensitive(ascii_buf);
+
+ return asciiblob_len;
+out:
+ up_read(&mkey->sem);
+ key_put(mkey);
+ memzero_explicit(derived_key, sizeof(derived_key));
+ return ret;
+}
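/*
 * Illustrative userspace example (editor's sketch, not part of this patch):
 * reading an encrypted key returns the ASCII datablob built by
 * encrypted_read(), i.e. the master key name, the decrypted data length and a
 * hex-encoded run of IV, ciphertext and HMAC.  The blob can be saved and
 * later fed back via a "load <datablob>" payload to recreate the key.
 */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	void *blob = NULL;
	long len;

	if (argc < 2)
		return 1;
	len = keyctl_read_alloc((key_serial_t)atoi(argv[1]), &blob);
	if (len < 0) {
		perror("keyctl_read_alloc");
		return 1;
	}
	printf("%.*s\n", (int)len, (char *)blob);
	free(blob);
	return 0;
}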
+
+/*
+ * encrypted_destroy - clear and free the key's payload
+ */
+static void encrypted_destroy(struct key *key)
+{
+ kfree_sensitive(key->payload.data[0]);
+}
+
+struct key_type key_type_encrypted = {
+ .name = "encrypted",
+ .instantiate = encrypted_instantiate,
+ .update = encrypted_update,
+ .destroy = encrypted_destroy,
+ .describe = user_describe,
+ .read = encrypted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_encrypted);
+
+static int __init init_encrypted(void)
+{
+ int ret;
+
+ hash_tfm = crypto_alloc_shash(hash_alg, 0, 0);
+ if (IS_ERR(hash_tfm)) {
+ pr_err("encrypted_key: can't allocate %s transform: %ld\n",
+ hash_alg, PTR_ERR(hash_tfm));
+ return PTR_ERR(hash_tfm);
+ }
+
+ ret = aes_get_sizes();
+ if (ret < 0)
+ goto out;
+ ret = register_key_type(&key_type_encrypted);
+ if (ret < 0)
+ goto out;
+ return 0;
+out:
+ crypto_free_shash(hash_tfm);
+ return ret;
+}
+
+static void __exit cleanup_encrypted(void)
+{
+ crypto_free_shash(hash_tfm);
+ unregister_key_type(&key_type_encrypted);
+}
+
+late_initcall(init_encrypted);
+module_exit(cleanup_encrypted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
new file mode 100644
index 000000000..1809995db
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ENCRYPTED_KEY_H
+#define __ENCRYPTED_KEY_H
+
+#define ENCRYPTED_DEBUG 0
+#if defined(CONFIG_TRUSTED_KEYS) || \
+ (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
+extern struct key *request_trusted_key(const char *trusted_desc,
+ const u8 **master_key, size_t *master_keylen);
+#else
+static inline struct key *request_trusted_key(const char *trusted_desc,
+ const u8 **master_key,
+ size_t *master_keylen)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+#if ENCRYPTED_DEBUG
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+ print_hex_dump(KERN_ERR, "master key: ", DUMP_PREFIX_NONE, 32, 1,
+ master_key, master_keylen, 0);
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+ print_hex_dump(KERN_ERR, "decrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+ epayload->decrypted_data,
+ epayload->decrypted_datalen, 0);
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+ unsigned int encrypted_datalen)
+{
+ print_hex_dump(KERN_ERR, "encrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+ epayload->encrypted_data, encrypted_datalen, 0);
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+ unsigned int hmac_size)
+{
+ if (str)
+ pr_info("encrypted_key: %s", str);
+ print_hex_dump(KERN_ERR, "hmac: ", DUMP_PREFIX_NONE, 32, 1, digest,
+ hmac_size, 0);
+}
+#else
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+ unsigned int encrypted_datalen)
+{
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+ unsigned int hmac_size)
+{
+}
+#endif
+#endif
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
new file mode 100644
index 000000000..e6d22ce77
--- /dev/null
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- https://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <linux/uaccess.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include "encrypted.h"
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata. Although userspace
+ * manages both the trusted and encrypted key types, trusted key payloads,
+ * like encrypted key payloads, are never visible to userspace in decrypted
+ * form.
+ */
+struct key *request_trusted_key(const char *trusted_desc,
+ const u8 **master_key, size_t *master_keylen)
+{
+ struct trusted_key_payload *tpayload;
+ struct key *tkey;
+
+ tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+ if (IS_ERR(tkey))
+ goto error;
+
+ down_read(&tkey->sem);
+ tpayload = tkey->payload.data[0];
+ *master_key = tpayload->key;
+ *master_keylen = tpayload->key_len;
+error:
+ return tkey;
+}
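/*
 * Illustrative in-kernel sketch (editor's example, not part of this patch):
 * how a caller such as request_master_key() in encrypted.c is expected to
 * consume request_trusted_key().  On success the trusted key is returned
 * with its semaphore read-locked, so the caller must drop both the lock and
 * the reference once it has finished with the master key material.  Assumes
 * <linux/key.h>, <linux/err.h> and "encrypted.h" are available.
 */
static int __maybe_unused example_use_trusted_master(const char *desc)
{
	const u8 *master_key;
	size_t master_keylen;
	struct key *mkey;

	mkey = request_trusted_key(desc, &master_key, &master_keylen);
	if (IS_ERR(mkey))
		return PTR_ERR(mkey);

	/* ... derive whatever is needed from master_key/master_keylen ... */

	up_read(&mkey->sem);
	key_put(mkey);
	return 0;
}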
diff --git a/security/keys/gc.c b/security/keys/gc.c
new file mode 100644
index 000000000..eaddaceda
--- /dev/null
+++ b/security/keys/gc.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Key garbage collector
+ *
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <keys/keyring-type.h>
+#include "internal.h"
+
+/*
+ * Delay between key revocation/expiry in seconds
+ */
+unsigned key_gc_delay = 5 * 60;
+
+/*
+ * Reaper for unused keys.
+ */
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
+
+/*
+ * Reaper for links from keyrings to dead keys.
+ */
+static void key_gc_timer_func(struct timer_list *);
+static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);
+
+static time64_t key_gc_next_run = TIME64_MAX;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
+
+
+/*
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+ .name = ".dead",
+};
+
+/*
+ * Schedule a garbage collection run.
+ * - time precision isn't particularly important
+ */
+void key_schedule_gc(time64_t gc_at)
+{
+ unsigned long expires;
+ time64_t now = ktime_get_real_seconds();
+
+ kenter("%lld", gc_at - now);
+
+ if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+ kdebug("IMMEDIATE");
+ schedule_work(&key_gc_work);
+ } else if (gc_at < key_gc_next_run) {
+ kdebug("DEFERRED");
+ key_gc_next_run = gc_at;
+ expires = jiffies + (gc_at - now) * HZ;
+ mod_timer(&key_gc_timer, expires);
+ }
+}
+
+/*
+ * Set the expiration time on a key.
+ */
+void key_set_expiry(struct key *key, time64_t expiry)
+{
+ key->expiry = expiry;
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ key_schedule_gc(expiry);
+ }
+}
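/*
 * Illustrative userspace example (editor's sketch, not part of this patch):
 * the interplay of expiry and key_gc_delay as seen from userspace.  Once the
 * timeout set via keyctl_set_timeout() passes, operations on the key fail
 * (typically with EKEYEXPIRED), but the key is only unlinked by the garbage
 * collector after a further key_gc_delay seconds (tunable via
 * /proc/sys/kernel/keys/gc_delay).
 */
#include <errno.h>
#include <keyutils.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	key_serial_t key = add_key("user", "example:ephemeral", "secret", 6,
				   KEY_SPEC_SESSION_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	keyctl_set_timeout(key, 2);	/* expire two seconds from now */
	sleep(3);
	if (keyctl_read(key, buf, sizeof(buf)) < 0)
		printf("read after expiry: %s\n", strerror(errno));
	return 0;
}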
+
+/*
+ * Schedule a dead links collection run.
+ */
+void key_schedule_gc_links(void)
+{
+ set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+ schedule_work(&key_gc_work);
+}
+
+/*
+ * A key's cleanup time has been reached after it expired, so get the
+ * reaper to run a cycle to find and collect expired keys.
+ */
+static void key_gc_timer_func(struct timer_list *unused)
+{
+ kenter("");
+ key_gc_next_run = TIME64_MAX;
+ key_schedule_gc_links();
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys. We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+ kenter("%s", ktype->name);
+
+ key_gc_dead_keytype = ktype;
+ set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ smp_mb();
+ set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+ kdebug("schedule");
+ schedule_work(&key_gc_work);
+
+ kdebug("sleep");
+ wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
+ TASK_UNINTERRUPTIBLE);
+
+ key_gc_dead_keytype = NULL;
+ kleave("");
+}
+
+/*
+ * Garbage collect a list of unreferenced, detached keys
+ */
+static noinline void key_gc_unused_keys(struct list_head *keys)
+{
+ while (!list_empty(keys)) {
+ struct key *key =
+ list_entry(keys->next, struct key, graveyard_link);
+ short state = key->state;
+
+ list_del(&key->graveyard_link);
+
+ kdebug("- %u", key->serial);
+ key_check(key);
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ remove_watch_list(key->watchers, key->serial);
+ key->watchers = NULL;
+#endif
+
+ /* Throw away the key data if the key is instantiated */
+ if (state == KEY_IS_POSITIVE && key->type->destroy)
+ key->type->destroy(key);
+
+ security_key_free(key);
+
+ /* deal with the user's key tracking and quota */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
+ }
+
+ atomic_dec(&key->user->nkeys);
+ if (state != KEY_IS_UNINSTANTIATED)
+ atomic_dec(&key->user->nikeys);
+
+ key_user_put(key->user);
+ key_put_tag(key->domain_tag);
+ kfree(key->description);
+
+ memzero_explicit(key, sizeof(*key));
+ kmem_cache_free(key_jar, key);
+ }
+}
+
+/*
+ * Garbage collector for unused keys.
+ *
+ * This is done in process context so that we don't have to disable interrupts
+ * all over the place. key_put() schedules this rather than trying to do the
+ * cleanup itself, which means key_put() doesn't have to sleep.
+ */
+static void key_garbage_collector(struct work_struct *work)
+{
+ static LIST_HEAD(graveyard);
+ static u8 gc_state; /* Internal persistent state */
+#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
+
+ struct rb_node *cursor;
+ struct key *key;
+ time64_t new_timer, limit, expiry;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = ktime_get_real_seconds();
+
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_LINKS;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+ kdebug("new pass %x", gc_state);
+
+ new_timer = TIME64_MAX;
+
+ /* As only this function is permitted to remove things from the key
+ * serial tree, a non-NULL cursor will always point to a valid node in
+ * the tree - even if the lock was dropped.
+ */
+ spin_lock(&key_serial_lock);
+ cursor = rb_first(&key_serial_tree);
+
+continue_scanning:
+ while (cursor) {
+ key = rb_entry(cursor, struct key, serial_node);
+ cursor = rb_next(cursor);
+
+ if (refcount_read(&key->usage) == 0)
+ goto found_unreferenced_key;
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+ if (key->type == key_gc_dead_keytype) {
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+ set_bit(KEY_FLAG_DEAD, &key->flags);
+ key->perm = 0;
+ goto skip_dead_key;
+ } else if (key->type == &key_type_keyring &&
+ key->restrict_link) {
+ goto found_restricted_keyring;
+ }
+ }
+
+ expiry = key->expiry;
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ if (expiry > limit && expiry < new_timer) {
+ kdebug("will expire %x in %lld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+ if (key->type == key_gc_dead_keytype)
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+
+ if ((gc_state & KEY_GC_REAPING_LINKS) ||
+ unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ if (key->type == &key_type_keyring)
+ goto found_keyring;
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+ if (key->type == key_gc_dead_keytype)
+ goto destroy_dead_key;
+
+ skip_dead_key:
+ if (spin_is_contended(&key_serial_lock) || need_resched())
+ goto contended;
+ }
+
+contended:
+ spin_unlock(&key_serial_lock);
+
+maybe_resched:
+ if (cursor) {
+ cond_resched();
+ spin_lock(&key_serial_lock);
+ goto continue_scanning;
+ }
+
+ /* We've completed the pass. Set the timer if we need to and queue a
+ * new cycle if necessary. We keep executing cycles until we find one
+ * where we didn't reap any keys.
+ */
+ kdebug("pass complete");
+
+ if (new_timer != TIME64_MAX) {
+ new_timer += key_gc_delay;
+ key_schedule_gc(new_timer);
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
+ !list_empty(&graveyard)) {
+ /* Make sure that all pending keyring payload destructions are
+ * fulfilled and that people aren't now looking at dead or
+ * dying keys that they don't have a reference upon or a link
+ * to.
+ */
+ kdebug("gc sync");
+ synchronize_rcu();
+ }
+
+ if (!list_empty(&graveyard)) {
+ kdebug("gc keys");
+ key_gc_unused_keys(&graveyard);
+ }
+
+ if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+ KEY_GC_REAPING_DEAD_2))) {
+ if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+ /* No remaining dead keys: short circuit the remaining
+ * keytype reap cycles.
+ */
+ kdebug("dead short");
+ gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+ gc_state |= KEY_GC_REAPING_DEAD_3;
+ } else {
+ gc_state |= KEY_GC_REAP_AGAIN;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+ kdebug("dead wake");
+ smp_mb();
+ clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+ }
+
+ if (gc_state & KEY_GC_REAP_AGAIN)
+ schedule_work(&key_gc_work);
+ kleave(" [end %x]", gc_state);
+ return;
+
+ /* We found an unreferenced key - once we've removed it from the tree,
+ * we can safely drop the lock.
+ */
+found_unreferenced_key:
+ kdebug("unrefd key %d", key->serial);
+ rb_erase(&key->serial_node, &key_serial_tree);
+ spin_unlock(&key_serial_lock);
+
+ list_add_tail(&key->graveyard_link, &graveyard);
+ gc_state |= KEY_GC_REAP_AGAIN;
+ goto maybe_resched;
+
+ /* We found a restricted keyring and need to update the restriction if
+ * it is associated with the dead key type.
+ */
+found_restricted_keyring:
+ spin_unlock(&key_serial_lock);
+ keyring_restriction_gc(key, key_gc_dead_keytype);
+ goto maybe_resched;
+
+ /* We found a keyring and we need to check the payload for links to
+ * dead or expired keys. We don't flag another reap immediately as we
+ * have to wait for the old payload to be destroyed by RCU before we
+ * can reap the keys to which it refers.
+ */
+found_keyring:
+ spin_unlock(&key_serial_lock);
+ keyring_gc(key, limit);
+ goto maybe_resched;
+
+ /* We found a dead key that is still referenced. Reset its type and
+ * destroy its payload with its semaphore held.
+ */
+destroy_dead_key:
+ spin_unlock(&key_serial_lock);
+ kdebug("destroy key %d", key->serial);
+ down_write(&key->sem);
+ key->type = &key_type_dead;
+ if (key_gc_dead_keytype->destroy)
+ key_gc_dead_keytype->destroy(key);
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+ up_write(&key->sem);
+ goto maybe_resched;
+}
diff --git a/security/keys/internal.h b/security/keys/internal.h
new file mode 100644
index 000000000..bede6c71f
--- /dev/null
+++ b/security/keys/internal.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Authentication token and access key management internal defs
+ *
+ * Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _INTERNAL_H
+#define _INTERNAL_H
+
+#include <linux/sched.h>
+#include <linux/wait_bit.h>
+#include <linux/cred.h>
+#include <linux/key-type.h>
+#include <linux/task_work.h>
+#include <linux/keyctl.h>
+#include <linux/refcount.h>
+#include <linux/watch_queue.h>
+#include <linux/compat.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+struct iovec;
+
+#ifdef __KDEBUG
+#define kenter(FMT, ...) \
+ printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+ printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+ printk(KERN_DEBUG " "FMT"\n", ##__VA_ARGS__)
+#else
+#define kenter(FMT, ...) \
+ no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+ no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+ no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
+
+extern struct key_type key_type_dead;
+extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
+
+/*****************************************************************************/
+/*
+ * Keep track of keys for a user.
+ *
+ * This needs to be separate from user_struct to avoid a refcount loop
+ * (user_struct pins some keyrings which pin this struct).
+ *
+ * We also keep track of keys under request from userspace for this UID here.
+ */
+struct key_user {
+ struct rb_node node;
+ struct mutex cons_lock; /* construction initiation lock */
+ spinlock_t lock;
+ refcount_t usage; /* for accessing qnkeys & qnbytes */
+ atomic_t nkeys; /* number of keys */
+ atomic_t nikeys; /* number of instantiated keys */
+ kuid_t uid;
+ int qnkeys; /* number of keys allocated to this user */
+ int qnbytes; /* number of bytes allocated to this user */
+};
+
+extern struct rb_root key_user_tree;
+extern spinlock_t key_user_lock;
+extern struct key_user root_key_user;
+
+extern struct key_user *key_user_lookup(kuid_t uid);
+extern void key_user_put(struct key_user *user);
+
+/*
+ * Key quota limits.
+ * - root has its own limits, separate from everyone else's
+ */
+extern unsigned key_quota_root_maxkeys;
+extern unsigned key_quota_root_maxbytes;
+extern unsigned key_quota_maxkeys;
+extern unsigned key_quota_maxbytes;
+
+#define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */
+
+
+extern struct kmem_cache *key_jar;
+extern struct rb_root key_serial_tree;
+extern spinlock_t key_serial_lock;
+extern struct mutex key_construction_mutex;
+extern wait_queue_head_t request_key_conswq;
+
+extern void key_set_index_key(struct keyring_index_key *index_key);
+extern struct key_type *key_type_lookup(const char *type);
+extern void key_type_put(struct key_type *ktype);
+
+extern int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key);
+extern int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key);
+extern int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit);
+extern int __key_link_check_live_key(struct key *keyring, struct key *key);
+extern void __key_link(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit);
+extern void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit);
+
+extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key);
+
+extern struct key *keyring_search_instkey(struct key *keyring,
+ key_serial_t target_id);
+
+extern int iterate_over_keyring(const struct key *keyring,
+ int (*func)(const struct key *key, void *data),
+ void *data);
+
+struct keyring_search_context {
+ struct keyring_index_key index_key;
+ const struct cred *cred;
+ struct key_match_data match_data;
+ unsigned flags;
+#define KEYRING_SEARCH_NO_STATE_CHECK 0x0001 /* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK 0x0002 /* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */
+#define KEYRING_SEARCH_RECURSE 0x0040 /* Search child keyrings also */
+
+ int (*iterator)(const void *object, void *iterator_data);
+
+ /* Internal stuff */
+ int skipped_ret;
+ bool possessed;
+ key_ref_t result;
+ time64_t now;
+};
+
+extern bool key_default_cmp(const struct key *key,
+ const struct key_match_data *match_data);
+extern key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx);
+
+extern key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx);
+
+extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+
+extern int look_up_user_keyrings(struct key **, struct key **);
+extern struct key *get_user_session_keyring_rcu(const struct cred *);
+extern int install_thread_keyring_to_cred(struct cred *);
+extern int install_process_keyring_to_cred(struct cred *);
+extern int install_session_keyring_to_cred(struct cred *, struct key *);
+
+extern struct key *request_key_and_link(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags);
+
+extern bool lookup_user_key_possessed(const struct key *key,
+ const struct key_match_data *match_data);
+#define KEY_LOOKUP_CREATE 0x01
+#define KEY_LOOKUP_PARTIAL 0x02
+
+extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct callback_head *twork);
+
+extern struct work_struct key_gc_work;
+extern unsigned key_gc_delay;
+extern void keyring_gc(struct key *keyring, time64_t limit);
+extern void keyring_restriction_gc(struct key *keyring,
+ struct key_type *dead_type);
+void key_set_expiry(struct key *key, time64_t expiry);
+extern void key_schedule_gc(time64_t gc_at);
+extern void key_schedule_gc_links(void);
+extern void key_gc_keytype(struct key_type *ktype);
+
+extern int key_task_permission(const key_ref_t key_ref,
+ const struct cred *cred,
+ enum key_need_perm need_perm);
+
+static inline void notify_key(struct key *key,
+ enum key_notification_subtype subtype, u32 aux)
+{
+#ifdef CONFIG_KEY_NOTIFICATIONS
+ struct key_notification n = {
+ .watch.type = WATCH_TYPE_KEY_NOTIFY,
+ .watch.subtype = subtype,
+ .watch.info = watch_sizeof(n),
+ .key_id = key_serial(key),
+ .aux = aux,
+ };
+
+ post_watch_notification(key->watchers, &n.watch, current_cred(),
+ n.key_id);
+#endif
+}
+
+/*
+ * Check to see whether permission is granted to use a key in the desired way.
+ */
+static inline int key_permission(const key_ref_t key_ref,
+ enum key_need_perm need_perm)
+{
+ return key_task_permission(key_ref, current_cred(), need_perm);
+}
+
+extern struct key_type key_type_request_key_auth;
+extern struct key *request_key_auth_new(struct key *target,
+ const char *op,
+ const void *callout_info,
+ size_t callout_len,
+ struct key *dest_keyring);
+
+extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
+
+/*
+ * Determine whether a key is dead.
+ */
+static inline bool key_is_dead(const struct key *key, time64_t limit)
+{
+ time64_t expiry = key->expiry;
+
+ if (expiry != TIME64_MAX) {
+ if (!(key->type->flags & KEY_TYPE_INSTANT_REAP))
+ expiry += key_gc_delay;
+ if (expiry <= limit)
+ return true;
+ }
+
+ return
+ key->flags & ((1 << KEY_FLAG_DEAD) |
+ (1 << KEY_FLAG_INVALIDATED)) ||
+ key->domain_tag->removed;
+}
+
+/*
+ * keyctl() functions
+ */
+extern long keyctl_get_keyring_ID(key_serial_t, int);
+extern long keyctl_join_session_keyring(const char __user *);
+extern long keyctl_update_key(key_serial_t, const void __user *, size_t);
+extern long keyctl_revoke_key(key_serial_t);
+extern long keyctl_keyring_clear(key_serial_t);
+extern long keyctl_keyring_link(key_serial_t, key_serial_t);
+extern long keyctl_keyring_move(key_serial_t, key_serial_t, key_serial_t, unsigned int);
+extern long keyctl_keyring_unlink(key_serial_t, key_serial_t);
+extern long keyctl_describe_key(key_serial_t, char __user *, size_t);
+extern long keyctl_keyring_search(key_serial_t, const char __user *,
+ const char __user *, key_serial_t);
+extern long keyctl_read_key(key_serial_t, char __user *, size_t);
+extern long keyctl_chown_key(key_serial_t, uid_t, gid_t);
+extern long keyctl_setperm_key(key_serial_t, key_perm_t);
+extern long keyctl_instantiate_key(key_serial_t, const void __user *,
+ size_t, key_serial_t);
+extern long keyctl_negate_key(key_serial_t, unsigned, key_serial_t);
+extern long keyctl_set_reqkey_keyring(int);
+extern long keyctl_set_timeout(key_serial_t, unsigned);
+extern long keyctl_assume_authority(key_serial_t);
+extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
+ size_t buflen);
+extern long keyctl_session_to_parent(void);
+extern long keyctl_reject_key(key_serial_t, unsigned, unsigned, key_serial_t);
+extern long keyctl_instantiate_key_iov(key_serial_t,
+ const struct iovec __user *,
+ unsigned, key_serial_t);
+extern long keyctl_invalidate_key(key_serial_t);
+extern long keyctl_restrict_keyring(key_serial_t id,
+ const char __user *_type,
+ const char __user *_restriction);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+extern long keyctl_get_persistent(uid_t, key_serial_t);
+extern unsigned persistent_keyring_expiry;
+#else
+static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_KEY_DH_OPERATIONS
+extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
+ size_t, struct keyctl_kdf_params __user *);
+extern long __keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *,
+ size_t, struct keyctl_kdf_params *);
+#ifdef CONFIG_COMPAT
+extern long compat_keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct compat_keyctl_kdf_params __user *kdf);
+#endif
+#define KEYCTL_KDF_MAX_OUTPUT_LEN 1024 /* max length of KDF output */
+#define KEYCTL_KDF_MAX_OI_LEN 64 /* max length of otherinfo */
+#else
+static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params __user *kdf)
+{
+ return -EOPNOTSUPP;
+}
+
+#ifdef CONFIG_COMPAT
+static inline long compat_keyctl_dh_compute(
+ struct keyctl_dh_params __user *params,
+ char __user *buffer, size_t buflen,
+ struct keyctl_kdf_params __user *kdf)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif
+
+#ifdef CONFIG_ASYMMETRIC_KEY_TYPE
+extern long keyctl_pkey_query(key_serial_t,
+ const char __user *,
+ struct keyctl_pkey_query __user *);
+
+extern long keyctl_pkey_verify(const struct keyctl_pkey_params __user *,
+ const char __user *,
+ const void __user *, const void __user *);
+
+extern long keyctl_pkey_e_d_s(int,
+ const struct keyctl_pkey_params __user *,
+ const char __user *,
+ const void __user *, void __user *);
+#else
+static inline long keyctl_pkey_query(key_serial_t id,
+ const char __user *_info,
+ struct keyctl_pkey_query __user *_res)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_verify(const struct keyctl_pkey_params __user *params,
+ const char __user *_info,
+ const void __user *_in,
+ const void __user *_in2)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_e_d_s(int op,
+ const struct keyctl_pkey_params __user *params,
+ const char __user *_info,
+ const void __user *_in,
+ void __user *_out)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+extern long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen);
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+extern long keyctl_watch_key(key_serial_t, int, int);
+#else
+static inline long keyctl_watch_key(key_serial_t key_id, int watch_fd, int watch_id)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+/*
+ * Debugging key validation
+ */
+#ifdef KEY_DEBUGGING
+extern void __key_check(const struct key *);
+
+static inline void key_check(const struct key *key)
+{
+ if (key && (IS_ERR(key) || key->magic != KEY_DEBUG_MAGIC))
+ __key_check(key);
+}
+
+#else
+
+#define key_check(key) do {} while(0)
+
+#endif
+#endif /* _INTERNAL_H */
diff --git a/security/keys/key.c b/security/keys/key.c
new file mode 100644
index 000000000..67ad0826e
--- /dev/null
+++ b/security/keys/key.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Basic authentication token and access key management
+ *
+ * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/workqueue.h>
+#include <linux/random.h>
+#include <linux/ima.h>
+#include <linux/err.h>
+#include "internal.h"
+
+struct kmem_cache *key_jar;
+struct rb_root key_serial_tree; /* tree of keys indexed by serial */
+DEFINE_SPINLOCK(key_serial_lock);
+
+struct rb_root key_user_tree; /* tree of quota records indexed by UID */
+DEFINE_SPINLOCK(key_user_lock);
+
+unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */
+unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
+unsigned int key_quota_maxkeys = 200; /* general key count quota */
+unsigned int key_quota_maxbytes = 20000; /* general key space quota */
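/*
 * Illustrative userspace example (editor's sketch, not part of this patch):
 * the four quota limits above are exported as sysctls by
 * security/keys/sysctl.c, so their current values can be read from procfs.
 */
#include <stdio.h>

static void show_limit(const char *name)
{
	char path[96], val[32];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/keys/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fgets(val, sizeof(val), f))
			printf("%-14s %s", name, val);
		fclose(f);
	}
}

int main(void)
{
	show_limit("maxkeys");
	show_limit("maxbytes");
	show_limit("root_maxkeys");
	show_limit("root_maxbytes");
	return 0;
}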
+
+static LIST_HEAD(key_types_list);
+static DECLARE_RWSEM(key_types_sem);
+
+/* We serialise key instantiation and link */
+DEFINE_MUTEX(key_construction_mutex);
+
+#ifdef KEY_DEBUGGING
+void __key_check(const struct key *key)
+{
+ printk("__key_check: key %p {%08x} should be {%08x}\n",
+ key, key->magic, KEY_DEBUG_MAGIC);
+ BUG();
+}
+#endif
+
+/*
+ * Get the key quota record for a user, allocating a new record if one doesn't
+ * already exist.
+ */
+struct key_user *key_user_lookup(kuid_t uid)
+{
+ struct key_user *candidate = NULL, *user;
+ struct rb_node *parent, **p;
+
+try_again:
+ parent = NULL;
+ p = &key_user_tree.rb_node;
+ spin_lock(&key_user_lock);
+
+ /* search the tree for a user record with a matching UID */
+ while (*p) {
+ parent = *p;
+ user = rb_entry(parent, struct key_user, node);
+
+ if (uid_lt(uid, user->uid))
+ p = &(*p)->rb_left;
+ else if (uid_gt(uid, user->uid))
+ p = &(*p)->rb_right;
+ else
+ goto found;
+ }
+
+ /* if we get here, we failed to find a match in the tree */
+ if (!candidate) {
+ /* allocate a candidate user record if we don't already have
+ * one */
+ spin_unlock(&key_user_lock);
+
+ user = NULL;
+ candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
+ if (unlikely(!candidate))
+ goto out;
+
+ /* the allocation may have scheduled, so we need to repeat the
+ * search lest someone else added the record whilst we were
+ * asleep */
+ goto try_again;
+ }
+
+ /* if we get here, then the user record still hadn't appeared on the
+ * second pass - so we use the candidate record */
+ refcount_set(&candidate->usage, 1);
+ atomic_set(&candidate->nkeys, 0);
+ atomic_set(&candidate->nikeys, 0);
+ candidate->uid = uid;
+ candidate->qnkeys = 0;
+ candidate->qnbytes = 0;
+ spin_lock_init(&candidate->lock);
+ mutex_init(&candidate->cons_lock);
+
+ rb_link_node(&candidate->node, parent, p);
+ rb_insert_color(&candidate->node, &key_user_tree);
+ spin_unlock(&key_user_lock);
+ user = candidate;
+ goto out;
+
+ /* okay - we found a user record for this UID */
+found:
+ refcount_inc(&user->usage);
+ spin_unlock(&key_user_lock);
+ kfree(candidate);
+out:
+ return user;
+}
+
+/*
+ * Dispose of a user structure
+ */
+void key_user_put(struct key_user *user)
+{
+ if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
+ rb_erase(&user->node, &key_user_tree);
+ spin_unlock(&key_user_lock);
+
+ kfree(user);
+ }
+}
+
+/*
+ * Allocate a serial number for a key. These are assigned randomly to avoid
+ * covert-channel security issues.
+ */
+static inline void key_alloc_serial(struct key *key)
+{
+ struct rb_node *parent, **p;
+ struct key *xkey;
+
+ /* propose a random serial number and look for a hole for it in the
+ * serial number tree */
+ do {
+ get_random_bytes(&key->serial, sizeof(key->serial));
+
+ key->serial >>= 1; /* negative numbers are not permitted */
+ } while (key->serial < 3);
+
+ spin_lock(&key_serial_lock);
+
+attempt_insertion:
+ parent = NULL;
+ p = &key_serial_tree.rb_node;
+
+ while (*p) {
+ parent = *p;
+ xkey = rb_entry(parent, struct key, serial_node);
+
+ if (key->serial < xkey->serial)
+ p = &(*p)->rb_left;
+ else if (key->serial > xkey->serial)
+ p = &(*p)->rb_right;
+ else
+ goto serial_exists;
+ }
+
+ /* we've found a suitable hole - arrange for this key to occupy it */
+ rb_link_node(&key->serial_node, parent, p);
+ rb_insert_color(&key->serial_node, &key_serial_tree);
+
+ spin_unlock(&key_serial_lock);
+ return;
+
+ /* we found a key with the proposed serial number - walk the tree from
+ * that point looking for the next unused serial number */
+serial_exists:
+ for (;;) {
+ key->serial++;
+ if (key->serial < 3) {
+ key->serial = 3;
+ goto attempt_insertion;
+ }
+
+ parent = rb_next(parent);
+ if (!parent)
+ goto attempt_insertion;
+
+ xkey = rb_entry(parent, struct key, serial_node);
+ if (key->serial < xkey->serial)
+ goto attempt_insertion;
+ }
+}
+
+/**
+ * key_alloc - Allocate a key of the specified type.
+ * @type: The type of key to allocate.
+ * @desc: The key description to allow the key to be searched out.
+ * @uid: The owner of the new key.
+ * @gid: The group ID for the new key's group permissions.
+ * @cred: The credentials specifying UID namespace.
+ * @perm: The permissions mask of the new key.
+ * @flags: Flags specifying quota properties.
+ * @restrict_link: Optional link restriction for new keyrings.
+ *
+ * Allocate a key of the specified type with the attributes given. The key is
+ * returned in an uninstantiated state and the caller needs to instantiate the
+ * key before returning.
+ *
+ * The restrict_link structure (if not NULL) will be freed when the
+ * keyring is destroyed, so it must be dynamically allocated.
+ *
+ * The user's key count quota is updated to reflect the creation of the key and
+ * the user's key data quota has the default for the key type reserved. The
+ * instantiation function should amend this as necessary. If insufficient
+ * quota is available, -EDQUOT will be returned.
+ *
+ * The LSM security modules can prevent a key being created, in which case
+ * -EACCES will be returned.
+ *
+ * Returns a pointer to the new key if successful and an error code otherwise.
+ *
+ * Note that the caller needs to ensure the key type isn't unregistered while
+ * this function runs. Internally this can be done by locking key_types_sem.
+ * Externally, this can be done by either never unregistering the key type, or
+ * making sure key_alloc() calls don't race with module unloading.
+ */
+struct key *key_alloc(struct key_type *type, const char *desc,
+ kuid_t uid, kgid_t gid, const struct cred *cred,
+ key_perm_t perm, unsigned long flags,
+ struct key_restriction *restrict_link)
+{
+ struct key_user *user = NULL;
+ struct key *key;
+ size_t desclen, quotalen;
+ int ret;
+
+ key = ERR_PTR(-EINVAL);
+ if (!desc || !*desc)
+ goto error;
+
+ if (type->vet_description) {
+ ret = type->vet_description(desc);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
+ desclen = strlen(desc);
+ quotalen = desclen + 1 + type->def_datalen;
+
+ /* get hold of the key tracking for this user */
+ user = key_user_lookup(uid);
+ if (!user)
+ goto no_memory_1;
+
+ /* check that the user's quota permits allocation of another key and
+ * its description */
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxkeys : key_quota_maxkeys;
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ spin_lock(&user->lock);
+ if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
+ if (user->qnkeys + 1 > maxkeys ||
+ user->qnbytes + quotalen > maxbytes ||
+ user->qnbytes + quotalen < user->qnbytes)
+ goto no_quota;
+ }
+
+ user->qnkeys++;
+ user->qnbytes += quotalen;
+ spin_unlock(&user->lock);
+ }
+
+ /* allocate and initialise the key and its description */
+ key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
+ if (!key)
+ goto no_memory_2;
+
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
+ if (!key->index_key.description)
+ goto no_memory_3;
+ key->index_key.type = type;
+ key_set_index_key(&key->index_key);
+
+ refcount_set(&key->usage, 1);
+ init_rwsem(&key->sem);
+ lockdep_set_class(&key->sem, &type->lock_class);
+ key->user = user;
+ key->quotalen = quotalen;
+ key->datalen = type->def_datalen;
+ key->uid = uid;
+ key->gid = gid;
+ key->perm = perm;
+ key->expiry = TIME64_MAX;
+ key->restrict_link = restrict_link;
+ key->last_used_at = ktime_get_real_seconds();
+
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
+ key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+ if (flags & KEY_ALLOC_BUILT_IN)
+ key->flags |= 1 << KEY_FLAG_BUILTIN;
+ if (flags & KEY_ALLOC_UID_KEYRING)
+ key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+ if (flags & KEY_ALLOC_SET_KEEP)
+ key->flags |= 1 << KEY_FLAG_KEEP;
+
+#ifdef KEY_DEBUGGING
+ key->magic = KEY_DEBUG_MAGIC;
+#endif
+
+ /* let the security module know about the key */
+ ret = security_key_alloc(key, cred, flags);
+ if (ret < 0)
+ goto security_error;
+
+ /* publish the key by giving it a serial number */
+ refcount_inc(&key->domain_tag->usage);
+ atomic_inc(&user->nkeys);
+ key_alloc_serial(key);
+
+error:
+ return key;
+
+security_error:
+ kfree(key->description);
+ kmem_cache_free(key_jar, key);
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+ spin_lock(&user->lock);
+ user->qnkeys--;
+ user->qnbytes -= quotalen;
+ spin_unlock(&user->lock);
+ }
+ key_user_put(user);
+ key = ERR_PTR(ret);
+ goto error;
+
+no_memory_3:
+ kmem_cache_free(key_jar, key);
+no_memory_2:
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+ spin_lock(&user->lock);
+ user->qnkeys--;
+ user->qnbytes -= quotalen;
+ spin_unlock(&user->lock);
+ }
+ key_user_put(user);
+no_memory_1:
+ key = ERR_PTR(-ENOMEM);
+ goto error;
+
+no_quota:
+ spin_unlock(&user->lock);
+ key_user_put(user);
+ key = ERR_PTR(-EDQUOT);
+ goto error;
+}
+EXPORT_SYMBOL(key_alloc);
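/*
 * Illustrative in-kernel sketch (editor's example, not part of this patch):
 * a minimal pattern for combining key_alloc() with key_instantiate_and_link()
 * as an in-kernel user of the "user" key type might do.  The description,
 * permission mask and lack of keyring linkage are assumptions made for the
 * example; assumes <linux/key.h>, <linux/cred.h> and <keys/user-type.h>.
 */
static struct key *example_publish_user_key(const void *blob, size_t len)
{
	const struct cred *cred = current_cred();
	struct key *key;
	int ret;

	key = key_alloc(&key_type_user, "example:blob",
			cred->fsuid, cred->fsgid, cred,
			KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
			KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(key))
		return key;

	ret = key_instantiate_and_link(key, blob, len, NULL, NULL);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}
	return key;
}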
+
+/**
+ * key_payload_reserve - Adjust data quota reservation for the key's payload
+ * @key: The key to make the reservation for.
+ * @datalen: The amount of data payload the caller now wants.
+ *
+ * Adjust the amount of the owning user's key data quota that a key reserves.
+ * If the amount is increased, then -EDQUOT may be returned if there isn't
+ * enough free quota available.
+ *
+ * If successful, 0 is returned.
+ */
+int key_payload_reserve(struct key *key, size_t datalen)
+{
+ int delta = (int)datalen - key->datalen;
+ int ret = 0;
+
+ key_check(key);
+
+ /* contemplate the quota adjustment */
+ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ spin_lock(&key->user->lock);
+
+ if (delta > 0 &&
+ (key->user->qnbytes + delta > maxbytes ||
+ key->user->qnbytes + delta < key->user->qnbytes)) {
+ ret = -EDQUOT;
+ }
+ else {
+ key->user->qnbytes += delta;
+ key->quotalen += delta;
+ }
+ spin_unlock(&key->user->lock);
+ }
+
+ /* change the recorded data length if that didn't generate an error */
+ if (ret == 0)
+ key->datalen = datalen;
+
+ return ret;
+}
+EXPORT_SYMBOL(key_payload_reserve);
+
+/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+ /* Commit the payload before setting the state; barrier versus
+ * key_read_state().
+ */
+ smp_store_release(&key->state,
+ (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
+/*
+ * Instantiate a key and link it into the target keyring atomically. Must be
+ * called with the target keyring's semaphore writelocked. The target key's
+ * semaphore need not be locked as instantiation is serialised by
+ * key_construction_mutex.
+ */
+static int __key_instantiate_and_link(struct key *key,
+ struct key_preparsed_payload *prep,
+ struct key *keyring,
+ struct key *authkey,
+ struct assoc_array_edit **_edit)
+{
+ int ret, awaken;
+
+ key_check(key);
+ key_check(keyring);
+
+ awaken = 0;
+ ret = -EBUSY;
+
+ mutex_lock(&key_construction_mutex);
+
+ /* can't instantiate twice */
+ if (key->state == KEY_IS_UNINSTANTIATED) {
+ /* instantiate the key */
+ ret = key->type->instantiate(key, prep);
+
+ if (ret == 0) {
+ /* mark the key as being instantiated */
+ atomic_inc(&key->user->nikeys);
+ mark_key_instantiated(key, 0);
+ notify_key(key, NOTIFY_KEY_INSTANTIATED, 0);
+
+ if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ awaken = 1;
+
+ /* and link it into the destination keyring */
+ if (keyring) {
+ if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
+ set_bit(KEY_FLAG_KEEP, &key->flags);
+
+ __key_link(keyring, key, _edit);
+ }
+
+ /* disable the authorisation key */
+ if (authkey)
+ key_invalidate(authkey);
+
+ key_set_expiry(key, prep->expiry);
+ }
+ }
+
+ mutex_unlock(&key_construction_mutex);
+
+ /* wake up anyone waiting for a key to be constructed */
+ if (awaken)
+ wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
+
+ return ret;
+}
+
+/**
+ * key_instantiate_and_link - Instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @data: The data to use to instantiate the keyring.
+ * @datalen: The length of @data.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Instantiate a key that's in the uninstantiated state using the provided data
+ * and, if successful, link it in to the destination keyring if one is
+ * supplied.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
+ */
+int key_instantiate_and_link(struct key *key,
+ const void *data,
+ size_t datalen,
+ struct key *keyring,
+ struct key *authkey)
+{
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit = NULL;
+ int ret;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.data = data;
+ prep.datalen = datalen;
+ prep.quotalen = key->type->def_datalen;
+ prep.expiry = TIME64_MAX;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
+ }
+
+ if (keyring) {
+ ret = __key_link_lock(keyring, &key->index_key);
+ if (ret < 0)
+ goto error;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto error_link_end;
+
+ if (keyring->restrict_link && keyring->restrict_link->check) {
+ struct key_restriction *keyres = keyring->restrict_link;
+
+ ret = keyres->check(keyring, key->type, &prep.payload,
+ keyres->key);
+ if (ret < 0)
+ goto error_link_end;
+ }
+ }
+
+ ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
+
+error_link_end:
+ if (keyring)
+ __key_link_end(keyring, &key->index_key, edit);
+
+error:
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+ return ret;
+}
+
+EXPORT_SYMBOL(key_instantiate_and_link);
+
+/**
+ * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @timeout: The timeout on the negative key.
+ * @error: The error to return when the key is hit.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Negatively instantiate a key that's in the uninstantiated state and, if
+ * successful, set its timeout and stored error and link it in to the
+ * destination keyring if one is supplied. The key and any links to the key
+ * will be automatically garbage collected after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the stored error code (typically ENOKEY) until the negative
+ * key expires.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
+ */
+int key_reject_and_link(struct key *key,
+ unsigned timeout,
+ unsigned error,
+ struct key *keyring,
+ struct key *authkey)
+{
+ struct assoc_array_edit *edit = NULL;
+ int ret, awaken, link_ret = 0;
+
+ key_check(key);
+ key_check(keyring);
+
+ awaken = 0;
+ ret = -EBUSY;
+
+ if (keyring) {
+ if (keyring->restrict_link)
+ return -EPERM;
+
+ link_ret = __key_link_lock(keyring, &key->index_key);
+ if (link_ret == 0) {
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (link_ret < 0)
+ __key_link_end(keyring, &key->index_key, edit);
+ }
+ }
+
+ mutex_lock(&key_construction_mutex);
+
+ /* can't instantiate twice */
+ if (key->state == KEY_IS_UNINSTANTIATED) {
+ /* mark the key as being negatively instantiated */
+ atomic_inc(&key->user->nikeys);
+ mark_key_instantiated(key, -error);
+ notify_key(key, NOTIFY_KEY_INSTANTIATED, -error);
+ key_set_expiry(key, ktime_get_real_seconds() + timeout);
+
+ if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
+ awaken = 1;
+
+ ret = 0;
+
+ /* and link it into the destination keyring */
+ if (keyring && link_ret == 0)
+ __key_link(keyring, key, &edit);
+
+ /* disable the authorisation key */
+ if (authkey)
+ key_invalidate(authkey);
+ }
+
+ mutex_unlock(&key_construction_mutex);
+
+ if (keyring && link_ret == 0)
+ __key_link_end(keyring, &key->index_key, edit);
+
+ /* wake up anyone waiting for a key to be constructed */
+ if (awaken)
+ wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
+
+ return ret == 0 ? link_ret : ret;
+}
+EXPORT_SYMBOL(key_reject_and_link);
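/*
 * Illustrative userspace example (editor's sketch, not part of this patch):
 * key_reject_and_link() is what backs KEYCTL_REJECT.  A request-key(8)
 * handler that has assumed authority over an uninstantiated key and cannot
 * supply a payload would typically reject it so that repeated lookups fail
 * fast for a while.  The timeout value is an assumption for the example.
 */
#include <errno.h>
#include <keyutils.h>

int example_reject_requested_key(key_serial_t uninstantiated_key)
{
	/* Negatively instantiate: lookups return ENOKEY for 30 seconds.
	 * A ringid of 0 asks for no extra keyring link to be made. */
	return keyctl_reject(uninstantiated_key, 30, ENOKEY, 0);
}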
+
+/**
+ * key_put - Discard a reference to a key.
+ * @key: The key to discard a reference from.
+ *
+ * Discard a reference to a key, and when all the references are gone, we
+ * schedule the cleanup task to come and pull it out of the tree in process
+ * context at some later time.
+ */
+void key_put(struct key *key)
+{
+ if (key) {
+ key_check(key);
+
+ if (refcount_dec_and_test(&key->usage))
+ schedule_work(&key_gc_work);
+ }
+}
+EXPORT_SYMBOL(key_put);
+
+/*
+ * Find a key by its serial number.
+ */
+struct key *key_lookup(key_serial_t id)
+{
+ struct rb_node *n;
+ struct key *key;
+
+ spin_lock(&key_serial_lock);
+
+ /* search the tree for the specified key */
+ n = key_serial_tree.rb_node;
+ while (n) {
+ key = rb_entry(n, struct key, serial_node);
+
+ if (id < key->serial)
+ n = n->rb_left;
+ else if (id > key->serial)
+ n = n->rb_right;
+ else
+ goto found;
+ }
+
+not_found:
+ key = ERR_PTR(-ENOKEY);
+ goto error;
+
+found:
+ /* A key is allowed to be looked up only if someone still owns a
+ * reference to it - otherwise it's awaiting the gc.
+ */
+ if (!refcount_inc_not_zero(&key->usage))
+ goto not_found;
+
+error:
+ spin_unlock(&key_serial_lock);
+ return key;
+}
+
+/*
+ * Find and lock the specified key type against removal.
+ *
+ * We return with the sem read-locked if successful. If the type wasn't
+ * available -ENOKEY is returned instead.
+ */
+struct key_type *key_type_lookup(const char *type)
+{
+ struct key_type *ktype;
+
+ down_read(&key_types_sem);
+
+ /* look up the key type to see if it's one of the registered kernel
+ * types */
+ list_for_each_entry(ktype, &key_types_list, link) {
+ if (strcmp(ktype->name, type) == 0)
+ goto found_kernel_type;
+ }
+
+ up_read(&key_types_sem);
+ ktype = ERR_PTR(-ENOKEY);
+
+found_kernel_type:
+ return ktype;
+}
+
+void key_set_timeout(struct key *key, unsigned timeout)
+{
+ time64_t expiry = TIME64_MAX;
+
+ /* make the changes with the locks held to prevent races */
+ down_write(&key->sem);
+
+ if (timeout > 0)
+ expiry = ktime_get_real_seconds() + timeout;
+ key_set_expiry(key, expiry);
+
+ up_write(&key->sem);
+}
+EXPORT_SYMBOL_GPL(key_set_timeout);
+
+/*
+ * Unlock a key type locked by key_type_lookup().
+ */
+void key_type_put(struct key_type *ktype)
+{
+ up_read(&key_types_sem);
+}
+
+/*
+ * Attempt to update an existing key.
+ *
+ * The key is given to us with an incremented refcount that we need to discard
+ * if we get an error.
+ */
+static inline key_ref_t __key_update(key_ref_t key_ref,
+ struct key_preparsed_payload *prep)
+{
+ struct key *key = key_ref_to_ptr(key_ref);
+ int ret;
+
+ /* need write permission on the key to update it */
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
+ if (ret < 0)
+ goto error;
+
+ ret = -EEXIST;
+ if (!key->type->update)
+ goto error;
+
+ down_write(&key->sem);
+
+ ret = key->type->update(key, prep);
+ if (ret == 0) {
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
+ notify_key(key, NOTIFY_KEY_UPDATED, 0);
+ }
+
+ up_write(&key->sem);
+
+ if (ret < 0)
+ goto error;
+out:
+ return key_ref;
+
+error:
+ key_put(key);
+ key_ref = ERR_PTR(ret);
+ goto out;
+}
+
+/**
+ * key_create_or_update - Update or create and instantiate a key.
+ * @keyring_ref: A pointer to the destination keyring with possession flag.
+ * @type: The type of key.
+ * @description: The searchable description for the key.
+ * @payload: The data to use to instantiate or update the key.
+ * @plen: The length of @payload.
+ * @perm: The permissions mask for a new key.
+ * @flags: The quota flags for a new key.
+ *
+ * Search the destination keyring for a key of the same description and if one
+ * is found, update it, otherwise create and instantiate a new one and create a
+ * link to it from that keyring.
+ *
+ * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
+ * concocted.
+ *
+ * Returns a pointer to the new key if successful, -ENODEV if the key type
+ * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
+ * caller isn't permitted to modify the keyring or the LSM did not permit
+ * creation of the key.
+ *
+ * On success, the possession flag from the keyring ref will be tacked on to
+ * the key ref before it is returned.
+ */
+key_ref_t key_create_or_update(key_ref_t keyring_ref,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags)
+{
+ struct keyring_index_key index_key = {
+ .description = description,
+ };
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit = NULL;
+ const struct cred *cred = current_cred();
+ struct key *keyring, *key = NULL;
+ key_ref_t key_ref;
+ int ret;
+ struct key_restriction *restrict_link = NULL;
+
+ /* look up the key type to see if it's one of the registered kernel
+ * types */
+ index_key.type = key_type_lookup(type);
+ if (IS_ERR(index_key.type)) {
+ key_ref = ERR_PTR(-ENODEV);
+ goto error;
+ }
+
+ key_ref = ERR_PTR(-EINVAL);
+ if (!index_key.type->instantiate ||
+ (!index_key.description && !index_key.type->preparse))
+ goto error_put_type;
+
+ keyring = key_ref_to_ptr(keyring_ref);
+
+ key_check(keyring);
+
+ if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
+ restrict_link = keyring->restrict_link;
+
+ key_ref = ERR_PTR(-ENOTDIR);
+ if (keyring->type != &key_type_keyring)
+ goto error_put_type;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = index_key.type->def_datalen;
+ prep.expiry = TIME64_MAX;
+ if (index_key.type->preparse) {
+ ret = index_key.type->preparse(&prep);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+ if (!index_key.description)
+ index_key.description = prep.description;
+ key_ref = ERR_PTR(-EINVAL);
+ if (!index_key.description)
+ goto error_free_prep;
+ }
+ index_key.desc_len = strlen(index_key.description);
+ key_set_index_key(&index_key);
+
+ ret = __key_link_lock(keyring, &index_key);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+
+ ret = __key_link_begin(keyring, &index_key, &edit);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
+ if (restrict_link && restrict_link->check) {
+ ret = restrict_link->check(keyring, index_key.type,
+ &prep.payload, restrict_link->key);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+ }
+
+ /* if we're going to allocate a new key, we're going to have
+ * to modify the keyring */
+ ret = key_permission(keyring_ref, KEY_NEED_WRITE);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
+ /* if it's possible to update this type of key, search for an existing
+ * key of the same type and description in the destination keyring and
+ * update that instead if possible
+ */
+ if (index_key.type->update) {
+ key_ref = find_key_to_update(keyring_ref, &index_key);
+ if (key_ref)
+ goto found_matching_key;
+ }
+
+ /* if the client doesn't provide, decide on the permissions we want */
+ if (perm == KEY_PERM_UNDEF) {
+ perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+ perm |= KEY_USR_VIEW;
+
+ if (index_key.type->read)
+ perm |= KEY_POS_READ;
+
+ if (index_key.type == &key_type_keyring ||
+ index_key.type->update)
+ perm |= KEY_POS_WRITE;
+ }
+
+ /* allocate a new key */
+ key = key_alloc(index_key.type, index_key.description,
+ cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
+ if (IS_ERR(key)) {
+ key_ref = ERR_CAST(key);
+ goto error_link_end;
+ }
+
+ /* instantiate it and link it into the target keyring */
+ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
+ if (ret < 0) {
+ key_put(key);
+ key_ref = ERR_PTR(ret);
+ goto error_link_end;
+ }
+
+ ima_post_key_create_or_update(keyring, key, payload, plen,
+ flags, true);
+
+ key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
+
+error_link_end:
+ __key_link_end(keyring, &index_key, edit);
+error_free_prep:
+ if (index_key.type->preparse)
+ index_key.type->free_preparse(&prep);
+error_put_type:
+ key_type_put(index_key.type);
+error:
+ return key_ref;
+
+ found_matching_key:
+ /* we found a matching key, so we're going to try to update it
+ * - we can drop the locks first as we have the key pinned
+ */
+ __key_link_end(keyring, &index_key, edit);
+
+ key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+ ret = wait_for_key_construction(key, true);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+ }
+
+ key_ref = __key_update(key_ref, &prep);
+
+ if (!IS_ERR(key_ref))
+ ima_post_key_create_or_update(keyring, key,
+ payload, plen,
+ flags, false);
+
+ goto error_free_prep;
+}
+EXPORT_SYMBOL(key_create_or_update);
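/*
 * Illustrative userspace example (editor's sketch, not part of this patch):
 * add_key(2) is the usual route into key_create_or_update().  Adding a key
 * whose description already exists in the target keyring updates that key in
 * place rather than creating a second one, so both calls below return the
 * same serial.
 */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	key_serial_t a, b;

	a = add_key("user", "example:token", "first", 5,
		    KEY_SPEC_SESSION_KEYRING);
	b = add_key("user", "example:token", "second", 6,
		    KEY_SPEC_SESSION_KEYRING);
	if (a < 0 || b < 0) {
		perror("add_key");
		return 1;
	}
	printf("created %d, updated %d\n", a, b);
	return 0;
}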
+
+/**
+ * key_update - Update a key's contents.
+ * @key_ref: The pointer (plus possession flag) to the key.
+ * @payload: The data to be used to update the key.
+ * @plen: The length of @payload.
+ *
+ * Attempt to update the contents of a key with the given payload data. The
+ * caller must be granted Write permission on the key. Negative keys can be
+ * instantiated by this method.
+ *
+ * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
+ * type does not support updating. The key type may return other errors.
+ */
+int key_update(key_ref_t key_ref, const void *payload, size_t plen)
+{
+ struct key_preparsed_payload prep;
+ struct key *key = key_ref_to_ptr(key_ref);
+ int ret;
+
+ key_check(key);
+
+ /* the key must be writable */
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
+ if (ret < 0)
+ return ret;
+
+ /* attempt to update it if supported */
+ if (!key->type->update)
+ return -EOPNOTSUPP;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = key->type->def_datalen;
+ prep.expiry = TIME64_MAX;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
+ }
+
+ down_write(&key->sem);
+
+ ret = key->type->update(key, &prep);
+ if (ret == 0) {
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
+ notify_key(key, NOTIFY_KEY_UPDATED, 0);
+ }
+
+ up_write(&key->sem);
+
+error:
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+ return ret;
+}
+EXPORT_SYMBOL(key_update);
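+
+/*
+ * Illustrative sketch (not part of the upstream code): refreshing a key's
+ * payload from kernel code.  The key pointer, data and length are assumed
+ * to come from the caller; example_refresh_key() is a hypothetical helper.
+ *
+ *   static int example_refresh_key(struct key *key,
+ *                                  const void *data, size_t datalen)
+ *   {
+ *           // key_update() checks Write permission and calls the type's
+ *           // ->update() op under the key semaphore.
+ *           return key_update(make_key_ref(key, true), data, datalen);
+ *   }
+ */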
+
+/**
+ * key_revoke - Revoke a key.
+ * @key: The key to be revoked.
+ *
+ * Mark a key as being revoked and ask the type to free up its resources. The
+ * revocation timeout is set and the key and all its links will be
+ * automatically garbage collected after key_gc_delay amount of time if they
+ * are not manually dealt with first.
+ */
+void key_revoke(struct key *key)
+{
+ time64_t time;
+
+ key_check(key);
+
+ /* make sure no one's trying to change or use the key when we mark it
+ * - we tell lockdep that we might nest because we might be revoking an
+ * authorisation key whilst holding the sem on a key we've just
+ * instantiated
+ */
+ down_write_nested(&key->sem, 1);
+ if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags)) {
+ notify_key(key, NOTIFY_KEY_REVOKED, 0);
+ if (key->type->revoke)
+ key->type->revoke(key);
+
+ /* set the death time to no more than the expiry time */
+ time = ktime_get_real_seconds();
+ if (key->revoked_at == 0 || key->revoked_at > time) {
+ key->revoked_at = time;
+ key_schedule_gc(key->revoked_at + key_gc_delay);
+ }
+ }
+
+ up_write(&key->sem);
+}
+EXPORT_SYMBOL(key_revoke);
+
+/**
+ * key_invalidate - Invalidate a key.
+ * @key: The key to be invalidated.
+ *
+ * Mark a key as being invalidated and have it cleaned up immediately. The key
+ * is ignored by all searches and other operations from this point.
+ */
+void key_invalidate(struct key *key)
+{
+ kenter("%d", key_serial(key));
+
+ key_check(key);
+
+ if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ down_write_nested(&key->sem, 1);
+ if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ notify_key(key, NOTIFY_KEY_INVALIDATED, 0);
+ key_schedule_gc_links();
+ }
+ up_write(&key->sem);
+ }
+}
+EXPORT_SYMBOL(key_invalidate);
+
+/**
+ * generic_key_instantiate - Simple instantiation of a key from preparsed data
+ * @key: The key to be instantiated
+ * @prep: The preparsed data to load.
+ *
+ * Instantiate a key from preparsed data. We assume we can just copy the data
+ * in directly and clear the old pointers.
+ *
+ * This can be pointed to directly by the key type instantiate op pointer.
+ */
+int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+ int ret;
+
+ pr_devel("==>%s()\n", __func__);
+
+ ret = key_payload_reserve(key, prep->quotalen);
+ if (ret == 0) {
+ rcu_assign_keypointer(key, prep->payload.data[0]);
+ key->payload.data[1] = prep->payload.data[1];
+ key->payload.data[2] = prep->payload.data[2];
+ key->payload.data[3] = prep->payload.data[3];
+ prep->payload.data[0] = NULL;
+ prep->payload.data[1] = NULL;
+ prep->payload.data[2] = NULL;
+ prep->payload.data[3] = NULL;
+ }
+ pr_devel("<==%s() = %d\n", __func__, ret);
+ return ret;
+}
+EXPORT_SYMBOL(generic_key_instantiate);
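+
+/*
+ * Illustrative sketch (not part of the upstream code): a key type that
+ * preparses its payload can point its ->instantiate op directly at
+ * generic_key_instantiate().  The "example" type and its preparse/destroy
+ * helpers are hypothetical.
+ *
+ *   static struct key_type key_type_example = {
+ *           .name          = "example",
+ *           .preparse      = example_preparse,
+ *           .free_preparse = example_free_preparse,
+ *           .instantiate   = generic_key_instantiate,
+ *           .destroy       = example_destroy,
+ *   };
+ */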
+
+/**
+ * register_key_type - Register a type of key.
+ * @ktype: The new key type.
+ *
+ * Register a new key type.
+ *
+ * Returns 0 on success or -EEXIST if a type of this name already exists.
+ */
+int register_key_type(struct key_type *ktype)
+{
+ struct key_type *p;
+ int ret;
+
+ memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
+
+ ret = -EEXIST;
+ down_write(&key_types_sem);
+
+ /* disallow key types with the same name */
+ list_for_each_entry(p, &key_types_list, link) {
+ if (strcmp(p->name, ktype->name) == 0)
+ goto out;
+ }
+
+ /* store the type */
+ list_add(&ktype->link, &key_types_list);
+
+ pr_notice("Key type %s registered\n", ktype->name);
+ ret = 0;
+
+out:
+ up_write(&key_types_sem);
+ return ret;
+}
+EXPORT_SYMBOL(register_key_type);
+
+/**
+ * unregister_key_type - Unregister a type of key.
+ * @ktype: The key type.
+ *
+ * Unregister a key type and mark all the extant keys of this type as dead.
+ * Those keys of this type are then destroyed to get rid of their payloads and
+ * they and their links will be garbage collected as soon as possible.
+ */
+void unregister_key_type(struct key_type *ktype)
+{
+ down_write(&key_types_sem);
+ list_del_init(&ktype->link);
+ downgrade_write(&key_types_sem);
+ key_gc_keytype(ktype);
+ pr_notice("Key type %s unregistered\n", ktype->name);
+ up_read(&key_types_sem);
+}
+EXPORT_SYMBOL(unregister_key_type);
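+
+/*
+ * Illustrative sketch (not part of the upstream code): a module would
+ * normally pair these calls in its init/exit hooks; key_type_example is
+ * the hypothetical type sketched above.
+ *
+ *   static int __init example_keytype_init(void)
+ *   {
+ *           return register_key_type(&key_type_example);
+ *   }
+ *
+ *   static void __exit example_keytype_exit(void)
+ *   {
+ *           unregister_key_type(&key_type_example);
+ *   }
+ *
+ *   module_init(example_keytype_init);
+ *   module_exit(example_keytype_exit);
+ */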
+
+/*
+ * Initialise the key management state.
+ */
+void __init key_init(void)
+{
+ /* allocate a slab in which we can store keys */
+ key_jar = kmem_cache_create("key_jar", sizeof(struct key),
+ 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+
+ /* add the special key types */
+ list_add_tail(&key_type_keyring.link, &key_types_list);
+ list_add_tail(&key_type_dead.link, &key_types_list);
+ list_add_tail(&key_type_user.link, &key_types_list);
+ list_add_tail(&key_type_logon.link, &key_types_list);
+
+ /* record the root user tracking */
+ rb_link_node(&root_key_user.node,
+ NULL,
+ &key_user_tree.rb_node);
+
+ rb_insert_color(&root_key_user.node,
+ &key_user_tree);
+}
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
new file mode 100644
index 000000000..e3ffaf5ad
--- /dev/null
+++ b/security/keys/keyctl.c
@@ -0,0 +1,2026 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Userspace key control operations
+ *
+ * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/capability.h>
+#include <linux/cred.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <keys/request_key_auth-type.h>
+#include "internal.h"
+
+#define KEY_MAX_DESC_SIZE 4096
+
+static const unsigned char keyrings_capabilities[2] = {
+ [0] = (KEYCTL_CAPS0_CAPABILITIES |
+ (IS_ENABLED(CONFIG_PERSISTENT_KEYRINGS) ? KEYCTL_CAPS0_PERSISTENT_KEYRINGS : 0) |
+ (IS_ENABLED(CONFIG_KEY_DH_OPERATIONS) ? KEYCTL_CAPS0_DIFFIE_HELLMAN : 0) |
+ (IS_ENABLED(CONFIG_ASYMMETRIC_KEY_TYPE) ? KEYCTL_CAPS0_PUBLIC_KEY : 0) |
+ (IS_ENABLED(CONFIG_BIG_KEYS) ? KEYCTL_CAPS0_BIG_KEY : 0) |
+ KEYCTL_CAPS0_INVALIDATE |
+ KEYCTL_CAPS0_RESTRICT_KEYRING |
+ KEYCTL_CAPS0_MOVE
+ ),
+ [1] = (KEYCTL_CAPS1_NS_KEYRING_NAME |
+ KEYCTL_CAPS1_NS_KEY_TAG |
+ (IS_ENABLED(CONFIG_KEY_NOTIFICATIONS) ? KEYCTL_CAPS1_NOTIFICATIONS : 0)
+ ),
+};
+
+static int key_get_type_from_user(char *type,
+ const char __user *_type,
+ unsigned len)
+{
+ int ret;
+
+ ret = strncpy_from_user(type, _type, len);
+ if (ret < 0)
+ return ret;
+ if (ret == 0 || ret >= len)
+ return -EINVAL;
+ if (type[0] == '.')
+ return -EPERM;
+ type[len - 1] = '\0';
+ return 0;
+}
+
+/*
+ * Extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring.
+ *
+ * If the description is NULL or an empty string, the key type is asked to
+ * generate one from the payload.
+ *
+ * The keyring must be writable so that we can attach the key to it.
+ *
+ * If successful, the new key's serial number is returned, otherwise an error
+ * code is returned.
+ */
+SYSCALL_DEFINE5(add_key, const char __user *, _type,
+ const char __user *, _description,
+ const void __user *, _payload,
+ size_t, plen,
+ key_serial_t, ringid)
+{
+ key_ref_t keyring_ref, key_ref;
+ char type[32], *description;
+ void *payload;
+ long ret;
+
+ ret = -EINVAL;
+ if (plen > 1024 * 1024 - 1)
+ goto error;
+
+ /* draw all the data into kernel space */
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ description = NULL;
+ if (_description) {
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+ if (!*description) {
+ kfree(description);
+ description = NULL;
+ } else if ((description[0] == '.') &&
+ (strncmp(type, "keyring", 7) == 0)) {
+ ret = -EPERM;
+ goto error2;
+ }
+ }
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+
+ if (plen) {
+ ret = -ENOMEM;
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error2;
+
+ ret = -EFAULT;
+ if (copy_from_user(payload, _payload, plen) != 0)
+ goto error3;
+ }
+
+ /* find the target keyring (which must be writable) */
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error3;
+ }
+
+ /* create or update the requested key and add it to the target
+ * keyring */
+ key_ref = key_create_or_update(keyring_ref, type, description,
+ payload, plen, KEY_PERM_UNDEF,
+ KEY_ALLOC_IN_QUOTA);
+ if (!IS_ERR(key_ref)) {
+ ret = key_ref_to_ptr(key_ref)->serial;
+ key_ref_put(key_ref);
+ }
+ else {
+ ret = PTR_ERR(key_ref);
+ }
+
+ key_ref_put(keyring_ref);
+ error3:
+ kvfree_sensitive(payload, plen);
+ error2:
+ kfree(description);
+ error:
+ return ret;
+}
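+
+/*
+ * Userspace sketch (assumes libkeyutils; not part of the upstream code):
+ * adding a "user" key to the session keyring.  The description and payload
+ * are placeholders.
+ *
+ *   #include <keyutils.h>
+ *
+ *   key_serial_t id = add_key("user", "example:token", "secret", 6,
+ *                             KEY_SPEC_SESSION_KEYRING);
+ *   if (id == -1)
+ *           perror("add_key");
+ */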
+
+/*
+ * Search the process keyrings and keyring trees linked from those for a
+ * matching key. Keyrings must have appropriate Search permission to be
+ * searched.
+ *
+ * If a key is found, it will be attached to the destination keyring if there's
+ * one specified and the serial number of the key will be returned.
+ *
+ * If no key is found, /sbin/request-key will be invoked if _callout_info is
+ * non-NULL in an attempt to create a key. The _callout_info string will be
+ * passed to /sbin/request-key to aid with completing the request. If the
+ * _callout_info string is "" then it will be changed to "-".
+ */
+SYSCALL_DEFINE4(request_key, const char __user *, _type,
+ const char __user *, _description,
+ const char __user *, _callout_info,
+ key_serial_t, destringid)
+{
+ struct key_type *ktype;
+ struct key *key;
+ key_ref_t dest_ref;
+ size_t callout_len;
+ char type[32], *description, *callout_info;
+ long ret;
+
+ /* pull the type into kernel space */
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ /* pull the description into kernel space */
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+
+ /* pull the callout info into kernel space */
+ callout_info = NULL;
+ callout_len = 0;
+ if (_callout_info) {
+ callout_info = strndup_user(_callout_info, PAGE_SIZE);
+ if (IS_ERR(callout_info)) {
+ ret = PTR_ERR(callout_info);
+ goto error2;
+ }
+ callout_len = strlen(callout_info);
+ }
+
+ /* get the destination keyring if specified */
+ dest_ref = NULL;
+ if (destringid) {
+ dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+ KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref)) {
+ ret = PTR_ERR(dest_ref);
+ goto error3;
+ }
+ }
+
+ /* find the key type */
+ ktype = key_type_lookup(type);
+ if (IS_ERR(ktype)) {
+ ret = PTR_ERR(ktype);
+ goto error4;
+ }
+
+ /* do the search */
+ key = request_key_and_link(ktype, description, NULL, callout_info,
+ callout_len, NULL, key_ref_to_ptr(dest_ref),
+ KEY_ALLOC_IN_QUOTA);
+ if (IS_ERR(key)) {
+ ret = PTR_ERR(key);
+ goto error5;
+ }
+
+ /* wait for the key to finish being constructed */
+ ret = wait_for_key_construction(key, 1);
+ if (ret < 0)
+ goto error6;
+
+ ret = key->serial;
+
+error6:
+ key_put(key);
+error5:
+ key_type_put(ktype);
+error4:
+ key_ref_put(dest_ref);
+error3:
+ kfree(callout_info);
+error2:
+ kfree(description);
+error:
+ return ret;
+}
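+
+/*
+ * Userspace sketch (assumes libkeyutils; not part of the upstream code):
+ * requesting a "user" key and caching it in the session keyring.  If no
+ * key is found, /sbin/request-key is invoked with the callout string.
+ *
+ *   #include <keyutils.h>
+ *
+ *   key_serial_t id = request_key("user", "example:token", "callout-data",
+ *                                 KEY_SPEC_SESSION_KEYRING);
+ *   if (id == -1)
+ *           perror("request_key");
+ */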
+
+/*
+ * Get the ID of the specified process keyring.
+ *
+ * The requested keyring must have search permission to be found.
+ *
+ * If successful, the ID of the requested keyring will be returned.
+ */
+long keyctl_get_keyring_ID(key_serial_t id, int create)
+{
+ key_ref_t key_ref;
+ unsigned long lflags;
+ long ret;
+
+ lflags = create ? KEY_LOOKUP_CREATE : 0;
+ key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ ret = key_ref_to_ptr(key_ref)->serial;
+ key_ref_put(key_ref);
+error:
+ return ret;
+}
+
+/*
+ * Join a (named) session keyring.
+ *
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary. A named session keyring must have Search
+ * permission for it to be joined. Session keyrings without this permit will
+ * be skipped over. It is not permitted for userspace to create or join
+ * keyrings whose name begins with a dot.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
+ */
+long keyctl_join_session_keyring(const char __user *_name)
+{
+ char *name;
+ long ret;
+
+ /* fetch the name from userspace */
+ name = NULL;
+ if (_name) {
+ name = strndup_user(_name, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(name)) {
+ ret = PTR_ERR(name);
+ goto error;
+ }
+
+ ret = -EPERM;
+ if (name[0] == '.')
+ goto error_name;
+ }
+
+ /* join the session */
+ ret = join_session_keyring(name);
+error_name:
+ kfree(name);
+error:
+ return ret;
+}
+
+/*
+ * Update a key's data payload from the given data.
+ *
+ * The key must grant the caller Write permission and the key type must support
+ * updating for this to work. A negative key can be positively instantiated
+ * with this call.
+ *
+ * If successful, 0 will be returned. If the key type does not support
+ * updating, then -EOPNOTSUPP will be returned.
+ */
+long keyctl_update_key(key_serial_t id,
+ const void __user *_payload,
+ size_t plen)
+{
+ key_ref_t key_ref;
+ void *payload;
+ long ret;
+
+ ret = -EINVAL;
+ if (plen > PAGE_SIZE)
+ goto error;
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+ if (plen) {
+ ret = -ENOMEM;
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error;
+
+ ret = -EFAULT;
+ if (copy_from_user(payload, _payload, plen) != 0)
+ goto error2;
+ }
+
+ /* find the target key (which must be writable) */
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error2;
+ }
+
+ /* update the key */
+ ret = key_update(key_ref, payload, plen);
+
+ key_ref_put(key_ref);
+error2:
+ kvfree_sensitive(payload, plen);
+error:
+ return ret;
+}
+
+/*
+ * Revoke a key.
+ *
+ * The key must grant the caller Write or Setattr permission for this to
+ * work. The key type should give up its quota claim when revoked. The key
+ * and any links to the key will be automatically garbage collected after a
+ * certain amount of time (/proc/sys/kernel/keys/gc_delay).
+ *
+ * Keys with KEY_FLAG_KEEP set should not be revoked.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_revoke_key(key_serial_t id)
+{
+ key_ref_t key_ref;
+ struct key *key;
+ long ret;
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ if (ret != -EACCES)
+ goto error;
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+ }
+
+ key = key_ref_to_ptr(key_ref);
+ ret = 0;
+ if (test_bit(KEY_FLAG_KEEP, &key->flags))
+ ret = -EPERM;
+ else
+ key_revoke(key);
+
+ key_ref_put(key_ref);
+error:
+ return ret;
+}
+
+/*
+ * Invalidate a key.
+ *
+ * The key must grant the caller Invalidate permission for this to work.
+ * The key and any links to the key will be automatically garbage collected
+ * immediately.
+ *
+ * Keys with KEY_FLAG_KEEP set should not be invalidated.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_invalidate_key(key_serial_t id)
+{
+ key_ref_t key_ref;
+ struct key *key;
+ long ret;
+
+ kenter("%d", id);
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+
+ /* Root is permitted to invalidate certain special keys */
+ if (capable(CAP_SYS_ADMIN)) {
+ key_ref = lookup_user_key(id, 0, KEY_SYSADMIN_OVERRIDE);
+ if (IS_ERR(key_ref))
+ goto error;
+ if (test_bit(KEY_FLAG_ROOT_CAN_INVAL,
+ &key_ref_to_ptr(key_ref)->flags))
+ goto invalidate;
+ goto error_put;
+ }
+
+ goto error;
+ }
+
+invalidate:
+ key = key_ref_to_ptr(key_ref);
+ ret = 0;
+ if (test_bit(KEY_FLAG_KEEP, &key->flags))
+ ret = -EPERM;
+ else
+ key_invalidate(key);
+error_put:
+ key_ref_put(key_ref);
+error:
+ kleave(" = %ld", ret);
+ return ret;
+}
+
+/*
+ * Clear the specified keyring, creating an empty process keyring if one of the
+ * special keyring IDs is used.
+ *
+ * The keyring must grant the caller Write permission and not have
+ * KEY_FLAG_KEEP set for this to work. If successful, 0 will be returned.
+ */
+long keyctl_keyring_clear(key_serial_t ringid)
+{
+ key_ref_t keyring_ref;
+ struct key *keyring;
+ long ret;
+
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+
+ /* Root is permitted to invalidate certain special keyrings */
+ if (capable(CAP_SYS_ADMIN)) {
+ keyring_ref = lookup_user_key(ringid, 0,
+ KEY_SYSADMIN_OVERRIDE);
+ if (IS_ERR(keyring_ref))
+ goto error;
+ if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
+ &key_ref_to_ptr(keyring_ref)->flags))
+ goto clear;
+ goto error_put;
+ }
+
+ goto error;
+ }
+
+clear:
+ keyring = key_ref_to_ptr(keyring_ref);
+ if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
+ ret = -EPERM;
+ else
+ ret = keyring_clear(keyring);
+error_put:
+ key_ref_put(keyring_ref);
+error:
+ return ret;
+}
+
+/*
+ * Create a link from a keyring to a key if there's no matching key in the
+ * keyring, otherwise replace the link to the matching key with a link to the
+ * new key.
+ *
+ * The key must grant the caller Link permission and the keyring must grant
+ * the caller Write permission. Furthermore, if an additional link is created,
+ * the keyring's quota will be extended.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
+{
+ key_ref_t keyring_ref, key_ref;
+ long ret;
+
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error;
+ }
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error2;
+ }
+
+ ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
+
+ key_ref_put(key_ref);
+error2:
+ key_ref_put(keyring_ref);
+error:
+ return ret;
+}
+
+/*
+ * Unlink a key from a keyring.
+ *
+ * The keyring must grant the caller Write permission for this to work; the key
+ * itself need not grant the caller anything. If the last link to a key is
+ * removed then that key will be scheduled for destruction.
+ *
+ * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
+{
+ key_ref_t keyring_ref, key_ref;
+ struct key *keyring, *key;
+ long ret;
+
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error;
+ }
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_PARTIAL, KEY_NEED_UNLINK);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error2;
+ }
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
+ test_bit(KEY_FLAG_KEEP, &key->flags))
+ ret = -EPERM;
+ else
+ ret = key_unlink(keyring, key);
+
+ key_ref_put(key_ref);
+error2:
+ key_ref_put(keyring_ref);
+error:
+ return ret;
+}
+
+/*
+ * Move a link to a key from one keyring to another, displacing any matching
+ * key from the destination keyring.
+ *
+ * The key must grant the caller Link permission and both keyrings must grant
+ * the caller Write permission. There must also be a link in the from keyring
+ * to the key. If both keyrings are the same, nothing is done.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_keyring_move(key_serial_t id, key_serial_t from_ringid,
+ key_serial_t to_ringid, unsigned int flags)
+{
+ key_ref_t key_ref, from_ref, to_ref;
+ long ret;
+
+ if (flags & ~KEYCTL_MOVE_EXCL)
+ return -EINVAL;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+
+ from_ref = lookup_user_key(from_ringid, 0, KEY_NEED_WRITE);
+ if (IS_ERR(from_ref)) {
+ ret = PTR_ERR(from_ref);
+ goto error2;
+ }
+
+ to_ref = lookup_user_key(to_ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(to_ref)) {
+ ret = PTR_ERR(to_ref);
+ goto error3;
+ }
+
+ ret = key_move(key_ref_to_ptr(key_ref), key_ref_to_ptr(from_ref),
+ key_ref_to_ptr(to_ref), flags);
+
+ key_ref_put(to_ref);
+error3:
+ key_ref_put(from_ref);
+error2:
+ key_ref_put(key_ref);
+ return ret;
+}
+
+/*
+ * Return a description of a key to userspace.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, we place up to buflen bytes of data into it formatted
+ * in the following way:
+ *
+ * type;uid;gid;perm;description<NUL>
+ *
+ * If successful, we return the amount of description available, irrespective
+ * of how much we may have copied into the buffer.
+ */
+long keyctl_describe_key(key_serial_t keyid,
+ char __user *buffer,
+ size_t buflen)
+{
+ struct key *key, *instkey;
+ key_ref_t key_ref;
+ char *infobuf;
+ long ret;
+ int desclen, infolen;
+
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
+ if (IS_ERR(key_ref)) {
+ /* viewing a key under construction is permitted if we have the
+ * authorisation token handy */
+ if (PTR_ERR(key_ref) == -EACCES) {
+ instkey = key_get_instantiation_authkey(keyid);
+ if (!IS_ERR(instkey)) {
+ key_put(instkey);
+ key_ref = lookup_user_key(keyid,
+ KEY_LOOKUP_PARTIAL,
+ KEY_AUTHTOKEN_OVERRIDE);
+ if (!IS_ERR(key_ref))
+ goto okay;
+ }
+ }
+
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+okay:
+ key = key_ref_to_ptr(key_ref);
+ desclen = strlen(key->description);
+
+ /* calculate how much information we're going to return */
+ ret = -ENOMEM;
+ infobuf = kasprintf(GFP_KERNEL,
+ "%s;%d;%d;%08x;",
+ key->type->name,
+ from_kuid_munged(current_user_ns(), key->uid),
+ from_kgid_munged(current_user_ns(), key->gid),
+ key->perm);
+ if (!infobuf)
+ goto error2;
+ infolen = strlen(infobuf);
+ ret = infolen + desclen + 1;
+
+ /* consider returning the data */
+ if (buffer && buflen >= ret) {
+ if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+ copy_to_user(buffer + infolen, key->description,
+ desclen + 1) != 0)
+ ret = -EFAULT;
+ }
+
+ kfree(infobuf);
+error2:
+ key_ref_put(key_ref);
+error:
+ return ret;
+}
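+
+/*
+ * Userspace sketch (assumes libkeyutils; not part of the upstream code):
+ * the usual pattern is to call keyctl_describe() once with a NULL buffer
+ * to learn the size, then again to fetch "type;uid;gid;perm;description".
+ *
+ *   #include <keyutils.h>
+ *   #include <stdio.h>
+ *   #include <stdlib.h>
+ *
+ *   long len = keyctl_describe(id, NULL, 0);
+ *   char *buf = len > 0 ? malloc(len) : NULL;
+ *   if (buf && keyctl_describe(id, buf, len) > 0)
+ *           printf("%s\n", buf);
+ */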
+
+/*
+ * Search the specified keyring and any keyrings it links to for a matching
+ * key. Only keyrings that grant the caller Search permission will be searched
+ * (this includes the starting keyring). Only keys with Search permission can
+ * be found.
+ *
+ * If successful, the found key will be linked to the destination keyring if
+ * supplied and the key has Link permission, and the found key ID will be
+ * returned.
+ */
+long keyctl_keyring_search(key_serial_t ringid,
+ const char __user *_type,
+ const char __user *_description,
+ key_serial_t destringid)
+{
+ struct key_type *ktype;
+ key_ref_t keyring_ref, key_ref, dest_ref;
+ char type[32], *description;
+ long ret;
+
+ /* pull the type and description into kernel space */
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+
+ /* get the keyring at which to begin the search */
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(keyring_ref)) {
+ ret = PTR_ERR(keyring_ref);
+ goto error2;
+ }
+
+ /* get the destination keyring if specified */
+ dest_ref = NULL;
+ if (destringid) {
+ dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
+ KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref)) {
+ ret = PTR_ERR(dest_ref);
+ goto error3;
+ }
+ }
+
+ /* find the key type */
+ ktype = key_type_lookup(type);
+ if (IS_ERR(ktype)) {
+ ret = PTR_ERR(ktype);
+ goto error4;
+ }
+
+ /* do the search */
+ key_ref = keyring_search(keyring_ref, ktype, description, true);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+
+ /* treat lack or presence of a negative key the same */
+ if (ret == -EAGAIN)
+ ret = -ENOKEY;
+ goto error5;
+ }
+
+ /* link the resulting key to the destination keyring if we can */
+ if (dest_ref) {
+ ret = key_permission(key_ref, KEY_NEED_LINK);
+ if (ret < 0)
+ goto error6;
+
+ ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
+ if (ret < 0)
+ goto error6;
+ }
+
+ ret = key_ref_to_ptr(key_ref)->serial;
+
+error6:
+ key_ref_put(key_ref);
+error5:
+ key_type_put(ktype);
+error4:
+ key_ref_put(dest_ref);
+error3:
+ key_ref_put(keyring_ref);
+error2:
+ kfree(description);
+error:
+ return ret;
+}
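+
+/*
+ * Userspace sketch (assumes libkeyutils; not part of the upstream code):
+ * searching the session keyring tree for a "user" key and, if found,
+ * linking it into the thread keyring.
+ *
+ *   #include <keyutils.h>
+ *
+ *   long found = keyctl_search(KEY_SPEC_SESSION_KEYRING, "user",
+ *                              "example:token", KEY_SPEC_THREAD_KEYRING);
+ *   if (found == -1)
+ *           perror("keyctl_search");
+ */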
+
+/*
+ * Call the read method
+ */
+static long __keyctl_read_key(struct key *key, char *buffer, size_t buflen)
+{
+ long ret;
+
+ down_read(&key->sem);
+ ret = key_validate(key);
+ if (ret == 0)
+ ret = key->type->read(key, buffer, buflen);
+ up_read(&key->sem);
+ return ret;
+}
+
+/*
+ * Read a key's payload.
+ *
+ * The key must either grant the caller Read permission, or it must grant the
+ * caller Search permission when searched for from the process keyrings.
+ *
+ * If successful, we place up to buflen bytes of data into the buffer, if one
+ * is provided, and return the amount of data that is available in the key,
+ * irrespective of how much we copied into the buffer.
+ */
+long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+{
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+ char *key_data = NULL;
+ size_t key_data_len;
+
+ /* find the key first */
+ key_ref = lookup_user_key(keyid, 0, KEY_DEFER_PERM_CHECK);
+ if (IS_ERR(key_ref)) {
+ ret = -ENOKEY;
+ goto out;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ ret = key_read_state(key);
+ if (ret < 0)
+ goto key_put_out; /* Negatively instantiated */
+
+ /* see if we can read it directly */
+ ret = key_permission(key_ref, KEY_NEED_READ);
+ if (ret == 0)
+ goto can_read_key;
+ if (ret != -EACCES)
+ goto key_put_out;
+
+ /* we can't; see if it's searchable from this process's keyrings
+ * - we automatically take account of the fact that it may be
+ * dangling off an instantiation key
+ */
+ if (!is_key_possessed(key_ref)) {
+ ret = -EACCES;
+ goto key_put_out;
+ }
+
+ /* the key is probably readable - now try to read it */
+can_read_key:
+ if (!key->type->read) {
+ ret = -EOPNOTSUPP;
+ goto key_put_out;
+ }
+
+ if (!buffer || !buflen) {
+ /* Get the key length from the read method */
+ ret = __keyctl_read_key(key, NULL, 0);
+ goto key_put_out;
+ }
+
+ /*
+ * Read the data with the semaphore held (since we might sleep)
+ * to protect against the key being updated or revoked.
+ *
+	 * Allocate a temporary buffer to hold the key data before
+	 * transferring it to the user buffer, to avoid a potential
+	 * deadlock involving page faults and mmap_lock.
+ *
+ * key_data_len = (buflen <= PAGE_SIZE)
+ * ? buflen : actual length of key data
+ *
+ * This prevents allocating arbitrary large buffer which can
+ * be much larger than the actual key length. In the latter case,
+	 * at least 2 passes of this loop are required.
+ */
+ key_data_len = (buflen <= PAGE_SIZE) ? buflen : 0;
+ for (;;) {
+ if (key_data_len) {
+ key_data = kvmalloc(key_data_len, GFP_KERNEL);
+ if (!key_data) {
+ ret = -ENOMEM;
+ goto key_put_out;
+ }
+ }
+
+ ret = __keyctl_read_key(key, key_data, key_data_len);
+
+ /*
+ * Read methods will just return the required length without
+ * any copying if the provided length isn't large enough.
+ */
+ if (ret <= 0 || ret > buflen)
+ break;
+
+ /*
+ * The key may change (unlikely) in between 2 consecutive
+ * __keyctl_read_key() calls. In this case, we reallocate
+ * a larger buffer and redo the key read when
+ * key_data_len < ret <= buflen.
+ */
+ if (ret > key_data_len) {
+ if (unlikely(key_data))
+ kvfree_sensitive(key_data, key_data_len);
+ key_data_len = ret;
+ continue; /* Allocate buffer */
+ }
+
+ if (copy_to_user(buffer, key_data, ret))
+ ret = -EFAULT;
+ break;
+ }
+ kvfree_sensitive(key_data, key_data_len);
+
+key_put_out:
+ key_put(key);
+out:
+ return ret;
+}
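+
+/*
+ * Userspace sketch (assumes libkeyutils; not part of the upstream code):
+ * the usual two-step read - ask for the payload length first, then read
+ * into a buffer of that size.  use_payload() is a hypothetical consumer.
+ *
+ *   #include <keyutils.h>
+ *   #include <stdlib.h>
+ *
+ *   long len = keyctl_read(id, NULL, 0);
+ *   char *buf = len > 0 ? malloc(len) : NULL;
+ *   if (buf && keyctl_read(id, buf, len) == len)
+ *           use_payload(buf, len);
+ */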
+
+/*
+ * Change the ownership of a key
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. For the UID to be changed, or
+ * for the GID to be changed to a group the caller is not a member of, the
+ * caller must have sysadmin capability. If either uid or gid is -1 then that
+ * attribute is not changed.
+ *
+ * If the UID is to be changed, the new user must have sufficient quota to
+ * accept the key. The quota deduction will be transferred from the old user
+ * to the new user should the attribute be changed.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
+{
+ struct key_user *newowner, *zapowner = NULL;
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+ kuid_t uid;
+ kgid_t gid;
+
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+ ret = -EINVAL;
+ if ((user != (uid_t) -1) && !uid_valid(uid))
+ goto error;
+ if ((group != (gid_t) -1) && !gid_valid(gid))
+ goto error;
+
+ ret = 0;
+ if (user == (uid_t) -1 && group == (gid_t) -1)
+ goto error;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ /* make the changes with the locks held to prevent chown/chown races */
+ ret = -EACCES;
+ down_write(&key->sem);
+
+ {
+ bool is_privileged_op = false;
+
+ /* only the sysadmin can chown a key to some other UID */
+ if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
+ is_privileged_op = true;
+
+ /* only the sysadmin can set the key's GID to a group other
+ * than one of those that the current process subscribes to */
+ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
+ is_privileged_op = true;
+
+ if (is_privileged_op && !capable(CAP_SYS_ADMIN))
+ goto error_put;
+ }
+
+ /* change the UID */
+ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
+ ret = -ENOMEM;
+ newowner = key_user_lookup(uid);
+ if (!newowner)
+ goto error_put;
+
+ /* transfer the quota burden to the new user */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxkeys : key_quota_maxkeys;
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ spin_lock(&newowner->lock);
+ if (newowner->qnkeys + 1 > maxkeys ||
+ newowner->qnbytes + key->quotalen > maxbytes ||
+ newowner->qnbytes + key->quotalen <
+ newowner->qnbytes)
+ goto quota_overrun;
+
+ newowner->qnkeys++;
+ newowner->qnbytes += key->quotalen;
+ spin_unlock(&newowner->lock);
+
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
+ }
+
+ atomic_dec(&key->user->nkeys);
+ atomic_inc(&newowner->nkeys);
+
+ if (key->state != KEY_IS_UNINSTANTIATED) {
+ atomic_dec(&key->user->nikeys);
+ atomic_inc(&newowner->nikeys);
+ }
+
+ zapowner = key->user;
+ key->user = newowner;
+ key->uid = uid;
+ }
+
+ /* change the GID */
+ if (group != (gid_t) -1)
+ key->gid = gid;
+
+ notify_key(key, NOTIFY_KEY_SETATTR, 0);
+ ret = 0;
+
+error_put:
+ up_write(&key->sem);
+ key_put(key);
+ if (zapowner)
+ key_user_put(zapowner);
+error:
+ return ret;
+
+quota_overrun:
+ spin_unlock(&newowner->lock);
+ zapowner = newowner;
+ ret = -EDQUOT;
+ goto error_put;
+}
+
+/*
+ * Change the permission mask on a key.
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. If the caller does not have
+ * sysadmin capability, it may only change the permission on keys that it owns.
+ */
+long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
+{
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+
+ ret = -EINVAL;
+ if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
+ goto error;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ /* make the changes with the locks held to prevent chown/chmod races */
+ ret = -EACCES;
+ down_write(&key->sem);
+
+ /* if we're not the sysadmin, we can only change a key that we own */
+ if (uid_eq(key->uid, current_fsuid()) || capable(CAP_SYS_ADMIN)) {
+ key->perm = perm;
+ notify_key(key, NOTIFY_KEY_SETATTR, 0);
+ ret = 0;
+ }
+
+ up_write(&key->sem);
+ key_put(key);
+error:
+ return ret;
+}
+
+/*
+ * Get the destination keyring for instantiation and check that the caller has
+ * Write permission on it.
+ */
+static long get_instantiation_keyring(key_serial_t ringid,
+ struct request_key_auth *rka,
+ struct key **_dest_keyring)
+{
+ key_ref_t dkref;
+
+ *_dest_keyring = NULL;
+
+ /* just return a NULL pointer if we weren't asked to make a link */
+ if (ringid == 0)
+ return 0;
+
+ /* if a specific keyring is nominated by ID, then use that */
+ if (ringid > 0) {
+ dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(dkref))
+ return PTR_ERR(dkref);
+ *_dest_keyring = key_ref_to_ptr(dkref);
+ return 0;
+ }
+
+ if (ringid == KEY_SPEC_REQKEY_AUTH_KEY)
+ return -EINVAL;
+
+ /* otherwise specify the destination keyring recorded in the
+ * authorisation key (any KEY_SPEC_*_KEYRING) */
+ if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) {
+ *_dest_keyring = key_get(rka->dest_keyring);
+ return 0;
+ }
+
+ return -ENOKEY;
+}
+
+/*
+ * Change the request_key authorisation key on the current process.
+ */
+static int keyctl_change_reqkey_auth(struct key *key)
+{
+ struct cred *new;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ key_put(new->request_key_auth);
+ new->request_key_auth = key_get(key);
+
+ return commit_creds(new);
+}
+
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+static long keyctl_instantiate_key_common(key_serial_t id,
+ struct iov_iter *from,
+ key_serial_t ringid)
+{
+ const struct cred *cred = current_cred();
+ struct request_key_auth *rka;
+ struct key *instkey, *dest_keyring;
+ size_t plen = from ? iov_iter_count(from) : 0;
+ void *payload;
+ long ret;
+
+ kenter("%d,,%zu,%d", id, plen, ringid);
+
+ if (!plen)
+ from = NULL;
+
+ ret = -EINVAL;
+ if (plen > 1024 * 1024 - 1)
+ goto error;
+
+ /* the appropriate instantiation authorisation key must have been
+ * assumed before calling this */
+ ret = -EPERM;
+ instkey = cred->request_key_auth;
+ if (!instkey)
+ goto error;
+
+ rka = instkey->payload.data[0];
+ if (rka->target_key->serial != id)
+ goto error;
+
+ /* pull the payload in if one was supplied */
+ payload = NULL;
+
+ if (from) {
+ ret = -ENOMEM;
+ payload = kvmalloc(plen, GFP_KERNEL);
+ if (!payload)
+ goto error;
+
+ ret = -EFAULT;
+ if (!copy_from_iter_full(payload, plen, from))
+ goto error2;
+ }
+
+ /* find the destination keyring amongst those belonging to the
+ * requesting task */
+ ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
+ if (ret < 0)
+ goto error2;
+
+ /* instantiate the key and link it into a keyring */
+ ret = key_instantiate_and_link(rka->target_key, payload, plen,
+ dest_keyring, instkey);
+
+ key_put(dest_keyring);
+
+ /* discard the assumed authority if it's just been disabled by
+ * instantiation of the key */
+ if (ret == 0)
+ keyctl_change_reqkey_auth(NULL);
+
+error2:
+ kvfree_sensitive(payload, plen);
+error:
+ return ret;
+}
+
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key(key_serial_t id,
+ const void __user *_payload,
+ size_t plen,
+ key_serial_t ringid)
+{
+ if (_payload && plen) {
+ struct iovec iov;
+ struct iov_iter from;
+ int ret;
+
+ ret = import_single_range(WRITE, (void __user *)_payload, plen,
+ &iov, &from);
+ if (unlikely(ret))
+ return ret;
+
+ return keyctl_instantiate_key_common(id, &from, ringid);
+ }
+
+ return keyctl_instantiate_key_common(id, NULL, ringid);
+}
+
+/*
+ * Instantiate a key with the specified multipart payload and link the key into
+ * the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key_iov(key_serial_t id,
+ const struct iovec __user *_payload_iov,
+ unsigned ioc,
+ key_serial_t ringid)
+{
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ struct iov_iter from;
+ long ret;
+
+ if (!_payload_iov)
+ ioc = 0;
+
+ ret = import_iovec(WRITE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack), &iov, &from);
+ if (ret < 0)
+ return ret;
+ ret = keyctl_instantiate_key_common(id, &from, ringid);
+ kfree(iov);
+ return ret;
+}
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and link
+ * the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
+{
+ return keyctl_reject_key(id, timeout, ENOKEY, ringid);
+}
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and error
+ * code and link the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the specified error code until the negative key expires.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
+ key_serial_t ringid)
+{
+ const struct cred *cred = current_cred();
+ struct request_key_auth *rka;
+ struct key *instkey, *dest_keyring;
+ long ret;
+
+ kenter("%d,%u,%u,%d", id, timeout, error, ringid);
+
+ /* must be a valid error code and mustn't be a kernel special */
+ if (error <= 0 ||
+ error >= MAX_ERRNO ||
+ error == ERESTARTSYS ||
+ error == ERESTARTNOINTR ||
+ error == ERESTARTNOHAND ||
+ error == ERESTART_RESTARTBLOCK)
+ return -EINVAL;
+
+ /* the appropriate instantiation authorisation key must have been
+ * assumed before calling this */
+ ret = -EPERM;
+ instkey = cred->request_key_auth;
+ if (!instkey)
+ goto error;
+
+ rka = instkey->payload.data[0];
+ if (rka->target_key->serial != id)
+ goto error;
+
+ /* find the destination keyring if present (which must also be
+ * writable) */
+ ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
+ if (ret < 0)
+ goto error;
+
+ /* instantiate the key and link it into a keyring */
+ ret = key_reject_and_link(rka->target_key, timeout, error,
+ dest_keyring, instkey);
+
+ key_put(dest_keyring);
+
+ /* discard the assumed authority if it's just been disabled by
+ * instantiation of the key */
+ if (ret == 0)
+ keyctl_change_reqkey_auth(NULL);
+
+error:
+ return ret;
+}
+
+/*
+ * Read or set the default keyring in which request_key() will cache keys and
+ * return the old setting.
+ *
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist. The old setting will be returned if successful.
+ */
+long keyctl_set_reqkey_keyring(int reqkey_defl)
+{
+ struct cred *new;
+ int ret, old_setting;
+
+ old_setting = current_cred_xxx(jit_keyring);
+
+ if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE)
+ return old_setting;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ switch (reqkey_defl) {
+ case KEY_REQKEY_DEFL_THREAD_KEYRING:
+ ret = install_thread_keyring_to_cred(new);
+ if (ret < 0)
+ goto error;
+ goto set;
+
+ case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ ret = install_process_keyring_to_cred(new);
+ if (ret < 0)
+ goto error;
+ goto set;
+
+ case KEY_REQKEY_DEFL_DEFAULT:
+ case KEY_REQKEY_DEFL_SESSION_KEYRING:
+ case KEY_REQKEY_DEFL_USER_KEYRING:
+ case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+ case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
+ goto set;
+
+ case KEY_REQKEY_DEFL_NO_CHANGE:
+ case KEY_REQKEY_DEFL_GROUP_KEYRING:
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+
+set:
+ new->jit_keyring = reqkey_defl;
+ commit_creds(new);
+ return old_setting;
+error:
+ abort_creds(new);
+ return ret;
+}
+
+/*
+ * Set or clear the timeout on a key.
+ *
+ * Either the key must grant the caller Setattr permission or else the caller
+ * must hold an instantiation authorisation token for the key.
+ *
+ * The timeout is either 0 to clear the timeout, or a number of seconds from
+ * the current time. The key and any links to the key will be automatically
+ * garbage collected after the timeout expires.
+ *
+ * Keys with KEY_FLAG_KEEP set should not be timed out.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_set_timeout(key_serial_t id, unsigned timeout)
+{
+ struct key *key, *instkey;
+ key_ref_t key_ref;
+ long ret;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
+ KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref)) {
+ /* setting the timeout on a key under construction is permitted
+ * if we have the authorisation token handy */
+ if (PTR_ERR(key_ref) == -EACCES) {
+ instkey = key_get_instantiation_authkey(id);
+ if (!IS_ERR(instkey)) {
+ key_put(instkey);
+ key_ref = lookup_user_key(id,
+ KEY_LOOKUP_PARTIAL,
+ KEY_AUTHTOKEN_OVERRIDE);
+ if (!IS_ERR(key_ref))
+ goto okay;
+ }
+ }
+
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+okay:
+ key = key_ref_to_ptr(key_ref);
+ ret = 0;
+ if (test_bit(KEY_FLAG_KEEP, &key->flags)) {
+ ret = -EPERM;
+ } else {
+ key_set_timeout(key, timeout);
+ notify_key(key, NOTIFY_KEY_SETATTR, 0);
+ }
+ key_put(key);
+
+error:
+ return ret;
+}
+
+/*
+ * Assume (or clear) the authority to instantiate the specified key.
+ *
+ * This sets the authoritative token currently in force for key instantiation.
+ * This must be done for a key to be instantiated. It has the effect of making
+ * the keys of the process that called request_key() to create the key
+ * available to request_key() calls made by the caller of this function.
+ *
+ * The caller must have the instantiation key in their process keyrings with a
+ * Search permission grant available to the caller.
+ *
+ * If the ID given is 0, then the setting will be cleared and 0 returned.
+ *
+ * If the ID given matches an authorisation key, then that key will be
+ * set and its ID will be returned. The authorisation key can be read to get
+ * the callout information passed to request_key().
+ */
+long keyctl_assume_authority(key_serial_t id)
+{
+ struct key *authkey;
+ long ret;
+
+ /* special key IDs aren't permitted */
+ ret = -EINVAL;
+ if (id < 0)
+ goto error;
+
+ /* we divest ourselves of authority if given an ID of 0 */
+ if (id == 0) {
+ ret = keyctl_change_reqkey_auth(NULL);
+ goto error;
+ }
+
+ /* attempt to assume the authority temporarily granted to us whilst we
+ * instantiate the specified key
+ * - the authorisation key must be in the current task's keyrings
+ * somewhere
+ */
+ authkey = key_get_instantiation_authkey(id);
+ if (IS_ERR(authkey)) {
+ ret = PTR_ERR(authkey);
+ goto error;
+ }
+
+ ret = keyctl_change_reqkey_auth(authkey);
+ if (ret == 0)
+ ret = authkey->serial;
+ key_put(authkey);
+error:
+ return ret;
+}
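+
+/*
+ * Userspace sketch (assumes libkeyutils; not part of the upstream code):
+ * the core of a request-key helper - assume the authorisation token for
+ * the target key passed by /sbin/request-key, then instantiate it.  The
+ * target, payload and plen values are placeholders.
+ *
+ *   #include <keyutils.h>
+ *
+ *   if (keyctl_assume_authority(target) < 0)
+ *           return 1;
+ *   if (keyctl_instantiate(target, payload, plen,
+ *                          KEY_SPEC_REQUESTOR_KEYRING) < 0)
+ *           return 1;
+ */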
+
+/*
+ * Get a key's LSM security label.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, then up to buflen bytes of data will be placed into it.
+ *
+ * If successful, the amount of information available will be returned,
+ * irrespective of how much was copied (including the terminal NUL).
+ */
+long keyctl_get_security(key_serial_t keyid,
+ char __user *buffer,
+ size_t buflen)
+{
+ struct key *key, *instkey;
+ key_ref_t key_ref;
+ char *context;
+ long ret;
+
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
+ if (IS_ERR(key_ref)) {
+ if (PTR_ERR(key_ref) != -EACCES)
+ return PTR_ERR(key_ref);
+
+ /* viewing a key under construction is also permitted if we
+ * have the authorisation token handy */
+ instkey = key_get_instantiation_authkey(keyid);
+ if (IS_ERR(instkey))
+ return PTR_ERR(instkey);
+ key_put(instkey);
+
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL,
+ KEY_AUTHTOKEN_OVERRIDE);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+ }
+
+ key = key_ref_to_ptr(key_ref);
+ ret = security_key_getsecurity(key, &context);
+ if (ret == 0) {
+ /* if no information was returned, give userspace an empty
+ * string */
+ ret = 1;
+ if (buffer && buflen > 0 &&
+ copy_to_user(buffer, "", 1) != 0)
+ ret = -EFAULT;
+ } else if (ret > 0) {
+ /* return as much data as there's room for */
+ if (buffer && buflen > 0) {
+ if (buflen > ret)
+ buflen = ret;
+
+ if (copy_to_user(buffer, context, buflen) != 0)
+ ret = -EFAULT;
+ }
+
+ kfree(context);
+ }
+
+ key_ref_put(key_ref);
+ return ret;
+}
+
+/*
+ * Attempt to install the calling process's session keyring on the process's
+ * parent process.
+ *
+ * The keyring must exist and must grant the caller LINK permission, and the
+ * parent process must be single-threaded and must have the same effective
+ * ownership as this process and mustn't be SUID/SGID.
+ *
+ * The keyring will be emplaced on the parent when it next resumes userspace.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_session_to_parent(void)
+{
+ struct task_struct *me, *parent;
+ const struct cred *mycred, *pcred;
+ struct callback_head *newwork, *oldwork;
+ key_ref_t keyring_r;
+ struct cred *cred;
+ int ret;
+
+ keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
+ if (IS_ERR(keyring_r))
+ return PTR_ERR(keyring_r);
+
+ ret = -ENOMEM;
+
+ /* our parent is going to need a new cred struct, a new tgcred struct
+ * and new security data, so we allocate them here to prevent ENOMEM in
+ * our parent */
+ cred = cred_alloc_blank();
+ if (!cred)
+ goto error_keyring;
+ newwork = &cred->rcu;
+
+ cred->session_keyring = key_ref_to_ptr(keyring_r);
+ keyring_r = NULL;
+ init_task_work(newwork, key_change_session_keyring);
+
+ me = current;
+ rcu_read_lock();
+ write_lock_irq(&tasklist_lock);
+
+ ret = -EPERM;
+ oldwork = NULL;
+ parent = rcu_dereference_protected(me->real_parent,
+ lockdep_is_held(&tasklist_lock));
+
+ /* the parent mustn't be init and mustn't be a kernel thread */
+ if (parent->pid <= 1 || !parent->mm)
+ goto unlock;
+
+ /* the parent must be single threaded */
+ if (!thread_group_empty(parent))
+ goto unlock;
+
+ /* the parent and the child must have different session keyrings or
+ * there's no point */
+ mycred = current_cred();
+ pcred = __task_cred(parent);
+ if (mycred == pcred ||
+ mycred->session_keyring == pcred->session_keyring) {
+ ret = 0;
+ goto unlock;
+ }
+
+ /* the parent must have the same effective ownership and mustn't be
+ * SUID/SGID */
+ if (!uid_eq(pcred->uid, mycred->euid) ||
+ !uid_eq(pcred->euid, mycred->euid) ||
+ !uid_eq(pcred->suid, mycred->euid) ||
+ !gid_eq(pcred->gid, mycred->egid) ||
+ !gid_eq(pcred->egid, mycred->egid) ||
+ !gid_eq(pcred->sgid, mycred->egid))
+ goto unlock;
+
+ /* the keyrings must have the same UID */
+ if ((pcred->session_keyring &&
+ !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
+ !uid_eq(mycred->session_keyring->uid, mycred->euid))
+ goto unlock;
+
+ /* cancel an already pending keyring replacement */
+ oldwork = task_work_cancel(parent, key_change_session_keyring);
+
+ /* the replacement session keyring is applied just prior to userspace
+ * restarting */
+ ret = task_work_add(parent, newwork, TWA_RESUME);
+ if (!ret)
+ newwork = NULL;
+unlock:
+ write_unlock_irq(&tasklist_lock);
+ rcu_read_unlock();
+ if (oldwork)
+ put_cred(container_of(oldwork, struct cred, rcu));
+ if (newwork)
+ put_cred(cred);
+ return ret;
+
+error_keyring:
+ key_ref_put(keyring_r);
+ return ret;
+}
+
+/*
+ * Apply a restriction to a given keyring.
+ *
+ * The caller must have Setattr permission to change keyring restrictions.
+ *
+ * The requested type name may be a NULL pointer to reject all attempts
+ * to link to the keyring. In this case, _restriction must also be NULL.
+ * Otherwise, both _type and _restriction must be non-NULL.
+ *
+ * Returns 0 if successful.
+ */
+long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
+ const char __user *_restriction)
+{
+ key_ref_t key_ref;
+ char type[32];
+ char *restriction = NULL;
+ long ret;
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+
+ ret = -EINVAL;
+ if (_type) {
+ if (!_restriction)
+ goto error;
+
+ ret = key_get_type_from_user(type, _type, sizeof(type));
+ if (ret < 0)
+ goto error;
+
+ restriction = strndup_user(_restriction, PAGE_SIZE);
+ if (IS_ERR(restriction)) {
+ ret = PTR_ERR(restriction);
+ goto error;
+ }
+ } else {
+ if (_restriction)
+ goto error;
+ }
+
+ ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
+ kfree(restriction);
+error:
+ key_ref_put(key_ref);
+ return ret;
+}
+
+#ifdef CONFIG_KEY_NOTIFICATIONS
+/*
+ * Watch for changes to a key.
+ *
+ * The caller must have View permission to watch a key or keyring.
+ */
+long keyctl_watch_key(key_serial_t id, int watch_queue_fd, int watch_id)
+{
+ struct watch_queue *wqueue;
+ struct watch_list *wlist = NULL;
+ struct watch *watch = NULL;
+ struct key *key;
+ key_ref_t key_ref;
+ long ret;
+
+ if (watch_id < -1 || watch_id > 0xff)
+ return -EINVAL;
+
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_VIEW);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+ key = key_ref_to_ptr(key_ref);
+
+ wqueue = get_watch_queue(watch_queue_fd);
+ if (IS_ERR(wqueue)) {
+ ret = PTR_ERR(wqueue);
+ goto err_key;
+ }
+
+ if (watch_id >= 0) {
+ ret = -ENOMEM;
+ if (!key->watchers) {
+ wlist = kzalloc(sizeof(*wlist), GFP_KERNEL);
+ if (!wlist)
+ goto err_wqueue;
+ init_watch_list(wlist, NULL);
+ }
+
+ watch = kzalloc(sizeof(*watch), GFP_KERNEL);
+ if (!watch)
+ goto err_wlist;
+
+ init_watch(watch, wqueue);
+ watch->id = key->serial;
+ watch->info_id = (u32)watch_id << WATCH_INFO_ID__SHIFT;
+
+ ret = security_watch_key(key);
+ if (ret < 0)
+ goto err_watch;
+
+ down_write(&key->sem);
+ if (!key->watchers) {
+ key->watchers = wlist;
+ wlist = NULL;
+ }
+
+ ret = add_watch_to_object(watch, key->watchers);
+ up_write(&key->sem);
+
+ if (ret == 0)
+ watch = NULL;
+ } else {
+ ret = -EBADSLT;
+ if (key->watchers) {
+ down_write(&key->sem);
+ ret = remove_watch_from_object(key->watchers,
+ wqueue, key_serial(key),
+ false);
+ up_write(&key->sem);
+ }
+ }
+
+err_watch:
+ kfree(watch);
+err_wlist:
+ kfree(wlist);
+err_wqueue:
+ put_watch_queue(wqueue);
+err_key:
+ key_put(key);
+ return ret;
+}
+#endif /* CONFIG_KEY_NOTIFICATIONS */
+
+/*
+ * Get keyrings subsystem capabilities.
+ */
+long keyctl_capabilities(unsigned char __user *_buffer, size_t buflen)
+{
+ size_t size = buflen;
+
+ if (size > 0) {
+ if (size > sizeof(keyrings_capabilities))
+ size = sizeof(keyrings_capabilities);
+ if (copy_to_user(_buffer, keyrings_capabilities, size) != 0)
+ return -EFAULT;
+ if (size < buflen &&
+ clear_user(_buffer + size, buflen - size) != 0)
+ return -EFAULT;
+ }
+
+ return sizeof(keyrings_capabilities);
+}
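+
+/*
+ * Userspace sketch (assumes libkeyutils 1.6 or later; not part of the
+ * upstream code): fetching the capability bytes and testing one bit.
+ *
+ *   #include <keyutils.h>
+ *   #include <stdio.h>
+ *
+ *   unsigned char caps[2] = {};
+ *
+ *   if (keyctl_capabilities(caps, sizeof(caps)) >= 0 &&
+ *       (caps[0] & KEYCTL_CAPS0_MOVE))
+ *           puts("KEYCTL_MOVE is supported");
+ */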
+
+/*
+ * The key control system call
+ */
+SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
+ unsigned long, arg4, unsigned long, arg5)
+{
+ switch (option) {
+ case KEYCTL_GET_KEYRING_ID:
+ return keyctl_get_keyring_ID((key_serial_t) arg2,
+ (int) arg3);
+
+ case KEYCTL_JOIN_SESSION_KEYRING:
+ return keyctl_join_session_keyring((const char __user *) arg2);
+
+ case KEYCTL_UPDATE:
+ return keyctl_update_key((key_serial_t) arg2,
+ (const void __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_REVOKE:
+ return keyctl_revoke_key((key_serial_t) arg2);
+
+ case KEYCTL_DESCRIBE:
+ return keyctl_describe_key((key_serial_t) arg2,
+ (char __user *) arg3,
+ (unsigned) arg4);
+
+ case KEYCTL_CLEAR:
+ return keyctl_keyring_clear((key_serial_t) arg2);
+
+ case KEYCTL_LINK:
+ return keyctl_keyring_link((key_serial_t) arg2,
+ (key_serial_t) arg3);
+
+ case KEYCTL_UNLINK:
+ return keyctl_keyring_unlink((key_serial_t) arg2,
+ (key_serial_t) arg3);
+
+ case KEYCTL_SEARCH:
+ return keyctl_keyring_search((key_serial_t) arg2,
+ (const char __user *) arg3,
+ (const char __user *) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_READ:
+ return keyctl_read_key((key_serial_t) arg2,
+ (char __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_CHOWN:
+ return keyctl_chown_key((key_serial_t) arg2,
+ (uid_t) arg3,
+ (gid_t) arg4);
+
+ case KEYCTL_SETPERM:
+ return keyctl_setperm_key((key_serial_t) arg2,
+ (key_perm_t) arg3);
+
+ case KEYCTL_INSTANTIATE:
+ return keyctl_instantiate_key((key_serial_t) arg2,
+ (const void __user *) arg3,
+ (size_t) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_NEGATE:
+ return keyctl_negate_key((key_serial_t) arg2,
+ (unsigned) arg3,
+ (key_serial_t) arg4);
+
+ case KEYCTL_SET_REQKEY_KEYRING:
+ return keyctl_set_reqkey_keyring(arg2);
+
+ case KEYCTL_SET_TIMEOUT:
+ return keyctl_set_timeout((key_serial_t) arg2,
+ (unsigned) arg3);
+
+ case KEYCTL_ASSUME_AUTHORITY:
+ return keyctl_assume_authority((key_serial_t) arg2);
+
+ case KEYCTL_GET_SECURITY:
+ return keyctl_get_security((key_serial_t) arg2,
+ (char __user *) arg3,
+ (size_t) arg4);
+
+ case KEYCTL_SESSION_TO_PARENT:
+ return keyctl_session_to_parent();
+
+ case KEYCTL_REJECT:
+ return keyctl_reject_key((key_serial_t) arg2,
+ (unsigned) arg3,
+ (unsigned) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_INSTANTIATE_IOV:
+ return keyctl_instantiate_key_iov(
+ (key_serial_t) arg2,
+ (const struct iovec __user *) arg3,
+ (unsigned) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key((key_serial_t) arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
+
+ case KEYCTL_DH_COMPUTE:
+ return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
+ (char __user *) arg3, (size_t) arg4,
+ (struct keyctl_kdf_params __user *) arg5);
+
+ case KEYCTL_RESTRICT_KEYRING:
+ return keyctl_restrict_keyring((key_serial_t) arg2,
+ (const char __user *) arg3,
+ (const char __user *) arg4);
+
+ case KEYCTL_PKEY_QUERY:
+ if (arg3 != 0)
+ return -EINVAL;
+ return keyctl_pkey_query((key_serial_t)arg2,
+ (const char __user *)arg4,
+ (struct keyctl_pkey_query __user *)arg5);
+
+ case KEYCTL_PKEY_ENCRYPT:
+ case KEYCTL_PKEY_DECRYPT:
+ case KEYCTL_PKEY_SIGN:
+ return keyctl_pkey_e_d_s(
+ option,
+ (const struct keyctl_pkey_params __user *)arg2,
+ (const char __user *)arg3,
+ (const void __user *)arg4,
+ (void __user *)arg5);
+
+ case KEYCTL_PKEY_VERIFY:
+ return keyctl_pkey_verify(
+ (const struct keyctl_pkey_params __user *)arg2,
+ (const char __user *)arg3,
+ (const void __user *)arg4,
+ (const void __user *)arg5);
+
+ case KEYCTL_MOVE:
+ return keyctl_keyring_move((key_serial_t)arg2,
+ (key_serial_t)arg3,
+ (key_serial_t)arg4,
+ (unsigned int)arg5);
+
+ case KEYCTL_CAPABILITIES:
+ return keyctl_capabilities((unsigned char __user *)arg2, (size_t)arg3);
+
+ case KEYCTL_WATCH_KEY:
+ return keyctl_watch_key((key_serial_t)arg2, (int)arg3, (int)arg4);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
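
As a reading aid (not part of the patch): the switch above is what the raw keyctl(2) multiplexer dispatches on, and the keyutils library's generic keyctl() wrapper passes its arguments straight through as arg2..arg5. A minimal userspace sketch of the KEYCTL_DESCRIBE branch, assuming libkeyutils is installed; the key description and payload are illustrative:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	char buf[256];
	key_serial_t id;
	long n;

	/* Add an illustrative "user" key to the session keyring. */
	id = add_key("user", "example:desc", "payload", 7,
		     KEY_SPEC_SESSION_KEYRING);
	if (id == -1)
		return 1;

	/* Dispatches to keyctl_describe_key() via the switch above. */
	n = keyctl(KEYCTL_DESCRIBE, id, buf, sizeof(buf));
	if (n < 0)
		return 1;

	printf("%s\n", buf);
	return 0;
}
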
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
new file mode 100644
index 000000000..63e5c646f
--- /dev/null
+++ b/security/keys/keyctl_pkey.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Public-key operation keyctls
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/parser.h>
+#include <linux/uaccess.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
+{
+ kfree(params->info);
+ key_put(params->key);
+}
+
+enum {
+ Opt_err,
+ Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */
+ Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */
+};
+
+static const match_table_t param_keys = {
+ { Opt_enc, "enc=%s" },
+ { Opt_hash, "hash=%s" },
+ { Opt_err, NULL }
+};
+
+/*
+ * Parse the information string which consists of key=val pairs.
+ */
+static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
+{
+ unsigned long token_mask = 0;
+ substring_t args[MAX_OPT_ARGS];
+ char *c = params->info, *p, *q;
+ int token;
+
+ while ((p = strsep(&c, " \t"))) {
+ if (*p == '\0' || *p == ' ' || *p == '\t')
+ continue;
+ token = match_token(p, param_keys, args);
+ if (token == Opt_err)
+ return -EINVAL;
+ if (__test_and_set_bit(token, &token_mask))
+ return -EINVAL;
+ q = args[0].from;
+ if (!q[0])
+ return -EINVAL;
+
+ switch (token) {
+ case Opt_enc:
+ params->encoding = q;
+ break;
+
+ case Opt_hash:
+ params->hash_algo = q;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Interpret parameters. Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get(key_serial_t id,
+ const char __user *_info,
+ struct kernel_pkey_params *params)
+{
+ key_ref_t key_ref;
+ void *p;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->encoding = "raw";
+
+ p = strndup_user(_info, PAGE_SIZE);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+ params->info = p;
+
+ ret = keyctl_pkey_params_parse(params);
+ if (ret < 0)
+ return ret;
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref))
+ return PTR_ERR(key_ref);
+ params->key = key_ref_to_ptr(key_ref);
+
+ if (!params->key->type->asym_query)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/*
+ * Get parameters from userspace. Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ int op,
+ struct kernel_pkey_params *params)
+{
+ struct keyctl_pkey_params uparams;
+ struct kernel_pkey_query info;
+ int ret;
+
+ memset(params, 0, sizeof(*params));
+ params->encoding = "raw";
+
+ if (copy_from_user(&uparams, _params, sizeof(uparams)) != 0)
+ return -EFAULT;
+
+ ret = keyctl_pkey_params_get(uparams.key_id, _info, params);
+ if (ret < 0)
+ return ret;
+
+ ret = params->key->type->asym_query(params, &info);
+ if (ret < 0)
+ return ret;
+
+ switch (op) {
+ case KEYCTL_PKEY_ENCRYPT:
+ if (uparams.in_len > info.max_dec_size ||
+ uparams.out_len > info.max_enc_size)
+ return -EINVAL;
+ break;
+ case KEYCTL_PKEY_DECRYPT:
+ if (uparams.in_len > info.max_enc_size ||
+ uparams.out_len > info.max_dec_size)
+ return -EINVAL;
+ break;
+ case KEYCTL_PKEY_SIGN:
+ if (uparams.in_len > info.max_data_size ||
+ uparams.out_len > info.max_sig_size)
+ return -EINVAL;
+ break;
+ case KEYCTL_PKEY_VERIFY:
+ if (uparams.in_len > info.max_data_size ||
+ uparams.in2_len > info.max_sig_size)
+ return -EINVAL;
+ break;
+ default:
+ BUG();
+ }
+
+ params->in_len = uparams.in_len;
+ params->out_len = uparams.out_len; /* Note: same as in2_len */
+ return 0;
+}
+
+/*
+ * Query information about an asymmetric key.
+ */
+long keyctl_pkey_query(key_serial_t id,
+ const char __user *_info,
+ struct keyctl_pkey_query __user *_res)
+{
+ struct kernel_pkey_params params;
+ struct kernel_pkey_query res;
+ long ret;
+
+ memset(&params, 0, sizeof(params));
+
+ ret = keyctl_pkey_params_get(id, _info, &params);
+ if (ret < 0)
+ goto error;
+
+ ret = params.key->type->asym_query(&params, &res);
+ if (ret < 0)
+ goto error;
+
+ ret = -EFAULT;
+ if (copy_to_user(_res, &res, sizeof(res)) == 0 &&
+ clear_user(_res->__spare, sizeof(_res->__spare)) == 0)
+ ret = 0;
+
+error:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
+
+/*
+ * Encrypt/decrypt/sign
+ *
+ * Encrypt data, decrypt data or sign data using a public key.
+ *
+ * _info is a string of supplementary information in key=val format. For
+ * instance, it might contain:
+ *
+ * "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the encoding and hash= selects the OID to go in that
+ * particular encoding if required. If enc= isn't supplied, it's assumed that
+ * the caller is supplying raw values.
+ *
+ * If successful, the amount of data written into the output buffer is
+ * returned.
+ */
+long keyctl_pkey_e_d_s(int op,
+ const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ const void __user *_in,
+ void __user *_out)
+{
+ struct kernel_pkey_params params;
+ void *in, *out;
+ long ret;
+
+ ret = keyctl_pkey_params_get_2(_params, _info, op, &params);
+ if (ret < 0)
+ goto error_params;
+
+ ret = -EOPNOTSUPP;
+ if (!params.key->type->asym_eds_op)
+ goto error_params;
+
+ switch (op) {
+ case KEYCTL_PKEY_ENCRYPT:
+ params.op = kernel_pkey_encrypt;
+ break;
+ case KEYCTL_PKEY_DECRYPT:
+ params.op = kernel_pkey_decrypt;
+ break;
+ case KEYCTL_PKEY_SIGN:
+ params.op = kernel_pkey_sign;
+ break;
+ default:
+ BUG();
+ }
+
+ in = memdup_user(_in, params.in_len);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto error_params;
+ }
+
+ ret = -ENOMEM;
+ out = kmalloc(params.out_len, GFP_KERNEL);
+ if (!out)
+ goto error_in;
+
+ ret = params.key->type->asym_eds_op(&params, in, out);
+ if (ret < 0)
+ goto error_out;
+
+ if (copy_to_user(_out, out, ret) != 0)
+ ret = -EFAULT;
+
+error_out:
+ kfree(out);
+error_in:
+ kfree(in);
+error_params:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
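
The _info string and the query-driven length checks above can be exercised from userspace through the raw syscall. A hedged sketch (not part of the patch) using only the uapi header; the key ID, digest and the "enc=pkcs1 hash=sha256" info string are illustrative and assume an "asymmetric" key has already been loaded:

#include <linux/keyctl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long pkey_sign(int32_t key, const void *digest, uint32_t dlen,
		      void **sig, long *siglen)
{
	static const char info[] = "enc=pkcs1 hash=sha256";
	struct keyctl_pkey_query q;
	struct keyctl_pkey_params p;
	long ret;

	/* KEYCTL_PKEY_QUERY: arg3 must be 0, as checked in sys_keyctl(). */
	ret = syscall(__NR_keyctl, KEYCTL_PKEY_QUERY, key, 0, info, &q);
	if (ret < 0)
		return ret;

	*sig = malloc(q.max_sig_size);
	if (!*sig)
		return -1;

	memset(&p, 0, sizeof(p));
	p.key_id = key;
	p.in_len = dlen;
	p.out_len = q.max_sig_size;

	/* On success the kernel returns the number of signature bytes. */
	*siglen = syscall(__NR_keyctl, KEYCTL_PKEY_SIGN, &p, info,
			  digest, *sig);
	return *siglen;
}
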
+
+/*
+ * Verify a signature.
+ *
+ * Verify a public key signature using the given key, or if not given, search
+ * for a matching key.
+ *
+ * _info is a string of supplementary information in key=val format. For
+ * instance, it might contain:
+ *
+ * "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the signature blob encoding and hash= selects the OID
+ * to go in that particular encoding. If enc= isn't supplied, it's assumed
+ * that the caller is supplying raw values.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_pkey_verify(const struct keyctl_pkey_params __user *_params,
+ const char __user *_info,
+ const void __user *_in,
+ const void __user *_in2)
+{
+ struct kernel_pkey_params params;
+ void *in, *in2;
+ long ret;
+
+ ret = keyctl_pkey_params_get_2(_params, _info, KEYCTL_PKEY_VERIFY,
+ &params);
+ if (ret < 0)
+ goto error_params;
+
+ ret = -EOPNOTSUPP;
+ if (!params.key->type->asym_verify_signature)
+ goto error_params;
+
+ in = memdup_user(_in, params.in_len);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto error_params;
+ }
+
+ in2 = memdup_user(_in2, params.in2_len);
+ if (IS_ERR(in2)) {
+ ret = PTR_ERR(in2);
+ goto error_in;
+ }
+
+ params.op = kernel_pkey_verify;
+ ret = params.key->type->asym_verify_signature(&params, in, in2);
+
+ kfree(in2);
+error_in:
+ kfree(in);
+error_params:
+ keyctl_pkey_params_free(&params);
+ return ret;
+}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
new file mode 100644
index 000000000..14abfe765
--- /dev/null
+++ b/security/keys/keyring.c
@@ -0,0 +1,1794 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Keyring handling
+ *
+ * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <linux/user_namespace.h>
+#include <linux/nsproxy.h>
+#include <keys/keyring-type.h>
+#include <keys/user-type.h>
+#include <linux/assoc_array_priv.h>
+#include <linux/uaccess.h>
+#include <net/net_namespace.h>
+#include "internal.h"
+
+/*
+ * When plumbing the depths of the key tree, this sets a hard limit
+ * on how deep we're willing to go.
+ */
+#define KEYRING_SEARCH_MAX_DEPTH 6
+
+/*
+ * We mark pointers we pass to the associative array with bit 1 set if
+ * they're keyrings and clear otherwise.
+ */
+#define KEYRING_PTR_SUBTYPE 0x2UL
+
+static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & KEYRING_PTR_SUBTYPE;
+}
+static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
+{
+ void *object = assoc_array_ptr_to_leaf(x);
+ return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
+}
+static inline void *keyring_key_to_ptr(struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
+ return key;
+}
+
+static DEFINE_RWLOCK(keyring_name_lock);
+
+/*
+ * Clean up the bits of user_namespace that belong to us.
+ */
+void key_free_user_ns(struct user_namespace *ns)
+{
+ write_lock(&keyring_name_lock);
+ list_del_init(&ns->keyring_name_list);
+ write_unlock(&keyring_name_lock);
+
+ key_put(ns->user_keyring_register);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ key_put(ns->persistent_keyring_register);
+#endif
+}
+
+/*
+ * The keyring key type definition. Keyrings are simply keys of this type and
+ * can be treated as ordinary keys in addition to having their own special
+ * operations.
+ */
+static int keyring_preparse(struct key_preparsed_payload *prep);
+static void keyring_free_preparse(struct key_preparsed_payload *prep);
+static int keyring_instantiate(struct key *keyring,
+ struct key_preparsed_payload *prep);
+static void keyring_revoke(struct key *keyring);
+static void keyring_destroy(struct key *keyring);
+static void keyring_describe(const struct key *keyring, struct seq_file *m);
+static long keyring_read(const struct key *keyring,
+ char __user *buffer, size_t buflen);
+
+struct key_type key_type_keyring = {
+ .name = "keyring",
+ .def_datalen = 0,
+ .preparse = keyring_preparse,
+ .free_preparse = keyring_free_preparse,
+ .instantiate = keyring_instantiate,
+ .revoke = keyring_revoke,
+ .destroy = keyring_destroy,
+ .describe = keyring_describe,
+ .read = keyring_read,
+};
+EXPORT_SYMBOL(key_type_keyring);
+
+/*
+ * Mutex to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle.
+ */
+static DEFINE_MUTEX(keyring_serialise_link_lock);
+
+/*
+ * Publish the name of a keyring so that it can be found by name (if it has
+ * one and it doesn't begin with a dot).
+ */
+static void keyring_publish_name(struct key *keyring)
+{
+ struct user_namespace *ns = current_user_ns();
+
+ if (keyring->description &&
+ keyring->description[0] &&
+ keyring->description[0] != '.') {
+ write_lock(&keyring_name_lock);
+ list_add_tail(&keyring->name_link, &ns->keyring_name_list);
+ write_unlock(&keyring_name_lock);
+ }
+}
+
+/*
+ * Preparse a keyring payload
+ */
+static int keyring_preparse(struct key_preparsed_payload *prep)
+{
+ return prep->datalen != 0 ? -EINVAL : 0;
+}
+
+/*
+ * Free a preparse of a keyring key payload
+ */
+static void keyring_free_preparse(struct key_preparsed_payload *prep)
+{
+}
+
+/*
+ * Initialise a keyring.
+ *
+ * Returns 0 on success; keyring_preparse() has already rejected any payload data.
+ */
+static int keyring_instantiate(struct key *keyring,
+ struct key_preparsed_payload *prep)
+{
+ assoc_array_init(&keyring->keys);
+ /* make the keyring available by name if it has one */
+ keyring_publish_name(keyring);
+ return 0;
+}
+
+/*
+ * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd
+ * fold the carry back too, but that requires inline asm.
+ */
+static u64 mult_64x32_and_fold(u64 x, u32 y)
+{
+ u64 hi = (u64)(u32)(x >> 32) * y;
+ u64 lo = (u64)(u32)(x) * y;
+ return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
+}
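
The fold can be sanity-checked in isolation; a small userspace sketch (not kernel code) that recomputes two cases by hand:

#include <assert.h>
#include <stdint.h>

static uint64_t mult_64x32_and_fold(uint64_t x, uint32_t y)
{
	uint64_t hi = (uint64_t)(uint32_t)(x >> 32) * y;
	uint64_t lo = (uint64_t)(uint32_t)x * y;

	return lo + ((uint64_t)(uint32_t)hi << 32) + (uint32_t)(hi >> 32);
}

int main(void)
{
	/* 0x100000000 * 3 = 0x300000000: fits in 64 bits, nothing to fold. */
	assert(mult_64x32_and_fold(0x100000000ULL, 3) == 0x300000000ULL);

	/* (2^64 - 1) * (2^32 - 1) overflows 64 bits; the top 32 bits of the
	 * 96-bit product are folded back in as an extra addend. */
	assert(mult_64x32_and_fold(~0ULL, ~0U) == 0xFFFFFFFFFFFFFFFFULL);
	return 0;
}
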
+
+/*
+ * Hash a key type and description.
+ */
+static void hash_key_type_and_desc(struct keyring_index_key *index_key)
+{
+ const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
+ const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
+ const char *description = index_key->description;
+ unsigned long hash, type;
+ u32 piece;
+ u64 acc;
+ int n, desc_len = index_key->desc_len;
+
+ type = (unsigned long)index_key->type;
+ acc = mult_64x32_and_fold(type, desc_len + 13);
+ acc = mult_64x32_and_fold(acc, 9207);
+ piece = (unsigned long)index_key->domain_tag;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+
+ for (;;) {
+ n = desc_len;
+ if (n <= 0)
+ break;
+ if (n > 4)
+ n = 4;
+ piece = 0;
+ memcpy(&piece, description, n);
+ description += n;
+ desc_len -= n;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+ }
+
+ /* Fold the hash down to 32 bits if need be. */
+ hash = acc;
+ if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
+ hash ^= acc >> 32;
+
+ /* Squidge all the keyrings into a separate part of the tree from
+ * ordinary keys by making sure the lowest level segment in the hash is
+ * zero for keyrings and non-zero otherwise.
+ */
+ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
+ hash |= (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ else if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ hash = (hash + (hash << level_shift)) & ~fan_mask;
+ index_key->hash = hash;
+}
+
+/*
+ * Finalise an index key to include a part of the description actually in the
+ * index key, to set the domain tag and to calculate the hash.
+ */
+void key_set_index_key(struct keyring_index_key *index_key)
+{
+ static struct key_tag default_domain_tag = { .usage = REFCOUNT_INIT(1), };
+ size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc));
+
+ memcpy(index_key->desc, index_key->description, n);
+
+ if (!index_key->domain_tag) {
+ if (index_key->type->flags & KEY_TYPE_NET_DOMAIN)
+ index_key->domain_tag = current->nsproxy->net_ns->key_domain;
+ else
+ index_key->domain_tag = &default_domain_tag;
+ }
+
+ hash_key_type_and_desc(index_key);
+}
+
+/**
+ * key_put_tag - Release a ref on a tag.
+ * @tag: The tag to release.
+ *
+ * This releases a reference on the given tag and returns true if that ref was the
+ * last one.
+ */
+bool key_put_tag(struct key_tag *tag)
+{
+ if (refcount_dec_and_test(&tag->usage)) {
+ kfree_rcu(tag, rcu);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * key_remove_domain - Kill off a key domain and gc its keys
+ * @domain_tag: The domain tag to release.
+ *
+ * This marks a domain tag as being dead and releases a ref on it. If that
+ * wasn't the last reference, the garbage collector is poked to try and delete
+ * all keys that were in the domain.
+ */
+void key_remove_domain(struct key_tag *domain_tag)
+{
+ domain_tag->removed = true;
+ if (!key_put_tag(domain_tag))
+ key_schedule_gc_links();
+}
+
+/*
+ * Build the next index key chunk.
+ *
+ * We return it one word-sized chunk at a time.
+ */
+static unsigned long keyring_get_key_chunk(const void *data, int level)
+{
+ const struct keyring_index_key *index_key = data;
+ unsigned long chunk = 0;
+ const u8 *d;
+ int desc_len = index_key->desc_len, n = sizeof(chunk);
+
+ level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ switch (level) {
+ case 0:
+ return index_key->hash;
+ case 1:
+ return index_key->x;
+ case 2:
+ return (unsigned long)index_key->type;
+ case 3:
+ return (unsigned long)index_key->domain_tag;
+ default:
+ level -= 4;
+ if (desc_len <= sizeof(index_key->desc))
+ return 0;
+
+ d = index_key->description + sizeof(index_key->desc);
+ d += level * sizeof(long);
+ desc_len -= sizeof(index_key->desc);
+ if (desc_len > n)
+ desc_len = n;
+ do {
+ chunk <<= 8;
+ chunk |= *d++;
+ } while (--desc_len > 0);
+ return chunk;
+ }
+}
+
+static unsigned long keyring_get_object_key_chunk(const void *object, int level)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ return keyring_get_key_chunk(&key->index_key, level);
+}
+
+static bool keyring_compare_object(const void *object, const void *data)
+{
+ const struct keyring_index_key *index_key = data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ return key->index_key.type == index_key->type &&
+ key->index_key.domain_tag == index_key->domain_tag &&
+ key->index_key.desc_len == index_key->desc_len &&
+ memcmp(key->index_key.description, index_key->description,
+ index_key->desc_len) == 0;
+}
+
+/*
+ * Compare the index keys of a pair of objects and determine the bit position
+ * at which they differ - if they differ.
+ */
+static int keyring_diff_objects(const void *object, const void *data)
+{
+ const struct key *key_a = keyring_ptr_to_key(object);
+ const struct keyring_index_key *a = &key_a->index_key;
+ const struct keyring_index_key *b = data;
+ unsigned long seg_a, seg_b;
+ int level, i;
+
+ level = 0;
+ seg_a = a->hash;
+ seg_b = b->hash;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
+
+ /* The number of bits contributed by the hash is controlled by a
+ * constant in the assoc_array headers. Everything else thereafter we
+ * can deal with as being machine word-size dependent.
+ */
+ seg_a = a->x;
+ seg_b = b->x;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += sizeof(unsigned long);
+
+ /* The next bit may not work on big endian */
+ seg_a = (unsigned long)a->type;
+ seg_b = (unsigned long)b->type;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += sizeof(unsigned long);
+
+ seg_a = (unsigned long)a->domain_tag;
+ seg_b = (unsigned long)b->domain_tag;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+ level += sizeof(unsigned long);
+
+ i = sizeof(a->desc);
+ if (a->desc_len <= i)
+ goto same;
+
+ for (; i < a->desc_len; i++) {
+ seg_a = *(unsigned char *)(a->description + i);
+ seg_b = *(unsigned char *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ }
+
+same:
+ return -1;
+
+differ_plus_i:
+ level += i;
+differ:
+ i = level * 8 + __ffs(seg_a ^ seg_b);
+ return i;
+}
+
+/*
+ * Free an object after stripping the keyring flag off of the pointer.
+ */
+static void keyring_free_object(void *object)
+{
+ key_put(keyring_ptr_to_key(object));
+}
+
+/*
+ * Operations for keyring management by the index-tree routines.
+ */
+static const struct assoc_array_ops keyring_assoc_array_ops = {
+ .get_key_chunk = keyring_get_key_chunk,
+ .get_object_key_chunk = keyring_get_object_key_chunk,
+ .compare_object = keyring_compare_object,
+ .diff_objects = keyring_diff_objects,
+ .free_object = keyring_free_object,
+};
+
+/*
+ * Clean up a keyring when it is destroyed. Unpublish its name if it had one
+ * and dispose of its data.
+ *
+ * The garbage collector detects the final key_put(), removes the keyring from
+ * the serial number tree and then does RCU synchronisation before coming here,
+ * so we shouldn't need to worry about code poking around here with the RCU
+ * readlock held by this time.
+ */
+static void keyring_destroy(struct key *keyring)
+{
+ if (keyring->description) {
+ write_lock(&keyring_name_lock);
+
+ if (keyring->name_link.next != NULL &&
+ !list_empty(&keyring->name_link))
+ list_del(&keyring->name_link);
+
+ write_unlock(&keyring_name_lock);
+ }
+
+ if (keyring->restrict_link) {
+ struct key_restriction *keyres = keyring->restrict_link;
+
+ key_put(keyres->key);
+ kfree(keyres);
+ }
+
+ assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
+}
+
+/*
+ * Describe a keyring for /proc.
+ */
+static void keyring_describe(const struct key *keyring, struct seq_file *m)
+{
+ if (keyring->description)
+ seq_puts(m, keyring->description);
+ else
+ seq_puts(m, "[anon]");
+
+ if (key_is_positive(keyring)) {
+ if (keyring->keys.nr_leaves_on_tree != 0)
+ seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
+ else
+ seq_puts(m, ": empty");
+ }
+}
+
+struct keyring_read_iterator_context {
+ size_t buflen;
+ size_t count;
+ key_serial_t __user *buffer;
+};
+
+static int keyring_read_iterator(const void *object, void *data)
+{
+ struct keyring_read_iterator_context *ctx = data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%s,%d},,{%zu/%zu}",
+ key->type->name, key->serial, ctx->count, ctx->buflen);
+
+ if (ctx->count >= ctx->buflen)
+ return 1;
+
+ *ctx->buffer++ = key->serial;
+ ctx->count += sizeof(key->serial);
+ return 0;
+}
+
+/*
+ * Read a list of key IDs from the keyring's contents in binary form
+ *
+ * The keyring's semaphore is read-locked by the caller. This prevents someone
+ * from modifying it under us - which could cause us to read key IDs multiple
+ * times.
+ */
+static long keyring_read(const struct key *keyring,
+ char __user *buffer, size_t buflen)
+{
+ struct keyring_read_iterator_context ctx;
+ long ret;
+
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
+
+ /* Copy as many key IDs as fit into the buffer */
+ if (buffer && buflen) {
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.buflen = buflen;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys,
+ keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %ld [iterate]", ret);
+ return ret;
+ }
+ }
+
+ /* Return the size of the buffer needed */
+ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
+ if (ret <= buflen)
+ kleave("= %ld [ok]", ret);
+ else
+ kleave("= %ld [buffer too small]", ret);
+ return ret;
+}
+
+/*
+ * Allocate a keyring and link it into the destination keyring.
+ */
+struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
+ const struct cred *cred, key_perm_t perm,
+ unsigned long flags,
+ struct key_restriction *restrict_link,
+ struct key *dest)
+{
+ struct key *keyring;
+ int ret;
+
+ keyring = key_alloc(&key_type_keyring, description,
+ uid, gid, cred, perm, flags, restrict_link);
+ if (!IS_ERR(keyring)) {
+ ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
+ if (ret < 0) {
+ key_put(keyring);
+ keyring = ERR_PTR(ret);
+ }
+ }
+
+ return keyring;
+}
+EXPORT_SYMBOL(keyring_alloc);
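
keyring_alloc() is the in-kernel entry point other subsystems use to create their own keyrings. A hedged sketch of a hypothetical module doing so; the keyring name and permission mask are illustrative, the pattern follows existing in-tree callers:

#include <linux/cred.h>
#include <linux/err.h>
#include <linux/key.h>
#include <linux/module.h>

static struct key *example_keyring;

static int __init example_keyring_init(void)
{
	example_keyring = keyring_alloc(".example_keys",
					GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
					current_cred(),
					(KEY_POS_ALL & ~KEY_POS_SETATTR) |
					KEY_USR_VIEW | KEY_USR_READ |
					KEY_USR_SEARCH,
					KEY_ALLOC_NOT_IN_QUOTA,
					NULL, NULL);
	return PTR_ERR_OR_ZERO(example_keyring);
}

static void __exit example_keyring_exit(void)
{
	key_put(example_keyring);
}

module_init(example_keyring_init);
module_exit(example_keyring_exit);
MODULE_LICENSE("GPL");
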
+
+/**
+ * restrict_link_reject - Give -EPERM to restrict link
+ * @keyring: The keyring being added to.
+ * @type: The type of key being added.
+ * @payload: The payload of the key intended to be added.
+ * @restriction_key: Keys providing additional data for evaluating restriction.
+ *
+ * Reject the addition of any links to a keyring. It can be overridden by
+ * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when
+ * adding a key to a keyring.
+ *
+ * This is meant to be stored in a key_restriction structure which is passed
+ * in the restrict_link parameter to keyring_alloc().
+ */
+int restrict_link_reject(struct key *keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key)
+{
+ return -EPERM;
+}
+
+/*
+ * By default, keys are found by getting an exact match on their descriptions.
+ */
+bool key_default_cmp(const struct key *key,
+ const struct key_match_data *match_data)
+{
+ return strcmp(key->description, match_data->raw_data) == 0;
+}
+
+/*
+ * Iteration function to consider each key found.
+ */
+static int keyring_search_iterator(const void *object, void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+ unsigned long kflags = READ_ONCE(key->flags);
+ short state = READ_ONCE(key->state);
+
+ kenter("{%d}", key->serial);
+
+ /* ignore keys not of this type */
+ if (key->type != ctx->index_key.type) {
+ kleave(" = 0 [!type]");
+ return 0;
+ }
+
+ /* skip invalidated, revoked and expired keys */
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ time64_t expiry = READ_ONCE(key->expiry);
+
+ if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ ctx->result = ERR_PTR(-EKEYREVOKED);
+ kleave(" = %d [invrev]", ctx->skipped_ret);
+ goto skipped;
+ }
+
+ if (expiry && ctx->now >= expiry) {
+ if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
+ kleave(" = %d [expire]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
+
+ /* keys that don't match */
+ if (!ctx->match_data.cmp(key, &ctx->match_data)) {
+ kleave(" = 0 [!match]");
+ return 0;
+ }
+
+ /* key must have search permissions */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0) {
+ ctx->result = ERR_PTR(-EACCES);
+ kleave(" = %d [!perm]", ctx->skipped_ret);
+ goto skipped;
+ }
+
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ /* we set a different error code if we pass a negative key */
+ if (state < 0) {
+ ctx->result = ERR_PTR(state);
+ kleave(" = %d [neg]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
+
+ /* Found */
+ ctx->result = make_key_ref(key, ctx->possessed);
+ kleave(" = 1 [found]");
+ return 1;
+
+skipped:
+ return ctx->skipped_ret;
+}
+
+/*
+ * Search inside a keyring for a key. We can search by walking to it
+ * directly based on its index-key or we can iterate over the entire
+ * tree looking for it, based on the match function.
+ */
+static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
+{
+ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) {
+ const void *object;
+
+ object = assoc_array_find(&keyring->keys,
+ &keyring_assoc_array_ops,
+ &ctx->index_key);
+ return object ? ctx->iterator(object, ctx) : 0;
+ }
+ return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
+}
+
+/*
+ * Search a tree of keyrings that point to other keyrings up to the maximum
+ * depth.
+ */
+static bool search_nested_keyrings(struct key *keyring,
+ struct keyring_search_context *ctx)
+{
+ struct {
+ struct key *keyring;
+ struct assoc_array_node *node;
+ int slot;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
+
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ struct key *key;
+ int sp = 0, slot;
+
+ kenter("{%d},{%s,%s}",
+ keyring->serial,
+ ctx->index_key.type->name,
+ ctx->index_key.description);
+
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+ BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+ (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
+ if (ctx->index_key.description)
+ key_set_index_key(&ctx->index_key);
+
+ /* Check to see if this top-level keyring is what we are looking for
+ * and whether it is valid or not.
+ */
+ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
+ keyring_compare_object(keyring, &ctx->index_key)) {
+ ctx->skipped_ret = 2;
+ switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
+ case 1:
+ goto found;
+ case 2:
+ return false;
+ default:
+ break;
+ }
+ }
+
+ ctx->skipped_ret = 0;
+
+ /* Start processing a new keyring */
+descend_to_keyring:
+ kdebug("descend to %d", keyring->serial);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto not_this_keyring;
+
+ /* Search through the keys in this keyring before searching its
+ * subtrees.
+ */
+ if (search_keyring(keyring, ctx))
+ goto found;
+
+ /* Then manually iterate through the keyrings nested in this one.
+ *
+ * Start from the root node of the index tree. Because of the way the
+ * hash function has been set up, keyrings cluster on the leftmost
+ * branch of the root node (root slot 0) or in the root node itself.
+ * Non-keyrings avoid the leftmost branch of the root entirely (root
+ * slots 1-15).
+ */
+ if (!(ctx->flags & KEYRING_SEARCH_RECURSE))
+ goto not_this_keyring;
+
+ ptr = READ_ONCE(keyring->keys.root);
+ if (!ptr)
+ goto not_this_keyring;
+
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ /* If the root is a shortcut, either the keyring only contains
+ * keyring pointers (everything clusters behind root slot 0) or
+ * doesn't contain any keyring pointers.
+ */
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
+ goto not_this_keyring;
+
+ ptr = READ_ONCE(shortcut->next_node);
+ node = assoc_array_ptr_to_node(ptr);
+ goto begin_node;
+ }
+
+ node = assoc_array_ptr_to_node(ptr);
+ ptr = node->slots[0];
+ if (!assoc_array_ptr_is_meta(ptr))
+ goto begin_node;
+
+descend_to_node:
+ /* Descend to a more distal node in this keyring's content tree and go
+ * through that.
+ */
+ kdebug("descend");
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ ptr = READ_ONCE(shortcut->next_node);
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ }
+ node = assoc_array_ptr_to_node(ptr);
+
+begin_node:
+ kdebug("begin_node");
+ slot = 0;
+ascend_to_node:
+ /* Go through the slots in a node */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = READ_ONCE(node->slots[slot]);
+
+ if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
+ goto descend_to_node;
+
+ if (!keyring_ptr_is_keyring(ptr))
+ continue;
+
+ key = keyring_ptr_to_key(ptr);
+
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
+ if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
+ ctx->result = ERR_PTR(-ELOOP);
+ return false;
+ }
+ goto not_this_keyring;
+ }
+
+ /* Search a nested keyring */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0)
+ continue;
+
+ /* stack the current position */
+ stack[sp].keyring = keyring;
+ stack[sp].node = node;
+ stack[sp].slot = slot;
+ sp++;
+
+ /* begin again with the new keyring */
+ keyring = key;
+ goto descend_to_keyring;
+ }
+
+ /* We've dealt with all the slots in the current node, so now we need
+ * to ascend to the parent and continue processing there.
+ */
+ ptr = READ_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+
+ if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ ptr = READ_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ }
+ if (!ptr)
+ goto not_this_keyring;
+ node = assoc_array_ptr_to_node(ptr);
+ slot++;
+
+ /* If we've ascended to the root (zero backpointer), we must have just
+ * finished processing the leftmost branch rather than the root slots -
+ * so there can't be any more keyrings for us to find.
+ */
+ if (node->back_pointer) {
+ kdebug("ascend %d", slot);
+ goto ascend_to_node;
+ }
+
+ /* The keyring we're looking at was disqualified or didn't contain a
+ * matching key.
+ */
+not_this_keyring:
+ kdebug("not_this_keyring %d", sp);
+ if (sp <= 0) {
+ kleave(" = false");
+ return false;
+ }
+
+ /* Resume the processing of a keyring higher up in the tree */
+ sp--;
+ keyring = stack[sp].keyring;
+ node = stack[sp].node;
+ slot = stack[sp].slot + 1;
+ kdebug("ascend to %d [%d]", keyring->serial, slot);
+ goto ascend_to_node;
+
+ /* We found a viable match */
+found:
+ key = key_ref_to_ptr(ctx->result);
+ key_check(key);
+ if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
+ key->last_used_at = ctx->now;
+ keyring->last_used_at = ctx->now;
+ while (sp > 0)
+ stack[--sp].keyring->last_used_at = ctx->now;
+ }
+ kleave(" = true");
+ return true;
+}
+
+/**
+ * keyring_search_rcu - Search a keyring tree for a matching key under RCU
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @ctx: The keyring search context.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree. In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH). The caller must hold the RCU read lock to
+ * prevent keyrings from being destroyed or rearranged whilst they are being
+ * searched.
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit. The
+ * match function may use any attributes of a key that it wishes to
+ * determine the match. Normally the match function from the key type would be
+ * used.
+ *
+ * RCU can be used to prevent the keyring key lists from disappearing without
+ * the need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
+ */
+key_ref_t keyring_search_rcu(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx)
+{
+ struct key *keyring;
+ long err;
+
+ ctx->iterator = keyring_search_iterator;
+ ctx->possessed = is_key_possessed(keyring_ref);
+ ctx->result = ERR_PTR(-EAGAIN);
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return ERR_PTR(-ENOTDIR);
+
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
+ err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ ctx->now = ktime_get_real_seconds();
+ if (search_nested_keyrings(keyring, ctx))
+ __key_get(key_ref_to_ptr(ctx->result));
+ return ctx->result;
+}
+
+/**
+ * keyring_search - Search the supplied keyring tree for a matching key
+ * @keyring: The root of the keyring tree to be searched.
+ * @type: The type of key we want to find.
+ * @description: The name of the key we want to find.
+ * @recurse: True to search the children of @keyring also
+ *
+ * As keyring_search_rcu() above, but using the current task's credentials and
+ * type's default matching function and preferred search method.
+ */
+key_ref_t keyring_search(key_ref_t keyring,
+ struct key_type *type,
+ const char *description,
+ bool recurse)
+{
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ };
+ key_ref_t key;
+ int ret;
+
+ if (recurse)
+ ctx.flags |= KEYRING_SEARCH_RECURSE;
+ if (type->match_preparse) {
+ ret = type->match_preparse(&ctx.match_data);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ }
+
+ rcu_read_lock();
+ key = keyring_search_rcu(keyring, &ctx);
+ rcu_read_unlock();
+
+ if (type->match_free)
+ type->match_free(&ctx.match_data);
+ return key;
+}
+EXPORT_SYMBOL(keyring_search);
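
For in-kernel callers the exported wrapper above is usually sufficient. A hedged sketch that looks up a "user"-type key in a keyring such as the one from the earlier sketch; the helper name is hypothetical:

#include <linux/err.h>
#include <linux/key.h>
#include <keys/user-type.h>

static struct key *example_find_user_key(struct key *keyring,
					 const char *desc)
{
	key_ref_t kref;

	/* Treat the keyring as possessed; don't descend into children. */
	kref = keyring_search(make_key_ref(keyring, true),
			      &key_type_user, desc, false);
	if (IS_ERR(kref))
		return ERR_CAST(kref);

	/* The search took a reference; drop it later with key_put(). */
	return key_ref_to_ptr(kref);
}
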
+
+static struct key_restriction *keyring_restriction_alloc(
+ key_restrict_link_func_t check)
+{
+ struct key_restriction *keyres =
+ kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
+
+ if (!keyres)
+ return ERR_PTR(-ENOMEM);
+
+ keyres->check = check;
+
+ return keyres;
+}
+
+/*
+ * Semaphore to serialise restriction setup to prevent reference count
+ * cycles through restriction key pointers.
+ */
+static DECLARE_RWSEM(keyring_serialise_restrict_sem);
+
+/*
+ * Check for restriction cycles that would prevent keyring garbage collection.
+ * keyring_serialise_restrict_sem must be held.
+ */
+static bool keyring_detect_restriction_cycle(const struct key *dest_keyring,
+ struct key_restriction *keyres)
+{
+ while (keyres && keyres->key &&
+ keyres->key->type == &key_type_keyring) {
+ if (keyres->key == dest_keyring)
+ return true;
+
+ keyres = keyres->key->restrict_link;
+ }
+
+ return false;
+}
+
+/**
+ * keyring_restrict - Look up and apply a restriction to a keyring
+ * @keyring_ref: The keyring to be restricted
+ * @type: The key type that will provide the restriction checker.
+ * @restriction: The restriction options to apply to the keyring
+ *
+ * Look up a keyring and apply a restriction to it. The restriction is managed
+ * by the specific key type, but can be configured by the options specified in
+ * the restriction string.
+ */
+int keyring_restrict(key_ref_t keyring_ref, const char *type,
+ const char *restriction)
+{
+ struct key *keyring;
+ struct key_type *restrict_type = NULL;
+ struct key_restriction *restrict_link;
+ int ret = 0;
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ if (!type) {
+ restrict_link = keyring_restriction_alloc(restrict_link_reject);
+ } else {
+ restrict_type = key_type_lookup(type);
+
+ if (IS_ERR(restrict_type))
+ return PTR_ERR(restrict_type);
+
+ if (!restrict_type->lookup_restriction) {
+ ret = -ENOENT;
+ goto error;
+ }
+
+ restrict_link = restrict_type->lookup_restriction(restriction);
+ }
+
+ if (IS_ERR(restrict_link)) {
+ ret = PTR_ERR(restrict_link);
+ goto error;
+ }
+
+ down_write(&keyring->sem);
+ down_write(&keyring_serialise_restrict_sem);
+
+ if (keyring->restrict_link) {
+ ret = -EEXIST;
+ } else if (keyring_detect_restriction_cycle(keyring, restrict_link)) {
+ ret = -EDEADLK;
+ } else {
+ keyring->restrict_link = restrict_link;
+ notify_key(keyring, NOTIFY_KEY_SETATTR, 0);
+ }
+
+ up_write(&keyring_serialise_restrict_sem);
+ up_write(&keyring->sem);
+
+ if (ret < 0) {
+ key_put(restrict_link->key);
+ kfree(restrict_link);
+ }
+
+error:
+ if (restrict_type)
+ key_type_put(restrict_type);
+
+ return ret;
+}
+EXPORT_SYMBOL(keyring_restrict);
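
From userspace this path is reached through KEYCTL_RESTRICT_KEYRING. A hedged sketch, assuming a libkeyutils recent enough to define the constant and a kernel built with the asymmetric key type; "builtin_trusted" is one restriction string that type's lookup_restriction handler is expected to accept:

#include <keyutils.h>

long restrict_to_builtin_trusted(key_serial_t keyring)
{
	return keyctl(KEYCTL_RESTRICT_KEYRING, keyring,
		      "asymmetric", "builtin_trusted");
}
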
+
+/*
+ * Search the given keyring for a key that might be updated.
+ *
+ * The caller must guarantee that the keyring is a keyring and that the
+ * permission is granted to modify the keyring as no check is made here. The
+ * caller must also hold a lock on the keyring semaphore.
+ *
+ * Returns a pointer to the found key with usage count incremented if
+ * successful and returns NULL if not found. Revoked and invalidated keys are
+ * skipped over.
+ *
+ * If successful, the possession indicator is propagated from the keyring ref
+ * to the returned key reference.
+ */
+key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key)
+{
+ struct key *keyring, *key;
+ const void *object;
+
+ keyring = key_ref_to_ptr(keyring_ref);
+
+ kenter("{%d},{%s,%s}",
+ keyring->serial, index_key->type->name, index_key->description);
+
+ object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
+ index_key);
+
+ if (object)
+ goto found;
+
+ kleave(" = NULL");
+ return NULL;
+
+found:
+ key = keyring_ptr_to_key(object);
+ if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ kleave(" = NULL [x]");
+ return NULL;
+ }
+ __key_get(key);
+ kleave(" = {%d}", key->serial);
+ return make_key_ref(key, is_key_possessed(keyring_ref));
+}
+
+/*
+ * Find a keyring with the specified name.
+ *
+ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
+ * user in the current user namespace are considered. If @uid_keyring is %true,
+ * the keyring additionally must have been allocated as a user or user session
+ * keyring; otherwise, it must grant Search permission directly to the caller.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having been
+ * incremented on success. -ENOKEY is returned if a matching keyring could not
+ * be found.
+ */
+struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+{
+ struct user_namespace *ns = current_user_ns();
+ struct key *keyring;
+
+ if (!name)
+ return ERR_PTR(-EINVAL);
+
+ read_lock(&keyring_name_lock);
+
+ /* Search the keyring name list for a keyring with a matching name that
+ * grants Search permission and that hasn't been revoked
+ */
+ list_for_each_entry(keyring, &ns->keyring_name_list, name_link) {
+ if (!kuid_has_mapping(ns, keyring->user->uid))
+ continue;
+
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ continue;
+
+ if (strcmp(keyring->description, name) != 0)
+ continue;
+
+ if (uid_keyring) {
+ if (!test_bit(KEY_FLAG_UID_KEYRING,
+ &keyring->flags))
+ continue;
+ } else {
+ if (key_permission(make_key_ref(keyring, 0),
+ KEY_NEED_SEARCH) < 0)
+ continue;
+ }
+
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+ * (ie. it has a zero usage count) */
+ if (!refcount_inc_not_zero(&keyring->usage))
+ continue;
+ keyring->last_used_at = ktime_get_real_seconds();
+ goto out;
+ }
+
+ keyring = ERR_PTR(-ENOKEY);
+out:
+ read_unlock(&keyring_name_lock);
+ return keyring;
+}
+
+static int keyring_detect_cycle_iterator(const void *object,
+ void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%d}", key->serial);
+
+ /* We might get a keyring with a matching index-key that is nonetheless a
+ * different keyring. */
+ if (key != ctx->match_data.raw_data)
+ return 0;
+
+ ctx->result = ERR_PTR(-EDEADLK);
+ return 1;
+}
+
+/*
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A).
+ *
+ * Since we are adding B to A at the top level, checking for cycles should just
+ * be a matter of seeing if node A is somewhere in tree B.
+ */
+static int keyring_detect_cycle(struct key *A, struct key *B)
+{
+ struct keyring_search_context ctx = {
+ .index_key = A->index_key,
+ .match_data.raw_data = A,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .iterator = keyring_detect_cycle_iterator,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_NO_UPDATE_TIME |
+ KEYRING_SEARCH_NO_CHECK_PERM |
+ KEYRING_SEARCH_DETECT_TOO_DEEP |
+ KEYRING_SEARCH_RECURSE),
+ };
+
+ rcu_read_lock();
+ search_nested_keyrings(B, &ctx);
+ rcu_read_unlock();
+ return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
+}
+
+/*
+ * Lock keyring for link.
+ */
+int __key_link_lock(struct key *keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
+ * Lock keyrings for move (link/unlink combination).
+ */
+int __key_move_lock(struct key *l_keyring, struct key *u_keyring,
+ const struct keyring_index_key *index_key)
+ __acquires(&l_keyring->sem)
+ __acquires(&u_keyring->sem)
+ __acquires(&keyring_serialise_link_lock)
+{
+ if (l_keyring->type != &key_type_keyring ||
+ u_keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ /* We have to be very careful here to take the keyring locks in the
+ * right order, lest we open ourselves to deadlocking against another
+ * move operation.
+ */
+ if (l_keyring < u_keyring) {
+ down_write(&l_keyring->sem);
+ down_write_nested(&u_keyring->sem, 1);
+ } else {
+ down_write(&u_keyring->sem);
+ down_write_nested(&l_keyring->sem, 1);
+ }
+
+ /* Serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders.
+ */
+ if (index_key->type == &key_type_keyring)
+ mutex_lock(&keyring_serialise_link_lock);
+
+ return 0;
+}
+
+/*
+ * Preallocate memory so that a key can be linked into a keyring.
+ */
+int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit)
+{
+ struct assoc_array_edit *edit;
+ int ret;
+
+ kenter("%d,%s,%s,",
+ keyring->serial, index_key->type->name, index_key->description);
+
+ BUG_ON(index_key->desc_len == 0);
+ BUG_ON(*_edit != NULL);
+
+ *_edit = NULL;
+
+ ret = -EKEYREVOKED;
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ goto error;
+
+ /* Create an edit script that will insert/replace the key in the
+ * keyring tree.
+ */
+ edit = assoc_array_insert(&keyring->keys,
+ &keyring_assoc_array_ops,
+ index_key,
+ NULL);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ goto error;
+ }
+
+ /* If we're not replacing a link in-place then we're going to need some
+ * extra quota.
+ */
+ if (!edit->dead_leaf) {
+ ret = key_payload_reserve(keyring,
+ keyring->datalen + KEYQUOTA_LINK_BYTES);
+ if (ret < 0)
+ goto error_cancel;
+ }
+
+ *_edit = edit;
+ kleave(" = 0");
+ return 0;
+
+error_cancel:
+ assoc_array_cancel_edit(edit);
+error:
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Check already instantiated keys aren't going to be a problem.
+ *
+ * The caller must have called __key_link_begin(). There is no need to call
+ * this for keys that were created since __key_link_begin() was called.
+ */
+int __key_link_check_live_key(struct key *keyring, struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ /* check that we aren't going to create a cycle by linking one
+ * keyring to another */
+ return keyring_detect_cycle(keyring, key);
+ return 0;
+}
+
+/*
+ * Link a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called. Discards any
+ * already extant link to a matching key if there is one, so that each keyring
+ * holds at most one link to any given key of a particular type+description
+ * combination.
+ */
+void __key_link(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ __key_get(key);
+ assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
+ notify_key(keyring, NOTIFY_KEY_LINKED, key_serial(key));
+}
+
+/*
+ * Finish linking a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.
+ */
+void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit)
+ __releases(&keyring->sem)
+ __releases(&keyring_serialise_link_lock)
+{
+ BUG_ON(index_key->type == NULL);
+ kenter("%d,%s,", keyring->serial, index_key->type->name);
+
+ if (edit) {
+ if (!edit->dead_leaf) {
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ }
+ assoc_array_cancel_edit(edit);
+ }
+ up_write(&keyring->sem);
+
+ if (index_key->type == &key_type_keyring)
+ mutex_unlock(&keyring_serialise_link_lock);
+}
+
+/*
+ * Check addition of keys to restricted keyrings.
+ */
+static int __key_link_check_restriction(struct key *keyring, struct key *key)
+{
+ if (!keyring->restrict_link || !keyring->restrict_link->check)
+ return 0;
+ return keyring->restrict_link->check(keyring, key->type, &key->payload,
+ keyring->restrict_link->key);
+}
+
+/**
+ * key_link - Link a key to a keyring
+ * @keyring: The keyring to make the link in.
+ * @key: The key to link to.
+ *
+ * Make a link in a keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring.
+ *
+ * This function will write-lock the keyring's semaphore and will consume some
+ * of the user's key data quota to hold the link.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
+ * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
+ * full, -EDQUOT if there is insufficient key data quota remaining to add
+ * another link or -ENOMEM if there's insufficient memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
+ */
+int key_link(struct key *keyring, struct key *key)
+{
+ struct assoc_array_edit *edit = NULL;
+ int ret;
+
+ kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage));
+
+ key_check(keyring);
+ key_check(key);
+
+ ret = __key_link_lock(keyring, &key->index_key);
+ if (ret < 0)
+ goto error;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto error_end;
+
+ kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage));
+ ret = __key_link_check_restriction(keyring, key);
+ if (ret == 0)
+ ret = __key_link_check_live_key(keyring, key);
+ if (ret == 0)
+ __key_link(keyring, key, &edit);
+
+error_end:
+ __key_link_end(keyring, &key->index_key, edit);
+error:
+ kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage));
+ return ret;
+}
+EXPORT_SYMBOL(key_link);
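
A hedged in-kernel sketch of the link/unlink pair (key_unlink() is defined further down); the helper name is hypothetical and error handling is kept minimal:

#include <linux/err.h>
#include <linux/key.h>

static int example_pin_key(struct key *keyring, struct key *key)
{
	int ret;

	ret = key_link(keyring, key);
	if (ret < 0)
		return ret;

	/* ... use the key; later, drop the link again ... */
	return key_unlink(keyring, key);
}
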
+
+/*
+ * Lock a keyring for unlink.
+ */
+static int __key_unlink_lock(struct key *keyring)
+ __acquires(&keyring->sem)
+{
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+ return 0;
+}
+
+/*
+ * Begin the process of unlinking a key from a keyring.
+ */
+static int __key_unlink_begin(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ struct assoc_array_edit *edit;
+
+ BUG_ON(*_edit != NULL);
+
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit))
+ return PTR_ERR(edit);
+
+ if (!edit)
+ return -ENOENT;
+
+ *_edit = edit;
+ return 0;
+}
+
+/*
+ * Apply an unlink change.
+ */
+static void __key_unlink(struct key *keyring, struct key *key,
+ struct assoc_array_edit **_edit)
+{
+ assoc_array_apply_edit(*_edit);
+ notify_key(keyring, NOTIFY_KEY_UNLINKED, key_serial(key));
+ *_edit = NULL;
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
+}
+
+/*
+ * Finish unlinking a key from a keyring.
+ */
+static void __key_unlink_end(struct key *keyring,
+ struct key *key,
+ struct assoc_array_edit *edit)
+ __releases(&keyring->sem)
+{
+ if (edit)
+ assoc_array_cancel_edit(edit);
+ up_write(&keyring->sem);
+}
+
+/**
+ * key_unlink - Unlink the first link to a key from a keyring.
+ * @keyring: The keyring to remove the link from.
+ * @key: The key the link is to.
+ *
+ * Remove a link from a keyring to a key.
+ *
+ * This function will write-lock the keyring's semaphore.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
+ * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
+ * memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be removed (the keyring should have Write permission; no permissions are
+ * required on the key).
+ */
+int key_unlink(struct key *keyring, struct key *key)
+{
+ struct assoc_array_edit *edit = NULL;
+ int ret;
+
+ key_check(keyring);
+ key_check(key);
+
+ ret = __key_unlink_lock(keyring);
+ if (ret < 0)
+ return ret;
+
+ ret = __key_unlink_begin(keyring, key, &edit);
+ if (ret == 0)
+ __key_unlink(keyring, key, &edit);
+ __key_unlink_end(keyring, key, edit);
+ return ret;
+}
+EXPORT_SYMBOL(key_unlink);
+
+/**
+ * key_move - Move a key from one keyring to another
+ * @key: The key to move
+ * @from_keyring: The keyring to remove the link from.
+ * @to_keyring: The keyring to make the link in.
+ * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL.
+ *
+ * Make a link in @to_keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring
+ * whilst simultaneously removing a link to the key from @from_keyring.
+ *
+ * This function will write-lock both keyrings' semaphores and will consume
+ * some of the user's key data quota to hold the link on @to_keyring.
+ *
+ * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring,
+ * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second
+ * keyring is full, -EDQUOT if there is insufficient key data quota remaining
+ * to add another link or -ENOMEM if there's insufficient memory. If
+ * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a
+ * matching key in @to_keyring.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
+ */
+int key_move(struct key *key,
+ struct key *from_keyring,
+ struct key *to_keyring,
+ unsigned int flags)
+{
+ struct assoc_array_edit *from_edit = NULL, *to_edit = NULL;
+ int ret;
+
+ kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial);
+
+ if (from_keyring == to_keyring)
+ return 0;
+
+ key_check(key);
+ key_check(from_keyring);
+ key_check(to_keyring);
+
+ ret = __key_move_lock(from_keyring, to_keyring, &key->index_key);
+ if (ret < 0)
+ goto out;
+ ret = __key_unlink_begin(from_keyring, key, &from_edit);
+ if (ret < 0)
+ goto error;
+ ret = __key_link_begin(to_keyring, &key->index_key, &to_edit);
+ if (ret < 0)
+ goto error;
+
+ ret = -EEXIST;
+ if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL))
+ goto error;
+
+ ret = __key_link_check_restriction(to_keyring, key);
+ if (ret < 0)
+ goto error;
+ ret = __key_link_check_live_key(to_keyring, key);
+ if (ret < 0)
+ goto error;
+
+ __key_unlink(from_keyring, key, &from_edit);
+ __key_link(to_keyring, key, &to_edit);
+error:
+ __key_link_end(to_keyring, &key->index_key, to_edit);
+ __key_unlink_end(from_keyring, key, from_edit);
+out:
+ kleave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(key_move);
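
Userspace reaches this through KEYCTL_MOVE with the same argument order as the dispatch at the top of keyctl.c. A hedged sketch, assuming a libkeyutils recent enough to define KEYCTL_MOVE and KEYCTL_MOVE_EXCL (otherwise the raw syscall with identical arguments works):

#include <keyutils.h>

long move_key_excl(key_serial_t key, key_serial_t from, key_serial_t to)
{
	/* Fail with EEXIST if the destination already holds a matching key. */
	return keyctl(KEYCTL_MOVE, key, from, to, KEYCTL_MOVE_EXCL);
}
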
+
+/**
+ * keyring_clear - Clear a keyring
+ * @keyring: The keyring to clear.
+ *
+ * Clear the contents of the specified keyring.
+ *
+ * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
+ */
+int keyring_clear(struct key *keyring)
+{
+ struct assoc_array_edit *edit;
+ int ret;
+
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
+
+ down_write(&keyring->sem);
+
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ } else {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ notify_key(keyring, NOTIFY_KEY_CLEARED, 0);
+ key_payload_reserve(keyring, 0);
+ ret = 0;
+ }
+
+ up_write(&keyring->sem);
+ return ret;
+}
+EXPORT_SYMBOL(keyring_clear);
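
Userspace reaches this through KEYCTL_CLEAR; a one-line sketch (not part of the patch) that empties the caller's session keyring, for which Write permission on the keyring is required:

#include <keyutils.h>

long clear_session_keyring(void)
{
	return keyctl(KEYCTL_CLEAR, KEY_SPEC_SESSION_KEYRING);
}
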
+
+/*
+ * Dispose of the links from a revoked keyring.
+ *
+ * This is called with the key sem write-locked.
+ */
+static void keyring_revoke(struct key *keyring)
+{
+ struct assoc_array_edit *edit;
+
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (!IS_ERR(edit)) {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
+ }
+}
+
+static bool keyring_gc_select_iterator(void *object, void *iterator_data)
+{
+ struct key *key = keyring_ptr_to_key(object);
+ time64_t *limit = iterator_data;
+
+ if (key_is_dead(key, *limit))
+ return false;
+ key_get(key);
+ return true;
+}
+
+static int keyring_gc_check_iterator(const void *object, void *iterator_data)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ time64_t *limit = iterator_data;
+
+ key_check(key);
+ return key_is_dead(key, *limit);
+}
+
+/*
+ * Garbage collect pointers from a keyring.
+ *
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
+ */
+void keyring_gc(struct key *keyring, time64_t limit)
+{
+ int result;
+
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
+
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto dont_gc;
+
+ /* scan the keyring looking for dead keys */
+ rcu_read_lock();
+ result = assoc_array_iterate(&keyring->keys,
+ keyring_gc_check_iterator, &limit);
+ rcu_read_unlock();
+ if (result == true)
+ goto do_gc;
+
+dont_gc:
+ kleave(" [no gc]");
+ return;
+
+do_gc:
+ down_write(&keyring->sem);
+ assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
+ keyring_gc_select_iterator, &limit);
+ up_write(&keyring->sem);
+ kleave(" [gc]");
+}
+
+/*
+ * Garbage collect restriction pointers from a keyring.
+ *
+ * Keyring restrictions are associated with a key type, and must be cleaned
+ * up if the key type is unregistered. The restriction is altered to always
+ * reject additional keys so a keyring cannot be opened up by unregistering
+ * a key type.
+ *
+ * Not called with any keyring locks held. The keyring's key struct will not
+ * be deallocated under us as only our caller may deallocate it.
+ *
+ * The caller is required to hold key_types_sem and dead_type->sem. This is
+ * fulfilled by key_gc_keytype() holding the locks on behalf of
+ * key_garbage_collector(), which it invokes on a workqueue.
+ */
+void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type)
+{
+ struct key_restriction *keyres;
+
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
+
+ /*
+ * keyring->restrict_link is only assigned at key allocation time
+ * or with the key type locked, so the only values that could be
+ * concurrently assigned to keyring->restrict_link are for key
+ * types other than dead_type. Given this, it's ok to check
+ * the key type before acquiring keyring->sem.
+ */
+ if (!dead_type || !keyring->restrict_link ||
+ keyring->restrict_link->keytype != dead_type) {
+ kleave(" [no restriction gc]");
+ return;
+ }
+
+ /* Lock the keyring to ensure that a link is not in progress */
+ down_write(&keyring->sem);
+
+ keyres = keyring->restrict_link;
+
+ keyres->check = restrict_link_reject;
+
+ key_put(keyres->key);
+ keyres->key = NULL;
+ keyres->keytype = NULL;
+
+ up_write(&keyring->sem);
+
+ kleave(" [restriction gc]");
+}
diff --git a/security/keys/permission.c b/security/keys/permission.c
new file mode 100644
index 000000000..4a61f804e
--- /dev/null
+++ b/security/keys/permission.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Key permission checking
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/security.h>
+#include "internal.h"
+
+/**
+ * key_task_permission - Check a key can be used
+ * @key_ref: The key to check.
+ * @cred: The credentials to use.
+ * @need_perm: The permission required.
+ *
+ * Check to see whether permission is granted to use a key in the desired way,
+ * but permit the security modules to override.
+ *
+ * The caller must hold either a ref on cred or the RCU read lock.
+ *
+ * Returns 0 if successful, -EACCES if access is denied based on the
+ * permissions bits or the LSM check.
+ */
+int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
+ enum key_need_perm need_perm)
+{
+ struct key *key;
+ key_perm_t kperm, mask;
+ int ret;
+
+ switch (need_perm) {
+ default:
+ WARN_ON(1);
+ return -EACCES;
+ case KEY_NEED_UNLINK:
+ case KEY_SYSADMIN_OVERRIDE:
+ case KEY_AUTHTOKEN_OVERRIDE:
+ case KEY_DEFER_PERM_CHECK:
+ goto lsm;
+
+ case KEY_NEED_VIEW: mask = KEY_OTH_VIEW; break;
+ case KEY_NEED_READ: mask = KEY_OTH_READ; break;
+ case KEY_NEED_WRITE: mask = KEY_OTH_WRITE; break;
+ case KEY_NEED_SEARCH: mask = KEY_OTH_SEARCH; break;
+ case KEY_NEED_LINK: mask = KEY_OTH_LINK; break;
+ case KEY_NEED_SETATTR: mask = KEY_OTH_SETATTR; break;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+
+ /* use the second 8-bits of permissions for keys the caller owns */
+ if (uid_eq(key->uid, cred->fsuid)) {
+ kperm = key->perm >> 16;
+ goto use_these_perms;
+ }
+
+ /* use the third 8-bits of permissions for keys the caller has a group
+ * membership in common with */
+ if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) {
+ if (gid_eq(key->gid, cred->fsgid)) {
+ kperm = key->perm >> 8;
+ goto use_these_perms;
+ }
+
+ ret = groups_search(cred->group_info, key->gid);
+ if (ret) {
+ kperm = key->perm >> 8;
+ goto use_these_perms;
+ }
+ }
+
+ /* otherwise use the least-significant 8-bits */
+ kperm = key->perm;
+
+use_these_perms:
+
+ /* use the top 8-bits of permissions for keys the caller possesses
+ * - possessor permissions are additive with other permissions
+ */
+ if (is_key_possessed(key_ref))
+ kperm |= key->perm >> 24;
+
+ if ((kperm & mask) != mask)
+ return -EACCES;
+
+ /* let LSM be the final arbiter */
+lsm:
+ return security_key_permission(key_ref, cred, need_perm);
+}
+EXPORT_SYMBOL(key_task_permission);
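+
+/*
+ * Illustrative usage sketch (hypothetical caller): check that the current
+ * task may read a key it possesses before touching the payload.
+ *
+ *	ret = key_task_permission(make_key_ref(key, true),
+ *				  current_cred(), KEY_NEED_READ);
+ *	if (ret < 0)
+ *		return ret;
+ */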
+
+/**
+ * key_validate - Validate a key.
+ * @key: The key to be validated.
+ *
+ * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the
+ * key is invalidated, -EKEYREVOKED if the key's type has been removed or if
+ * the key has been revoked or -EKEYEXPIRED if the key has expired.
+ */
+int key_validate(const struct key *key)
+{
+ unsigned long flags = READ_ONCE(key->flags);
+ time64_t expiry = READ_ONCE(key->expiry);
+
+ if (flags & (1 << KEY_FLAG_INVALIDATED))
+ return -ENOKEY;
+
+ /* check it's still accessible */
+ if (flags & ((1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD)))
+ return -EKEYREVOKED;
+
+ /* check it hasn't expired */
+ if (expiry) {
+ if (ktime_get_real_seconds() >= expiry)
+ return -EKEYEXPIRED;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(key_validate);
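+
+/*
+ * Illustrative usage sketch (hypothetical caller): bail out early if a key
+ * obtained elsewhere has since been invalidated, revoked or expired.
+ *
+ *	ret = key_validate(key);
+ *	if (ret < 0)
+ *		return ret;
+ */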
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 000000000..97af230aa
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* General persistent per-UID keyrings register
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/user_namespace.h>
+#include <linux/cred.h>
+
+#include "internal.h"
+
+unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
+
+/*
+ * Create the persistent keyring register for the current user namespace.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static int key_create_persistent_register(struct user_namespace *ns)
+{
+ struct key *reg = keyring_alloc(".persistent_register",
+ KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ns->persistent_keyring_register = reg;
+ return 0;
+}
+
+/*
+ * Create the persistent keyring for the specified user.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
+ struct keyring_index_key *index_key)
+{
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+
+ if (!ns->persistent_keyring_register) {
+ long err = key_create_persistent_register(ns);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ persistent_ref = find_key_to_update(reg_ref, index_key);
+ if (persistent_ref)
+ return persistent_ref;
+ }
+
+ persistent = keyring_alloc(index_key->description,
+ uid, INVALID_GID, current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL,
+ ns->persistent_keyring_register);
+ if (IS_ERR(persistent))
+ return ERR_CAST(persistent);
+
+ return make_key_ref(persistent, true);
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
+ key_ref_t dest_ref)
+{
+ struct keyring_index_key index_key;
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+ char buf[32];
+ long ret;
+
+ /* Look in the register if it exists */
+ memset(&index_key, 0, sizeof(index_key));
+ index_key.type = &key_type_keyring;
+ index_key.description = buf;
+ index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+ key_set_index_key(&index_key);
+
+ if (ns->persistent_keyring_register) {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ down_read(&ns->keyring_sem);
+ persistent_ref = find_key_to_update(reg_ref, &index_key);
+ up_read(&ns->keyring_sem);
+
+ if (persistent_ref)
+ goto found;
+ }
+
+ /* It wasn't in the register, so we'll need to create it. We might
+ * also need to create the register.
+ */
+ down_write(&ns->keyring_sem);
+ persistent_ref = key_create_persistent(ns, uid, &index_key);
+ up_write(&ns->keyring_sem);
+ if (!IS_ERR(persistent_ref))
+ goto found;
+
+ return PTR_ERR(persistent_ref);
+
+found:
+ ret = key_task_permission(persistent_ref, current_cred(), KEY_NEED_LINK);
+ if (ret == 0) {
+ persistent = key_ref_to_ptr(persistent_ref);
+ ret = key_link(key_ref_to_ptr(dest_ref), persistent);
+ if (ret == 0) {
+ key_set_timeout(persistent, persistent_keyring_expiry);
+ ret = persistent->serial;
+ }
+ }
+
+ key_ref_put(persistent_ref);
+ return ret;
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
+{
+ struct user_namespace *ns = current_user_ns();
+ key_ref_t dest_ref;
+ kuid_t uid;
+ long ret;
+
+ /* -1 indicates the current user */
+ if (_uid == (uid_t)-1) {
+ uid = current_uid();
+ } else {
+ uid = make_kuid(ns, _uid);
+ if (!uid_valid(uid))
+ return -EINVAL;
+
+ /* You can only see your own persistent cache if you're not
+ * sufficiently privileged.
+ */
+ if (!uid_eq(uid, current_uid()) &&
+ !uid_eq(uid, current_euid()) &&
+ !ns_capable(ns, CAP_SETUID))
+ return -EPERM;
+ }
+
+ /* There must be a destination keyring */
+ dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref))
+ return PTR_ERR(dest_ref);
+ if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
+ ret = -ENOTDIR;
+ goto out_put_dest;
+ }
+
+ ret = key_get_persistent(ns, uid, dest_ref);
+
+out_put_dest:
+ key_ref_put(dest_ref);
+ return ret;
+}
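+
+/*
+ * Illustrative userspace sketch (assumes libkeyutils is available): fetch the
+ * caller's own persistent keyring and link it into the session keyring.
+ *
+ *	key_serial_t id = keyctl_get_persistent(-1, KEY_SPEC_SESSION_KEYRING);
+ */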
diff --git a/security/keys/proc.c b/security/keys/proc.c
new file mode 100644
index 000000000..4f4e2c182
--- /dev/null
+++ b/security/keys/proc.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* procfs files for key database enumeration
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/errno.h>
+#include "internal.h"
+
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos);
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_keys_stop(struct seq_file *p, void *v);
+static int proc_keys_show(struct seq_file *m, void *v);
+
+static const struct seq_operations proc_keys_ops = {
+ .start = proc_keys_start,
+ .next = proc_keys_next,
+ .stop = proc_keys_stop,
+ .show = proc_keys_show,
+};
+
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos);
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos);
+static void proc_key_users_stop(struct seq_file *p, void *v);
+static int proc_key_users_show(struct seq_file *m, void *v);
+
+static const struct seq_operations proc_key_users_ops = {
+ .start = proc_key_users_start,
+ .next = proc_key_users_next,
+ .stop = proc_key_users_stop,
+ .show = proc_key_users_show,
+};
+
+/*
+ * Declare the /proc files.
+ */
+static int __init key_proc_init(void)
+{
+ struct proc_dir_entry *p;
+
+ p = proc_create_seq("keys", 0, NULL, &proc_keys_ops);
+ if (!p)
+ panic("Cannot create /proc/keys\n");
+
+ p = proc_create_seq("key-users", 0, NULL, &proc_key_users_ops);
+ if (!p)
+ panic("Cannot create /proc/key-users\n");
+
+ return 0;
+}
+
+__initcall(key_proc_init);
+
+/*
+ * Implement "/proc/keys" to provide a list of the keys on the system that
+ * grant View permission to the caller.
+ */
+static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
+{
+ struct user_namespace *user_ns = seq_user_ns(p);
+
+ n = rb_next(n);
+ while (n) {
+ struct key *key = rb_entry(n, struct key, serial_node);
+ if (kuid_has_mapping(user_ns, key->user->uid))
+ break;
+ n = rb_next(n);
+ }
+ return n;
+}
+
+static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
+{
+ struct user_namespace *user_ns = seq_user_ns(p);
+ struct rb_node *n = key_serial_tree.rb_node;
+ struct key *minkey = NULL;
+
+ while (n) {
+ struct key *key = rb_entry(n, struct key, serial_node);
+ if (id < key->serial) {
+ if (!minkey || minkey->serial > key->serial)
+ minkey = key;
+ n = n->rb_left;
+ } else if (id > key->serial) {
+ n = n->rb_right;
+ } else {
+ minkey = key;
+ break;
+ }
+ key = NULL;
+ }
+
+ if (!minkey)
+ return NULL;
+
+ for (;;) {
+ if (kuid_has_mapping(user_ns, minkey->user->uid))
+ return minkey;
+ n = rb_next(&minkey->serial_node);
+ if (!n)
+ return NULL;
+ minkey = rb_entry(n, struct key, serial_node);
+ }
+}
+
+static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_serial_lock)
+{
+ key_serial_t pos = *_pos;
+ struct key *key;
+
+ spin_lock(&key_serial_lock);
+
+ if (*_pos > INT_MAX)
+ return NULL;
+ key = find_ge_key(p, pos);
+ if (!key)
+ return NULL;
+ *_pos = key->serial;
+ return &key->serial_node;
+}
+
+static inline key_serial_t key_node_serial(struct rb_node *n)
+{
+ struct key *key = rb_entry(n, struct key, serial_node);
+ return key->serial;
+}
+
+static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+ struct rb_node *n;
+
+ n = key_serial_next(p, v);
+ if (n)
+ *_pos = key_node_serial(n);
+ else
+ (*_pos)++;
+ return n;
+}
+
+static void proc_keys_stop(struct seq_file *p, void *v)
+ __releases(key_serial_lock)
+{
+ spin_unlock(&key_serial_lock);
+}
+
+static int proc_keys_show(struct seq_file *m, void *v)
+{
+ struct rb_node *_p = v;
+ struct key *key = rb_entry(_p, struct key, serial_node);
+ unsigned long flags;
+ key_ref_t key_ref, skey_ref;
+ time64_t now, expiry;
+ char xbuf[16];
+ short state;
+ u64 timo;
+ int rc;
+
+ struct keyring_search_context ctx = {
+ .index_key = key->index_key,
+ .cred = m->file->f_cred,
+ .match_data.cmp = lookup_user_key_possessed,
+ .match_data.raw_data = key,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
+ };
+
+ key_ref = make_key_ref(key, 0);
+
+ /* determine if the key is possessed by this process (a test we can
+ * skip if the key does not indicate the possessor can view it)
+ */
+ if (key->perm & KEY_POS_VIEW) {
+ rcu_read_lock();
+ skey_ref = search_cred_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+ if (!IS_ERR(skey_ref)) {
+ key_ref_put(skey_ref);
+ key_ref = make_key_ref(key, 1);
+ }
+ }
+
+ /* check whether the current task is allowed to view the key */
+ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
+ if (rc < 0)
+ return 0;
+
+ now = ktime_get_real_seconds();
+
+ rcu_read_lock();
+
+ /* come up with a suitable timeout value */
+ expiry = READ_ONCE(key->expiry);
+ if (expiry == TIME64_MAX) {
+ memcpy(xbuf, "perm", 5);
+ } else if (now >= expiry) {
+ memcpy(xbuf, "expd", 5);
+ } else {
+ timo = expiry - now;
+
+ if (timo < 60)
+ sprintf(xbuf, "%llus", timo);
+ else if (timo < 60*60)
+ sprintf(xbuf, "%llum", div_u64(timo, 60));
+ else if (timo < 60*60*24)
+ sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60));
+ else if (timo < 60*60*24*7)
+ sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24));
+ else
+ sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7));
+ }
+
+ state = key_read_state(key);
+
+#define showflag(FLAGS, LETTER, FLAG) \
+ ((FLAGS & (1 << FLAG)) ? LETTER : '-')
+
+ flags = READ_ONCE(key->flags);
+ seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
+ key->serial,
+ state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
+ showflag(flags, 'R', KEY_FLAG_REVOKED),
+ showflag(flags, 'D', KEY_FLAG_DEAD),
+ showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
+ showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
+ state < 0 ? 'N' : '-',
+ showflag(flags, 'i', KEY_FLAG_INVALIDATED),
+ refcount_read(&key->usage),
+ xbuf,
+ key->perm,
+ from_kuid_munged(seq_user_ns(m), key->uid),
+ from_kgid_munged(seq_user_ns(m), key->gid),
+ key->type->name);
+
+#undef showflag
+
+ if (key->type->describe)
+ key->type->describe(key, m);
+ seq_putc(m, '\n');
+
+ rcu_read_unlock();
+ return 0;
+}
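+
+/*
+ * A /proc/keys line produced above looks roughly like this (fields: serial,
+ * flags, usage count, expiry, permissions, uid, gid, type, description):
+ *
+ *	2eb4a811 I--Q---     2 perm 3f010000  1000  1000 user      foo: 16
+ */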
+
+static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
+{
+ while (n) {
+ struct key_user *user = rb_entry(n, struct key_user, node);
+ if (kuid_has_mapping(user_ns, user->uid))
+ break;
+ n = rb_next(n);
+ }
+ return n;
+}
+
+static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
+{
+ return __key_user_next(user_ns, rb_next(n));
+}
+
+static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
+{
+ struct rb_node *n = rb_first(r);
+ return __key_user_next(user_ns, n);
+}
+
+static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
+ __acquires(key_user_lock)
+{
+ struct rb_node *_p;
+ loff_t pos = *_pos;
+
+ spin_lock(&key_user_lock);
+
+ _p = key_user_first(seq_user_ns(p), &key_user_tree);
+ while (pos > 0 && _p) {
+ pos--;
+ _p = key_user_next(seq_user_ns(p), _p);
+ }
+
+ return _p;
+}
+
+static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
+{
+ (*_pos)++;
+ return key_user_next(seq_user_ns(p), (struct rb_node *)v);
+}
+
+static void proc_key_users_stop(struct seq_file *p, void *v)
+ __releases(key_user_lock)
+{
+ spin_unlock(&key_user_lock);
+}
+
+static int proc_key_users_show(struct seq_file *m, void *v)
+{
+ struct rb_node *_p = v;
+ struct key_user *user = rb_entry(_p, struct key_user, node);
+ unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxkeys : key_quota_maxkeys;
+ unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
+ key_quota_root_maxbytes : key_quota_maxbytes;
+
+ seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
+ from_kuid_munged(seq_user_ns(m), user->uid),
+ refcount_read(&user->usage),
+ atomic_read(&user->nkeys),
+ atomic_read(&user->nikeys),
+ user->qnkeys,
+ maxkeys,
+ user->qnbytes,
+ maxbytes);
+
+ return 0;
+}
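+
+/*
+ * A /proc/key-users line produced above looks roughly like this (fields: uid,
+ * usage count, nkeys/nikeys, qnkeys/maxkeys, qnbytes/maxbytes):
+ *
+ *	1000:     7 6/6 3/200 76/20000
+ */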
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
new file mode 100644
index 000000000..1fe8b934f
--- /dev/null
+++ b/security/keys/process_keys.c
@@ -0,0 +1,956 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Manage a process's keyrings
+ *
+ * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/user.h>
+#include <linux/keyctl.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/security.h>
+#include <linux/user_namespace.h>
+#include <linux/uaccess.h>
+#include <linux/init_task.h>
+#include <keys/request_key_auth-type.h>
+#include "internal.h"
+
+/* Session keyring create vs join semaphore */
+static DEFINE_MUTEX(key_session_mutex);
+
+/* The root user's tracking struct */
+struct key_user root_key_user = {
+ .usage = REFCOUNT_INIT(3),
+ .cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
+ .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
+ .nkeys = ATOMIC_INIT(2),
+ .nikeys = ATOMIC_INIT(2),
+ .uid = GLOBAL_ROOT_UID,
+};
+
+/*
+ * Get or create a user register keyring.
+ */
+static struct key *get_user_register(struct user_namespace *user_ns)
+{
+ struct key *reg_keyring = READ_ONCE(user_ns->user_keyring_register);
+
+ if (reg_keyring)
+ return reg_keyring;
+
+ down_write(&user_ns->keyring_sem);
+
+ /* Make sure there's a register keyring. It gets owned by the
+ * user_namespace's owner.
+ */
+ reg_keyring = user_ns->user_keyring_register;
+ if (!reg_keyring) {
+ reg_keyring = keyring_alloc(".user_reg",
+ user_ns->owner, INVALID_GID,
+ &init_cred,
+ KEY_POS_WRITE | KEY_POS_SEARCH |
+ KEY_USR_VIEW | KEY_USR_READ,
+ 0,
+ NULL, NULL);
+ if (!IS_ERR(reg_keyring))
+ smp_store_release(&user_ns->user_keyring_register,
+ reg_keyring);
+ }
+
+ up_write(&user_ns->keyring_sem);
+
+ /* We don't return a ref since the keyring is pinned by the user_ns */
+ return reg_keyring;
+}
+
+/*
+ * Look up the user and user session keyrings for the current process's UID,
+ * creating them if they don't exist.
+ */
+int look_up_user_keyrings(struct key **_user_keyring,
+ struct key **_user_session_keyring)
+{
+ const struct cred *cred = current_cred();
+ struct user_namespace *user_ns = current_user_ns();
+ struct key *reg_keyring, *uid_keyring, *session_keyring;
+ key_perm_t user_keyring_perm;
+ key_ref_t uid_keyring_r, session_keyring_r;
+ uid_t uid = from_kuid(user_ns, cred->user->uid);
+ char buf[20];
+ int ret;
+
+ user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
+
+ kenter("%u", uid);
+
+ reg_keyring = get_user_register(user_ns);
+ if (IS_ERR(reg_keyring))
+ return PTR_ERR(reg_keyring);
+
+ down_write(&user_ns->keyring_sem);
+ ret = 0;
+
+ /* Get the user keyring. Note that there may be one in existence
+ * already as it may have been pinned by a session, but the user_struct
+ * pointing to it may have been destroyed by setuid.
+ */
+ snprintf(buf, sizeof(buf), "_uid.%u", uid);
+ uid_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid %p", uid_keyring_r);
+ if (uid_keyring_r == ERR_PTR(-EAGAIN)) {
+ uid_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, reg_keyring);
+ if (IS_ERR(uid_keyring)) {
+ ret = PTR_ERR(uid_keyring);
+ goto error;
+ }
+ } else if (IS_ERR(uid_keyring_r)) {
+ ret = PTR_ERR(uid_keyring_r);
+ goto error;
+ } else {
+ uid_keyring = key_ref_to_ptr(uid_keyring_r);
+ }
+
+ /* Get a default session keyring (which might also exist already) */
+ snprintf(buf, sizeof(buf), "_uid_ses.%u", uid);
+ session_keyring_r = keyring_search(make_key_ref(reg_keyring, true),
+ &key_type_keyring, buf, false);
+ kdebug("_uid_ses %p", session_keyring_r);
+ if (session_keyring_r == ERR_PTR(-EAGAIN)) {
+ session_keyring = keyring_alloc(buf, cred->user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_UID_KEYRING |
+ KEY_ALLOC_IN_QUOTA,
+ NULL, NULL);
+ if (IS_ERR(session_keyring)) {
+ ret = PTR_ERR(session_keyring);
+ goto error_release;
+ }
+
+ /* We install a link from the user session keyring to
+ * the user keyring.
+ */
+ ret = key_link(session_keyring, uid_keyring);
+ if (ret < 0)
+ goto error_release_session;
+
+ /* And only then link the user-session keyring to the
+ * register.
+ */
+ ret = key_link(reg_keyring, session_keyring);
+ if (ret < 0)
+ goto error_release_session;
+ } else if (IS_ERR(session_keyring_r)) {
+ ret = PTR_ERR(session_keyring_r);
+ goto error_release;
+ } else {
+ session_keyring = key_ref_to_ptr(session_keyring_r);
+ }
+
+ up_write(&user_ns->keyring_sem);
+
+ if (_user_session_keyring)
+ *_user_session_keyring = session_keyring;
+ else
+ key_put(session_keyring);
+ if (_user_keyring)
+ *_user_keyring = uid_keyring;
+ else
+ key_put(uid_keyring);
+ kleave(" = 0");
+ return 0;
+
+error_release_session:
+ key_put(session_keyring);
+error_release:
+ key_put(uid_keyring);
+error:
+ up_write(&user_ns->keyring_sem);
+ kleave(" = %d", ret);
+ return ret;
+}
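+
+/*
+ * Illustrative usage sketch (hypothetical caller): look up both keyrings and
+ * drop the references once done with them.
+ *
+ *	struct key *uk, *usk;
+ *
+ *	ret = look_up_user_keyrings(&uk, &usk);
+ *	if (ret == 0) {
+ *		...
+ *		key_put(usk);
+ *		key_put(uk);
+ *	}
+ */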
+
+/*
+ * Get the user session keyring if it exists, but don't create it if it
+ * doesn't.
+ */
+struct key *get_user_session_keyring_rcu(const struct cred *cred)
+{
+ struct key *reg_keyring = READ_ONCE(cred->user_ns->user_keyring_register);
+ key_ref_t session_keyring_r;
+ char buf[20];
+
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_keyring,
+ .index_key.description = buf,
+ .cred = cred,
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = buf,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
+ };
+
+ if (!reg_keyring)
+ return NULL;
+
+ ctx.index_key.desc_len = snprintf(buf, sizeof(buf), "_uid_ses.%u",
+ from_kuid(cred->user_ns,
+ cred->user->uid));
+
+ session_keyring_r = keyring_search_rcu(make_key_ref(reg_keyring, true),
+ &ctx);
+ if (IS_ERR(session_keyring_r))
+ return NULL;
+ return key_ref_to_ptr(session_keyring_r);
+}
+
+/*
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+int install_thread_keyring_to_cred(struct cred *new)
+{
+ struct key *keyring;
+
+ if (new->thread_keyring)
+ return 0;
+
+ keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN,
+ NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ new->thread_keyring = keyring;
+ return 0;
+}
+
+/*
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
+ */
+static int install_thread_keyring(void)
+{
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ ret = install_thread_keyring_to_cred(new);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+}
+
+/*
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+int install_process_keyring_to_cred(struct cred *new)
+{
+ struct key *keyring;
+
+ if (new->process_keyring)
+ return 0;
+
+ keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN,
+ NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+
+ new->process_keyring = keyring;
+ return 0;
+}
+
+/*
+ * Install a process keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a process keyring is now present; -errno on failure.
+ */
+static int install_process_keyring(void)
+{
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ ret = install_process_keyring_to_cred(new);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+}
+
+/*
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any. If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ * @cred can not be in use by any task yet.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
+{
+ unsigned long flags;
+ struct key *old;
+
+ might_sleep();
+
+ /* create an empty session keyring */
+ if (!keyring) {
+ flags = KEY_ALLOC_QUOTA_OVERRUN;
+ if (cred->session_keyring)
+ flags = KEY_ALLOC_IN_QUOTA;
+
+ keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+ flags, NULL, NULL);
+ if (IS_ERR(keyring))
+ return PTR_ERR(keyring);
+ } else {
+ __key_get(keyring);
+ }
+
+ /* install the keyring */
+ old = cred->session_keyring;
+ cred->session_keyring = keyring;
+
+ if (old)
+ key_put(old);
+
+ return 0;
+}
+
+/*
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any. If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+static int install_session_keyring(struct key *keyring)
+{
+ struct cred *new;
+ int ret;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ ret = install_session_keyring_to_cred(new, keyring);
+ if (ret < 0) {
+ abort_creds(new);
+ return ret;
+ }
+
+ return commit_creds(new);
+}
+
+/*
+ * Handle the fsuid changing.
+ */
+void key_fsuid_changed(struct cred *new_cred)
+{
+ /* update the ownership of the thread keyring */
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->uid = new_cred->fsuid;
+ up_write(&new_cred->thread_keyring->sem);
+ }
+}
+
+/*
+ * Handle the fsgid changing.
+ */
+void key_fsgid_changed(struct cred *new_cred)
+{
+ /* update the ownership of the thread keyring */
+ if (new_cred->thread_keyring) {
+ down_write(&new_cred->thread_keyring->sem);
+ new_cred->thread_keyring->gid = new_cred->fsgid;
+ up_write(&new_cred->thread_keyring->sem);
+ }
+}
+
+/*
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key under RCU conditions (the caller must be holding the RCU read
+ * lock).
+ *
+ * The search criteria are the type and the match function. The description is
+ * given to the match function as a parameter, but doesn't otherwise influence
+ * the search. Typically the match function will compare the description
+ * parameter to the key's description.
+ *
+ * This can only search keyrings that grant Search permission to the supplied
+ * credentials. Keyrings linked to searched keyrings will also be searched if
+ * they grant Search permission too. Keys can only be found if they grant
+ * Search permission to the credentials.
+ *
+ * Returns a pointer to the key with the key usage count incremented if
+ * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
+ * matched negative keys.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
+ */
+key_ref_t search_cred_keyrings_rcu(struct keyring_search_context *ctx)
+{
+ struct key *user_session;
+ key_ref_t key_ref, ret, err;
+ const struct cred *cred = ctx->cred;
+
+ /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
+ * searchable, but we failed to find a key or we found a negative key;
+ * otherwise we want to return a sample error (probably -EACCES) if
+ * none of the keyrings were searchable
+ *
+ * in terms of priority: success > -ENOKEY > -EAGAIN > other error
+ */
+ key_ref = NULL;
+ ret = NULL;
+ err = ERR_PTR(-EAGAIN);
+
+ /* search the thread keyring first */
+ if (cred->thread_keyring) {
+ key_ref = keyring_search_rcu(
+ make_key_ref(cred->thread_keyring, 1), ctx);
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+
+ /* search the process keyring second */
+ if (cred->process_keyring) {
+ key_ref = keyring_search_rcu(
+ make_key_ref(cred->process_keyring, 1), ctx);
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ fallthrough;
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+
+ /* search the session keyring */
+ if (cred->session_keyring) {
+ key_ref = keyring_search_rcu(
+ make_key_ref(cred->session_keyring, 1), ctx);
+
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ fallthrough;
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+ /* or search the user-session keyring */
+ else if ((user_session = get_user_session_keyring_rcu(cred))) {
+ key_ref = keyring_search_rcu(make_key_ref(user_session, 1),
+ ctx);
+ key_put(user_session);
+
+ if (!IS_ERR(key_ref))
+ goto found;
+
+ switch (PTR_ERR(key_ref)) {
+ case -EAGAIN: /* no key */
+ if (ret)
+ break;
+ fallthrough;
+ case -ENOKEY: /* negative key */
+ ret = key_ref;
+ break;
+ default:
+ err = key_ref;
+ break;
+ }
+ }
+
+ /* no key - decide on the error we're going to go for */
+ key_ref = ret ? ret : err;
+
+found:
+ return key_ref;
+}
+
+/*
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key in the manner of search_cred_keyrings_rcu(), but also search
+ * the keys attached to the assumed authorisation key using its credentials if
+ * one is available.
+ *
+ * The caller must be holding the RCU read lock.
+ *
+ * Return same as search_cred_keyrings_rcu().
+ */
+key_ref_t search_process_keyrings_rcu(struct keyring_search_context *ctx)
+{
+ struct request_key_auth *rka;
+ key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
+
+ key_ref = search_cred_keyrings_rcu(ctx);
+ if (!IS_ERR(key_ref))
+ goto found;
+ err = key_ref;
+
+ /* if this process has an instantiation authorisation key, then we also
+ * search the keyrings of the process mentioned there
+ * - we don't permit access to request_key auth keys via this method
+ */
+ if (ctx->cred->request_key_auth &&
+ ctx->cred == current_cred() &&
+ ctx->index_key.type != &key_type_request_key_auth
+ ) {
+ const struct cred *cred = ctx->cred;
+
+ if (key_validate(cred->request_key_auth) == 0) {
+ rka = ctx->cred->request_key_auth->payload.data[0];
+
+ //// was search_process_keyrings() [ie. recursive]
+ ctx->cred = rka->cred;
+ key_ref = search_cred_keyrings_rcu(ctx);
+ ctx->cred = cred;
+
+ if (!IS_ERR(key_ref))
+ goto found;
+ ret = key_ref;
+ }
+ }
+
+ /* no key - decide on the error we're going to go for */
+ if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
+ key_ref = ERR_PTR(-ENOKEY);
+ else if (err == ERR_PTR(-EACCES))
+ key_ref = ret;
+ else
+ key_ref = err;
+
+found:
+ return key_ref;
+}
+/*
+ * See if the key we're looking at is the target key.
+ */
+bool lookup_user_key_possessed(const struct key *key,
+ const struct key_match_data *match_data)
+{
+ return key == match_data->raw_data;
+}
+
+/*
+ * Look up a key ID given us by userspace with a given permissions mask to get
+ * the key it refers to.
+ *
+ * Flags can be passed to request that special keyrings be created if referred
+ * to directly, to permit partially constructed keys to be found and to skip
+ * validity and permission checks on the found key.
+ *
+ * Returns a pointer to the key with an incremented usage count if successful;
+ * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
+ * to a key or the best found key was a negative key; -EKEYREVOKED or
+ * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
+ * found key doesn't grant the requested permit or the LSM denied access to it;
+ * or -ENOMEM if a special keyring couldn't be created.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
+ */
+key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
+ enum key_need_perm need_perm)
+{
+ struct keyring_search_context ctx = {
+ .match_data.cmp = lookup_user_key_possessed,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
+ };
+ struct request_key_auth *rka;
+ struct key *key, *user_session;
+ key_ref_t key_ref, skey_ref;
+ int ret;
+
+try_again:
+ ctx.cred = get_current_cred();
+ key_ref = ERR_PTR(-ENOKEY);
+
+ switch (id) {
+ case KEY_SPEC_THREAD_KEYRING:
+ if (!ctx.cred->thread_keyring) {
+ if (!(lflags & KEY_LOOKUP_CREATE))
+ goto error;
+
+ ret = install_thread_keyring();
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error;
+ }
+ goto reget_creds;
+ }
+
+ key = ctx.cred->thread_keyring;
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_PROCESS_KEYRING:
+ if (!ctx.cred->process_keyring) {
+ if (!(lflags & KEY_LOOKUP_CREATE))
+ goto error;
+
+ ret = install_process_keyring();
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error;
+ }
+ goto reget_creds;
+ }
+
+ key = ctx.cred->process_keyring;
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_SESSION_KEYRING:
+ if (!ctx.cred->session_keyring) {
+ /* always install a session keyring upon access if one
+ * doesn't exist yet */
+ ret = look_up_user_keyrings(NULL, &user_session);
+ if (ret < 0)
+ goto error;
+ if (lflags & KEY_LOOKUP_CREATE)
+ ret = join_session_keyring(NULL);
+ else
+ ret = install_session_keyring(user_session);
+
+ key_put(user_session);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
+ } else if (test_bit(KEY_FLAG_UID_KEYRING,
+ &ctx.cred->session_keyring->flags) &&
+ lflags & KEY_LOOKUP_CREATE) {
+ ret = join_session_keyring(NULL);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
+ }
+
+ key = ctx.cred->session_keyring;
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_USER_KEYRING:
+ ret = look_up_user_keyrings(&key, NULL);
+ if (ret < 0)
+ goto error;
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_USER_SESSION_KEYRING:
+ ret = look_up_user_keyrings(NULL, &key);
+ if (ret < 0)
+ goto error;
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_GROUP_KEYRING:
+ /* group keyrings are not yet supported */
+ key_ref = ERR_PTR(-EINVAL);
+ goto error;
+
+ case KEY_SPEC_REQKEY_AUTH_KEY:
+ key = ctx.cred->request_key_auth;
+ if (!key)
+ goto error;
+
+ __key_get(key);
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ case KEY_SPEC_REQUESTOR_KEYRING:
+ if (!ctx.cred->request_key_auth)
+ goto error;
+
+ down_read(&ctx.cred->request_key_auth->sem);
+ if (test_bit(KEY_FLAG_REVOKED,
+ &ctx.cred->request_key_auth->flags)) {
+ key_ref = ERR_PTR(-EKEYREVOKED);
+ key = NULL;
+ } else {
+ rka = ctx.cred->request_key_auth->payload.data[0];
+ key = rka->dest_keyring;
+ __key_get(key);
+ }
+ up_read(&ctx.cred->request_key_auth->sem);
+ if (!key)
+ goto error;
+ key_ref = make_key_ref(key, 1);
+ break;
+
+ default:
+ key_ref = ERR_PTR(-EINVAL);
+ if (id < 1)
+ goto error;
+
+ key = key_lookup(id);
+ if (IS_ERR(key)) {
+ key_ref = ERR_CAST(key);
+ goto error;
+ }
+
+ key_ref = make_key_ref(key, 0);
+
+ /* check to see if we possess the key */
+ ctx.index_key = key->index_key;
+ ctx.match_data.raw_data = key;
+ kdebug("check possessed");
+ rcu_read_lock();
+ skey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+ kdebug("possessed=%p", skey_ref);
+
+ if (!IS_ERR(skey_ref)) {
+ key_put(key);
+ key_ref = skey_ref;
+ }
+
+ break;
+ }
+
+ /* unlink does not use the nominated key in any way, so can skip all
+ * the permission checks as it is only concerned with the keyring */
+ if (need_perm != KEY_NEED_UNLINK) {
+ if (!(lflags & KEY_LOOKUP_PARTIAL)) {
+ ret = wait_for_key_construction(key, true);
+ switch (ret) {
+ case -ERESTARTSYS:
+ goto invalid_key;
+ default:
+ if (need_perm != KEY_AUTHTOKEN_OVERRIDE &&
+ need_perm != KEY_DEFER_PERM_CHECK)
+ goto invalid_key;
+ case 0:
+ break;
+ }
+ } else if (need_perm != KEY_DEFER_PERM_CHECK) {
+ ret = key_validate(key);
+ if (ret < 0)
+ goto invalid_key;
+ }
+
+ ret = -EIO;
+ if (!(lflags & KEY_LOOKUP_PARTIAL) &&
+ key_read_state(key) == KEY_IS_UNINSTANTIATED)
+ goto invalid_key;
+ }
+
+ /* check the permissions */
+ ret = key_task_permission(key_ref, ctx.cred, need_perm);
+ if (ret < 0)
+ goto invalid_key;
+
+ key->last_used_at = ktime_get_real_seconds();
+
+error:
+ put_cred(ctx.cred);
+ return key_ref;
+
+invalid_key:
+ key_ref_put(key_ref);
+ key_ref = ERR_PTR(ret);
+ goto error;
+
+ /* if we attempted to install a keyring, then it may have caused new
+ * creds to be installed */
+reget_creds:
+ put_cred(ctx.cred);
+ goto try_again;
+}
+EXPORT_SYMBOL(lookup_user_key);
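+
+/*
+ * Illustrative usage sketch (hypothetical caller): resolve the caller's user
+ * keyring, creating it if necessary, and check Write permission on it.
+ *
+ *	key_ref_t kref;
+ *
+ *	kref = lookup_user_key(KEY_SPEC_USER_KEYRING, KEY_LOOKUP_CREATE,
+ *			       KEY_NEED_WRITE);
+ *	if (IS_ERR(kref))
+ *		return PTR_ERR(kref);
+ *	...
+ *	key_ref_put(kref);
+ */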
+
+/*
+ * Join the named keyring as the session keyring if possible else attempt to
+ * create a new one of that name and join that.
+ *
+ * If the name is NULL, an empty anonymous keyring will be installed as the
+ * session keyring.
+ *
+ * Named session keyrings are joined with a semaphore held to prevent the
+ * keyrings from going away whilst the attempt is made to join them and also
+ * to prevent a race in creating compatible session keyrings.
+ */
+long join_session_keyring(const char *name)
+{
+ const struct cred *old;
+ struct cred *new;
+ struct key *keyring;
+ long ret, serial;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ old = current_cred();
+
+ /* if no name is provided, install an anonymous keyring */
+ if (!name) {
+ ret = install_session_keyring_to_cred(new, NULL);
+ if (ret < 0)
+ goto error;
+
+ serial = new->session_keyring->serial;
+ ret = commit_creds(new);
+ if (ret == 0)
+ ret = serial;
+ goto okay;
+ }
+
+ /* allow the user to join or create a named keyring */
+ mutex_lock(&key_session_mutex);
+
+ /* look for an existing keyring of this name */
+ keyring = find_keyring_by_name(name, false);
+ if (PTR_ERR(keyring) == -ENOKEY) {
+ /* not found - try and create a new one */
+ keyring = keyring_alloc(
+ name, old->uid, old->gid, old,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
+ KEY_ALLOC_IN_QUOTA, NULL, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error2;
+ }
+ } else if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error2;
+ } else if (keyring == new->session_keyring) {
+ ret = 0;
+ goto error3;
+ }
+
+ /* we've got a keyring - now to install it */
+ ret = install_session_keyring_to_cred(new, keyring);
+ if (ret < 0)
+ goto error3;
+
+ commit_creds(new);
+ mutex_unlock(&key_session_mutex);
+
+ ret = keyring->serial;
+ key_put(keyring);
+okay:
+ return ret;
+
+error3:
+ key_put(keyring);
+error2:
+ mutex_unlock(&key_session_mutex);
+error:
+ abort_creds(new);
+ return ret;
+}
+
+/*
+ * Replace a process's session keyring on behalf of one of its children when
+ * the target process is about to resume userspace execution.
+ */
+void key_change_session_keyring(struct callback_head *twork)
+{
+ const struct cred *old = current_cred();
+ struct cred *new = container_of(twork, struct cred, rcu);
+
+ if (unlikely(current->flags & PF_EXITING)) {
+ put_cred(new);
+ return;
+ }
+
+ new-> uid = old-> uid;
+ new-> euid = old-> euid;
+ new-> suid = old-> suid;
+ new->fsuid = old->fsuid;
+ new-> gid = old-> gid;
+ new-> egid = old-> egid;
+ new-> sgid = old-> sgid;
+ new->fsgid = old->fsgid;
+ new->user = get_uid(old->user);
+ new->user_ns = get_user_ns(old->user_ns);
+ new->group_info = get_group_info(old->group_info);
+
+ new->securebits = old->securebits;
+ new->cap_inheritable = old->cap_inheritable;
+ new->cap_permitted = old->cap_permitted;
+ new->cap_effective = old->cap_effective;
+ new->cap_ambient = old->cap_ambient;
+ new->cap_bset = old->cap_bset;
+
+ new->jit_keyring = old->jit_keyring;
+ new->thread_keyring = key_get(old->thread_keyring);
+ new->process_keyring = key_get(old->process_keyring);
+
+ security_transfer_creds(new, old);
+
+ commit_creds(new);
+}
+
+/*
+ * Make sure that root's user and user-session keyrings exist.
+ */
+static int __init init_root_keyring(void)
+{
+ return look_up_user_keyrings(NULL, NULL);
+}
+
+late_initcall(init_root_keyring);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
new file mode 100644
index 000000000..a7673ad86
--- /dev/null
+++ b/security/keys/request_key.c
@@ -0,0 +1,821 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Request a key from userspace
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/security/keys/request-key.rst
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/err.h>
+#include <linux/keyctl.h>
+#include <linux/slab.h>
+#include <net/net_namespace.h>
+#include "internal.h"
+#include <keys/request_key_auth-type.h>
+
+#define key_negative_timeout 60 /* default timeout on a negative key's existence */
+
+static struct key *check_cached_key(struct keyring_search_context *ctx)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct key *key = current->cached_requested_key;
+
+ if (key &&
+ ctx->match_data.cmp(key, &ctx->match_data) &&
+ !(key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))))
+ return key_get(key);
+#endif
+ return NULL;
+}
+
+static void cache_requested_key(struct key *key)
+{
+#ifdef CONFIG_KEYS_REQUEST_CACHE
+ struct task_struct *t = current;
+
+ /* Do not cache key if it is a kernel thread */
+ if (!(t->flags & PF_KTHREAD)) {
+ key_put(t->cached_requested_key);
+ t->cached_requested_key = key_get(key);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ }
+#endif
+}
+
+/**
+ * complete_request_key - Complete the construction of a key.
+ * @authkey: The authorisation key.
+ * @error: The success or failure of the construction.
+ *
+ * Complete the attempt to construct a key. The key will be negated
+ * if an error is indicated. The authorisation key will be revoked
+ * unconditionally.
+ */
+void complete_request_key(struct key *authkey, int error)
+{
+ struct request_key_auth *rka = get_request_key_auth(authkey);
+ struct key *key = rka->target_key;
+
+ kenter("%d{%d},%d", authkey->serial, key->serial, error);
+
+ if (error < 0)
+ key_negate_and_link(key, key_negative_timeout, NULL, authkey);
+ else
+ key_revoke(authkey);
+}
+EXPORT_SYMBOL(complete_request_key);
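+
+/*
+ * Illustrative sketch of a key type's upcall actor (hypothetical, standing in
+ * for /sbin/request-key): whatever the outcome, complete_request_key() must
+ * be called so the target key is negated on failure or the authorisation key
+ * revoked on success.
+ *
+ *	static int example_request_key_actor(struct key *authkey, void *aux)
+ *	{
+ *		int err = example_instantiate(authkey, aux);
+ *
+ *		complete_request_key(authkey, err);
+ *		return err;
+ *	}
+ */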
+
+/*
+ * Initialise a usermode helper that is going to have a specific session
+ * keyring.
+ *
+ * This is called in context of freshly forked kthread before kernel_execve(),
+ * so we can simply install the desired session_keyring at this point.
+ */
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
+{
+ struct key *keyring = info->data;
+
+ return install_session_keyring_to_cred(cred, keyring);
+}
+
+/*
+ * Clean up a usermode helper with session keyring.
+ */
+static void umh_keys_cleanup(struct subprocess_info *info)
+{
+ struct key *keyring = info->data;
+ key_put(keyring);
+}
+
+/*
+ * Call a usermode helper with a specific session keyring.
+ */
+static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
+ struct key *session_keyring, int wait)
+{
+ struct subprocess_info *info;
+
+ info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
+ umh_keys_init, umh_keys_cleanup,
+ session_keyring);
+ if (!info)
+ return -ENOMEM;
+
+ key_get(session_keyring);
+ return call_usermodehelper_exec(info, wait);
+}
+
+/*
+ * Request userspace finish the construction of a key
+ * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
+ */
+static int call_sbin_request_key(struct key *authkey, void *aux)
+{
+ static char const request_key[] = "/sbin/request-key";
+ struct request_key_auth *rka = get_request_key_auth(authkey);
+ const struct cred *cred = current_cred();
+ key_serial_t prkey, sskey;
+ struct key *key = rka->target_key, *keyring, *session, *user_session;
+ char *argv[9], *envp[3], uid_str[12], gid_str[12];
+ char key_str[12], keyring_str[3][12];
+ char desc[20];
+ int ret, i;
+
+ kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op);
+
+ ret = look_up_user_keyrings(NULL, &user_session);
+ if (ret < 0)
+ goto error_us;
+
+ /* allocate a new session keyring */
+ sprintf(desc, "_req.%u", key->serial);
+
+ cred = get_current_cred();
+ keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL);
+ put_cred(cred);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto error_alloc;
+ }
+
+ /* attach the auth key to the session keyring */
+ ret = key_link(keyring, authkey);
+ if (ret < 0)
+ goto error_link;
+
+ /* record the UID and GID */
+ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
+ sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
+
+ /* we say which key is under construction */
+ sprintf(key_str, "%d", key->serial);
+
+ /* we specify the process's default keyrings */
+ sprintf(keyring_str[0], "%d",
+ cred->thread_keyring ? cred->thread_keyring->serial : 0);
+
+ prkey = 0;
+ if (cred->process_keyring)
+ prkey = cred->process_keyring->serial;
+ sprintf(keyring_str[1], "%d", prkey);
+
+ session = cred->session_keyring;
+ if (!session)
+ session = user_session;
+ sskey = session->serial;
+
+ sprintf(keyring_str[2], "%d", sskey);
+
+ /* set up a minimal environment */
+ i = 0;
+ envp[i++] = "HOME=/";
+ envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[i] = NULL;
+
+ /* set up the argument list */
+ i = 0;
+ argv[i++] = (char *)request_key;
+ argv[i++] = (char *)rka->op;
+ argv[i++] = key_str;
+ argv[i++] = uid_str;
+ argv[i++] = gid_str;
+ argv[i++] = keyring_str[0];
+ argv[i++] = keyring_str[1];
+ argv[i++] = keyring_str[2];
+ argv[i] = NULL;
+
+ /* do it */
+ ret = call_usermodehelper_keys(request_key, argv, envp, keyring,
+ UMH_WAIT_PROC);
+ kdebug("usermode -> 0x%x", ret);
+ if (ret >= 0) {
+ /* ret is the exit/wait code */
+ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) ||
+ key_validate(key) < 0)
+ ret = -ENOKEY;
+ else
+ /* ignore any errors from userspace if the key was
+ * instantiated */
+ ret = 0;
+ }
+
+error_link:
+ key_put(keyring);
+
+error_alloc:
+ key_put(user_session);
+error_us:
+ complete_request_key(authkey, ret);
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Call out to userspace for key construction.
+ *
+ * Program failure is ignored in favour of key status.
+ */
+static int construct_key(struct key *key, const void *callout_info,
+ size_t callout_len, void *aux,
+ struct key *dest_keyring)
+{
+ request_key_actor_t actor;
+ struct key *authkey;
+ int ret;
+
+ kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
+
+ /* allocate an authorisation key */
+ authkey = request_key_auth_new(key, "create", callout_info, callout_len,
+ dest_keyring);
+ if (IS_ERR(authkey))
+ return PTR_ERR(authkey);
+
+ /* Make the call */
+ actor = call_sbin_request_key;
+ if (key->type->request_key)
+ actor = key->type->request_key;
+
+ ret = actor(authkey, aux);
+
+ /* check that the actor called complete_request_key() prior to
+ * returning an error */
+ WARN_ON(ret < 0 &&
+ !test_bit(KEY_FLAG_INVALIDATED, &authkey->flags));
+
+ key_put(authkey);
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Get the appropriate destination keyring for the request.
+ *
+ * The keyring selected is returned with an extra reference upon it which the
+ * caller must release.
+ */
+static int construct_get_dest_keyring(struct key **_dest_keyring)
+{
+ struct request_key_auth *rka;
+ const struct cred *cred = current_cred();
+ struct key *dest_keyring = *_dest_keyring, *authkey;
+ int ret;
+
+ kenter("%p", dest_keyring);
+
+ /* find the appropriate keyring */
+ if (dest_keyring) {
+ /* the caller supplied one */
+ key_get(dest_keyring);
+ } else {
+ bool do_perm_check = true;
+
+ /* use a default keyring; falling through the cases until we
+ * find one that we actually have */
+ switch (cred->jit_keyring) {
+ case KEY_REQKEY_DEFL_DEFAULT:
+ case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
+ if (cred->request_key_auth) {
+ authkey = cred->request_key_auth;
+ down_read(&authkey->sem);
+ rka = get_request_key_auth(authkey);
+ if (!test_bit(KEY_FLAG_REVOKED,
+ &authkey->flags))
+ dest_keyring =
+ key_get(rka->dest_keyring);
+ up_read(&authkey->sem);
+ if (dest_keyring) {
+ do_perm_check = false;
+ break;
+ }
+ }
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_THREAD_KEYRING:
+ dest_keyring = key_get(cred->thread_keyring);
+ if (dest_keyring)
+ break;
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ dest_keyring = key_get(cred->process_keyring);
+ if (dest_keyring)
+ break;
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_SESSION_KEYRING:
+ dest_keyring = key_get(cred->session_keyring);
+
+ if (dest_keyring)
+ break;
+
+ fallthrough;
+ case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+ ret = look_up_user_keyrings(NULL, &dest_keyring);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case KEY_REQKEY_DEFL_USER_KEYRING:
+ ret = look_up_user_keyrings(&dest_keyring, NULL);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case KEY_REQKEY_DEFL_GROUP_KEYRING:
+ default:
+ BUG();
+ }
+
+ /*
+ * Require Write permission on the keyring. This is essential
+ * because the default keyring may be the session keyring, and
+ * joining a keyring only requires Search permission.
+ *
+ * However, this check is skipped for the "requestor keyring" so
+ * that /sbin/request-key can itself use request_key() to add
+ * keys to the original requestor's destination keyring.
+ */
+ if (dest_keyring && do_perm_check) {
+ ret = key_permission(make_key_ref(dest_keyring, 1),
+ KEY_NEED_WRITE);
+ if (ret) {
+ key_put(dest_keyring);
+ return ret;
+ }
+ }
+ }
+
+ *_dest_keyring = dest_keyring;
+ kleave(" [dk %d]", key_serial(dest_keyring));
+ return 0;
+}
+
+/*
+ * Allocate a new key in under-construction state and attempt to link it into
+ * the requested keyring.
+ *
+ * May return a key that's already under construction instead if there was a
+ * race between two threads calling request_key().
+ */
+static int construct_alloc_key(struct keyring_search_context *ctx,
+ struct key *dest_keyring,
+ unsigned long flags,
+ struct key_user *user,
+ struct key **_key)
+{
+ struct assoc_array_edit *edit = NULL;
+ struct key *key;
+ key_perm_t perm;
+ key_ref_t key_ref;
+ int ret;
+
+ kenter("%s,%s,,,",
+ ctx->index_key.type->name, ctx->index_key.description);
+
+ *_key = NULL;
+ mutex_lock(&user->cons_lock);
+
+ perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+ perm |= KEY_USR_VIEW;
+ if (ctx->index_key.type->read)
+ perm |= KEY_POS_READ;
+ if (ctx->index_key.type == &key_type_keyring ||
+ ctx->index_key.type->update)
+ perm |= KEY_POS_WRITE;
+
+ key = key_alloc(ctx->index_key.type, ctx->index_key.description,
+ ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
+ perm, flags, NULL);
+ if (IS_ERR(key))
+ goto alloc_failed;
+
+ set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
+
+ if (dest_keyring) {
+ ret = __key_link_lock(dest_keyring, &key->index_key);
+ if (ret < 0)
+ goto link_lock_failed;
+ }
+
+ /*
+ * Attach the key to the destination keyring under lock, but we do need
+ * to do another check just in case someone beat us to it whilst we
+ * waited for locks.
+ *
+ * The caller might specify a comparison function which looks for keys
+ * that do not exactly match but are still equivalent from the caller's
+ * perspective. The __key_link_begin() operation must be done only after
+ * an actual key is determined.
+ */
+ mutex_lock(&key_construction_mutex);
+
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(ctx);
+ rcu_read_unlock();
+ if (!IS_ERR(key_ref))
+ goto key_already_present;
+
+ if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto link_alloc_failed;
+ __key_link(dest_keyring, key, &edit);
+ }
+
+ mutex_unlock(&key_construction_mutex);
+ if (dest_keyring)
+ __key_link_end(dest_keyring, &key->index_key, edit);
+ mutex_unlock(&user->cons_lock);
+ *_key = key;
+ kleave(" = 0 [%d]", key_serial(key));
+ return 0;
+
+ /* the key is now present - we tell the caller that we found it by
+ * returning -EINPROGRESS */
+key_already_present:
+ key_put(key);
+ mutex_unlock(&key_construction_mutex);
+ key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto link_alloc_failed_unlocked;
+ ret = __key_link_check_live_key(dest_keyring, key);
+ if (ret == 0)
+ __key_link(dest_keyring, key, &edit);
+ __key_link_end(dest_keyring, &key->index_key, edit);
+ if (ret < 0)
+ goto link_check_failed;
+ }
+ mutex_unlock(&user->cons_lock);
+ *_key = key;
+ kleave(" = -EINPROGRESS [%d]", key_serial(key));
+ return -EINPROGRESS;
+
+link_check_failed:
+ mutex_unlock(&user->cons_lock);
+ key_put(key);
+ kleave(" = %d [linkcheck]", ret);
+ return ret;
+
+link_alloc_failed:
+ mutex_unlock(&key_construction_mutex);
+link_alloc_failed_unlocked:
+ __key_link_end(dest_keyring, &key->index_key, edit);
+link_lock_failed:
+ mutex_unlock(&user->cons_lock);
+ key_put(key);
+ kleave(" = %d [prelink]", ret);
+ return ret;
+
+alloc_failed:
+ mutex_unlock(&user->cons_lock);
+ kleave(" = %ld", PTR_ERR(key));
+ return PTR_ERR(key);
+}
+
+/*
+ * Commence key construction.
+ */
+static struct key *construct_key_and_link(struct keyring_search_context *ctx,
+ const char *callout_info,
+ size_t callout_len,
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags)
+{
+ struct key_user *user;
+ struct key *key;
+ int ret;
+
+ kenter("");
+
+ if (ctx->index_key.type == &key_type_keyring)
+ return ERR_PTR(-EPERM);
+
+ ret = construct_get_dest_keyring(&dest_keyring);
+ if (ret)
+ goto error;
+
+ user = key_user_lookup(current_fsuid());
+ if (!user) {
+ ret = -ENOMEM;
+ goto error_put_dest_keyring;
+ }
+
+ ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
+ key_user_put(user);
+
+ if (ret == 0) {
+ ret = construct_key(key, callout_info, callout_len, aux,
+ dest_keyring);
+ if (ret < 0) {
+ kdebug("cons failed");
+ goto construction_failed;
+ }
+ } else if (ret == -EINPROGRESS) {
+ ret = 0;
+ } else {
+ goto error_put_dest_keyring;
+ }
+
+ key_put(dest_keyring);
+ kleave(" = key %d", key_serial(key));
+ return key;
+
+construction_failed:
+ key_negate_and_link(key, key_negative_timeout, NULL, NULL);
+ key_put(key);
+error_put_dest_keyring:
+ key_put(dest_keyring);
+error:
+ kleave(" = %d", ret);
+ return ERR_PTR(ret);
+}
+
+/**
+ * request_key_and_link - Request a key and cache it in a keyring.
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ * @dest_keyring: Where to cache the key.
+ * @flags: Flags to key_alloc().
+ *
+ * A key matching the specified criteria (type, description, domain_tag) is
+ * searched for in the process's keyrings and returned with its usage count
+ * incremented if found. Otherwise, if callout_info is not NULL, a key will be
+ * allocated and some service (probably in userspace) will be asked to
+ * instantiate it.
+ *
+ * If successfully found or created, the key will be linked to the destination
+ * keyring if one is provided.
+ *
+ * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
+ * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
+ * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
+ * if insufficient key quota was available to create a new key; or -ENOMEM if
+ * insufficient memory was available.
+ *
+ * If the returned key was created, then it may still be under construction,
+ * and wait_for_key_construction() should be used to wait for that to complete.
+ */
+struct key *request_key_and_link(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux,
+ struct key *dest_keyring,
+ unsigned long flags)
+{
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.domain_tag = domain_tag,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED |
+ KEYRING_SEARCH_RECURSE),
+ };
+ struct key *key;
+ key_ref_t key_ref;
+ int ret;
+
+ kenter("%s,%s,%p,%zu,%p,%p,%lx",
+ ctx.index_key.type->name, ctx.index_key.description,
+ callout_info, callout_len, aux, dest_keyring, flags);
+
+ if (type->match_preparse) {
+ ret = type->match_preparse(&ctx.match_data);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
+ key = check_cached_key(&ctx);
+ if (key)
+ goto error_free;
+
+ /* search all the process keyrings for a key */
+ rcu_read_lock();
+ key_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+
+ if (!IS_ERR(key_ref)) {
+ if (dest_keyring) {
+ ret = key_task_permission(key_ref, current_cred(),
+ KEY_NEED_LINK);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key = ERR_PTR(ret);
+ goto error_free;
+ }
+ }
+
+ key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ ret = key_link(dest_keyring, key);
+ if (ret < 0) {
+ key_put(key);
+ key = ERR_PTR(ret);
+ goto error_free;
+ }
+ }
+
+ /* Only cache the key on immediate success */
+ cache_requested_key(key);
+ } else if (PTR_ERR(key_ref) != -EAGAIN) {
+ key = ERR_CAST(key_ref);
+ } else {
+ /* the search failed, but the keyrings were searchable, so we
+ * should consult userspace if we can */
+ key = ERR_PTR(-ENOKEY);
+ if (!callout_info)
+ goto error_free;
+
+ key = construct_key_and_link(&ctx, callout_info, callout_len,
+ aux, dest_keyring, flags);
+ }
+
+error_free:
+ if (type->match_free)
+ type->match_free(&ctx.match_data);
+error:
+ kleave(" = %p", key);
+ return key;
+}
+
+/**
+ * wait_for_key_construction - Wait for construction of a key to complete
+ * @key: The key being waited for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * Wait for a key to finish being constructed.
+ *
+ * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
+ * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
+ * revoked or expired.
+ */
+int wait_for_key_construction(struct key *key, bool intr)
+{
+ int ret;
+
+ ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
+ intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (ret)
+ return -ERESTARTSYS;
+ ret = key_read_state(key);
+ if (ret < 0)
+ return ret;
+ return key_validate(key);
+}
+EXPORT_SYMBOL(wait_for_key_construction);
+
+/**
+ * request_key_tag - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found; new keys are always allocated in the user's quota;
+ * the callout_info must be a NUL-terminated string; and no auxiliary data can
+ * be passed.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
+ */
+struct key *request_key_tag(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const char *callout_info)
+{
+ struct key *key;
+ size_t callout_len = 0;
+ int ret;
+
+ if (callout_info)
+ callout_len = strlen(callout_info);
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
+ NULL, NULL, KEY_ALLOC_IN_QUOTA);
+ if (!IS_ERR(key)) {
+ ret = wait_for_key_construction(key, false);
+ if (ret < 0) {
+ key_put(key);
+ return ERR_PTR(ret);
+ }
+ }
+ return key;
+}
+EXPORT_SYMBOL(request_key_tag);
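As a rough sketch of how an in-kernel caller might drive this interface (the key type, description and callout string are illustrative, and the payload access assumes the "user" key type):

#include <linux/key.h>
#include <linux/err.h>
#include <keys/user-type.h>

/* Sketch: look up (or upcall for) a "user" key and return its payload length. */
static int example_fetch_token(void)
{
	const struct user_key_payload *payload;
	struct key *key;
	int len;

	/* Blocks until the key is instantiated or the upcall fails. */
	key = request_key_tag(&key_type_user, "example:token", NULL,
			      "example callout info");
	if (IS_ERR(key))
		return PTR_ERR(key);

	down_read(&key->sem);
	payload = user_key_payload_locked(key);
	len = payload ? payload->datalen : -ENOKEY;
	up_read(&key->sem);

	key_put(key);
	return len;
}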
+
+/**
+ * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @domain_tag: The domain in which the key operates.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
+ */
+struct key *request_key_with_auxdata(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux)
+{
+ struct key *key;
+ int ret;
+
+ key = request_key_and_link(type, description, domain_tag,
+ callout_info, callout_len,
+ aux, NULL, KEY_ALLOC_IN_QUOTA);
+ if (!IS_ERR(key)) {
+ ret = wait_for_key_construction(key, false);
+ if (ret < 0) {
+ key_put(key);
+ return ERR_PTR(ret);
+ }
+ }
+ return key;
+}
+EXPORT_SYMBOL(request_key_with_auxdata);
+
+/**
+ * request_key_rcu - Request key from RCU-read-locked context
+ * @type: The type of key we want.
+ * @description: The name of the key we want.
+ * @domain_tag: The domain in which the key operates.
+ *
+ * Request a key from a context that we may not sleep in (such as RCU-mode
+ * pathwalk). Keys under construction are ignored.
+ *
+ * Return a pointer to the found key if successful; -ENOKEY if we couldn't find
+ * a key; or some other error if the key found was unsuitable or inaccessible.
+ */
+struct key *request_key_rcu(struct key_type *type,
+ const char *description,
+ struct key_tag *domain_tag)
+{
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.domain_tag = domain_tag,
+ .index_key.description = description,
+ .index_key.desc_len = strlen(description),
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED),
+ };
+ struct key *key;
+ key_ref_t key_ref;
+
+ kenter("%s,%s", type->name, description);
+
+ key = check_cached_key(&ctx);
+ if (key)
+ return key;
+
+ /* search all the process keyrings for a key */
+ key_ref = search_process_keyrings_rcu(&ctx);
+ if (IS_ERR(key_ref)) {
+ key = ERR_CAST(key_ref);
+ if (PTR_ERR(key_ref) == -EAGAIN)
+ key = ERR_PTR(-ENOKEY);
+ } else {
+ key = key_ref_to_ptr(key_ref);
+ cache_requested_key(key);
+ }
+
+ kleave(" = %p", key);
+ return key;
+}
+EXPORT_SYMBOL(request_key_rcu);
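A minimal sketch, assuming the caller provides the RCU read-side critical section that the kerneldoc above requires; the wrapper name and arguments are ours:

#include <linux/key.h>
#include <linux/rcupdate.h>

/* Sketch: non-blocking lookup; never upcalls, ignores keys under construction. */
static struct key *example_lookup_nonblock(struct key_type *type,
					   const char *desc)
{
	struct key *key;

	rcu_read_lock();
	key = request_key_rcu(type, desc, NULL);
	rcu_read_unlock();

	/* ERR_PTR(-ENOKEY) etc. on failure; drop with key_put() when done. */
	return key;
}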
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
new file mode 100644
index 000000000..41e973500
--- /dev/null
+++ b/security/keys/request_key_auth.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Request key authorisation token key definition.
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * See Documentation/security/keys/request-key.rst
+ */
+
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "internal.h"
+#include <keys/request_key_auth-type.h>
+
+static int request_key_auth_preparse(struct key_preparsed_payload *);
+static void request_key_auth_free_preparse(struct key_preparsed_payload *);
+static int request_key_auth_instantiate(struct key *,
+ struct key_preparsed_payload *);
+static void request_key_auth_describe(const struct key *, struct seq_file *);
+static void request_key_auth_revoke(struct key *);
+static void request_key_auth_destroy(struct key *);
+static long request_key_auth_read(const struct key *, char *, size_t);
+
+/*
+ * The request-key authorisation key type definition.
+ */
+struct key_type key_type_request_key_auth = {
+ .name = ".request_key_auth",
+ .def_datalen = sizeof(struct request_key_auth),
+ .preparse = request_key_auth_preparse,
+ .free_preparse = request_key_auth_free_preparse,
+ .instantiate = request_key_auth_instantiate,
+ .describe = request_key_auth_describe,
+ .revoke = request_key_auth_revoke,
+ .destroy = request_key_auth_destroy,
+ .read = request_key_auth_read,
+};
+
+static int request_key_auth_preparse(struct key_preparsed_payload *prep)
+{
+ return 0;
+}
+
+static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
+{
+}
+
+/*
+ * Instantiate a request-key authorisation key.
+ */
+static int request_key_auth_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ rcu_assign_keypointer(key, (struct request_key_auth *)prep->data);
+ return 0;
+}
+
+/*
+ * Describe an authorisation token.
+ */
+static void request_key_auth_describe(const struct key *key,
+ struct seq_file *m)
+{
+ struct request_key_auth *rka = dereference_key_rcu(key);
+
+ if (!rka)
+ return;
+
+ seq_puts(m, "key:");
+ seq_puts(m, key->description);
+ if (key_is_positive(key))
+ seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+}
+
+/*
+ * Read the callout_info data (retrieves the callout information).
+ * - the key's semaphore is read-locked
+ */
+static long request_key_auth_read(const struct key *key,
+ char *buffer, size_t buflen)
+{
+ struct request_key_auth *rka = dereference_key_locked(key);
+ size_t datalen;
+ long ret;
+
+ if (!rka)
+ return -EKEYREVOKED;
+
+ datalen = rka->callout_len;
+ ret = datalen;
+
+ /* we can return the data as is */
+ if (buffer && buflen > 0) {
+ if (buflen > datalen)
+ buflen = datalen;
+
+ memcpy(buffer, rka->callout_info, buflen);
+ }
+
+ return ret;
+}
+
+static void free_request_key_auth(struct request_key_auth *rka)
+{
+ if (!rka)
+ return;
+ key_put(rka->target_key);
+ key_put(rka->dest_keyring);
+ if (rka->cred)
+ put_cred(rka->cred);
+ kfree(rka->callout_info);
+ kfree(rka);
+}
+
+/*
+ * Dispose of the request_key_auth record under RCU conditions
+ */
+static void request_key_auth_rcu_disposal(struct rcu_head *rcu)
+{
+ struct request_key_auth *rka =
+ container_of(rcu, struct request_key_auth, rcu);
+
+ free_request_key_auth(rka);
+}
+
+/*
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
+ */
+static void request_key_auth_revoke(struct key *key)
+{
+ struct request_key_auth *rka = dereference_key_locked(key);
+
+ kenter("{%d}", key->serial);
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+}
+
+/*
+ * Destroy an instantiation authorisation token key.
+ */
+static void request_key_auth_destroy(struct key *key)
+{
+ struct request_key_auth *rka = rcu_access_pointer(key->payload.rcu_data0);
+
+ kenter("{%d}", key->serial);
+ if (rka) {
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&rka->rcu, request_key_auth_rcu_disposal);
+ }
+}
+
+/*
+ * Create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data.
+ */
+struct key *request_key_auth_new(struct key *target, const char *op,
+ const void *callout_info, size_t callout_len,
+ struct key *dest_keyring)
+{
+ struct request_key_auth *rka, *irka;
+ const struct cred *cred = current_cred();
+ struct key *authkey = NULL;
+ char desc[20];
+ int ret = -ENOMEM;
+
+ kenter("%d,", target->serial);
+
+ /* allocate an auth record */
+ rka = kzalloc(sizeof(*rka), GFP_KERNEL);
+ if (!rka)
+ goto error;
+ rka->callout_info = kmemdup(callout_info, callout_len, GFP_KERNEL);
+ if (!rka->callout_info)
+ goto error_free_rka;
+ rka->callout_len = callout_len;
+ strlcpy(rka->op, op, sizeof(rka->op));
+
+ /* see if the calling process is already servicing the key request of
+ * another process */
+ if (cred->request_key_auth) {
+ /* it is - use that instantiation context here too */
+ down_read(&cred->request_key_auth->sem);
+
+ /* if the auth key has been revoked, then the key we're
+ * servicing is already instantiated */
+ if (test_bit(KEY_FLAG_REVOKED,
+ &cred->request_key_auth->flags)) {
+ up_read(&cred->request_key_auth->sem);
+ ret = -EKEYREVOKED;
+ goto error_free_rka;
+ }
+
+ irka = cred->request_key_auth->payload.data[0];
+ rka->cred = get_cred(irka->cred);
+ rka->pid = irka->pid;
+
+ up_read(&cred->request_key_auth->sem);
+ } else {
+ /* it isn't - use this process as the context */
+ rka->cred = get_cred(cred);
+ rka->pid = current->pid;
+ }
+
+ rka->target_key = key_get(target);
+ rka->dest_keyring = key_get(dest_keyring);
+
+ /* allocate the auth key */
+ sprintf(desc, "%x", target->serial);
+
+ authkey = key_alloc(&key_type_request_key_auth, desc,
+ cred->fsuid, cred->fsgid, cred,
+ KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_POS_LINK |
+ KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(authkey)) {
+ ret = PTR_ERR(authkey);
+ goto error_free_rka;
+ }
+
+ /* construct the auth key */
+ ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
+ if (ret < 0)
+ goto error_put_authkey;
+
+ kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage));
+ return authkey;
+
+error_put_authkey:
+ key_put(authkey);
+error_free_rka:
+ free_request_key_auth(rka);
+error:
+ kleave("= %d", ret);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Search the current process's keyrings for the authorisation key for
+ * instantiation of a key.
+ */
+struct key *key_get_instantiation_authkey(key_serial_t target_id)
+{
+ char description[16];
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_request_key_auth,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match_data.cmp = key_default_cmp,
+ .match_data.raw_data = description,
+ .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_RECURSE),
+ };
+ struct key *authkey;
+ key_ref_t authkey_ref;
+
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
+
+ rcu_read_lock();
+ authkey_ref = search_process_keyrings_rcu(&ctx);
+ rcu_read_unlock();
+
+ if (IS_ERR(authkey_ref)) {
+ authkey = ERR_CAST(authkey_ref);
+ if (authkey == ERR_PTR(-EAGAIN))
+ authkey = ERR_PTR(-ENOKEY);
+ goto error;
+ }
+
+ authkey = key_ref_to_ptr(authkey_ref);
+ if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
+ key_put(authkey);
+ authkey = ERR_PTR(-EKEYREVOKED);
+ }
+
+error:
+ return authkey;
+}
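To show what the authorisation key is for, here is a hedged userspace sketch of an upcall handler using libkeyutils (link with -lkeyutils); the argv layout and payload are assumptions for illustration only. Calling keyctl_assume_authority() on the target key's serial is what makes the kernel run key_get_instantiation_authkey() above.

/* Sketch: instantiate a key on behalf of the requester. */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	const char payload[] = "example-payload";	/* illustrative */
	key_serial_t target;

	if (argc < 2)
		return 1;
	target = (key_serial_t)atol(argv[1]);	/* argv layout is an assumption */

	/* Assume the requester's authority for this uninstantiated key. */
	if (keyctl_assume_authority(target) < 0) {
		perror("keyctl_assume_authority");
		return 1;
	}

	/* Instantiate the target key on the requester's behalf. */
	if (keyctl_instantiate(target, payload, sizeof(payload) - 1,
			       KEY_SPEC_REQUESTOR_KEYRING) < 0) {
		perror("keyctl_instantiate");
		return 1;
	}
	return 0;
}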
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
new file mode 100644
index 000000000..b46b651b3
--- /dev/null
+++ b/security/keys/sysctl.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Key management controls
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/key.h>
+#include <linux/sysctl.h>
+#include "internal.h"
+
+struct ctl_table key_sysctls[] = {
+ {
+ .procname = "maxkeys",
+ .data = &key_quota_maxkeys,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "maxbytes",
+ .data = &key_quota_maxbytes,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "root_maxkeys",
+ .data = &key_quota_root_maxkeys,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "root_maxbytes",
+ .data = &key_quota_root_maxbytes,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ONE,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+ {
+ .procname = "gc_delay",
+ .data = &key_gc_delay,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ZERO,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ {
+ .procname = "persistent_keyring_expiry",
+ .data = &persistent_keyring_expiry,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) SYSCTL_ZERO,
+ .extra2 = (void *) SYSCTL_INT_MAX,
+ },
+#endif
+ { }
+};
diff --git a/security/keys/trusted-keys/Makefile b/security/keys/trusted-keys/Makefile
new file mode 100644
index 000000000..7b73cebbb
--- /dev/null
+++ b/security/keys/trusted-keys/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for trusted keys
+#
+
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+trusted-y += trusted_tpm1.o
+trusted-y += trusted_tpm2.o
diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
new file mode 100644
index 000000000..4c3cffcd2
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tpm1.c
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Author:
+ * David Safford <safford@us.ibm.com>
+ *
+ * See Documentation/security/keys/trusted-encrypted.rst
+ */
+
+#include <crypto/hash_info.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <linux/key-type.h>
+#include <linux/rcupdate.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <linux/capability.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include <keys/trusted_tpm.h>
+
+static const char hmac_alg[] = "hmac(sha1)";
+static const char hash_alg[] = "sha1";
+static struct tpm_chip *chip;
+static struct tpm_digest *digests;
+
+struct sdesc {
+ struct shash_desc shash;
+ char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+static struct sdesc *init_sdesc(struct crypto_shash *alg)
+{
+ struct sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return ERR_PTR(-ENOMEM);
+ sdesc->shash.tfm = alg;
+ return sdesc;
+}
+
+static int TSS_sha1(const unsigned char *data, unsigned int datalen,
+ unsigned char *digest)
+{
+ struct sdesc *sdesc;
+ int ret;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+ kfree_sensitive(sdesc);
+ return ret;
+}
+
+static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, ...)
+{
+ struct sdesc *sdesc;
+ va_list argp;
+ unsigned int dlen;
+ unsigned char *data;
+ int ret;
+
+ sdesc = init_sdesc(hmacalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hmac_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_setkey(hmacalg, key, keylen);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+
+ va_start(argp, keylen);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ data = va_arg(argp, unsigned char *);
+ if (data == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = crypto_shash_update(&sdesc->shash, data, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, digest);
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+
+/*
+ * calculate authorization info fields to send to TPM
+ */
+int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, unsigned char *h1,
+ unsigned char *h2, unsigned int h3, ...)
+{
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned char *data;
+ unsigned char c;
+ int ret;
+ va_list argp;
+
+ if (!chip)
+ return -ENODEV;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ c = !!h3;
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ va_start(argp, h3);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ data = va_arg(argp, unsigned char *);
+ if (!data) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = crypto_shash_update(&sdesc->shash, data, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (!ret)
+ ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, h1,
+ TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(TSS_authhmac);
+
+/*
+ * verify the AUTH1_COMMAND (Seal) result from TPM
+ */
+int TSS_checkhmac1(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key,
+ unsigned int keylen, ...)
+{
+ uint32_t bufsize;
+ uint16_t tag;
+ uint32_t ordinal;
+ uint32_t result;
+ unsigned char *enonce;
+ unsigned char *continueflag;
+ unsigned char *authdata;
+ unsigned char testhmac[SHA1_DIGEST_SIZE];
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned int dpos;
+ va_list argp;
+ int ret;
+
+ if (!chip)
+ return -ENODEV;
+
+ bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+ tag = LOAD16(buffer, 0);
+ ordinal = command;
+ result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+ if (tag == TPM_TAG_RSP_COMMAND)
+ return 0;
+ if (tag != TPM_TAG_RSP_AUTH1_COMMAND)
+ return -EINVAL;
+ authdata = buffer + bufsize - SHA1_DIGEST_SIZE;
+ continueflag = authdata - 1;
+ enonce = continueflag - TPM_NONCE_SIZE;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+ sizeof result);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+ sizeof ordinal);
+ if (ret < 0)
+ goto out;
+ va_start(argp, keylen);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ dpos = va_arg(argp, unsigned int);
+ ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (ret < 0)
+ goto out;
+
+ ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest,
+ TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce,
+ 1, continueflag, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
+ ret = -EINVAL;
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(TSS_checkhmac1);
+
+/*
+ * verify the AUTH2_COMMAND (unseal) result from TPM
+ */
+static int TSS_checkhmac2(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key1,
+ unsigned int keylen1,
+ const unsigned char *key2,
+ unsigned int keylen2, ...)
+{
+ uint32_t bufsize;
+ uint16_t tag;
+ uint32_t ordinal;
+ uint32_t result;
+ unsigned char *enonce1;
+ unsigned char *continueflag1;
+ unsigned char *authdata1;
+ unsigned char *enonce2;
+ unsigned char *continueflag2;
+ unsigned char *authdata2;
+ unsigned char testhmac1[SHA1_DIGEST_SIZE];
+ unsigned char testhmac2[SHA1_DIGEST_SIZE];
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned int dpos;
+ va_list argp;
+ int ret;
+
+ bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+ tag = LOAD16(buffer, 0);
+ ordinal = command;
+ result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+
+ if (tag == TPM_TAG_RSP_COMMAND)
+ return 0;
+ if (tag != TPM_TAG_RSP_AUTH2_COMMAND)
+ return -EINVAL;
+ authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1
+ + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE);
+ authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE);
+ continueflag1 = authdata1 - 1;
+ continueflag2 = authdata2 - 1;
+ enonce1 = continueflag1 - TPM_NONCE_SIZE;
+ enonce2 = continueflag2 - TPM_NONCE_SIZE;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+ sizeof result);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+ sizeof ordinal);
+ if (ret < 0)
+ goto out;
+
+ va_start(argp, keylen2);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ dpos = va_arg(argp, unsigned int);
+ ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (ret < 0)
+ goto out;
+
+ ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, enonce1,
+ TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, enonce2,
+ TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+ ret = -EINVAL;
+out:
+ kfree_sensitive(sdesc);
+ return ret;
+}
+
+/*
+ * For key-specific TPM requests, we generate and send our own
+ * TPM command packets using the driver's send function.
+ */
+int trusted_tpm_send(unsigned char *cmd, size_t buflen)
+{
+ int rc;
+
+ if (!chip)
+ return -ENODEV;
+
+ dump_tpm_buf(cmd);
+ rc = tpm_send(chip, cmd, buflen);
+ dump_tpm_buf(cmd);
+ if (rc > 0)
+ /* Can't return positive return codes to keyctl */
+ rc = -EPERM;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(trusted_tpm_send);
+
+/*
+ * Lock a trusted key, by extending a selected PCR.
+ *
+ * Prevents a trusted key that is sealed to PCRs from being accessed.
+ * This uses the tpm driver's extend function.
+ */
+static int pcrlock(const int pcrnum)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return tpm_pcr_extend(chip, pcrnum, digests) ? -EINVAL : 0;
+}
+
+/*
+ * Create an object specific authorisation protocol (OSAP) session
+ */
+static int osap(struct tpm_buf *tb, struct osapsess *s,
+ const unsigned char *key, uint16_t type, uint32_t handle)
+{
+ unsigned char enonce[TPM_NONCE_SIZE];
+ unsigned char ononce[TPM_NONCE_SIZE];
+ int ret;
+
+ ret = tpm_get_random(chip, ononce, TPM_NONCE_SIZE);
+ if (ret < 0)
+ return ret;
+
+ if (ret != TPM_NONCE_SIZE)
+ return -EIO;
+
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OSAP);
+ tpm_buf_append_u16(tb, type);
+ tpm_buf_append_u32(tb, handle);
+ tpm_buf_append(tb, ononce, TPM_NONCE_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ return ret;
+
+ s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+ memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]),
+ TPM_NONCE_SIZE);
+ memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) +
+ TPM_NONCE_SIZE]), TPM_NONCE_SIZE);
+ return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE,
+ enonce, TPM_NONCE_SIZE, ononce, 0, 0);
+}
+
+/*
+ * Create an object independent authorisation protocol (OIAP) session
+ */
+int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
+{
+ int ret;
+
+ if (!chip)
+ return -ENODEV;
+
+ tpm_buf_reset(tb, TPM_TAG_RQU_COMMAND, TPM_ORD_OIAP);
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ return ret;
+
+ *handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+ memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)],
+ TPM_NONCE_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(oiap);
+
+struct tpm_digests {
+ unsigned char encauth[SHA1_DIGEST_SIZE];
+ unsigned char pubauth[SHA1_DIGEST_SIZE];
+ unsigned char xorwork[SHA1_DIGEST_SIZE * 2];
+ unsigned char xorhash[SHA1_DIGEST_SIZE];
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+};
+
+/*
+ * Have the TPM seal (encrypt) the trusted key, optionally binding it to
+ * selected Platform Configuration Registers (PCRs). Uses an AUTH1 session
+ * for the sealing key.
+ */
+static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+ uint32_t keyhandle, const unsigned char *keyauth,
+ const unsigned char *data, uint32_t datalen,
+ unsigned char *blob, uint32_t *bloblen,
+ const unsigned char *blobauth,
+ const unsigned char *pcrinfo, uint32_t pcrinfosize)
+{
+ struct osapsess sess;
+ struct tpm_digests *td;
+ unsigned char cont;
+ uint32_t ordinal;
+ uint32_t pcrsize;
+ uint32_t datsize;
+ int sealinfosize;
+ int encdatasize;
+ int storedsize;
+ int ret;
+ int i;
+
+ /* alloc some work space for all the hashes */
+ td = kmalloc(sizeof *td, GFP_KERNEL);
+ if (!td)
+ return -ENOMEM;
+
+ /* get session for sealing key */
+ ret = osap(tb, &sess, keyauth, keytype, keyhandle);
+ if (ret < 0)
+ goto out;
+ dump_sess(&sess);
+
+ /* calculate encrypted authorization value */
+ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE);
+ memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
+ ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
+ if (ret < 0)
+ goto out;
+
+ ret = tpm_get_random(chip, td->nonceodd, TPM_NONCE_SIZE);
+ if (ret < 0)
+ goto out;
+
+ if (ret != TPM_NONCE_SIZE) {
+ ret = -EIO;
+ goto out;
+ }
+
+ ordinal = htonl(TPM_ORD_SEAL);
+ datsize = htonl(datalen);
+ pcrsize = htonl(pcrinfosize);
+ cont = 0;
+
+ /* encrypt data authorization key */
+ for (i = 0; i < SHA1_DIGEST_SIZE; ++i)
+ td->encauth[i] = td->xorhash[i] ^ blobauth[i];
+
+ /* calculate authorization HMAC value */
+ if (pcrinfosize == 0) {
+ /* no pcr info specified */
+ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+ sess.enonce, td->nonceodd, cont,
+ sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+ td->encauth, sizeof(uint32_t), &pcrsize,
+ sizeof(uint32_t), &datsize, datalen, data, 0,
+ 0);
+ } else {
+ /* pcr info specified */
+ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+ sess.enonce, td->nonceodd, cont,
+ sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+ td->encauth, sizeof(uint32_t), &pcrsize,
+ pcrinfosize, pcrinfo, sizeof(uint32_t),
+ &datsize, datalen, data, 0, 0);
+ }
+ if (ret < 0)
+ goto out;
+
+ /* build and send the TPM request packet */
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH1_COMMAND, TPM_ORD_SEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, td->encauth, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, pcrinfosize);
+ tpm_buf_append(tb, pcrinfo, pcrinfosize);
+ tpm_buf_append_u32(tb, datalen);
+ tpm_buf_append(tb, data, datalen);
+ tpm_buf_append_u32(tb, sess.handle);
+ tpm_buf_append(tb, td->nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, td->pubauth, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ goto out;
+
+ /* calculate the size of the returned Blob */
+ sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
+ encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) +
+ sizeof(uint32_t) + sealinfosize);
+ storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize +
+ sizeof(uint32_t) + encdatasize;
+
+ /* check the HMAC in the response */
+ ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret,
+ SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0,
+ 0);
+
+ /* copy the returned blob to caller */
+ if (!ret) {
+ memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
+ *bloblen = storedsize;
+ }
+out:
+ kfree_sensitive(td);
+ return ret;
+}
+
+/*
+ * Use the AUTH2_COMMAND form of unseal to authorize both the key and the blob.
+ */
+static int tpm_unseal(struct tpm_buf *tb,
+ uint32_t keyhandle, const unsigned char *keyauth,
+ const unsigned char *blob, int bloblen,
+ const unsigned char *blobauth,
+ unsigned char *data, unsigned int *datalen)
+{
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+ unsigned char enonce1[TPM_NONCE_SIZE];
+ unsigned char enonce2[TPM_NONCE_SIZE];
+ unsigned char authdata1[SHA1_DIGEST_SIZE];
+ unsigned char authdata2[SHA1_DIGEST_SIZE];
+ uint32_t authhandle1 = 0;
+ uint32_t authhandle2 = 0;
+ unsigned char cont = 0;
+ uint32_t ordinal;
+ int ret;
+
+ /* sessions for unsealing key and data */
+ ret = oiap(tb, &authhandle1, enonce1);
+ if (ret < 0) {
+ pr_info("trusted_key: oiap failed (%d)\n", ret);
+ return ret;
+ }
+ ret = oiap(tb, &authhandle2, enonce2);
+ if (ret < 0) {
+ pr_info("trusted_key: oiap failed (%d)\n", ret);
+ return ret;
+ }
+
+ ordinal = htonl(TPM_ORD_UNSEAL);
+ ret = tpm_get_random(chip, nonceodd, TPM_NONCE_SIZE);
+ if (ret < 0)
+ return ret;
+
+ if (ret != TPM_NONCE_SIZE) {
+ pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
+ return -EIO;
+ }
+ ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
+ enonce1, nonceodd, cont, sizeof(uint32_t),
+ &ordinal, bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE,
+ enonce2, nonceodd, cont, sizeof(uint32_t),
+ &ordinal, bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* build and send TPM request packet */
+ tpm_buf_reset(tb, TPM_TAG_RQU_AUTH2_COMMAND, TPM_ORD_UNSEAL);
+ tpm_buf_append_u32(tb, keyhandle);
+ tpm_buf_append(tb, blob, bloblen);
+ tpm_buf_append_u32(tb, authhandle1);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata1, SHA1_DIGEST_SIZE);
+ tpm_buf_append_u32(tb, authhandle2);
+ tpm_buf_append(tb, nonceodd, TPM_NONCE_SIZE);
+ tpm_buf_append_u8(tb, cont);
+ tpm_buf_append(tb, authdata2, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+ if (ret < 0) {
+ pr_info("trusted_key: authhmac failed (%d)\n", ret);
+ return ret;
+ }
+
+ *datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+ ret = TSS_checkhmac2(tb->data, ordinal, nonceodd,
+ keyauth, SHA1_DIGEST_SIZE,
+ blobauth, SHA1_DIGEST_SIZE,
+ sizeof(uint32_t), TPM_DATA_OFFSET,
+ *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
+ 0);
+ if (ret < 0) {
+ pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret);
+ return ret;
+ }
+ memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
+ return 0;
+}
+
+/*
+ * Have the TPM seal (encrypt) the symmetric key
+ */
+static int key_seal(struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ struct tpm_buf tb;
+ int ret;
+
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
+
+ /* include migratable flag at end of sealed key */
+ p->key[p->key_len] = p->migratable;
+
+ ret = tpm_seal(&tb, o->keytype, o->keyhandle, o->keyauth,
+ p->key, p->key_len + 1, p->blob, &p->blob_len,
+ o->blobauth, o->pcrinfo, o->pcrinfo_len);
+ if (ret < 0)
+ pr_info("trusted_key: srkseal failed (%d)\n", ret);
+
+ tpm_buf_destroy(&tb);
+ return ret;
+}
+
+/*
+ * Have the TPM unseal (decrypt) the symmetric key
+ */
+static int key_unseal(struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ struct tpm_buf tb;
+ int ret;
+
+ ret = tpm_buf_init(&tb, 0, 0);
+ if (ret)
+ return ret;
+
+ ret = tpm_unseal(&tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+ o->blobauth, p->key, &p->key_len);
+ if (ret < 0)
+ pr_info("trusted_key: srkunseal failed (%d)\n", ret);
+ else
+ /* pull migratable flag out of sealed key */
+ p->migratable = p->key[--p->key_len];
+
+ tpm_buf_destroy(&tb);
+ return ret;
+}
+
+enum {
+ Opt_err,
+ Opt_new, Opt_load, Opt_update,
+ Opt_keyhandle, Opt_keyauth, Opt_blobauth,
+ Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
+ Opt_hash,
+ Opt_policydigest,
+ Opt_policyhandle,
+};
+
+static const match_table_t key_tokens = {
+ {Opt_new, "new"},
+ {Opt_load, "load"},
+ {Opt_update, "update"},
+ {Opt_keyhandle, "keyhandle=%s"},
+ {Opt_keyauth, "keyauth=%s"},
+ {Opt_blobauth, "blobauth=%s"},
+ {Opt_pcrinfo, "pcrinfo=%s"},
+ {Opt_pcrlock, "pcrlock=%s"},
+ {Opt_migratable, "migratable=%s"},
+ {Opt_hash, "hash=%s"},
+ {Opt_policydigest, "policydigest=%s"},
+ {Opt_policyhandle, "policyhandle=%s"},
+ {Opt_err, NULL}
+};
+
+/* can have zero or more token= options */
+static int getoptions(char *c, struct trusted_key_payload *pay,
+ struct trusted_key_options *opt)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p = c;
+ int token;
+ int res;
+ unsigned long handle;
+ unsigned long lock;
+ unsigned long token_mask = 0;
+ unsigned int digest_len;
+ int i;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return tpm2;
+
+ opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1;
+
+ while ((p = strsep(&c, " \t"))) {
+ if (*p == '\0' || *p == ' ' || *p == '\t')
+ continue;
+ token = match_token(p, key_tokens, args);
+ if (test_and_set_bit(token, &token_mask))
+ return -EINVAL;
+
+ switch (token) {
+ case Opt_pcrinfo:
+ opt->pcrinfo_len = strlen(args[0].from) / 2;
+ if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
+ return -EINVAL;
+ res = hex2bin(opt->pcrinfo, args[0].from,
+ opt->pcrinfo_len);
+ if (res < 0)
+ return -EINVAL;
+ break;
+ case Opt_keyhandle:
+ res = kstrtoul(args[0].from, 16, &handle);
+ if (res < 0)
+ return -EINVAL;
+ opt->keytype = SEAL_keytype;
+ opt->keyhandle = handle;
+ break;
+ case Opt_keyauth:
+ if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+ return -EINVAL;
+ res = hex2bin(opt->keyauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
+ break;
+ case Opt_blobauth:
+ /*
+ * TPM 1.2 authorizations are SHA-1 hashes passed in as
+ * hex strings. TPM 2.0 authorizations are simple
+ * passwords (although a hash can be passed as well).
+ */
+ opt->blobauth_len = strlen(args[0].from);
+
+ if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
+ res = hex2bin(opt->blobauth, args[0].from,
+ TPM_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
+
+ opt->blobauth_len = TPM_DIGEST_SIZE;
+ break;
+ }
+
+ if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
+ memcpy(opt->blobauth, args[0].from,
+ opt->blobauth_len);
+ break;
+ }
+
+ return -EINVAL;
+
+ case Opt_migratable:
+ if (*args[0].from == '0')
+ pay->migratable = 0;
+ else if (*args[0].from != '1')
+ return -EINVAL;
+ break;
+ case Opt_pcrlock:
+ res = kstrtoul(args[0].from, 10, &lock);
+ if (res < 0)
+ return -EINVAL;
+ opt->pcrlock = lock;
+ break;
+ case Opt_hash:
+ if (test_bit(Opt_policydigest, &token_mask))
+ return -EINVAL;
+ for (i = 0; i < HASH_ALGO__LAST; i++) {
+ if (!strcmp(args[0].from, hash_algo_name[i])) {
+ opt->hash = i;
+ break;
+ }
+ }
+ if (i == HASH_ALGO__LAST)
+ return -EINVAL;
+ if (!tpm2 && i != HASH_ALGO_SHA1) {
+ pr_info("trusted_key: TPM 1.x only supports SHA-1.\n");
+ return -EINVAL;
+ }
+ break;
+ case Opt_policydigest:
+ digest_len = hash_digest_size[opt->hash];
+ if (!tpm2 || strlen(args[0].from) != (2 * digest_len))
+ return -EINVAL;
+ res = hex2bin(opt->policydigest, args[0].from,
+ digest_len);
+ if (res < 0)
+ return -EINVAL;
+ opt->policydigest_len = digest_len;
+ break;
+ case Opt_policyhandle:
+ if (!tpm2)
+ return -EINVAL;
+ res = kstrtoul(args[0].from, 16, &handle);
+ if (res < 0)
+ return -EINVAL;
+ opt->policyhandle = handle;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * datablob_parse - parse the keyctl data and fill in the
+ * payload and options structures
+ *
+ * On success returns the matched command token (Opt_new, Opt_load or
+ * Opt_update); otherwise returns -EINVAL.
+ */
+static int datablob_parse(char *datablob, struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ substring_t args[MAX_OPT_ARGS];
+ long keylen;
+ int ret = -EINVAL;
+ int key_cmd;
+ char *c;
+
+ /* main command */
+ c = strsep(&datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ key_cmd = match_token(c, key_tokens, args);
+ switch (key_cmd) {
+ case Opt_new:
+ /* first argument is key size */
+ c = strsep(&datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ ret = kstrtol(c, 10, &keylen);
+ if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
+ return -EINVAL;
+ p->key_len = keylen;
+ ret = getoptions(datablob, p, o);
+ if (ret < 0)
+ return ret;
+ ret = Opt_new;
+ break;
+ case Opt_load:
+ /* first argument is sealed blob */
+ c = strsep(&datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ p->blob_len = strlen(c) / 2;
+ if (p->blob_len > MAX_BLOB_SIZE)
+ return -EINVAL;
+ ret = hex2bin(p->blob, c, p->blob_len);
+ if (ret < 0)
+ return -EINVAL;
+ ret = getoptions(datablob, p, o);
+ if (ret < 0)
+ return ret;
+ ret = Opt_load;
+ break;
+ case Opt_update:
+ /* all arguments are options */
+ ret = getoptions(datablob, p, o);
+ if (ret < 0)
+ return ret;
+ ret = Opt_update;
+ break;
+ case Opt_err:
+ return -EINVAL;
+ }
+ return ret;
+}
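Illustrative payload strings this parser would accept; the key handles, hash and PCR values are examples rather than defaults defined here, and the hex blob is elided:

/* Illustrative trusted-key payloads (values are examples only): */
static const char * const example_payloads[] = {
	"new 32 keyhandle=0x81000001 hash=sha256",	/* create a 32-byte key */
	"load <hex-encoded blob> keyhandle=0x81000001",	/* re-load a sealed blob */
	"update keyhandle=0x81000001 pcrlock=5",	/* reseal and lock a PCR */
};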
+
+static struct trusted_key_options *trusted_options_alloc(void)
+{
+ struct trusted_key_options *options;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return NULL;
+
+ options = kzalloc(sizeof *options, GFP_KERNEL);
+ if (options) {
+ /* set any non-zero defaults */
+ options->keytype = SRK_keytype;
+
+ if (!tpm2)
+ options->keyhandle = SRKHANDLE;
+ }
+ return options;
+}
+
+static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+{
+ struct trusted_key_payload *p = NULL;
+ int ret;
+
+ ret = key_payload_reserve(key, sizeof *p);
+ if (ret < 0)
+ return p;
+ p = kzalloc(sizeof *p, GFP_KERNEL);
+ if (p)
+ p->migratable = 1; /* migratable by default */
+ return p;
+}
+
+/*
+ * trusted_instantiate - create a new trusted key
+ *
+ * Unseal an existing trusted blob or, for a new key, get a
+ * random key, then seal and create a trusted key-type key,
+ * adding it to the specified keyring.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int trusted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *payload = NULL;
+ struct trusted_key_options *options = NULL;
+ size_t datalen = prep->datalen;
+ char *datablob;
+ int ret = 0;
+ int key_cmd;
+ size_t key_len;
+ int tpm2;
+
+ tpm2 = tpm_is_tpm2(chip);
+ if (tpm2 < 0)
+ return tpm2;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+
+ options = trusted_options_alloc();
+ if (!options) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ payload = trusted_payload_alloc(key);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key_cmd = datablob_parse(datablob, payload, options);
+ if (key_cmd < 0) {
+ ret = key_cmd;
+ goto out;
+ }
+
+ if (!options->keyhandle) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dump_payload(payload);
+ dump_options(options);
+
+ switch (key_cmd) {
+ case Opt_load:
+ if (tpm2)
+ ret = tpm2_unseal_trusted(chip, payload, options);
+ else
+ ret = key_unseal(payload, options);
+ dump_payload(payload);
+ dump_options(options);
+ if (ret < 0)
+ pr_info("trusted_key: key_unseal failed (%d)\n", ret);
+ break;
+ case Opt_new:
+ key_len = payload->key_len;
+ ret = tpm_get_random(chip, payload->key, key_len);
+ if (ret < 0)
+ goto out;
+
+ if (ret != key_len) {
+ pr_info("trusted_key: key_create failed (%d)\n", ret);
+ ret = -EIO;
+ goto out;
+ }
+ if (tpm2)
+ ret = tpm2_seal_trusted(chip, payload, options);
+ else
+ ret = key_seal(payload, options);
+ if (ret < 0)
+ pr_info("trusted_key: key_seal failed (%d)\n", ret);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!ret && options->pcrlock)
+ ret = pcrlock(options->pcrlock);
+out:
+ kfree_sensitive(datablob);
+ kfree_sensitive(options);
+ if (!ret)
+ rcu_assign_keypointer(key, payload);
+ else
+ kfree_sensitive(payload);
+ return ret;
+}
+
+static void trusted_rcu_free(struct rcu_head *rcu)
+{
+ struct trusted_key_payload *p;
+
+ p = container_of(rcu, struct trusted_key_payload, rcu);
+ kfree_sensitive(p);
+}
+
+/*
+ * trusted_update - reseal an existing key with new PCR values
+ */
+static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *p;
+ struct trusted_key_payload *new_p;
+ struct trusted_key_options *new_o;
+ size_t datalen = prep->datalen;
+ char *datablob;
+ int ret = 0;
+
+ if (key_is_negative(key))
+ return -ENOKEY;
+ p = key->payload.data[0];
+ if (!p->migratable)
+ return -EPERM;
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ new_o = trusted_options_alloc();
+ if (!new_o) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ new_p = trusted_payload_alloc(key);
+ if (!new_p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+ ret = datablob_parse(datablob, new_p, new_o);
+ if (ret != Opt_update) {
+ ret = -EINVAL;
+ kfree_sensitive(new_p);
+ goto out;
+ }
+
+ if (!new_o->keyhandle) {
+ ret = -EINVAL;
+ kfree_sensitive(new_p);
+ goto out;
+ }
+
+ /* copy old key values, and reseal with new pcrs */
+ new_p->migratable = p->migratable;
+ new_p->key_len = p->key_len;
+ memcpy(new_p->key, p->key, p->key_len);
+ dump_payload(p);
+ dump_payload(new_p);
+
+ ret = key_seal(new_p, new_o);
+ if (ret < 0) {
+ pr_info("trusted_key: key_seal failed (%d)\n", ret);
+ kfree_sensitive(new_p);
+ goto out;
+ }
+ if (new_o->pcrlock) {
+ ret = pcrlock(new_o->pcrlock);
+ if (ret < 0) {
+ pr_info("trusted_key: pcrlock failed (%d)\n", ret);
+ kfree_sensitive(new_p);
+ goto out;
+ }
+ }
+ rcu_assign_keypointer(key, new_p);
+ call_rcu(&p->rcu, trusted_rcu_free);
+out:
+ kfree_sensitive(datablob);
+ kfree_sensitive(new_o);
+ return ret;
+}
+
+/*
+ * trusted_read - copy the sealed blob data to userspace in hex.
+ * On success, return to userspace the trusted key datablob size.
+ */
+static long trusted_read(const struct key *key, char *buffer,
+ size_t buflen)
+{
+ const struct trusted_key_payload *p;
+ char *bufp;
+ int i;
+
+ p = dereference_key_locked(key);
+ if (!p)
+ return -EINVAL;
+
+ if (buffer && buflen >= 2 * p->blob_len) {
+ bufp = buffer;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ }
+ return 2 * p->blob_len;
+}
+
+/*
+ * trusted_destroy - clear and free the key's payload
+ */
+static void trusted_destroy(struct key *key)
+{
+ kfree_sensitive(key->payload.data[0]);
+}
+
+struct key_type key_type_trusted = {
+ .name = "trusted",
+ .instantiate = trusted_instantiate,
+ .update = trusted_update,
+ .destroy = trusted_destroy,
+ .describe = user_describe,
+ .read = trusted_read,
+};
+
+EXPORT_SYMBOL_GPL(key_type_trusted);
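A minimal userspace sketch, using libkeyutils (link with -lkeyutils), of exercising the type registered above; the "kmk" description, the "new 32" payload and the keyring choice are illustrative:

/* Sketch: create a trusted key and read back its sealed blob (hex). */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	void *blob_hex = NULL;
	key_serial_t key;
	long len;

	key = add_key("trusted", "kmk", "new 32", strlen("new 32"),
		      KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}

	/* trusted_read() above returns the blob hex-encoded. */
	len = keyctl_read_alloc(key, &blob_hex);
	if (len < 0) {
		perror("keyctl_read_alloc");
		return 1;
	}
	printf("sealed blob (%ld hex chars): %.*s\n", len, (int)len,
	       (char *)blob_hex);
	free(blob_hex);
	return 0;
}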
+
+static void trusted_shash_release(void)
+{
+ if (hashalg)
+ crypto_free_shash(hashalg);
+ if (hmacalg)
+ crypto_free_shash(hmacalg);
+}
+
+static int __init trusted_shash_alloc(void)
+{
+ int ret;
+
+ hmacalg = crypto_alloc_shash(hmac_alg, 0, 0);
+ if (IS_ERR(hmacalg)) {
+ pr_info("trusted_key: could not allocate crypto %s\n",
+ hmac_alg);
+ return PTR_ERR(hmacalg);
+ }
+
+ hashalg = crypto_alloc_shash(hash_alg, 0, 0);
+ if (IS_ERR(hashalg)) {
+ pr_info("trusted_key: could not allocate crypto %s\n",
+ hash_alg);
+ ret = PTR_ERR(hashalg);
+ goto hashalg_fail;
+ }
+
+ return 0;
+
+hashalg_fail:
+ crypto_free_shash(hmacalg);
+ return ret;
+}
+
+static int __init init_digests(void)
+{
+ int i;
+
+ digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
+ GFP_KERNEL);
+ if (!digests)
+ return -ENOMEM;
+
+ for (i = 0; i < chip->nr_allocated_banks; i++)
+ digests[i].alg_id = chip->allocated_banks[i].alg_id;
+
+ return 0;
+}
+
+static int __init init_trusted(void)
+{
+ int ret;
+
+ /* encrypted_keys.ko depends on successful load of this module even if
+ * TPM is not used.
+ */
+ chip = tpm_default_chip();
+ if (!chip)
+ return 0;
+
+ ret = init_digests();
+ if (ret < 0)
+ goto err_put;
+ ret = trusted_shash_alloc();
+ if (ret < 0)
+ goto err_free;
+ ret = register_key_type(&key_type_trusted);
+ if (ret < 0)
+ goto err_release;
+ return 0;
+err_release:
+ trusted_shash_release();
+err_free:
+ kfree(digests);
+err_put:
+ put_device(&chip->dev);
+ return ret;
+}
+
+static void __exit cleanup_trusted(void)
+{
+ if (chip) {
+ put_device(&chip->dev);
+ kfree(digests);
+ trusted_shash_release();
+ unregister_key_type(&key_type_trusted);
+ }
+}
+
+late_initcall(init_trusted);
+module_exit(cleanup_trusted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
new file mode 100644
index 000000000..65f688564
--- /dev/null
+++ b/security/keys/trusted-keys/trusted_tpm2.c
@@ -0,0 +1,331 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 IBM Corporation
+ * Copyright (C) 2014 Intel Corporation
+ */
+
+#include <linux/string.h>
+#include <linux/err.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include <keys/trusted-type.h>
+#include <keys/trusted_tpm.h>
+
+static struct tpm2_hash tpm2_hash_map[] = {
+ {HASH_ALGO_SHA1, TPM_ALG_SHA1},
+ {HASH_ALGO_SHA256, TPM_ALG_SHA256},
+ {HASH_ALGO_SHA384, TPM_ALG_SHA384},
+ {HASH_ALGO_SHA512, TPM_ALG_SHA512},
+ {HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
+};
+
+/**
+ * tpm2_buf_append_auth() - append TPMS_AUTH_COMMAND to the buffer.
+ *
+ * @buf: an allocated tpm_buf instance
+ * @session_handle: session handle
+ * @nonce: the session nonce, may be NULL if not used
+ * @nonce_len: the session nonce length, may be 0 if not used
+ * @attributes: the session attributes
+ * @hmac: the session HMAC or password, may be NULL if not used
+ * @hmac_len: the session HMAC or password length, may be 0 if not used
+ */
+static void tpm2_buf_append_auth(struct tpm_buf *buf, u32 session_handle,
+ const u8 *nonce, u16 nonce_len,
+ u8 attributes,
+ const u8 *hmac, u16 hmac_len)
+{
+ tpm_buf_append_u32(buf, 9 + nonce_len + hmac_len);
+ tpm_buf_append_u32(buf, session_handle);
+ tpm_buf_append_u16(buf, nonce_len);
+
+ if (nonce && nonce_len)
+ tpm_buf_append(buf, nonce, nonce_len);
+
+ tpm_buf_append_u8(buf, attributes);
+ tpm_buf_append_u16(buf, hmac_len);
+
+ if (hmac && hmac_len)
+ tpm_buf_append(buf, hmac, hmac_len);
+}
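The 9 in the size field above is the fixed portion of a TPMS_AUTH_COMMAND; the constant below only illustrates that arithmetic (the name is ours, not the kernel's):

/* Fixed part of TPMS_AUTH_COMMAND: u32 session handle + u16 nonce size
 * + u8 attributes + u16 hmac size = 4 + 2 + 1 + 2 = 9 bytes, to which
 * the variable-length nonce and hmac bytes are added. */
enum { EXAMPLE_TPM2_AUTH_FIXED_LEN = 4 + 2 + 1 + 2 };	/* == 9 */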
+
+/**
+ * tpm2_seal_trusted() - seal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: < 0 on error and 0 on success.
+ */
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ unsigned int blob_len;
+ struct tpm_buf buf;
+ u32 hash;
+ int i;
+ int rc;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) {
+ if (options->hash == tpm2_hash_map[i].crypto_id) {
+ hash = tpm2_hash_map[i].tpm_id;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(tpm2_hash_map))
+ return -EINVAL;
+
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+ if (rc) {
+ tpm_put_ops(chip);
+ return rc;
+ }
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ /* sensitive */
+ tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
+
+ tpm_buf_append_u16(&buf, options->blobauth_len);
+ if (options->blobauth_len)
+ tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
+
+ tpm_buf_append_u16(&buf, payload->key_len + 1);
+ tpm_buf_append(&buf, payload->key, payload->key_len);
+ tpm_buf_append_u8(&buf, payload->migratable);
+
+ /* public */
+ tpm_buf_append_u16(&buf, 14 + options->policydigest_len);
+ tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH);
+ tpm_buf_append_u16(&buf, hash);
+
+ /* policy */
+ if (options->policydigest_len) {
+ tpm_buf_append_u32(&buf, 0);
+ tpm_buf_append_u16(&buf, options->policydigest_len);
+ tpm_buf_append(&buf, options->policydigest,
+ options->policydigest_len);
+ } else {
+ tpm_buf_append_u32(&buf, TPM2_OA_USER_WITH_AUTH);
+ tpm_buf_append_u16(&buf, 0);
+ }
+
+ /* public parameters */
+ tpm_buf_append_u16(&buf, TPM_ALG_NULL);
+ tpm_buf_append_u16(&buf, 0);
+
+ /* outside info */
+ tpm_buf_append_u16(&buf, 0);
+
+ /* creation PCR */
+ tpm_buf_append_u32(&buf, 0);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data");
+ if (rc)
+ goto out;
+
+ blob_len = be32_to_cpup((__be32 *) &buf.data[TPM_HEADER_SIZE]);
+ if (blob_len > MAX_BLOB_SIZE) {
+ rc = -E2BIG;
+ goto out;
+ }
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 4 + blob_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ memcpy(payload->blob, &buf.data[TPM_HEADER_SIZE + 4], blob_len);
+ payload->blob_len = blob_len;
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0) {
+ if (tpm2_rc_value(rc) == TPM2_RC_HASH)
+ rc = -EINVAL;
+ else
+ rc = -EPERM;
+ }
+
+ tpm_put_ops(chip);
+ return rc;
+}
+
+/**
+ * tpm2_load_cmd() - execute a TPM2_Load command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: returned blob handle
+ *
+ * Return: 0 on success.
+ * -E2BIG on wrong payload size.
+ * -EPERM on tpm error status.
+ * < 0 error from tpm_send.
+ */
+static int tpm2_load_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 *blob_handle)
+{
+ struct tpm_buf buf;
+ unsigned int private_len;
+ unsigned int public_len;
+ unsigned int blob_len;
+ int rc;
+
+ private_len = be16_to_cpup((__be16 *) &payload->blob[0]);
+ if (private_len > (payload->blob_len - 2))
+ return -E2BIG;
+
+ public_len = be16_to_cpup((__be16 *) &payload->blob[2 + private_len]);
+ blob_len = private_len + public_len + 4;
+ if (blob_len > payload->blob_len)
+ return -E2BIG;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_LOAD);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, options->keyhandle);
+ tpm2_buf_append_auth(&buf, TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ 0 /* session_attributes */,
+ options->keyauth /* hmac */,
+ TPM_DIGEST_SIZE);
+
+ tpm_buf_append(&buf, payload->blob, blob_len);
+
+ if (buf.flags & TPM_BUF_OVERFLOW) {
+ rc = -E2BIG;
+ goto out;
+ }
+
+ rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob");
+ if (!rc)
+ *blob_handle = be32_to_cpup(
+ (__be32 *) &buf.data[TPM_HEADER_SIZE]);
+
+out:
+ tpm_buf_destroy(&buf);
+
+ if (rc > 0)
+ rc = -EPERM;
+
+ return rc;
+}
+
+/**
+ * tpm2_unseal_cmd() - execute a TPM2_Unseal command
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ * @blob_handle: blob handle
+ *
+ * Return: 0 on success
+ * -EPERM on tpm error status
+ * < 0 error from tpm_send
+ */
+static int tpm2_unseal_cmd(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options,
+ u32 blob_handle)
+{
+ struct tpm_buf buf;
+ u16 data_len;
+ u8 *data;
+ int rc;
+
+ rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
+ if (rc)
+ return rc;
+
+ tpm_buf_append_u32(&buf, blob_handle);
+ tpm2_buf_append_auth(&buf,
+ options->policyhandle ?
+ options->policyhandle : TPM2_RS_PW,
+ NULL /* nonce */, 0,
+ TPM2_SA_CONTINUE_SESSION,
+ options->blobauth /* hmac */,
+ options->blobauth_len);
+
+ rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
+ if (rc > 0)
+ rc = -EPERM;
+
+ if (!rc) {
+ data_len = be16_to_cpup(
+ (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
+ if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (tpm_buf_length(&buf) < TPM_HEADER_SIZE + 6 + data_len) {
+ rc = -EFAULT;
+ goto out;
+ }
+ data = &buf.data[TPM_HEADER_SIZE + 6];
+
+ memcpy(payload->key, data, data_len - 1);
+ payload->key_len = data_len - 1;
+ payload->migratable = data[data_len - 1];
+ }
+
+out:
+ tpm_buf_destroy(&buf);
+ return rc;
+}
+
+/**
+ * tpm2_unseal_trusted() - unseal the payload of a trusted key
+ *
+ * @chip: TPM chip to use
+ * @payload: the key data in clear and encrypted form
+ * @options: authentication values and other options
+ *
+ * Return: Same as with tpm_send.
+ */
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options)
+{
+ u32 blob_handle;
+ int rc;
+
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm2_load_cmd(chip, payload, options, &blob_handle);
+ if (rc)
+ goto out;
+
+ rc = tpm2_unseal_cmd(chip, payload, options, blob_handle);
+ tpm2_flush_context(chip, blob_handle);
+
+out:
+ tpm_put_ops(chip);
+
+ return rc;
+}
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
new file mode 100644
index 000000000..749e2a4dc
--- /dev/null
+++ b/security/keys/user_defined.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* user_defined.c: user defined key type
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <linux/uaccess.h>
+#include "internal.h"
+
+static int logon_vet_description(const char *desc);
+
+/*
+ * user defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_user = {
+ .name = "user",
+ .preparse = user_preparse,
+ .free_preparse = user_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .update = user_update,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
+};
+
+EXPORT_SYMBOL_GPL(key_type_user);
+
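+/*
+ * Illustrative userspace usage (assuming the keyutils tools are installed):
+ *
+ *	keyctl add user debug:mykey "some data" @u
+ *	keyctl print <key id>
+ *
+ * "user" keys can be read back because the type defines a .read operation.
+ */
+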
+/*
+ * This key type is essentially the same as key_type_user, but it does
+ * not define a .read op. This is suitable for storing username and
+ * password pairs in the keyring that you do not want to be readable
+ * from userspace.
+ */
+struct key_type key_type_logon = {
+ .name = "logon",
+ .preparse = user_preparse,
+ .free_preparse = user_free_preparse,
+ .instantiate = generic_key_instantiate,
+ .update = user_update,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .vet_description = logon_vet_description,
+};
+EXPORT_SYMBOL_GPL(key_type_logon);
+
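+/*
+ * Illustrative userspace usage (again assuming keyutils):
+ *
+ *	keyctl add logon krb:user secretpassword @u
+ *
+ * The payload cannot be read back with "keyctl print"/"keyctl read" as the
+ * type has no .read operation, and the description must be qualified with a
+ * "service:" prefix (see logon_vet_description() below).
+ */
+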
+/*
+ * Preparse a user defined key payload
+ */
+int user_preparse(struct key_preparsed_payload *prep)
+{
+ struct user_key_payload *upayload;
+ size_t datalen = prep->datalen;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
+ if (!upayload)
+ return -ENOMEM;
+
+ /* attach the data */
+ prep->quotalen = datalen;
+ prep->payload.data[0] = upayload;
+ upayload->datalen = datalen;
+ memcpy(upayload->data, prep->data, datalen);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(user_preparse);
+
+/*
+ * Free a preparse of a user defined key payload
+ */
+void user_free_preparse(struct key_preparsed_payload *prep)
+{
+ kfree_sensitive(prep->payload.data[0]);
+}
+EXPORT_SYMBOL_GPL(user_free_preparse);
+
+static void user_free_payload_rcu(struct rcu_head *head)
+{
+ struct user_key_payload *payload;
+
+ payload = container_of(head, struct user_key_payload, rcu);
+ kfree_sensitive(payload);
+}
+
+/*
+ * update a user defined key
+ * - the key's semaphore is write-locked
+ */
+int user_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct user_key_payload *zap = NULL;
+ int ret;
+
+ /* check the quota and attach the new data */
+ ret = key_payload_reserve(key, prep->datalen);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * Attach the new data, displacing the old.  The old payload is
+	 * freed via RCU below, as concurrent readers may still be
+	 * accessing it under rcu_read_lock().
+	 */
+ key->expiry = prep->expiry;
+ if (key_is_positive(key))
+ zap = dereference_key_locked(key);
+ rcu_assign_keypointer(key, prep->payload.data[0]);
+ prep->payload.data[0] = NULL;
+
+ if (zap)
+ call_rcu(&zap->rcu, user_free_payload_rcu);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(user_update);
+
+/*
+ * dispose of the payload of a revoked user defined key
+ * - called with the key sem write-locked
+ */
+void user_revoke(struct key *key)
+{
+ struct user_key_payload *upayload = user_key_payload_locked(key);
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+
+ if (upayload) {
+ rcu_assign_keypointer(key, NULL);
+ call_rcu(&upayload->rcu, user_free_payload_rcu);
+ }
+}
+
+EXPORT_SYMBOL(user_revoke);
+
+/*
+ * dispose of the data dangling from the corpse of a user key
+ */
+void user_destroy(struct key *key)
+{
+ struct user_key_payload *upayload = key->payload.data[0];
+
+ kfree_sensitive(upayload);
+}
+
+EXPORT_SYMBOL_GPL(user_destroy);
+
+/*
+ * describe the user key
+ */
+void user_describe(const struct key *key, struct seq_file *m)
+{
+ seq_puts(m, key->description);
+ if (key_is_positive(key))
+ seq_printf(m, ": %u", key->datalen);
+}
+
+EXPORT_SYMBOL_GPL(user_describe);
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long user_read(const struct key *key, char *buffer, size_t buflen)
+{
+ const struct user_key_payload *upayload;
+ long ret;
+
+ upayload = user_key_payload_locked(key);
+ ret = upayload->datalen;
+
+	/*
+	 * We can return the data as is.  The full payload length is
+	 * returned even if the caller's buffer is smaller; at most buflen
+	 * bytes are copied.
+	 */
+ if (buffer && buflen > 0) {
+ if (buflen > upayload->datalen)
+ buflen = upayload->datalen;
+
+ memcpy(buffer, upayload->data, buflen);
+ }
+
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(user_read);
+
+/*
+ * Vet the description for a "logon" key.  The description must take the
+ * form "<service>:<identifier>", i.e. it must contain a colon that is not
+ * the first character.
+ */
+static int logon_vet_description(const char *desc)
+{
+ char *p;
+
+ /* require a "qualified" description string */
+ p = strchr(desc, ':');
+ if (!p)
+ return -EINVAL;
+
+ /* also reject description with ':' as first char */
+ if (p == desc)
+ return -EINVAL;
+
+ return 0;
+}