Diffstat (limited to 'security/integrity')
-rw-r--r--  security/integrity/Kconfig                      72
-rw-r--r--  security/integrity/Makefile                     16
-rw-r--r--  security/integrity/digsig.c                    149
-rw-r--r--  security/integrity/digsig_asymmetric.c         140
-rw-r--r--  security/integrity/evm/Kconfig                  73
-rw-r--r--  security/integrity/evm/Makefile                  7
-rw-r--r--  security/integrity/evm/evm.h                    71
-rw-r--r--  security/integrity/evm/evm_crypto.c            379
-rw-r--r--  security/integrity/evm/evm_main.c              604
-rw-r--r--  security/integrity/evm/evm_posix_acl.c          27
-rw-r--r--  security/integrity/evm/evm_secfs.c             327
-rw-r--r--  security/integrity/iint.c                      234
-rw-r--r--  security/integrity/ima/Kconfig                 288
-rw-r--r--  security/integrity/ima/Makefile                 13
-rw-r--r--  security/integrity/ima/ima.h                   325
-rw-r--r--  security/integrity/ima/ima_api.c               377
-rw-r--r--  security/integrity/ima/ima_appraise.c          466
-rw-r--r--  security/integrity/ima/ima_crypto.c            698
-rw-r--r--  security/integrity/ima/ima_fs.c                508
-rw-r--r--  security/integrity/ima/ima_init.c              135
-rw-r--r--  security/integrity/ima/ima_kexec.c             173
-rw-r--r--  security/integrity/ima/ima_main.c              565
-rw-r--r--  security/integrity/ima/ima_mok.c                54
-rw-r--r--  security/integrity/ima/ima_policy.c           1256
-rw-r--r--  security/integrity/ima/ima_queue.c             214
-rw-r--r--  security/integrity/ima/ima_template.c          439
-rw-r--r--  security/integrity/ima/ima_template_lib.c      408
-rw-r--r--  security/integrity/ima/ima_template_lib.h       45
-rw-r--r--  security/integrity/integrity.h                 224
-rw-r--r--  security/integrity/integrity_audit.c            63
30 files changed, 8350 insertions, 0 deletions
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
new file mode 100644
index 000000000..da9565891
--- /dev/null
+++ b/security/integrity/Kconfig
@@ -0,0 +1,72 @@
+#
+config INTEGRITY
+ bool "Integrity subsystem"
+ depends on SECURITY
+ default y
+ help
+ This option enables the integrity subsystem, which comprises
+ a number of different components, including the Integrity
+ Measurement Architecture (IMA), Extended Verification Module
+ (EVM), IMA-appraisal extension, digital signature verification
+ extension and audit measurement log support.
+
+ Each of these components can be enabled/disabled separately.
+ Refer to the individual components for additional details.
+
+if INTEGRITY
+
+config INTEGRITY_SIGNATURE
+ bool "Digital signature verification using multiple keyrings"
+ depends on KEYS
+ default n
+ select SIGNATURE
+ help
+ This option enables digital signature verification support
+ using multiple keyrings. It defines separate keyrings for each
+ of the different use cases - evm, ima, and modules.
+ Different keyrings improve search performance and also allow
+ certain keyrings to be "locked" to prevent new keys from being
+ added. This is useful for the evm and module keyrings, where
+ keys are usually only added from the initramfs.
+
+config INTEGRITY_ASYMMETRIC_KEYS
+ bool "Enable asymmetric keys support"
+ depends on INTEGRITY_SIGNATURE
+ default n
+ select ASYMMETRIC_KEY_TYPE
+ select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select CRYPTO_RSA
+ select X509_CERTIFICATE_PARSER
+ help
+ This option enables digital signature verification using
+ asymmetric keys.
+
+config INTEGRITY_TRUSTED_KEYRING
+ bool "Require all keys on the integrity keyrings be signed"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ default y
+ help
+ This option requires that all keys added to the .ima and
+ .evm keyrings be signed by a key on the system trusted
+ keyring.
+
+config INTEGRITY_AUDIT
+ bool "Enables integrity auditing support "
+ depends on AUDIT
+ default y
+ help
+ In addition to enabling integrity auditing support, this
+ option adds a kernel parameter 'integrity_audit', which
+ controls the level of integrity auditing messages.
+ 0 - basic integrity auditing messages (default)
+ 1 - additional integrity auditing messages
+
+ Additional informational integrity auditing messages are
+ enabled by specifying 'integrity_audit=1' on the kernel
+ command line.
+
+source security/integrity/ima/Kconfig
+source security/integrity/evm/Kconfig
+
+endif # if INTEGRITY
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
new file mode 100644
index 000000000..04d6e462b
--- /dev/null
+++ b/security/integrity/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for caching inode integrity data (iint)
+#
+
+obj-$(CONFIG_INTEGRITY) += integrity.o
+
+integrity-y := iint.o
+integrity-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
+integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
+integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
+
+subdir-$(CONFIG_IMA) += ima
+obj-$(CONFIG_IMA) += ima/
+subdir-$(CONFIG_EVM) += evm
+obj-$(CONFIG_EVM) += evm/
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
new file mode 100644
index 000000000..9bb0a7f28
--- /dev/null
+++ b/security/integrity/digsig.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/key-type.h>
+#include <linux/digsig.h>
+#include <linux/vmalloc.h>
+#include <crypto/public_key.h>
+#include <keys/system_keyring.h>
+
+#include "integrity.h"
+
+static struct key *keyring[INTEGRITY_KEYRING_MAX];
+
+static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
+#ifndef CONFIG_INTEGRITY_TRUSTED_KEYRING
+ "_evm",
+ "_ima",
+#else
+ ".evm",
+ ".ima",
+#endif
+ "_module",
+};
+
+#ifdef CONFIG_INTEGRITY_TRUSTED_KEYRING
+static bool init_keyring __initdata = true;
+#else
+static bool init_keyring __initdata;
+#endif
+
+#ifdef CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+#define restrict_link_to_ima restrict_link_by_builtin_and_secondary_trusted
+#else
+#define restrict_link_to_ima restrict_link_by_builtin_trusted
+#endif
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ if (id >= INTEGRITY_KEYRING_MAX || siglen < 2)
+ return -EINVAL;
+
+ if (!keyring[id]) {
+ keyring[id] =
+ request_key(&key_type_keyring, keyring_name[id], NULL);
+ if (IS_ERR(keyring[id])) {
+ int err = PTR_ERR(keyring[id]);
+ pr_err("no %s keyring: %d\n", keyring_name[id], err);
+ keyring[id] = NULL;
+ return err;
+ }
+ }
+
+ switch (sig[1]) {
+ case 1:
+ /* v1 API expects signature without xattr type */
+ return digsig_verify(keyring[id], sig + 1, siglen - 1,
+ digest, digestlen);
+ case 2:
+ return asymmetric_verify(keyring[id], sig, siglen,
+ digest, digestlen);
+ }
+
+ return -EOPNOTSUPP;
+}
+
+int __init integrity_init_keyring(const unsigned int id)
+{
+ const struct cred *cred = current_cred();
+ struct key_restriction *restriction;
+ int err = 0;
+
+ if (!init_keyring)
+ return 0;
+
+ restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
+ if (!restriction)
+ return -ENOMEM;
+
+ restriction->check = restrict_link_to_ima;
+
+ keyring[id] = keyring_alloc(keyring_name[id], KUIDT_INIT(0),
+ KGIDT_INIT(0), cred,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_WRITE | KEY_USR_SEARCH),
+ KEY_ALLOC_NOT_IN_QUOTA,
+ restriction, NULL);
+ if (IS_ERR(keyring[id])) {
+ err = PTR_ERR(keyring[id]);
+ pr_info("Can't allocate %s keyring (%d)\n",
+ keyring_name[id], err);
+ keyring[id] = NULL;
+ }
+ return err;
+}
+
+int __init integrity_load_x509(const unsigned int id, const char *path)
+{
+ key_ref_t key;
+ void *data;
+ loff_t size;
+ int rc;
+
+ if (!keyring[id])
+ return -EINVAL;
+
+ rc = kernel_read_file_from_path(path, &data, &size, 0,
+ READING_X509_CERTIFICATE);
+ if (rc < 0) {
+ pr_err("Unable to open file: %s (%d)", path, rc);
+ return rc;
+ }
+
+ key = key_create_or_update(make_key_ref(keyring[id], 1),
+ "asymmetric",
+ NULL,
+ data,
+ size,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(key)) {
+ rc = PTR_ERR(key);
+ pr_err("Problem loading X.509 certificate (%d): %s\n",
+ rc, path);
+ } else {
+ pr_notice("Loaded X.509 cert '%s': %s\n",
+ key_ref_to_ptr(key)->description, path);
+ key_ref_put(key);
+ }
+ vfree(data);
+ return 0;
+}
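Note on the signature blob handled by integrity_digsig_verify() above: the sig buffer is the raw security.ima/security.evm xattr value, so the switch on sig[1] dispatches on the signature format version. The following is an illustrative sketch only; the authoritative definitions live in security/integrity/integrity.h (listed in the diffstat, not shown in this hunk).

/*
 * Assumed layout of the blob passed to integrity_digsig_verify();
 * illustrative only, see integrity.h for the real definitions.
 *
 *   sig[0]   evm_ima_xattr_type (e.g. EVM_IMA_XATTR_DIGSIG)
 *   sig[1]   signature format version: 1 = original digsig format,
 *            2 = asymmetric-key signature handled by asymmetric_verify()
 *   sig[2..] version-specific header followed by the signature payload
 */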
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
new file mode 100644
index 000000000..6dc075144
--- /dev/null
+++ b/security/integrity/digsig_asymmetric.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/ratelimit.h>
+#include <linux/key-type.h>
+#include <crypto/public_key.h>
+#include <crypto/hash_info.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "integrity.h"
+
+/*
+ * Request an asymmetric key.
+ */
+static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
+{
+ struct key *key;
+ char name[12];
+
+ sprintf(name, "id:%08x", keyid);
+
+ pr_debug("key search: \"%s\"\n", name);
+
+ key = get_ima_blacklist_keyring();
+ if (key) {
+ key_ref_t kref;
+
+ kref = keyring_search(make_key_ref(key, 1),
+ &key_type_asymmetric, name);
+ if (!IS_ERR(kref)) {
+ pr_err("Key '%s' is in ima_blacklist_keyring\n", name);
+ return ERR_PTR(-EKEYREJECTED);
+ }
+ }
+
+ if (keyring) {
+ /* search in specific keyring */
+ key_ref_t kref;
+
+ kref = keyring_search(make_key_ref(keyring, 1),
+ &key_type_asymmetric, name);
+ if (IS_ERR(kref))
+ key = ERR_CAST(kref);
+ else
+ key = key_ref_to_ptr(kref);
+ } else {
+ key = request_key(&key_type_asymmetric, name, NULL);
+ }
+
+ if (IS_ERR(key)) {
+ pr_err_ratelimited("Request for unknown key '%s' err %ld\n",
+ name, PTR_ERR(key));
+ switch (PTR_ERR(key)) {
+ /* Hide some search errors */
+ case -EACCES:
+ case -ENOTDIR:
+ case -EAGAIN:
+ return ERR_PTR(-ENOKEY);
+ default:
+ return key;
+ }
+ }
+
+ pr_debug("%s() = 0 [%x]\n", __func__, key_serial(key));
+
+ return key;
+}
+
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ struct public_key_signature pks;
+ struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+ struct key *key;
+ int ret = -ENOMEM;
+
+ if (siglen <= sizeof(*hdr))
+ return -EBADMSG;
+
+ siglen -= sizeof(*hdr);
+
+ if (siglen != be16_to_cpu(hdr->sig_size))
+ return -EBADMSG;
+
+ if (hdr->hash_algo >= HASH_ALGO__LAST)
+ return -ENOPKG;
+
+ key = request_asymmetric_key(keyring, be32_to_cpu(hdr->keyid));
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ memset(&pks, 0, sizeof(pks));
+
+ pks.pkey_algo = "rsa";
+ pks.hash_algo = hash_algo_name[hdr->hash_algo];
+ pks.digest = (u8 *)data;
+ pks.digest_size = datalen;
+ pks.s = hdr->sig;
+ pks.s_size = siglen;
+ ret = verify_signature(key, &pks);
+ key_put(key);
+ pr_debug("%s() = %d\n", __func__, ret);
+ return ret;
+}
+
+/**
+ * integrity_kernel_module_request - prevent crypto-pkcs1pad(rsa,*) requests
+ * @kmod_name: kernel module name
+ *
+ * There is a situation where public_key_verify_signature(), in the case
+ * of the RSA algorithm, uses alg_name to store internal information in
+ * order to construct an algorithm on the fly, but crypto_larval_lookup()
+ * will try to use alg_name in order to load a kernel module with the
+ * same name.
+ * Since we don't have any real "crypto-pkcs1pad(rsa,*)" kernel modules,
+ * we are safe to fail such module requests from crypto_larval_lookup().
+ *
+ * In this way we prevent modprobe execution during digsig verification
+ * and avoid a possible deadlock if modprobe and/or its dependencies are
+ * also signed with digsig.
+ */
+int integrity_kernel_module_request(char *kmod_name)
+{
+ if (strncmp(kmod_name, "crypto-pkcs1pad(rsa,", 20) == 0)
+ return -EINVAL;
+
+ return 0;
+}
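asymmetric_verify() above reads hash_algo, keyid and sig_size from a version-2 signature header. The struct it casts to is defined in security/integrity/integrity.h (present in the diffstat but not shown in this hunk); the sketch below is reconstructed from the accessors used here and is not authoritative.

/* Sketch of the version-2 signature header assumed by asymmetric_verify(). */
struct signature_v2_hdr {
	uint8_t type;		/* xattr type, e.g. EVM_IMA_XATTR_DIGSIG */
	uint8_t version;	/* signature format version (2) */
	uint8_t hash_algo;	/* digest algorithm, enum hash_algo */
	__be32 keyid;		/* key identifier, looked up as "id:%08x" */
	__be16 sig_size;	/* signature payload size in bytes */
	uint8_t sig[];		/* signature payload */
} __packed;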
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
new file mode 100644
index 000000000..60221852b
--- /dev/null
+++ b/security/integrity/evm/Kconfig
@@ -0,0 +1,73 @@
+config EVM
+ bool "EVM support"
+ select KEYS
+ select ENCRYPTED_KEYS
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+ default n
+ help
+ EVM protects a file's security extended attributes against
+ integrity attacks.
+
+ If you are unsure how to answer this question, answer N.
+
+config EVM_ATTR_FSUUID
+ bool "FSUUID (version 2)"
+ default y
+ depends on EVM
+ help
+ Include filesystem UUID for HMAC calculation.
+
+ The default value is 'selected', which is the former version 2;
+ if 'not selected', it is the former version 1.
+
+ WARNING: changing the HMAC calculation method or adding
+ additional info to the calculation requires existing EVM
+ labeled file systems to be relabeled.
+
+config EVM_EXTRA_SMACK_XATTRS
+ bool "Additional SMACK xattrs"
+ depends on EVM && SECURITY_SMACK
+ default n
+ help
+ Include additional SMACK xattrs for HMAC calculation.
+
+ In addition to the original security xattrs (eg. security.selinux,
+ security.SMACK64, security.capability, and security.ima) included
+ in the HMAC calculation, enabling this option includes newly defined
+ Smack xattrs: security.SMACK64EXEC, security.SMACK64TRANSMUTE and
+ security.SMACK64MMAP.
+
+ WARNING: changing the HMAC calculation method or adding
+ additional info to the calculation requires existing EVM
+ labeled file systems to be relabeled.
+
+config EVM_ADD_XATTRS
+ bool "Add additional EVM extended attributes at runtime"
+ depends on EVM
+ default n
+ help
+ Allow userland to provide additional xattrs for HMAC calculation.
+
+ When this option is enabled, root can add additional xattrs to the
+ list used by EVM by writing them into
+ /sys/kernel/security/integrity/evm/evm_xattrs.
+
+config EVM_LOAD_X509
+ bool "Load an X509 certificate onto the '.evm' trusted keyring"
+ depends on EVM && INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ Load an X509 certificate onto the '.evm' trusted keyring.
+
+ This option enables X509 certificate loading from the kernel
+ onto the '.evm' trusted keyring. A public key can be used to
+ verify EVM integrity starting from the 'init' process.
+
+config EVM_X509_PATH
+ string "EVM X509 certificate path"
+ depends on EVM_LOAD_X509
+ default "/etc/keys/x509_evm.der"
+ help
+ This option defines the X509 certificate path.
diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile
new file mode 100644
index 000000000..7393c415a
--- /dev/null
+++ b/security/integrity/evm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for building the Extended Verification Module (EVM)
+#
+obj-$(CONFIG_EVM) += evm.o
+
+evm-y := evm_main.o evm_crypto.o evm_secfs.o
+evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
new file mode 100644
index 000000000..c3f437f5d
--- /dev/null
+++ b/security/integrity/evm/evm.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm.h
+ *
+ */
+
+#ifndef __INTEGRITY_EVM_H
+#define __INTEGRITY_EVM_H
+
+#include <linux/xattr.h>
+#include <linux/security.h>
+
+#include "../integrity.h"
+
+#define EVM_INIT_HMAC 0x0001
+#define EVM_INIT_X509 0x0002
+#define EVM_ALLOW_METADATA_WRITES 0x0004
+#define EVM_SETUP_COMPLETE 0x80000000 /* userland has signaled key load */
+
+#define EVM_KEY_MASK (EVM_INIT_HMAC | EVM_INIT_X509)
+#define EVM_INIT_MASK (EVM_INIT_HMAC | EVM_INIT_X509 | EVM_SETUP_COMPLETE | \
+ EVM_ALLOW_METADATA_WRITES)
+
+struct xattr_list {
+ struct list_head list;
+ char *name;
+};
+
+extern int evm_initialized;
+
+#define EVM_ATTR_FSUUID 0x0001
+
+extern int evm_hmac_attrs;
+
+extern struct crypto_shash *hmac_tfm;
+extern struct crypto_shash *hash_tfm;
+
+/* List of EVM protected security xattrs */
+extern struct list_head evm_config_xattrnames;
+
+struct evm_digest {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+} __packed;
+
+int evm_init_key(void);
+int evm_update_evmxattr(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len);
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, struct evm_digest *data);
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char type,
+ struct evm_digest *data);
+int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+int evm_init_secfs(void);
+
+#endif
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
new file mode 100644
index 000000000..fc142d04d
--- /dev/null
+++ b/security/integrity/evm/evm_crypto.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_crypto.c
+ * Using root's kernel master key (kmk), calculate the HMAC
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+#include <keys/encrypted-type.h>
+#include <crypto/hash.h>
+#include <crypto/hash_info.h>
+#include "evm.h"
+
+#define EVMKEY "evm-key"
+#define MAX_KEY_SIZE 128
+static unsigned char evmkey[MAX_KEY_SIZE];
+static int evmkey_len = MAX_KEY_SIZE;
+
+struct crypto_shash *hmac_tfm;
+static struct crypto_shash *evm_tfm[HASH_ALGO__LAST];
+
+static DEFINE_MUTEX(mutex);
+
+#define EVM_SET_KEY_BUSY 0
+
+static unsigned long evm_set_key_flags;
+
+static char * const evm_hmac = "hmac(sha1)";
+
+/**
+ * evm_set_key() - set EVM HMAC key from the kernel
+ * @key: pointer to a buffer with the key data
+ * @size: length of the key data
+ *
+ * This function allows setting the EVM HMAC key from the kernel
+ * without using the "encrypted" key subsystem keys. It can be used
+ * by the crypto HW kernel module which has its own way of managing
+ * keys.
+ *
+ * The key length should be between 32 and 128 bytes.
+ */
+int evm_set_key(void *key, size_t keylen)
+{
+ int rc;
+
+ rc = -EBUSY;
+ if (test_and_set_bit(EVM_SET_KEY_BUSY, &evm_set_key_flags))
+ goto busy;
+ rc = -EINVAL;
+ if (keylen > MAX_KEY_SIZE)
+ goto inval;
+ memcpy(evmkey, key, keylen);
+ evm_initialized |= EVM_INIT_HMAC;
+ pr_info("key initialized\n");
+ return 0;
+inval:
+ clear_bit(EVM_SET_KEY_BUSY, &evm_set_key_flags);
+busy:
+ pr_err("key initialization failed\n");
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_set_key);
+
+static struct shash_desc *init_desc(char type, uint8_t hash_algo)
+{
+ long rc;
+ const char *algo;
+ struct crypto_shash **tfm;
+ struct shash_desc *desc;
+
+ if (type == EVM_XATTR_HMAC) {
+ if (!(evm_initialized & EVM_INIT_HMAC)) {
+ pr_err_once("HMAC key is not set\n");
+ return ERR_PTR(-ENOKEY);
+ }
+ tfm = &hmac_tfm;
+ algo = evm_hmac;
+ } else {
+ if (hash_algo >= HASH_ALGO__LAST)
+ return ERR_PTR(-EINVAL);
+
+ tfm = &evm_tfm[hash_algo];
+ algo = hash_algo_name[hash_algo];
+ }
+
+ if (IS_ERR_OR_NULL(*tfm)) {
+ mutex_lock(&mutex);
+ if (*tfm)
+ goto out;
+ *tfm = crypto_alloc_shash(algo, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_NOLOAD);
+ if (IS_ERR(*tfm)) {
+ rc = PTR_ERR(*tfm);
+ pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
+ *tfm = NULL;
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ if (type == EVM_XATTR_HMAC) {
+ rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
+ if (rc) {
+ crypto_free_shash(*tfm);
+ *tfm = NULL;
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ }
+out:
+ mutex_unlock(&mutex);
+ }
+
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->tfm = *tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rc = crypto_shash_init(desc);
+ if (rc) {
+ kfree(desc);
+ return ERR_PTR(rc);
+ }
+ return desc;
+}
+
+/* Protect against 'cutting & pasting' security.evm xattr, include inode
+ * specific info.
+ *
+ * (Additional directory/file metadata needs to be added for more complete
+ * protection.)
+ */
+static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ char type, char *digest)
+{
+ struct h_misc {
+ unsigned long ino;
+ __u32 generation;
+ uid_t uid;
+ gid_t gid;
+ umode_t mode;
+ } hmac_misc;
+
+ memset(&hmac_misc, 0, sizeof(hmac_misc));
+ /* Don't include the inode or generation number in portable
+ * signatures
+ */
+ if (type != EVM_XATTR_PORTABLE_DIGSIG) {
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ }
+ /* The hmac uid and gid must be encoded in the initial user
+ * namespace (not the filesystems user namespace) as encoding
+ * them in the filesystems user namespace allows an attack
+ * where first they are written in an unprivileged fuse mount
+ * of a filesystem and then the system is tricked to mount the
+ * filesystem for real on next boot and trust it because
+ * everything is signed.
+ */
+ hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+ hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
+ hmac_misc.mode = inode->i_mode;
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
+ if ((evm_hmac_attrs & EVM_ATTR_FSUUID) &&
+ type != EVM_XATTR_PORTABLE_DIGSIG)
+ crypto_shash_update(desc, &inode->i_sb->s_uuid.b[0],
+ sizeof(inode->i_sb->s_uuid));
+ crypto_shash_final(desc, digest);
+}
+
+/*
+ * Calculate the HMAC value across the set of protected security xattrs.
+ *
+ * Instead of retrieving the requested xattr, for performance, calculate
+ * the hmac using the requested xattr value. Don't alloc/free memory for
+ * each xattr, but attempt to re-use the previously allocated memory.
+ */
+static int evm_calc_hmac_or_hash(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len,
+ uint8_t type, struct evm_digest *data)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct xattr_list *xattr;
+ struct shash_desc *desc;
+ size_t xattr_size = 0;
+ char *xattr_value = NULL;
+ int error;
+ int size;
+ bool ima_present = false;
+
+ if (!(inode->i_opflags & IOP_XATTR) ||
+ inode->i_sb->s_user_ns != &init_user_ns)
+ return -EOPNOTSUPP;
+
+ desc = init_desc(type, data->hdr.algo);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ data->hdr.length = crypto_shash_digestsize(desc->tfm);
+
+ error = -ENODATA;
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ bool is_ima = false;
+
+ if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
+ is_ima = true;
+
+ if ((req_xattr_name && req_xattr_value)
+ && !strcmp(xattr->name, req_xattr_name)) {
+ error = 0;
+ crypto_shash_update(desc, (const u8 *)req_xattr_value,
+ req_xattr_value_len);
+ if (is_ima)
+ ima_present = true;
+ continue;
+ }
+ size = vfs_getxattr_alloc(dentry, xattr->name,
+ &xattr_value, xattr_size, GFP_NOFS);
+ if (size == -ENOMEM) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (size < 0)
+ continue;
+
+ error = 0;
+ xattr_size = size;
+ crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ if (is_ima)
+ ima_present = true;
+ }
+ hmac_add_misc(desc, inode, type, data->digest);
+
+ /* Portable EVM signatures must include an IMA hash */
+ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present)
+ error = -EPERM;
+out:
+ kfree(xattr_value);
+ kfree(desc);
+ return error;
+}
+
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ struct evm_digest *data)
+{
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+ req_xattr_value_len, EVM_XATTR_HMAC, data);
+}
+
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ char type, struct evm_digest *data)
+{
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+ req_xattr_value_len, type, data);
+}
+
+static int evm_is_immutable(struct dentry *dentry, struct inode *inode)
+{
+ const struct evm_ima_xattr_data *xattr_data = NULL;
+ struct integrity_iint_cache *iint;
+ int rc = 0;
+
+ iint = integrity_iint_find(inode);
+ if (iint && (iint->flags & EVM_IMMUTABLE_DIGSIG))
+ return 1;
+
+ /* Do this the hard way */
+ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0) {
+ if (rc == -ENODATA)
+ return 0;
+ return rc;
+ }
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG)
+ rc = 1;
+ else
+ rc = 0;
+
+ kfree(xattr_data);
+ return rc;
+}
+
+
+/*
+ * Calculate the hmac and update security.evm xattr
+ *
+ * Expects to be called with i_mutex locked.
+ */
+int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct evm_digest data;
+ int rc = 0;
+
+ /*
+ * Don't permit any transformation of the EVM xattr if the signature
+ * is of an immutable type
+ */
+ rc = evm_is_immutable(dentry, inode);
+ if (rc < 0)
+ return rc;
+ if (rc)
+ return -EPERM;
+
+ data.hdr.algo = HASH_ALGO_SHA1;
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, &data);
+ if (rc == 0) {
+ data.hdr.xattr.sha1.type = EVM_XATTR_HMAC;
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &data.hdr.xattr.data[1],
+ SHA1_DIGEST_SIZE + 1, 0);
+ } else if (rc == -ENODATA && (inode->i_opflags & IOP_XATTR)) {
+ rc = __vfs_removexattr(dentry, XATTR_NAME_EVM);
+ }
+ return rc;
+}
+
+int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+ char *hmac_val)
+{
+ struct shash_desc *desc;
+
+ desc = init_desc(EVM_XATTR_HMAC, HASH_ALGO_SHA1);
+ if (IS_ERR(desc)) {
+ pr_info("init_desc failed\n");
+ return PTR_ERR(desc);
+ }
+
+ crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+ hmac_add_misc(desc, inode, EVM_XATTR_HMAC, hmac_val);
+ kfree(desc);
+ return 0;
+}
+
+/*
+ * Get the key from the TPM for the SHA1-HMAC
+ */
+int evm_init_key(void)
+{
+ struct key *evm_key;
+ struct encrypted_key_payload *ekp;
+ int rc;
+
+ evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
+ if (IS_ERR(evm_key))
+ return -ENOENT;
+
+ down_read(&evm_key->sem);
+ ekp = evm_key->payload.data[0];
+
+ rc = evm_set_key(ekp->decrypted_data, ekp->decrypted_datalen);
+
+ /* burn the original key contents */
+ memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
+ up_read(&evm_key->sem);
+ key_put(evm_key);
+ return rc;
+}
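evm_set_key() is exported so that a driver managing the HMAC key in hardware can hand it to EVM directly instead of going through the encrypted-key path used by evm_init_key(). A hypothetical caller is sketched below, assuming the declaration is available via <linux/evm.h>; hw_derive_evm_key() is an invented placeholder for whatever mechanism the driver uses to obtain the key material.

#include <linux/evm.h>
#include <linux/string.h>

/* Hypothetical driver helper: feed a hardware-derived key to EVM once. */
static int example_load_evm_key(void)
{
	u8 key[64];		/* 32..128 bytes, per the evm_set_key() comment */
	int rc;

	rc = hw_derive_evm_key(key, sizeof(key));	/* placeholder helper */
	if (rc)
		return rc;

	rc = evm_set_key(key, sizeof(key));		/* EVM copies the key */
	memzero_explicit(key, sizeof(key));		/* burn the local copy */
	return rc;
}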
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
new file mode 100644
index 000000000..6d1efe135
--- /dev/null
+++ b/security/integrity/evm/evm_main.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_main.c
+ * implements evm_inode_setxattr, evm_inode_post_setxattr,
+ * evm_inode_removexattr, and evm_verifyxattr
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/audit.h>
+#include <linux/xattr.h>
+#include <linux/integrity.h>
+#include <linux/evm.h>
+#include <linux/magic.h>
+
+#include <crypto/hash.h>
+#include <crypto/hash_info.h>
+#include <crypto/algapi.h>
+#include "evm.h"
+
+int evm_initialized;
+
+static const char * const integrity_status_msg[] = {
+ "pass", "pass_immutable", "fail", "no_label", "no_xattrs", "unknown"
+};
+int evm_hmac_attrs;
+
+static struct xattr_list evm_config_default_xattrnames[] = {
+#ifdef CONFIG_SECURITY_SELINUX
+ {.name = XATTR_NAME_SELINUX},
+#endif
+#ifdef CONFIG_SECURITY_SMACK
+ {.name = XATTR_NAME_SMACK},
+#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
+ {.name = XATTR_NAME_SMACKEXEC},
+ {.name = XATTR_NAME_SMACKTRANSMUTE},
+ {.name = XATTR_NAME_SMACKMMAP},
+#endif
+#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+ {.name = XATTR_NAME_APPARMOR},
+#endif
+#ifdef CONFIG_IMA_APPRAISE
+ {.name = XATTR_NAME_IMA},
+#endif
+ {.name = XATTR_NAME_CAPS},
+};
+
+LIST_HEAD(evm_config_xattrnames);
+
+static int evm_fixmode __ro_after_init;
+static int __init evm_set_fixmode(char *str)
+{
+ if (strncmp(str, "fix", 3) == 0)
+ evm_fixmode = 1;
+ return 0;
+}
+__setup("evm=", evm_set_fixmode);
+
+static void __init evm_init_config(void)
+{
+ int i, xattrs;
+
+ xattrs = ARRAY_SIZE(evm_config_default_xattrnames);
+
+ pr_info("Initialising EVM extended attributes:\n");
+ for (i = 0; i < xattrs; i++) {
+ pr_info("%s\n", evm_config_default_xattrnames[i].name);
+ list_add_tail(&evm_config_default_xattrnames[i].list,
+ &evm_config_xattrnames);
+ }
+
+#ifdef CONFIG_EVM_ATTR_FSUUID
+ evm_hmac_attrs |= EVM_ATTR_FSUUID;
+#endif
+ pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
+}
+
+static bool evm_key_loaded(void)
+{
+ return (bool)(evm_initialized & EVM_KEY_MASK);
+}
+
+static int evm_find_protected_xattrs(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct xattr_list *xattr;
+ int error;
+ int count = 0;
+
+ if (!(inode->i_opflags & IOP_XATTR))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
+ if (error < 0) {
+ if (error == -ENODATA)
+ continue;
+ return error;
+ }
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
+ *
+ * Compute the HMAC on the dentry's protected set of extended attributes
+ * and compare it against the stored security.evm xattr.
+ *
+ * For performance:
+ * - use the previously retrieved xattr value and length to calculate the
+ * HMAC.
+ * - cache the verification result in the iint, when available.
+ *
+ * Returns integrity status
+ */
+static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ const char *xattr_name,
+ char *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ struct evm_ima_xattr_data *xattr_data = NULL;
+ struct signature_v2_hdr *hdr;
+ enum integrity_status evm_status = INTEGRITY_PASS;
+ struct evm_digest digest;
+ struct inode *inode;
+ int rc, xattr_len;
+
+ if (iint && (iint->evm_status == INTEGRITY_PASS ||
+ iint->evm_status == INTEGRITY_PASS_IMMUTABLE))
+ return iint->evm_status;
+
+ /* if status is not PASS, try to check again - against -ENOMEM */
+
+ /* first need to know the sig type */
+ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0) {
+ evm_status = INTEGRITY_FAIL;
+ if (rc == -ENODATA) {
+ rc = evm_find_protected_xattrs(dentry);
+ if (rc > 0)
+ evm_status = INTEGRITY_NOLABEL;
+ else if (rc == 0)
+ evm_status = INTEGRITY_NOXATTRS; /* new file */
+ } else if (rc == -EOPNOTSUPP) {
+ evm_status = INTEGRITY_UNKNOWN;
+ }
+ goto out;
+ }
+
+ xattr_len = rc;
+
+ /* check value type */
+ switch (xattr_data->type) {
+ case EVM_XATTR_HMAC:
+ if (xattr_len != sizeof(struct evm_ima_xattr_data)) {
+ evm_status = INTEGRITY_FAIL;
+ goto out;
+ }
+
+ digest.hdr.algo = HASH_ALGO_SHA1;
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, &digest);
+ if (rc)
+ break;
+ rc = crypto_memneq(xattr_data->digest, digest.digest,
+ SHA1_DIGEST_SIZE);
+ if (rc)
+ rc = -EINVAL;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ case EVM_XATTR_PORTABLE_DIGSIG:
+ /* accept xattr with non-empty signature field */
+ if (xattr_len <= sizeof(struct signature_v2_hdr)) {
+ evm_status = INTEGRITY_FAIL;
+ goto out;
+ }
+
+ hdr = (struct signature_v2_hdr *)xattr_data;
+ digest.hdr.algo = hdr->hash_algo;
+ rc = evm_calc_hash(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data->type, &digest);
+ if (rc)
+ break;
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
+ (const char *)xattr_data, xattr_len,
+ digest.digest, digest.hdr.length);
+ if (!rc) {
+ inode = d_backing_inode(dentry);
+
+ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) {
+ if (iint)
+ iint->flags |= EVM_IMMUTABLE_DIGSIG;
+ evm_status = INTEGRITY_PASS_IMMUTABLE;
+ } else if (!IS_RDONLY(inode) &&
+ !(inode->i_sb->s_readonly_remount) &&
+ !IS_IMMUTABLE(inode)) {
+ evm_update_evmxattr(dentry, xattr_name,
+ xattr_value,
+ xattr_value_len);
+ }
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ evm_status = (rc == -ENODATA) ?
+ INTEGRITY_NOXATTRS : INTEGRITY_FAIL;
+out:
+ if (iint)
+ iint->evm_status = evm_status;
+ kfree(xattr_data);
+ return evm_status;
+}
+
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+ int namelen;
+ int found = 0;
+ struct xattr_list *xattr;
+
+ namelen = strlen(req_xattr_name);
+ list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
+ if ((strlen(xattr->name) == namelen)
+ && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
+ found = 1;
+ break;
+ }
+ if (strncmp(req_xattr_name,
+ xattr->name + XATTR_SECURITY_PREFIX_LEN,
+ strlen(req_xattr_name)) == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * evm_verifyxattr - verify the integrity of the requested xattr
+ * @dentry: object of the verify xattr
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+ * previously retrieved to calculate the HMAC.
+ *
+ * Returns the xattr integrity status.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed.
+ */
+enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value, size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
+ return INTEGRITY_UNKNOWN;
+
+ if (!iint) {
+ iint = integrity_iint_find(d_backing_inode(dentry));
+ if (!iint)
+ return INTEGRITY_UNKNOWN;
+ }
+ return evm_verify_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, iint);
+}
+EXPORT_SYMBOL_GPL(evm_verifyxattr);
+
+/*
+ * evm_verify_current_integrity - verify the dentry's metadata integrity
+ * @dentry: pointer to the affected dentry
+ *
+ * Verify and return the dentry's metadata integrity, except before EVM
+ * is initialized or when running in 'fix' mode.
+ */
+static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+
+ if (!evm_key_loaded() || !S_ISREG(inode->i_mode) || evm_fixmode)
+ return 0;
+ return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
+}
+
+/*
+ * evm_protect_xattr - protect the EVM extended attribute
+ *
+ * Prevent security.evm from being modified or removed without the
+ * necessary permissions or when the existing value is invalid.
+ *
+ * The posix xattr acls are 'system' prefixed, which normally would not
+ * affect security.evm. An interesting side effect of writing posix xattr
+ * acls is that they modify the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+ * doesn't exist, to be updated unless the EVM signature is immutable.
+ */
+static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ enum integrity_status evm_status;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ } else if (!evm_protected_xattr(xattr_name)) {
+ if (!posix_xattr_acl(xattr_name))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ goto out;
+ }
+
+ evm_status = evm_verify_current_integrity(dentry);
+ if (evm_status == INTEGRITY_NOXATTRS) {
+ struct integrity_iint_cache *iint;
+
+ iint = integrity_iint_find(d_backing_inode(dentry));
+ if (iint && (iint->flags & IMA_NEW_FILE))
+ return 0;
+
+ /* exception for pseudo filesystems */
+ if (dentry->d_sb->s_magic == TMPFS_MAGIC
+ || dentry->d_sb->s_magic == SYSFS_MAGIC)
+ return 0;
+
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
+ dentry->d_inode, dentry->d_name.name,
+ "update_metadata",
+ integrity_status_msg[evm_status],
+ -EPERM, 0);
+ }
+out:
+ if (evm_status != INTEGRITY_PASS)
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status],
+ -EPERM, 0);
+ return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
+}
+
+/**
+ * evm_inode_setxattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Before allowing the 'security.evm' protected xattr to be updated,
+ * verify the existing value is valid. As only the kernel should have
+ * access to the EVM encrypted key needed to calculate the HMAC, prevent
+ * userspace from writing the HMAC value. Writing 'security.evm'
+ * requires CAP_SYS_ADMIN privileges.
+ */
+int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!xattr_value_len)
+ return -EINVAL;
+ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG &&
+ xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG)
+ return -EPERM;
+ }
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+}
+
+/**
+ * evm_inode_removexattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ /* Policy permits modification of the protected xattrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
+ return evm_protect_xattr(dentry, xattr_name, NULL, 0);
+}
+
+static void evm_reset_status(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ iint = integrity_iint_find(inode);
+ if (iint)
+ iint->evm_status = INTEGRITY_UNKNOWN;
+}
+
+/**
+ * evm_inode_post_setxattr - update 'security.evm' to reflect the changes
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Update the HMAC stored in 'security.evm' to reflect the change.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * __vfs_setxattr_noperm(). The caller of which has taken the inode's
+ * i_mutex lock.
+ */
+void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (!evm_key_loaded() || (!evm_protected_xattr(xattr_name)
+ && !posix_xattr_acl(xattr_name)))
+ return;
+
+ evm_reset_status(dentry->d_inode);
+
+ evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
+}
+
+/**
+ * evm_inode_post_removexattr - update 'security.evm' after removing the xattr
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Update the HMAC stored in 'security.evm' to reflect removal of the xattr.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * vfs_removexattr() which takes the i_mutex.
+ */
+void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ if (!evm_key_loaded() || !evm_protected_xattr(xattr_name))
+ return;
+
+ evm_reset_status(dentry->d_inode);
+
+ evm_update_evmxattr(dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ *
+ * Permit update of file attributes when files have a valid EVM signature,
+ * except in the case of them having an immutable portable signature.
+ */
+int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ unsigned int ia_valid = attr->ia_valid;
+ enum integrity_status evm_status;
+
+ /* Policy permits modification of the protected attrs even though
+ * there's no HMAC key loaded
+ */
+ if (evm_initialized & EVM_ALLOW_METADATA_WRITES)
+ return 0;
+
+ if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, d_backing_inode(dentry),
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status], -EPERM, 0);
+ return -EPERM;
+}
+
+/**
+ * evm_inode_post_setattr - update 'security.evm' after modifying metadata
+ * @dentry: pointer to the affected dentry
+ * @ia_valid: for the UID and GID status
+ *
+ * For now, update the HMAC stored in 'security.evm' to reflect UID/GID
+ * changes.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ if (!evm_key_loaded())
+ return;
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+ evm_update_evmxattr(dentry, NULL, NULL, 0);
+}
+
+/*
+ * evm_inode_init_security - initializes security.evm HMAC value
+ */
+int evm_inode_init_security(struct inode *inode,
+ const struct xattr *lsm_xattr,
+ struct xattr *evm_xattr)
+{
+ struct evm_ima_xattr_data *xattr_data;
+ int rc;
+
+ if (!(evm_initialized & EVM_INIT_HMAC) ||
+ !evm_protected_xattr(lsm_xattr->name))
+ return 0;
+
+ xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+ if (!xattr_data)
+ return -ENOMEM;
+
+ xattr_data->type = EVM_XATTR_HMAC;
+ rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
+ if (rc < 0)
+ goto out;
+
+ evm_xattr->value = xattr_data;
+ evm_xattr->value_len = sizeof(*xattr_data);
+ evm_xattr->name = XATTR_EVM_SUFFIX;
+ return 0;
+out:
+ kfree(xattr_data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_inode_init_security);
+
+#ifdef CONFIG_EVM_LOAD_X509
+void __init evm_load_x509(void)
+{
+ int rc;
+
+ rc = integrity_load_x509(INTEGRITY_KEYRING_EVM, CONFIG_EVM_X509_PATH);
+ if (!rc)
+ evm_initialized |= EVM_INIT_X509;
+}
+#endif
+
+static int __init init_evm(void)
+{
+ int error;
+ struct list_head *pos, *q;
+ struct xattr_list *xattr;
+
+ evm_init_config();
+
+ error = integrity_init_keyring(INTEGRITY_KEYRING_EVM);
+ if (error)
+ goto error;
+
+ error = evm_init_secfs();
+ if (error < 0) {
+ pr_info("Error registering secfs\n");
+ goto error;
+ }
+
+error:
+ if (error != 0) {
+ if (!list_empty(&evm_config_xattrnames)) {
+ list_for_each_safe(pos, q, &evm_config_xattrnames) {
+ xattr = list_entry(pos, struct xattr_list,
+ list);
+ list_del(pos);
+ }
+ }
+ }
+
+ return error;
+}
+
+late_initcall(init_evm);
+
+MODULE_DESCRIPTION("Extended Verification Module");
+MODULE_LICENSE("GPL");
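For context, the main consumer of evm_verifyxattr() is IMA appraisal (ima/ima_appraise.c in the diffstat): it passes in the security.ima value it has already read so that evm_verify_hmac() can reuse it rather than fetch the xattr again. The following is a simplified, non-authoritative sketch of that call pattern; function and variable names are illustrative.

/* Sketch of an evm_verifyxattr() caller; not the actual IMA code. */
static int example_appraise_metadata(struct dentry *dentry,
				     struct integrity_iint_cache *iint,
				     void *ima_xattr, int ima_xattr_len)
{
	enum integrity_status status;

	status = evm_verifyxattr(dentry, XATTR_NAME_IMA,
				 ima_xattr, ima_xattr_len, iint);
	switch (status) {
	case INTEGRITY_PASS:
	case INTEGRITY_PASS_IMMUTABLE:
		return 0;		/* metadata intact */
	case INTEGRITY_NOXATTRS:	/* new file, nothing to protect yet */
	case INTEGRITY_NOLABEL:		/* protected xattrs but no security.evm */
	case INTEGRITY_UNKNOWN:		/* EVM not initialized / not applicable */
		return 0;		/* let policy decide elsewhere */
	default:
		return -EPERM;		/* INTEGRITY_FAIL */
	}
}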
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
new file mode 100644
index 000000000..46408b9e6
--- /dev/null
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+
+int posix_xattr_acl(const char *xattr)
+{
+ int xattr_len = strlen(xattr);
+
+ if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
+ return 1;
+ if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
+ return 1;
+ return 0;
+}
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
new file mode 100644
index 000000000..c5c44203a
--- /dev/null
+++ b/security/integrity/evm/evm_secfs.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_secfs.c
+ * - Used to signal when key is on keyring
+ * - Get the key and enable EVM
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/audit.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include "evm.h"
+
+static struct dentry *evm_dir;
+static struct dentry *evm_init_tpm;
+static struct dentry *evm_symlink;
+
+#ifdef CONFIG_EVM_ADD_XATTRS
+static struct dentry *evm_xattrs;
+static DEFINE_MUTEX(xattr_list_mutex);
+static int evm_xattrs_locked;
+#endif
+
+/**
+ * evm_read_key - read() for <securityfs>/evm
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_key(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", (evm_initialized & ~EVM_SETUP_COMPLETE));
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * evm_write_key - write() for <securityfs>/evm
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Used to signal that key is on the kernel key ring.
+ * - get the integrity hmac key from the kernel key ring
+ * - create list of hmac protected extended attributes
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned int i;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN) || (evm_initialized & EVM_SETUP_COMPLETE))
+ return -EPERM;
+
+ ret = kstrtouint_from_user(buf, count, 0, &i);
+
+ if (ret)
+ return ret;
+
+ /* Reject invalid values */
+ if (!i || (i & ~EVM_INIT_MASK) != 0)
+ return -EINVAL;
+
+ /*
+ * Don't allow a request to enable metadata writes if
+ * an HMAC key is loaded.
+ */
+ if ((i & EVM_ALLOW_METADATA_WRITES) &&
+ (evm_initialized & EVM_INIT_HMAC) != 0)
+ return -EPERM;
+
+ if (i & EVM_INIT_HMAC) {
+ ret = evm_init_key();
+ if (ret != 0)
+ return ret;
+ /* Forbid further writes after the symmetric key is loaded */
+ i |= EVM_SETUP_COMPLETE;
+ }
+
+ evm_initialized |= i;
+
+ /* Don't allow protected metadata modification if a symmetric key
+ * is loaded
+ */
+ if (evm_initialized & EVM_INIT_HMAC)
+ evm_initialized &= ~(EVM_ALLOW_METADATA_WRITES);
+
+ return count;
+}
+
+static const struct file_operations evm_key_ops = {
+ .read = evm_read_key,
+ .write = evm_write_key,
+};
+
+#ifdef CONFIG_EVM_ADD_XATTRS
+/**
+ * evm_read_xattrs - read() for <securityfs>/evm_xattrs
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_xattrs(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *temp;
+ int offset = 0;
+ ssize_t rc, size = 0;
+ struct xattr_list *xattr;
+
+ if (*ppos != 0)
+ return 0;
+
+ rc = mutex_lock_interruptible(&xattr_list_mutex);
+ if (rc)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(xattr, &evm_config_xattrnames, list)
+ size += strlen(xattr->name) + 1;
+
+ temp = kmalloc(size + 1, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&xattr_list_mutex);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry(xattr, &evm_config_xattrnames, list) {
+ sprintf(temp + offset, "%s\n", xattr->name);
+ offset += strlen(xattr->name) + 1;
+ }
+
+ mutex_unlock(&xattr_list_mutex);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ kfree(temp);
+
+ return rc;
+}
+
+/**
+ * evm_write_xattrs - write() for <securityfs>/evm_xattrs
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int len, err;
+ struct xattr_list *xattr, *tmp;
+ struct audit_buffer *ab;
+ struct iattr newattrs;
+ struct inode *inode;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_xattrs_locked)
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (count > XATTR_NAME_MAX)
+ return -E2BIG;
+
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_EVM_XATTR);
+ if (!ab)
+ return -ENOMEM;
+
+ xattr = kmalloc(sizeof(struct xattr_list), GFP_KERNEL);
+ if (!xattr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ xattr->name = memdup_user_nul(buf, count);
+ if (IS_ERR(xattr->name)) {
+ err = PTR_ERR(xattr->name);
+ xattr->name = NULL;
+ goto out;
+ }
+
+ /* Remove any trailing newline */
+ len = strlen(xattr->name);
+ if (len && xattr->name[len-1] == '\n')
+ xattr->name[len-1] = '\0';
+
+ if (strcmp(xattr->name, ".") == 0) {
+ evm_xattrs_locked = 1;
+ newattrs.ia_mode = S_IFREG | 0440;
+ newattrs.ia_valid = ATTR_MODE;
+ inode = evm_xattrs->d_inode;
+ inode_lock(inode);
+ err = simple_setattr(evm_xattrs, &newattrs);
+ inode_unlock(inode);
+ audit_log_format(ab, "locked");
+ if (!err)
+ err = count;
+ goto out;
+ }
+
+ audit_log_format(ab, "xattr=");
+ audit_log_untrustedstring(ab, xattr->name);
+
+ if (strncmp(xattr->name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) != 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * xattr_list_mutex guards against races in evm_read_xattrs().
+ * Entries are only added to the evm_config_xattrnames list
+ * and never deleted. Therefore, the list is traversed
+ * using list_for_each_entry_lockless() without holding
+ * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs()
+ * and evm_protected_xattr().
+ */
+ mutex_lock(&xattr_list_mutex);
+ list_for_each_entry(tmp, &evm_config_xattrnames, list) {
+ if (strcmp(xattr->name, tmp->name) == 0) {
+ err = -EEXIST;
+ mutex_unlock(&xattr_list_mutex);
+ goto out;
+ }
+ }
+ list_add_tail_rcu(&xattr->list, &evm_config_xattrnames);
+ mutex_unlock(&xattr_list_mutex);
+
+ audit_log_format(ab, " res=0");
+ audit_log_end(ab);
+ return count;
+out:
+ audit_log_format(ab, " res=%d", err);
+ audit_log_end(ab);
+ if (xattr) {
+ kfree(xattr->name);
+ kfree(xattr);
+ }
+ return err;
+}
+
+static const struct file_operations evm_xattr_ops = {
+ .read = evm_read_xattrs,
+ .write = evm_write_xattrs,
+};
+
+static int evm_init_xattrs(void)
+{
+ evm_xattrs = securityfs_create_file("evm_xattrs", 0660, evm_dir, NULL,
+ &evm_xattr_ops);
+ if (!evm_xattrs || IS_ERR(evm_xattrs))
+ return -EFAULT;
+
+ return 0;
+}
+#else
+static int evm_init_xattrs(void)
+{
+ return 0;
+}
+#endif
+
+int __init evm_init_secfs(void)
+{
+ int error = 0;
+
+ evm_dir = securityfs_create_dir("evm", integrity_dir);
+ if (!evm_dir || IS_ERR(evm_dir))
+ return -EFAULT;
+
+ evm_init_tpm = securityfs_create_file("evm", 0660,
+ evm_dir, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm)) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ evm_symlink = securityfs_create_symlink("evm", NULL,
+ "integrity/evm/evm", NULL);
+ if (!evm_symlink || IS_ERR(evm_symlink)) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ if (evm_init_xattrs() != 0) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ return 0;
+out:
+ securityfs_remove(evm_symlink);
+ securityfs_remove(evm_init_tpm);
+ securityfs_remove(evm_dir);
+ return error;
+}
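From userspace the interfaces above appear as securityfs files. Writing '1' (EVM_INIT_HMAC) to the 'evm' file triggers evm_init_key() and, once successful, sets EVM_SETUP_COMPLETE so no further writes are accepted. A minimal, hypothetical userspace example, assuming securityfs is mounted at /sys/kernel/security and the 'evm-key' encrypted key has already been loaded onto root's keyring:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* The top-level "evm" entry is the compatibility symlink created by
	 * evm_init_secfs(); it points at integrity/evm/evm. */
	int fd = open("/sys/kernel/security/evm", O_WRONLY);

	if (fd < 0) {
		perror("open evm");
		return 1;
	}
	/* "1" == EVM_INIT_HMAC: fetch the HMAC key and enable EVM. */
	if (write(fd, "1", 1) != 1) {
		perror("write evm");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}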
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
new file mode 100644
index 000000000..5a6810041
--- /dev/null
+++ b/security/integrity/iint.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: integrity_iint.c
+ * - implements the integrity hooks: integrity_inode_alloc,
+ * integrity_inode_free
+ * - cache integrity information associated with an inode
+ * using an rbtree.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/security.h>
+#include "integrity.h"
+
+static struct rb_root integrity_iint_tree = RB_ROOT;
+static DEFINE_RWLOCK(integrity_iint_lock);
+static struct kmem_cache *iint_cache __read_mostly;
+
+struct dentry *integrity_dir;
+
+/*
+ * __integrity_iint_find - return the iint associated with an inode
+ */
+static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ struct rb_node *n = integrity_iint_tree.rb_node;
+
+ while (n) {
+ iint = rb_entry(n, struct integrity_iint_cache, rb_node);
+
+ if (inode < iint->inode)
+ n = n->rb_left;
+ else if (inode > iint->inode)
+ n = n->rb_right;
+ else
+ break;
+ }
+ if (!n)
+ return NULL;
+
+ return iint;
+}
+
+/*
+ * integrity_iint_find - return the iint associated with an inode
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return NULL;
+
+ read_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ read_unlock(&integrity_iint_lock);
+
+ return iint;
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+ kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
+ iint->version = 0;
+ iint->flags = 0UL;
+ iint->atomic_flags = 0UL;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_read_status = INTEGRITY_UNKNOWN;
+ iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ iint->measured_pcrs = 0;
+ kmem_cache_free(iint_cache, iint);
+}
+
+/**
+ * integrity_inode_get - find or allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ * @return: allocated iint
+ *
+ * Caller must lock i_mutex
+ */
+struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+{
+ struct rb_node **p;
+ struct rb_node *node, *parent = NULL;
+ struct integrity_iint_cache *iint, *test_iint;
+
+ iint = integrity_iint_find(inode);
+ if (iint)
+ return iint;
+
+ iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!iint)
+ return NULL;
+
+ write_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ test_iint = rb_entry(parent, struct integrity_iint_cache,
+ rb_node);
+ if (inode < test_iint->inode)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ iint->inode = inode;
+ node = &iint->rb_node;
+ inode->i_flags |= S_IMA;
+ rb_link_node(node, parent, p);
+ rb_insert_color(node, &integrity_iint_tree);
+
+ write_unlock(&integrity_iint_lock);
+ return iint;
+}
+
+/**
+ * integrity_inode_free - called on security_inode_free
+ * @inode: pointer to the inode
+ *
+ * Free the integrity information (iint) associated with an inode.
+ */
+void integrity_inode_free(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return;
+
+ write_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ rb_erase(&iint->rb_node, &integrity_iint_tree);
+ write_unlock(&integrity_iint_lock);
+
+ iint_free(iint);
+}
+
+static void init_once(void *foo)
+{
+ struct integrity_iint_cache *iint = foo;
+
+ memset(iint, 0, sizeof(*iint));
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_read_status = INTEGRITY_UNKNOWN;
+ iint->ima_creds_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ mutex_init(&iint->mutex);
+}
+
+static int __init integrity_iintcache_init(void)
+{
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+ 0, SLAB_PANIC, init_once);
+ return 0;
+}
+security_initcall(integrity_iintcache_init);
+
+
+/*
+ * integrity_kernel_read - read data from the file
+ *
+ * This is a function for reading file content instead of kernel_read().
+ * It does not perform locking checks to ensure it cannot be blocked.
+ * It does not perform security checks because they are irrelevant for IMA.
+ *
+ */
+int integrity_kernel_read(struct file *file, loff_t offset,
+ void *addr, unsigned long count)
+{
+ mm_segment_t old_fs;
+ char __user *buf = (char __user *)addr;
+ ssize_t ret;
+
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ ret = __vfs_read(file, buf, count, &offset);
+ set_fs(old_fs);
+
+ return ret;
+}
+
+/*
+ * integrity_load_keys - load integrity keys hook
+ *
+ * This hook is called from init/main.c:kernel_init_freeable()
+ * when the rootfs is ready.
+ */
+void __init integrity_load_keys(void)
+{
+ ima_load_x509();
+ evm_load_x509();
+}
+
+static int __init integrity_fs_init(void)
+{
+ integrity_dir = securityfs_create_dir("integrity", NULL);
+ if (IS_ERR(integrity_dir)) {
+ int ret = PTR_ERR(integrity_dir);
+
+ if (ret != -ENODEV)
+ pr_err("Unable to create integrity sysfs dir: %d\n",
+ ret);
+ integrity_dir = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+late_initcall(integrity_fs_init)
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
new file mode 100644
index 000000000..5095b2e8f
--- /dev/null
+++ b/security/integrity/ima/Kconfig
@@ -0,0 +1,288 @@
+# IBM Integrity Measurement Architecture
+#
+config IMA
+ bool "Integrity Measurement Architecture(IMA)"
+ select SECURITYFS
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ select CRYPTO_HASH_INFO
+ select TCG_TPM if HAS_IOMEM && !UML
+ select TCG_TIS if TCG_TPM && X86
+ select TCG_CRB if TCG_TPM && ACPI
+ select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES
+ select INTEGRITY_AUDIT if AUDIT
+ help
+ The Trusted Computing Group (TCG) runtime Integrity
+ Measurement Architecture (IMA) maintains a list of hash
+ values of executables and other sensitive system files,
+ as they are read or executed. If an attacker manages
+ to change the contents of an important system file
+ being measured, we can tell.
+
+ If your system has a TPM chip, then IMA also maintains
+ an aggregate integrity value over this list inside the
+ TPM hardware, so that the TPM can prove to a third party
+ whether or not critical system files have been modified.
+ Read <http://www.usenix.org/events/sec04/tech/sailer.html>
+ to learn more about IMA.
+ If unsure, say N.
+
+config IMA_KEXEC
+ bool "Enable carrying the IMA measurement list across a soft boot"
+ depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
+ default n
+ help
+ TPM PCRs are only reset on a hard reboot. In order to validate
+ a TPM's quote after a soft boot, the IMA measurement list of the
+ running kernel must be saved and restored on boot.
+
+ Depending on the IMA policy, the measurement list can grow to
+ be very large.
+
+config IMA_MEASURE_PCR_IDX
+ int
+ depends on IMA
+ range 8 14
+ default 10
+ help
+ IMA_MEASURE_PCR_IDX determines the TPM PCR register index
+ that IMA uses to maintain the integrity aggregate of the
+ measurement list. If unsure, use the default 10.
+
+config IMA_LSM_RULES
+ bool
+ depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
+ default y
+ help
+ Disabling this option will disregard LSM based policy rules.
+
+choice
+ prompt "Default template"
+ default IMA_NG_TEMPLATE
+ depends on IMA
+ help
+ Select the default IMA measurement template.
+
+ The original 'ima' measurement list template contains a
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+ pathnames.
+
+ config IMA_TEMPLATE
+ bool "ima"
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+ bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+ string
+ depends on IMA
+ default "ima" if IMA_TEMPLATE
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+ prompt "Default integrity hash algorithm"
+ default IMA_DEFAULT_HASH_SHA1
+ depends on IMA
+ help
+ Select the default hash algorithm used for the measurement
+ list, integrity appraisal and audit log. The compiled default
+ hash algorithm can be overridden using the kernel command
+ line 'ima_hash=' option.
+
+ config IMA_DEFAULT_HASH_SHA1
+ bool "SHA1 (default)"
+ depends on CRYPTO_SHA1=y
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+ depends on CRYPTO_SHA256=y && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+ depends on CRYPTO_SHA512=y && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+ depends on CRYPTO_WP512=y && !IMA_TEMPLATE
+endchoice
+
+config IMA_DEFAULT_HASH
+ string
+ depends on IMA
+ default "sha1" if IMA_DEFAULT_HASH_SHA1
+ default "sha256" if IMA_DEFAULT_HASH_SHA256
+ default "sha512" if IMA_DEFAULT_HASH_SHA512
+ default "wp512" if IMA_DEFAULT_HASH_WP512
+
+config IMA_WRITE_POLICY
+ bool "Enable multiple writes to the IMA policy"
+ depends on IMA
+ default n
+ help
+ IMA policy can now be updated multiple times. The new rules get
+ appended to the original policy. Keep in mind that the rules are
+ scanned in FIFO order, so be careful when you design and add new ones.
+
+ If unsure, say N.
+
+config IMA_READ_POLICY
+ bool "Enable reading back the current IMA policy"
+ depends on IMA
+ default y if IMA_WRITE_POLICY
+ default n if !IMA_WRITE_POLICY
+ help
+ It is often useful to be able to read back the IMA policy. It is
+ even more important after introducing CONFIG_IMA_WRITE_POLICY.
+ This option allows the root user to see the current policy rules.
+
+config IMA_APPRAISE
+ bool "Appraise integrity measurements"
+ depends on IMA
+ default n
+ help
+ This option enables local measurement integrity appraisal.
+ It requires the system to be labeled with a security extended
+ attribute containing the file hash measurement. To protect
+ the security extended attributes from offline attack, enable
+ and configure EVM.
+
+ For more information on integrity appraisal refer to:
+ <http://linux-ima.sourceforge.net>
+ If unsure, say N.
+
+config IMA_APPRAISE_BUILD_POLICY
+ bool "IMA build time configured policy rules"
+ depends on IMA_APPRAISE && INTEGRITY_ASYMMETRIC_KEYS
+ default n
+ help
+ This option defines an IMA appraisal policy at build time, which
+ is enforced at run time without having to specify a builtin
+ policy name on the boot command line. The build time appraisal
+ policy rules persist after loading a custom policy.
+
+ Depending on the rules configured, this policy may require kernel
+ modules, firmware, the kexec kernel image, and/or the IMA policy
+ to be signed. Unsigned files might prevent the system from
+ booting or applications from working properly.
+
+config IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ bool "Appraise firmware signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ This option defines a policy requiring all firmware to be signed,
+ including the regulatory.db. If both this option and
+ CFG80211_REQUIRE_SIGNED_REGDB are enabled, then both signature
+ verification methods are necessary.
+
+config IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ bool "Appraise kexec kernel image signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kexec'ed kernel images to
+ be signed and verified by a public key on the trusted IMA
+ keyring.
+
+ Kernel image signatures cannot be verified by the original
+ kexec_load syscall. Enabling this rule will prevent its
+ usage.
+
+config IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ bool "Appraise kernel modules signatures"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require all kernel modules to be signed
+ and verified by a public key on the trusted IMA keyring.
+
+ Kernel module signatures can only be verified by IMA-appraisal,
+ via the finit_module syscall. Enabling this rule will prevent
+ the usage of the init_module syscall.
+
+config IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ bool "Appraise IMA policy signature"
+ depends on IMA_APPRAISE_BUILD_POLICY
+ default n
+ help
+ Enabling this rule will require the IMA policy to be signed and
+ verified by a key on the trusted IMA keyring.
+
+config IMA_APPRAISE_BOOTPARAM
+ bool "ima_appraise boot parameter"
+ depends on IMA_APPRAISE
+ default y
+ help
+ This option enables the different "ima_appraise=" modes
+ (e.g. fix, log) from the boot command line.
+
+config IMA_TRUSTED_KEYRING
+ bool "Require all keys on the .ima keyring be signed (deprecated)"
+ depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select INTEGRITY_TRUSTED_KEYRING
+ default y
+ help
+ This option requires that all keys added to the .ima
+ keyring be signed by a key on the system trusted keyring.
+
+ This option is deprecated in favor of INTEGRITY_TRUSTED_KEYRING
+
+config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
+ bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on SECONDARY_TRUSTED_KEYRING
+ depends on INTEGRITY_ASYMMETRIC_KEYS
+ select INTEGRITY_TRUSTED_KEYRING
+ default n
+ help
+ Keys may be added to the IMA or IMA blacklist keyrings, if the
+ key is validly signed by a CA cert in the system built-in or
+ secondary trusted keyrings.
+
+ Intermediate keys between those the kernel has compiled in and the
+ IMA keys to be added may be added to the system secondary keyring,
+ provided they are validly signed by a key already resident in the
+ built-in or secondary trusted keyrings.
+
+config IMA_BLACKLIST_KEYRING
+ bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
+ depends on SYSTEM_TRUSTED_KEYRING
+ depends on IMA_TRUSTED_KEYRING
+ default n
+ help
+ This option creates an IMA blacklist keyring, which contains all
+ revoked IMA keys. It is consulted before any other keyring. If
+ the search is successful the requested operation is rejected and
+ an error is returned to the caller.
+
+config IMA_LOAD_X509
+ bool "Load X509 certificate onto the '.ima' trusted keyring"
+ depends on IMA_TRUSTED_KEYRING
+ default n
+ help
+ File signature verification is based on the public keys
+ loaded on the .ima trusted keyring. These public keys are
+ X509 certificates signed by a trusted key on the
+ .system keyring. This option enables X509 certificate
+ loading from the kernel onto the '.ima' trusted keyring.
+
+config IMA_X509_PATH
+ string "IMA X509 certificate path"
+ depends on IMA_LOAD_X509
+ default "/etc/keys/x509_ima.der"
+ help
+ This option defines the IMA X509 certificate path.
+
+config IMA_APPRAISE_SIGNED_INIT
+ bool "Require signed user-space initialization"
+ depends on IMA_LOAD_X509
+ default n
+ help
+ This option requires user-space init to be signed.
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
new file mode 100644
index 000000000..d921dc4f9
--- /dev/null
+++ b/security/integrity/ima/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for building Trusted Computing Group's (TCG) runtime Integrity
+# Measurement Architecture (IMA).
+#
+
+obj-$(CONFIG_IMA) += ima.o
+
+ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
+ ima_policy.o ima_template.o ima_template_lib.o
+ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
+ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
+obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
new file mode 100644
index 000000000..d12b07eb3
--- /dev/null
+++ b/security/integrity/ima/ima.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima.h
+ * internal Integrity Measurement Architecture (IMA) definitions
+ */
+
+#ifndef __LINUX_IMA_H
+#define __LINUX_IMA_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/hash.h>
+#include <linux/tpm.h>
+#include <linux/audit.h>
+#include <crypto/hash_info.h>
+
+#include "../integrity.h"
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+#include <asm/ima.h>
+#endif
+
+enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+ IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+
+/* digest size for IMA, fits SHA1 or MD5 */
+#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
+#define IMA_EVENT_NAME_LEN_MAX 255
+
+#define IMA_HASH_BITS 10
+#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
+/* current content of the policy */
+extern int ima_policy_flag;
+
+/* set during initialization */
+extern int ima_hash_algo;
+extern int ima_appraise;
+extern struct tpm_chip *ima_tpm_chip;
+extern const char boot_aggregate_name[];
+
+/* IMA event related data */
+struct ima_event_data {
+ struct integrity_iint_cache *iint;
+ struct file *file;
+ const unsigned char *filename;
+ struct evm_ima_xattr_data *xattr_value;
+ int xattr_len;
+ const char *violation;
+};
+
+/* IMA template field data definition */
+struct ima_field_data {
+ u8 *data;
+ u32 len;
+};
+
+/* IMA template field definition */
+struct ima_template_field {
+ const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+ int (*field_init)(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+ void (*field_show)(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+};
+
+/* IMA template descriptor definition */
+struct ima_template_desc {
+ struct list_head list;
+ char *name;
+ char *fmt;
+ int num_fields;
+ struct ima_template_field **fields;
+};
+
+struct ima_template_entry {
+ int pcr;
+ u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
+ struct ima_template_desc *template_desc; /* template descriptor */
+ u32 template_data_len;
+ struct ima_field_data template_data[0]; /* template related data */
+};
+
+struct ima_queue_entry {
+ struct hlist_node hnext; /* place in hash collision list */
+ struct list_head later; /* place in ima_measurements list */
+ struct ima_template_entry *entry;
+};
+extern struct list_head ima_measurements; /* list of all measurements */
+
+/* Some details preceding the binary serialized measurement list */
+struct ima_kexec_hdr {
+ u16 version;
+ u16 _reserved0;
+ u32 _reserved1;
+ u64 buffer_size;
+ u64 count;
+};
+
+#ifdef CONFIG_HAVE_IMA_KEXEC
+void ima_load_kexec_buffer(void);
+#else
+static inline void ima_load_kexec_buffer(void) {}
+#endif /* CONFIG_HAVE_IMA_KEXEC */
+
+/*
+ * The default binary_runtime_measurements list format is defined as the
+ * platform native format. The canonical format is defined as little-endian.
+ */
+extern bool ima_canonical_fmt;
+
+/* Internal IMA function definitions */
+int ima_init(void);
+int ima_fs_init(void);
+int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ const char *op, struct inode *inode,
+ const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_buffer_hash(const void *buf, loff_t len,
+ struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash);
+int ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
+ struct integrity_iint_cache *iint,
+ const char *op, const char *cause);
+int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
+struct ima_template_desc *ima_template_desc_current(void);
+int ima_restore_measurement_entry(struct ima_template_entry *entry);
+int ima_restore_measurement_list(loff_t bufsize, void *buf);
+int ima_measurements_show(struct seq_file *m, void *v);
+unsigned long ima_get_binary_runtime_size(void);
+int ima_init_template(void);
+void ima_init_template_list(void);
+
+/*
+ * used to protect h_table and sha_table
+ */
+extern spinlock_t ima_queue_lock;
+
+struct ima_h_table {
+ atomic_long_t len; /* number of stored measurements in the list */
+ atomic_long_t violations;
+ struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
+};
+extern struct ima_h_table ima_htable;
+
+static inline unsigned int ima_hash_key(u8 *digest)
+{
+ /* there is no point in taking a hash of part of a digest */
+ return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE;
+}
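As a worked example of the bucket selection above (with IMA_HASH_BITS of 10, i.e. a 1024-entry table): a digest starting 0xAB 0xCD hashes to (0xAB | 0xCD << 8) % 1024 = 0xCDAB % 1024 = 0x1AB = 427, so only digest[0] and the two low bits of digest[1] ever influence the bucket.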
+
+#define __ima_hooks(hook) \
+ hook(NONE) \
+ hook(FILE_CHECK) \
+ hook(MMAP_CHECK) \
+ hook(BPRM_CHECK) \
+ hook(CREDS_CHECK) \
+ hook(POST_SETATTR) \
+ hook(MODULE_CHECK) \
+ hook(FIRMWARE_CHECK) \
+ hook(KEXEC_KERNEL_CHECK) \
+ hook(KEXEC_INITRAMFS_CHECK) \
+ hook(POLICY_CHECK) \
+ hook(MAX_CHECK)
+#define __ima_hook_enumify(ENUM) ENUM,
+
+enum ima_hooks {
+ __ima_hooks(__ima_hook_enumify)
+};
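The __ima_hooks() X-macro keeps the hook list in a single place, so the same list can be expanded again to build parallel tables. A hedged sketch (the stringify macro and table below are illustrative, not part of this header):

#define __ima_hook_stringify(str)	(#str),

static const char *const ima_hook_names[] = {
	__ima_hooks(__ima_hook_stringify)
};
/* e.g. ima_hook_names[BPRM_CHECK] is "BPRM_CHECK" */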
+
+/* LIM API function definitions */
+int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
+ int mask, enum ima_hooks func, int *pcr);
+int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func);
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file, void *buf, loff_t size,
+ enum hash_algo algo);
+void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, int pcr);
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+ const unsigned char *filename);
+int ima_alloc_init_template(struct ima_event_data *event_data,
+ struct ima_template_entry **entry);
+int ima_store_template(struct ima_template_entry *entry, int violation,
+ struct inode *inode,
+ const unsigned char *filename, int pcr);
+void ima_free_template_entry(struct ima_template_entry *entry);
+const char *ima_d_path(const struct path *path, char **pathbuf, char *filename);
+
+/* IMA policy related functions */
+int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
+ enum ima_hooks func, int mask, int flags, int *pcr);
+void ima_init_policy(void);
+void ima_update_policy(void);
+void ima_update_policy_flag(void);
+ssize_t ima_parse_add_rule(char *);
+void ima_delete_rules(void);
+int ima_check_policy(void);
+void *ima_policy_start(struct seq_file *m, loff_t *pos);
+void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos);
+void ima_policy_stop(struct seq_file *m, void *v);
+int ima_policy_show(struct seq_file *m, void *v);
+
+/* Appraise integrity measurements */
+#define IMA_APPRAISE_ENFORCE 0x01
+#define IMA_APPRAISE_FIX 0x02
+#define IMA_APPRAISE_LOG 0x04
+#define IMA_APPRAISE_MODULES 0x08
+#define IMA_APPRAISE_FIRMWARE 0x10
+#define IMA_APPRAISE_POLICY 0x20
+#define IMA_APPRAISE_KEXEC 0x40
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise_measurement(enum ima_hooks func,
+ struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ enum ima_hooks func);
+enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value);
+
+#else
+static inline int ima_appraise_measurement(enum ima_hooks func,
+ struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline int ima_must_appraise(struct inode *inode, int mask,
+ enum ima_hooks func)
+{
+ return 0;
+}
+
+static inline void ima_update_xattr(struct integrity_iint_cache *iint,
+ struct file *file)
+{
+}
+
+static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache
+ *iint,
+ enum ima_hooks func)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline enum hash_algo
+ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len)
+{
+ return ima_hash_algo;
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ return 0;
+}
+
+#endif /* CONFIG_IMA_APPRAISE */
+
+/* LSM based policy rules require audit */
+#ifdef CONFIG_IMA_LSM_RULES
+
+#define security_filter_rule_init security_audit_rule_init
+#define security_filter_rule_match security_audit_rule_match
+
+#else
+
+static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr,
+ void **lsmrule)
+{
+ return -EINVAL;
+}
+
+static inline int security_filter_rule_match(u32 secid, u32 field, u32 op,
+ void *lsmrule,
+ struct audit_context *actx)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_IMA_LSM_RULES */
+
+#ifdef CONFIG_IMA_READ_POLICY
+#define POLICY_FILE_FLAGS (S_IWUSR | S_IRUSR)
+#else
+#define POLICY_FILE_FLAGS S_IWUSR
+#endif /* CONFIG_IMA_READ_POLICY */
+
+#endif /* __LINUX_IMA_H */
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
new file mode 100644
index 000000000..a02c5acfd
--- /dev/null
+++ b/security/integrity/ima/ima_api.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_api.c
+ * Implements must_appraise_or_measure, collect_measurement,
+ * appraise_measurement, store_measurement and store_template.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+#include <linux/iversion.h>
+
+#include "ima.h"
+
+/*
+ * ima_free_template_entry - free an existing template entry
+ */
+void ima_free_template_entry(struct ima_template_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < entry->template_desc->num_fields; i++)
+ kfree(entry->template_data[i].data);
+
+ kfree(entry);
+}
+
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct ima_event_data *event_data,
+ struct ima_template_entry **entry)
+{
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i, result = 0;
+
+ *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
+ sizeof(struct ima_field_data), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_template_field *field = template_desc->fields[i];
+ u32 len;
+
+ result = field->field_init(event_data,
+ &((*entry)->template_data[i]));
+ if (result != 0)
+ goto out;
+
+ len = (*entry)->template_data[i].len;
+ (*entry)->template_data_len += sizeof(len);
+ (*entry)->template_data_len += len;
+ }
+ return 0;
+out:
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ return result;
+}
+
+/*
+ * ima_store_template - store ima template measurements
+ *
+ * Calculate the hash of a template entry, add the template entry
+ * to an ordered list of measurement entries maintained inside the kernel,
+ * and also update the aggregate integrity value (maintained inside the
+ * configured TPM PCR) over the hashes of the current list of measurement
+ * entries.
+ *
+ * Applications retrieve the current kernel-held measurement list through
+ * the securityfs entries in /sys/kernel/security/ima. The signed aggregate
+ * TPM PCR (called quote) can be retrieved using a TPM user space library
+ * and is used to validate the measurement list.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int ima_store_template(struct ima_template_entry *entry,
+ int violation, struct inode *inode,
+ const unsigned char *filename, int pcr)
+{
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "hashing_error";
+ char *template_name = entry->template_desc->name;
+ int result;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
+
+ if (!violation) {
+ int num_fields = entry->template_desc->num_fields;
+
+ /* this function uses default algo */
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_field_array_hash(&entry->template_data[0],
+ entry->template_desc,
+ num_fields, &hash.hdr);
+ if (result < 0) {
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
+ template_name, op,
+ audit_cause, result, 0);
+ return result;
+ }
+ memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
+ }
+ entry->pcr = pcr;
+ result = ima_add_template_entry(entry, violation, op, inode, filename);
+ return result;
+}
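As the comment above notes, applications read the kernel-held list through securityfs. A minimal user-space sketch, assuming the usual /sys/kernel/security mount point and the ascii_runtime_measurements interface exposed by ima_fs.c:

#include <stdio.h>

int main(void)
{
	char line[8192];
	FILE *f = fopen("/sys/kernel/security/ima/ascii_runtime_measurements", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* each record: PCR index, template hash, template name, template fields */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}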
+
+/*
+ * ima_add_violation - add violation to measurement list.
+ *
+ * Violations are flagged in the measurement list with zero hash values.
+ * By extending the PCR with 0xFF's instead of with zeroes, the PCR
+ * value is invalidated.
+ */
+void ima_add_violation(struct file *file, const unsigned char *filename,
+ struct integrity_iint_cache *iint,
+ const char *op, const char *cause)
+{
+ struct ima_template_entry *entry;
+ struct inode *inode = file_inode(file);
+ struct ima_event_data event_data = {iint, file, filename, NULL, 0,
+ cause};
+ int violation = 1;
+ int result;
+
+ /* can overflow, only indicator */
+ atomic_long_inc(&ima_htable.violations);
+
+ result = ima_alloc_init_template(&event_data, &entry);
+ if (result < 0) {
+ result = -ENOMEM;
+ goto err_out;
+ }
+ result = ima_store_template(entry, violation, inode,
+ filename, CONFIG_IMA_MEASURE_PCR_IDX);
+ if (result < 0)
+ ima_free_template_entry(entry);
+err_out:
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+ op, cause, result, 0);
+}
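A sketch of the invalidation idea described above (illustrative only; the actual PCR extend lives in ima_queue.c): the entry recorded in the measurement list carries a zeroed digest, while the value folded into the PCR is all 0xFF, so the aggregate can no longer match the list:

static const u8 zero_digest[TPM_DIGEST_SIZE];		/* recorded in the list */
static const u8 invalid_digest[TPM_DIGEST_SIZE] = {	/* extended into the PCR */
	[0 ... TPM_DIGEST_SIZE - 1] = 0xff
};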
+
+/**
+ * ima_get_action - appraise & measure decision based on policy.
+ * @inode: pointer to inode to measure
+ * @cred: pointer to credentials structure to validate
+ * @secid: secid of the task being validated
+ * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXEC,
+ * MAY_APPEND)
+ * @func: caller identifier
+ * @pcr: pointer filled in if matched measure policy sets pcr=
+ *
+ * The policy is defined in terms of keypairs:
+ * subj=, obj=, type=, func=, mask=, fsmagic=
+ * subj, obj, and type are LSM specific.
+ * func: FILE_CHECK | BPRM_CHECK | CREDS_CHECK | MMAP_CHECK | MODULE_CHECK
+ * mask: contains the permission mask
+ * fsmagic: hex value
+ *
+ * Returns IMA_MEASURE, IMA_APPRAISE mask.
+ *
+ */
+int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid,
+ int mask, enum ima_hooks func, int *pcr)
+{
+ int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH;
+
+ flags &= ima_policy_flag;
+
+ return ima_match_policy(inode, cred, secid, func, mask, flags, pcr);
+}
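For reference, the key/value pairs listed in the kernel-doc above correspond to the rule syntax accepted by the policy parser (ima_policy.c) via securityfs, e.g. "measure func=BPRM_CHECK mask=MAY_EXEC" or "dont_measure fsmagic=0x9fa0" for procfs; ima_match_policy() walks those rules to build the returned action mask.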
+
+/*
+ * ima_collect_measurement - collect file measurement
+ *
+ * Calculate the file hash, if it doesn't already exist,
+ * storing the measurement and i_version in the iint.
+ *
+ * Must be called with iint->mutex held.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file, void *buf, loff_t size,
+ enum hash_algo algo)
+{
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
+ const char *filename = file->f_path.dentry->d_name.name;
+ int result = 0;
+ int length;
+ void *tmpbuf;
+ u64 i_version;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+
+ if (iint->flags & IMA_COLLECTED)
+ goto out;
+
+ /*
+ * Detecting file change is based on i_version. On filesystems
+ * which do not support i_version, support is limited to an initial
+ * measurement/appraisal/audit.
+ */
+ i_version = inode_query_iversion(inode);
+ hash.hdr.algo = algo;
+
+ /* Initialize hash digest to 0's in case of failure */
+ memset(&hash.digest, 0, sizeof(hash.digest));
+
+ if (buf)
+ result = ima_calc_buffer_hash(buf, size, &hash.hdr);
+ else
+ result = ima_calc_file_hash(file, &hash.hdr);
+
+ if (result && result != -EBADF && result != -EINVAL)
+ goto out;
+
+ length = sizeof(hash.hdr) + hash.hdr.length;
+ tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS);
+ if (!tmpbuf) {
+ result = -ENOMEM;
+ goto out;
+ }
+
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+
+ /* Possibly temporary failure due to type of read (e.g. O_DIRECT) */
+ if (!result)
+ iint->flags |= IMA_COLLECTED;
+out:
+ if (result) {
+ if (file->f_flags & O_DIRECT)
+ audit_cause = "failed(directio)";
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data", audit_cause,
+ result, 0);
+ }
+ return result;
+}
+
+/*
+ * ima_store_measurement - store file measurement
+ *
+ * Create an "ima" template and then store the template by calling
+ * ima_store_template.
+ *
+ * We only get here if the inode has not already been measured,
+ * but the measurement could already exist:
+ * - multiple copies of the same file on either the same or
+ * different filesystems.
+ * - the inode was previously flushed as well as the iint info,
+ * containing the hashing info.
+ *
+ * Must be called with iint->mutex held.
+ */
+void ima_store_measurement(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, int pcr)
+{
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "ENOMEM";
+ int result = -ENOMEM;
+ struct inode *inode = file_inode(file);
+ struct ima_template_entry *entry;
+ struct ima_event_data event_data = {iint, file, filename, xattr_value,
+ xattr_len, NULL};
+ int violation = 0;
+
+ if (iint->measured_pcrs & (0x1 << pcr))
+ return;
+
+ result = ima_alloc_init_template(&event_data, &entry);
+ if (result < 0) {
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+ op, audit_cause, result, 0);
+ return;
+ }
+
+ result = ima_store_template(entry, violation, inode, filename, pcr);
+ if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) {
+ iint->flags |= IMA_MEASURED;
+ iint->measured_pcrs |= (0x1 << pcr);
+ }
+ if (result < 0)
+ ima_free_template_entry(entry);
+}
+
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+ const unsigned char *filename)
+{
+ struct audit_buffer *ab;
+ char *hash;
+ const char *algo_name = hash_algo_name[iint->ima_hash->algo];
+ int i;
+
+ if (iint->flags & IMA_AUDITED)
+ return;
+
+ hash = kzalloc((iint->ima_hash->length * 2) + 1, GFP_KERNEL);
+ if (!hash)
+ return;
+
+ for (i = 0; i < iint->ima_hash->length; i++)
+ hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
+ hash[i * 2] = '\0';
+
+ ab = audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_RULE);
+ if (!ab)
+ goto out;
+
+ audit_log_format(ab, "file=");
+ audit_log_untrustedstring(ab, filename);
+ audit_log_format(ab, " hash=\"%s:%s\"", algo_name, hash);
+
+ audit_log_task_info(ab, current);
+ audit_log_end(ab);
+
+ iint->flags |= IMA_AUDITED;
+out:
+ kfree(hash);
+ return;
+}
+
+/*
+ * ima_d_path - return a pointer to the full pathname
+ *
+ * Attempt to return a pointer to the full pathname for use in the
+ * IMA measurement list, IMA audit records, and auditing logs.
+ *
+ * On failure, return a pointer to a copy of the filename, not dname.
+ * Returning a pointer to dname could result in using the pointer
+ * after the memory has been freed.
+ */
+const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
+{
+ char *pathname = NULL;
+
+ *pathbuf = __getname();
+ if (*pathbuf) {
+ pathname = d_absolute_path(path, *pathbuf, PATH_MAX);
+ if (IS_ERR(pathname)) {
+ __putname(*pathbuf);
+ *pathbuf = NULL;
+ pathname = NULL;
+ }
+ }
+
+ if (!pathname) {
+ strlcpy(namebuf, path->dentry->d_name.name, NAME_MAX);
+ pathname = namebuf;
+ }
+
+ return pathname;
+}
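A hedged sketch of the expected calling pattern (mirroring how the measurement path uses it): the caller provides both the pathbuf pointer, which must be released with __putname() when set, and a NAME_MAX-sized fallback buffer that ima_d_path() copies the dentry name into on failure:

	char *pathbuf = NULL;
	char filename[NAME_MAX + 1];
	const char *pathname;

	pathname = ima_d_path(&file->f_path, &pathbuf, filename);
	/* ... use pathname in the measurement/audit record ... */
	if (pathbuf)
		__putname(pathbuf);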
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
new file mode 100644
index 000000000..deec1804a
--- /dev/null
+++ b/security/integrity/ima/ima_appraise.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/magic.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+
+#include "ima.h"
+
+static int __init default_appraise_setup(char *str)
+{
+#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM
+ if (strncmp(str, "off", 3) == 0)
+ ima_appraise = 0;
+ else if (strncmp(str, "log", 3) == 0)
+ ima_appraise = IMA_APPRAISE_LOG;
+ else if (strncmp(str, "fix", 3) == 0)
+ ima_appraise = IMA_APPRAISE_FIX;
+#endif
+ return 1;
+}
+
+__setup("ima_appraise=", default_appraise_setup);
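For example, booting with ima_appraise=fix (honored only when IMA_APPRAISE_BOOTPARAM is enabled) lets a failed appraisal repair the 'security.ima' xattr via ima_fix_xattr() below, ima_appraise=log merely reports failures, and ima_appraise=off disables appraisal; the compiled-in default (set in ima_main.c) remains enforce.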
+
+/*
+ * is_ima_appraise_enabled - return appraise status
+ *
+ * Only return enabled if not in ima_appraise="fix" or "log" modes.
+ */
+bool is_ima_appraise_enabled(void)
+{
+ return ima_appraise & IMA_APPRAISE_ENFORCE;
+}
+
+/*
+ * ima_must_appraise - set appraise flag
+ *
+ * Return 1 to appraise or hash
+ */
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
+{
+ u32 secid;
+
+ if (!ima_appraise)
+ return 0;
+
+ security_task_getsecid(current, &secid);
+ return ima_match_policy(inode, current_cred(), secid, func, mask,
+ IMA_APPRAISE | IMA_HASH, NULL);
+}
+
+static int ima_fix_xattr(struct dentry *dentry,
+ struct integrity_iint_cache *iint)
+{
+ int rc, offset;
+ u8 algo = iint->ima_hash->algo;
+
+ if (algo <= HASH_ALGO_SHA1) {
+ offset = 1;
+ iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+ } else {
+ offset = 0;
+ iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+ iint->ima_hash->xattr.ng.algo = algo;
+ }
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
+ &iint->ima_hash->xattr.data[offset],
+ (sizeof(iint->ima_hash->xattr) - offset) +
+ iint->ima_hash->length, 0);
+ return rc;
+}
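For example, assuming the xattr union layout in integrity.h, the blob written above for a sha256 hash is 34 bytes: the IMA_XATTR_DIGEST_NG type byte, one hash-algorithm byte, then the 32-byte digest. For sha1 and md5 the legacy form is the IMA_XATTR_DIGEST type byte followed by the raw digest, giving the 21- and 17-byte lengths that ima_get_hash_algo() checks for.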
+
+/* Return specific func appraised cached result */
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ enum ima_hooks func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ return iint->ima_mmap_status;
+ case BPRM_CHECK:
+ return iint->ima_bprm_status;
+ case CREDS_CHECK:
+ return iint->ima_creds_status;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ return iint->ima_file_status;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ return iint->ima_read_status;
+ }
+}
+
+static void ima_set_cache_status(struct integrity_iint_cache *iint,
+ enum ima_hooks func,
+ enum integrity_status status)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->ima_mmap_status = status;
+ break;
+ case BPRM_CHECK:
+ iint->ima_bprm_status = status;
+ break;
+ case CREDS_CHECK:
+ iint->ima_creds_status = status;
+ break;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ iint->ima_file_status = status;
+ break;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ iint->ima_read_status = status;
+ break;
+ }
+}
+
+static void ima_cache_flags(struct integrity_iint_cache *iint,
+ enum ima_hooks func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
+ break;
+ case BPRM_CHECK:
+ iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED);
+ break;
+ case CREDS_CHECK:
+ iint->flags |= (IMA_CREDS_APPRAISED | IMA_APPRAISED);
+ break;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED);
+ break;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ iint->flags |= (IMA_READ_APPRAISED | IMA_APPRAISED);
+ break;
+ }
+}
+
+enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ struct signature_v2_hdr *sig;
+ enum hash_algo ret;
+
+ if (!xattr_value || xattr_len < 2)
+ /* return default hash algo */
+ return ima_hash_algo;
+
+ switch (xattr_value->type) {
+ case EVM_IMA_XATTR_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 2 || xattr_len <= sizeof(*sig))
+ return ima_hash_algo;
+ return sig->hash_algo;
+ case IMA_XATTR_DIGEST_NG:
+ ret = xattr_value->digest[0];
+ if (ret < HASH_ALGO__LAST)
+ return ret;
+ break;
+ case IMA_XATTR_DIGEST:
+ /* this is for backward compatibility */
+ if (xattr_len == 21) {
+ unsigned int zero = 0;
+ if (!memcmp(&xattr_value->digest[16], &zero, 4))
+ return HASH_ALGO_MD5;
+ else
+ return HASH_ALGO_SHA1;
+ } else if (xattr_len == 17)
+ return HASH_ALGO_MD5;
+ break;
+ }
+
+ /* return default hash algo */
+ return ima_hash_algo;
+}
+
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ ssize_t ret;
+
+ ret = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
+ 0, GFP_NOFS);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ return ret;
+}
+
+/*
+ * ima_appraise_measurement - appraise file measurement
+ *
+ * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
+ * Assuming success, compare the xattr hash with the collected measurement.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_appraise_measurement(enum ima_hooks func,
+ struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ static const char op[] = "appraise_data";
+ const char *cause = "unknown";
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = d_backing_inode(dentry);
+ enum integrity_status status = INTEGRITY_UNKNOWN;
+ int rc = xattr_len, hash_start = 0;
+
+ if (!(inode->i_opflags & IOP_XATTR))
+ return INTEGRITY_UNKNOWN;
+
+ if (rc <= 0) {
+ if (rc && rc != -ENODATA)
+ goto out;
+
+ cause = iint->flags & IMA_DIGSIG_REQUIRED ?
+ "IMA-signature-required" : "missing-hash";
+ status = INTEGRITY_NOLABEL;
+ if (file->f_mode & FMODE_CREATED)
+ iint->flags |= IMA_NEW_FILE;
+ if ((iint->flags & IMA_NEW_FILE) &&
+ (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
+ (inode->i_size == 0)))
+ status = INTEGRITY_PASS;
+ goto out;
+ }
+
+ status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
+ switch (status) {
+ case INTEGRITY_PASS:
+ case INTEGRITY_PASS_IMMUTABLE:
+ case INTEGRITY_UNKNOWN:
+ break;
+ case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */
+ case INTEGRITY_NOLABEL: /* No security.evm xattr. */
+ cause = "missing-HMAC";
+ goto out;
+ case INTEGRITY_FAIL: /* Invalid HMAC/signature. */
+ cause = "invalid-HMAC";
+ goto out;
+ default:
+ WARN_ONCE(true, "Unexpected integrity status %d\n", status);
+ }
+
+ switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
+ /* fall through */
+ case IMA_XATTR_DIGEST:
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ cause = "IMA-signature-required";
+ status = INTEGRITY_FAIL;
+ break;
+ }
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /*
+ * xattr length may be longer: the md5 hash in previous
+ * versions occupied 20 bytes in the xattr, instead of 16.
+ */
+ rc = memcmp(&xattr_value->digest[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
+ if (rc) {
+ cause = "invalid-hash";
+ status = INTEGRITY_FAIL;
+ break;
+ }
+ status = INTEGRITY_PASS;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value, rc,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc == -EOPNOTSUPP) {
+ status = INTEGRITY_UNKNOWN;
+ } else if (rc) {
+ cause = "invalid-signature";
+ status = INTEGRITY_FAIL;
+ } else {
+ status = INTEGRITY_PASS;
+ }
+ break;
+ default:
+ status = INTEGRITY_UNKNOWN;
+ cause = "unknown-ima-data";
+ break;
+ }
+
+out:
+ /*
+ * File signatures on some filesystems cannot be properly verified.
+ * When such filesystems are mounted by an untrusted mounter or on a
+ * system not willing to accept such a risk, fail the file signature
+ * verification.
+ */
+ if ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) &&
+ ((inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) ||
+ (iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) {
+ status = INTEGRITY_FAIL;
+ cause = "unverifiable-signature";
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ op, cause, rc, 0);
+ } else if (status != INTEGRITY_PASS) {
+ /* Fix mode, but don't replace file signatures. */
+ if ((ima_appraise & IMA_APPRAISE_FIX) &&
+ (!xattr_value ||
+ xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
+ if (!ima_fix_xattr(dentry, iint))
+ status = INTEGRITY_PASS;
+ }
+
+ /* Permit new files with file signatures, but without data. */
+ if (inode->i_size == 0 && iint->flags & IMA_NEW_FILE &&
+ xattr_value && xattr_value->type == EVM_IMA_XATTR_DIGSIG) {
+ status = INTEGRITY_PASS;
+ }
+
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ op, cause, rc, 0);
+ } else {
+ ima_cache_flags(iint, func);
+ }
+
+ ima_set_cache_status(iint, func, status);
+ return status;
+}
+
+/*
+ * ima_update_xattr - update 'security.ima' hash value
+ */
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+{
+ struct dentry *dentry = file_dentry(file);
+ int rc = 0;
+
+ /* do not collect and update hash for digital signatures */
+ if (test_bit(IMA_DIGSIG, &iint->atomic_flags))
+ return;
+
+ if ((iint->ima_file_status != INTEGRITY_PASS) &&
+ !(iint->flags & IMA_HASH))
+ return;
+
+ rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo);
+ if (rc < 0)
+ return;
+
+ inode_lock(file_inode(file));
+ ima_fix_xattr(dentry, iint);
+ inode_unlock(file_inode(file));
+}
+
+/**
+ * ima_inode_post_setattr - reflect file metadata changes
+ * @dentry: pointer to the affected dentry
+ *
+ * Changes to a dentry's metadata might result in needing to appraise.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void ima_inode_post_setattr(struct dentry *dentry)
+{
+ struct inode *inode = d_backing_inode(dentry);
+ struct integrity_iint_cache *iint;
+ int action;
+
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)
+ || !(inode->i_opflags & IOP_XATTR))
+ return;
+
+ action = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+ if (!action)
+ __vfs_removexattr(dentry, XATTR_NAME_IMA);
+ iint = integrity_iint_find(inode);
+ if (iint) {
+ set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags);
+ if (!action)
+ clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+}
+
+/*
+ * ima_protect_xattr - protect 'security.ima'
+ *
+ * Ensure that not just anyone can modify or remove 'security.ima'.
+ */
+static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return 1;
+ }
+ return 0;
+}
+
+static void ima_reset_appraise_flags(struct inode *inode, int digsig)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode))
+ return;
+
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return;
+ iint->measured_pcrs = 0;
+ set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags);
+ if (digsig)
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ else
+ clear_bit(IMA_DIGSIG, &iint->atomic_flags);
+}
+
+int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ const struct evm_ima_xattr_data *xvalue = xattr_value;
+ int result;
+
+ result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ if (result == 1) {
+ if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
+ return -EINVAL;
+ ima_reset_appraise_flags(d_backing_inode(dentry),
+ xvalue->type == EVM_IMA_XATTR_DIGSIG);
+ result = 0;
+ }
+ return result;
+}
+
+int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ int result;
+
+ result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
+ if (result == 1) {
+ ima_reset_appraise_flags(d_backing_inode(dentry), 0);
+ result = 0;
+ }
+ return result;
+}
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
new file mode 100644
index 000000000..f0b244989
--- /dev/null
+++ b/security/integrity/ima/ima_crypto.c
@@ -0,0 +1,698 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: ima_crypto.c
+ * Calculates md5/sha1 file hash, template hash, boot-aggregate hash
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/ratelimit.h>
+#include <linux/file.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <crypto/hash.h>
+
+#include "ima.h"
+
+/* minimum file size for ahash use */
+static unsigned long ima_ahash_minsize;
+module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
+MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
+
+/* default is 0, i.e. a single page */
+static int ima_maxorder;
+static unsigned int ima_bufsize = PAGE_SIZE;
+
+static int param_set_bufsize(const char *val, const struct kernel_param *kp)
+{
+ unsigned long long size;
+ int order;
+
+ size = memparse(val, NULL);
+ order = get_order(size);
+ if (order >= MAX_ORDER)
+ return -EINVAL;
+ ima_maxorder = order;
+ ima_bufsize = PAGE_SIZE << order;
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_bufsize = {
+ .set = param_set_bufsize,
+ .get = param_get_uint,
+};
+#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)
+
+module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
+MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
+
+static struct crypto_shash *ima_shash_tfm;
+static struct crypto_ahash *ima_ahash_tfm;
+
+int __init ima_init_crypto(void)
+{
+ long rc;
+
+ ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
+ if (IS_ERR(ima_shash_tfm)) {
+ rc = PTR_ERR(ima_shash_tfm);
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ hash_algo_name[ima_hash_algo], rc);
+ return rc;
+ }
+ pr_info("Allocated hash algorithm: %s\n",
+ hash_algo_name[ima_hash_algo]);
+ return 0;
+}
+
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
+{
+ struct crypto_shash *tfm = ima_shash_tfm;
+ int rc;
+
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
+
+ if (algo != ima_hash_algo) {
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+ if (tfm != ima_shash_tfm)
+ crypto_free_shash(tfm);
+}
+
+/**
+ * ima_alloc_pages() - Allocate contiguous pages.
+ * @max_size: Maximum amount of memory to allocate.
+ * @allocated_size: Returned size of actual allocation.
+ * @last_warn: Should the smallest (zero order) allocation warn or not.
+ *
+ * Tries to allocate memory opportunistically: it first attempts to allocate
+ * max_size worth of memory and, on failure, retries with progressively
+ * smaller orders, down to a single zero-order (one page) allocation.
+ * Allocations are attempted without generating allocation warnings unless
+ * last_warn is set; last_warn only affects that final zero-order allocation.
+ *
+ * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
+ *
+ * Return pointer to allocated memory, or NULL on failure.
+ */
+static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
+ int last_warn)
+{
+ void *ptr;
+ int order = ima_maxorder;
+ gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
+
+ if (order)
+ order = min(get_order(max_size), order);
+
+ for (; order; order--) {
+ ptr = (void *)__get_free_pages(gfp_mask, order);
+ if (ptr) {
+ *allocated_size = PAGE_SIZE << order;
+ return ptr;
+ }
+ }
+
+ /* order is zero - one page */
+
+ gfp_mask = GFP_KERNEL;
+
+ if (!last_warn)
+ gfp_mask |= __GFP_NOWARN;
+
+ ptr = (void *)__get_free_pages(gfp_mask, 0);
+ if (ptr) {
+ *allocated_size = PAGE_SIZE;
+ return ptr;
+ }
+
+ *allocated_size = 0;
+ return NULL;
+}
+
+/**
+ * ima_free_pages() - Free pages allocated by ima_alloc_pages().
+ * @ptr: Pointer to allocated pages.
+ * @size: Size of allocated buffer.
+ */
+static void ima_free_pages(void *ptr, size_t size)
+{
+ if (!ptr)
+ return;
+ free_pages((unsigned long)ptr, get_order(size));
+}
+
+static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
+{
+ struct crypto_ahash *tfm = ima_ahash_tfm;
+ int rc;
+
+ if (algo < 0 || algo >= HASH_ALGO__LAST)
+ algo = ima_hash_algo;
+
+ if (algo != ima_hash_algo || !tfm) {
+ tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
+ if (!IS_ERR(tfm)) {
+ if (algo == ima_hash_algo)
+ ima_ahash_tfm = tfm;
+ } else {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_atfm(struct crypto_ahash *tfm)
+{
+ if (tfm != ima_ahash_tfm)
+ crypto_free_ahash(tfm);
+}
+
+static inline int ahash_wait(int err, struct crypto_wait *wait)
+{
+
+ err = crypto_wait_req(err, wait);
+
+ if (err)
+ pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
+
+ return err;
+}
+
+static int ima_calc_file_hash_atfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_ahash *tfm)
+{
+ loff_t i_size, offset;
+ char *rbuf[2] = { NULL, };
+ int rc, rbuf_len, active = 0, ahash_rc = 0;
+ struct ahash_request *req;
+ struct scatterlist sg[1];
+ struct crypto_wait wait;
+ size_t rbuf_size[2];
+
+ hash->length = crypto_ahash_digestsize(tfm);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &wait);
+
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
+ if (rc)
+ goto out1;
+
+ i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
+ goto out2;
+
+ /*
+ * Try to allocate maximum size of memory.
+ * Fail if even a single page cannot be allocated.
+ */
+ rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
+ if (!rbuf[0]) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+
+ /* Only allocate one buffer if that is enough. */
+ if (i_size > rbuf_size[0]) {
+ /*
+ * Try to allocate secondary buffer. If that fails fallback to
+ * using single buffering. Use previous memory allocation size
+ * as baseline for possible allocation size.
+ */
+ rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
+ &rbuf_size[1], 0);
+ }
+
+ for (offset = 0; offset < i_size; offset += rbuf_len) {
+ if (!rbuf[1] && offset) {
+ /* Not using two buffers, and it is not the first
+ * read/request, wait for the completion of the
+ * previous ahash_update() request.
+ */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (rc)
+ goto out3;
+ }
+ /* read buffer */
+ rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
+ rc = integrity_kernel_read(file, offset, rbuf[active],
+ rbuf_len);
+ if (rc != rbuf_len) {
+ if (rc >= 0)
+ rc = -EINVAL;
+ /*
+ * Forward current rc, do not overwrite with return value
+ * from ahash_wait()
+ */
+ ahash_wait(ahash_rc, &wait);
+ goto out3;
+ }
+
+ if (rbuf[1] && offset) {
+ /* Using two buffers, and it is not the first
+ * read/request, wait for the completion of the
+ * previous ahash_update() request.
+ */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (rc)
+ goto out3;
+ }
+
+ sg_init_one(&sg[0], rbuf[active], rbuf_len);
+ ahash_request_set_crypt(req, sg, NULL, rbuf_len);
+
+ ahash_rc = crypto_ahash_update(req);
+
+ if (rbuf[1])
+ active = !active; /* swap buffers, if we use two */
+ }
+ /* wait for the last update request to complete */
+ rc = ahash_wait(ahash_rc, &wait);
+out3:
+ ima_free_pages(rbuf[0], rbuf_size[0]);
+ ima_free_pages(rbuf[1], rbuf_size[1]);
+out2:
+ if (!rc) {
+ ahash_request_set_crypt(req, NULL, hash->digest, 0);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
+ }
+out1:
+ ahash_request_free(req);
+ return rc;
+}
+
+static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_ahash *tfm;
+ int rc;
+
+ tfm = ima_alloc_atfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_atfm(file, hash, tfm);
+
+ ima_free_atfm(tfm);
+
+ return rc;
+}
+
+static int ima_calc_file_hash_tfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
+{
+ loff_t i_size, offset = 0;
+ char *rbuf;
+ int rc;
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+ shash->flags = 0;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
+ goto out;
+
+ rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+
+ while (offset < i_size) {
+ int rbuf_len;
+
+ rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
+ if (rbuf_len < 0) {
+ rc = rbuf_len;
+ break;
+ }
+ if (rbuf_len == 0)
+ break;
+ offset += rbuf_len;
+
+ rc = crypto_shash_update(shash, rbuf, rbuf_len);
+ if (rc)
+ break;
+ }
+ kfree(rbuf);
+out:
+ if (!rc)
+ rc = crypto_shash_final(shash, hash->digest);
+ return rc;
+}
+
+static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
+
+/*
+ * ima_calc_file_hash - calculate file hash
+ *
+ * Asynchronous hash (ahash) allows using HW acceleration for calculating
+ * a hash. ahash performance varies for different data sizes on different
+ * crypto accelerators. shash performance might be better for smaller files.
+ * The 'ima.ahash_minsize' module parameter allows specifying the best
+ * minimum file size for using ahash on the system.
+ *
+ * If the ima.ahash_minsize parameter is not specified, this function uses
+ * shash for the hash calculation. If ahash fails, it falls back to using
+ * shash.
+ */
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+ loff_t i_size;
+ int rc;
+ struct file *f = file;
+ bool new_file_instance = false;
+
+ /*
+ * For consistency, fail files opened with the O_DIRECT flag on
+ * filesystems mounted with or without the DAX option.
+ */
+ if (file->f_flags & O_DIRECT) {
+ hash->length = hash_digest_size[ima_hash_algo];
+ hash->algo = ima_hash_algo;
+ return -EINVAL;
+ }
+
+ /* Open a new file instance in O_RDONLY if we cannot read */
+ if (!(file->f_mode & FMODE_READ)) {
+ int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
+ O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
+ flags |= O_RDONLY;
+ f = dentry_open(&file->f_path, flags, file->f_cred);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+
+ new_file_instance = true;
+ }
+
+ i_size = i_size_read(file_inode(f));
+
+ if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
+ rc = ima_calc_file_ahash(f, hash);
+ if (!rc)
+ goto out;
+ }
+
+ rc = ima_calc_file_shash(f, hash);
+out:
+ if (new_file_instance)
+ fput(f);
+ return rc;
+}
+
+/*
+ * Calculate the hash of template data
+ */
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+ struct ima_template_desc *td,
+ int num_fields,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+ int rc, i;
+
+ shash->tfm = tfm;
+ shash->flags = 0;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ for (i = 0; i < num_fields; i++) {
+ u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
+ u8 *data_to_hash = field_data[i].data;
+ u32 datalen = field_data[i].len;
+ u32 datalen_to_hash =
+ !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);
+
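+		/*
+		 * For the original 'ima' template, field lengths are not
+		 * included in the hash; instead the event name field is
+		 * zero-padded to a fixed-size buffer to stay compatible
+		 * with the original measurement list format.
+		 */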
+ if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ rc = crypto_shash_update(shash,
+ (const u8 *) &datalen_to_hash,
+ sizeof(datalen_to_hash));
+ if (rc)
+ break;
+ } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
+ memcpy(buffer, data_to_hash, datalen);
+ data_to_hash = buffer;
+ datalen = IMA_EVENT_NAME_LEN_MAX + 1;
+ }
+ rc = crypto_shash_update(shash, data_to_hash, datalen);
+ if (rc)
+ break;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(shash, hash->digest);
+
+ return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
+ hash, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
+
+static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
+ struct ima_digest_data *hash,
+ struct crypto_ahash *tfm)
+{
+ struct ahash_request *req;
+ struct scatterlist sg;
+ struct crypto_wait wait;
+ int rc, ahash_rc = 0;
+
+ hash->length = crypto_ahash_digestsize(tfm);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ crypto_init_wait(&wait);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ crypto_req_done, &wait);
+
+ rc = ahash_wait(crypto_ahash_init(req), &wait);
+ if (rc)
+ goto out;
+
+ sg_init_one(&sg, buf, len);
+ ahash_request_set_crypt(req, &sg, NULL, len);
+
+ ahash_rc = crypto_ahash_update(req);
+
+ /* wait for the update request to complete */
+ rc = ahash_wait(ahash_rc, &wait);
+ if (!rc) {
+ ahash_request_set_crypt(req, NULL, hash->digest, 0);
+ rc = ahash_wait(crypto_ahash_final(req), &wait);
+ }
+out:
+ ahash_request_free(req);
+ return rc;
+}
+
+static int calc_buffer_ahash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ struct crypto_ahash *tfm;
+ int rc;
+
+ tfm = ima_alloc_atfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);
+
+ ima_free_atfm(tfm);
+
+ return rc;
+}
+
+static int calc_buffer_shash_tfm(const void *buf, loff_t size,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+ unsigned int len;
+ int rc;
+
+ shash->tfm = tfm;
+ shash->flags = 0;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ while (size) {
+ len = size < PAGE_SIZE ? size : PAGE_SIZE;
+ rc = crypto_shash_update(shash, buf, len);
+ if (rc)
+ break;
+ buf += len;
+ size -= len;
+ }
+
+ if (!rc)
+ rc = crypto_shash_final(shash, hash->digest);
+ return rc;
+}
+
+static int calc_buffer_shash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = calc_buffer_shash_tfm(buf, len, hash, tfm);
+
+ ima_free_tfm(tfm);
+ return rc;
+}
+
+int ima_calc_buffer_hash(const void *buf, loff_t len,
+ struct ima_digest_data *hash)
+{
+ int rc;
+
+ if (ima_ahash_minsize && len >= ima_ahash_minsize) {
+ rc = calc_buffer_ahash(buf, len, hash);
+ if (!rc)
+ return 0;
+ }
+
+ return calc_buffer_shash(buf, len, hash);
+}
+
+static void ima_pcrread(int idx, u8 *pcr)
+{
+ if (!ima_tpm_chip)
+ return;
+
+ if (tpm_pcr_read(ima_tpm_chip, idx, pcr) != 0)
+		pr_err("Error communicating to TPM chip\n");
+}
+
+/*
+ * Calculate the boot aggregate hash
+ */
+static int ima_calc_boot_aggregate_tfm(char *digest,
+ struct crypto_shash *tfm)
+{
+ u8 pcr_i[TPM_DIGEST_SIZE];
+ int rc, i;
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+ shash->flags = 0;
+
+ rc = crypto_shash_init(shash);
+ if (rc != 0)
+ return rc;
+
+ /* cumulative sha1 over tpm registers 0-7 */
+ for (i = TPM_PCR0; i < TPM_PCR8; i++) {
+ ima_pcrread(i, pcr_i);
+ /* now accumulate with current aggregate */
+ rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
+ if (rc != 0)
+ return rc;
+ }
+	rc = crypto_shash_final(shash, digest);
+ return rc;
+}
+
+int ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+
+ ima_free_tfm(tfm);
+
+ return rc;
+}
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
new file mode 100644
index 000000000..38bd565b9
--- /dev/null
+++ b/security/integrity/ima/ima_fs.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Reiner Sailer <sailer@us.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_fs.c
+ *	implements the security file system for reporting
+ * current measurement list and IMA statistics
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/parser.h>
+#include <linux/vmalloc.h>
+
+#include "ima.h"
+
+static DEFINE_MUTEX(ima_write_mutex);
+
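+/*
+ * The "ima_canonical_fmt" boot option requests the measurement list in
+ * little-endian (canonical) format.  Byte swapping is only needed on
+ * big-endian machines, so the option is a no-op elsewhere.
+ */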
+bool ima_canonical_fmt;
+static int __init default_canonical_fmt_setup(char *str)
+{
+#ifdef __BIG_ENDIAN
+ ima_canonical_fmt = true;
+#endif
+ return 1;
+}
+__setup("ima_canonical_fmt", default_canonical_fmt_setup);
+
+static int valid_policy = 1;
+
+static ssize_t ima_show_htable_value(char __user *buf, size_t count,
+ loff_t *ppos, atomic_long_t *val)
+{
+ char tmpbuf[32]; /* greater than largest 'long' string value */
+ ssize_t len;
+
+ len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
+ return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
+}
+
+static ssize_t ima_show_htable_violations(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return ima_show_htable_value(buf, count, ppos, &ima_htable.violations);
+}
+
+static const struct file_operations ima_htable_violations_ops = {
+ .read = ima_show_htable_violations,
+ .llseek = generic_file_llseek,
+};
+
+static ssize_t ima_show_measurements_count(struct file *filp,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return ima_show_htable_value(buf, count, ppos, &ima_htable.len);
+
+}
+
+static const struct file_operations ima_measurements_count_ops = {
+ .read = ima_show_measurements_count,
+ .llseek = generic_file_llseek,
+};
+
+/* returns pointer to the ima_queue_entry at position *pos */
+static void *ima_measurements_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t l = *pos;
+ struct ima_queue_entry *qe;
+
+ /* we need a lock since pos could point beyond last element */
+ rcu_read_lock();
+ list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ if (!l--) {
+ rcu_read_unlock();
+ return qe;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ima_queue_entry *qe = v;
+
+	/* the lock protects against concurrent list extension when
+	 * reading beyond the last element
+ */
+ rcu_read_lock();
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
+ rcu_read_unlock();
+ (*pos)++;
+
+ return (&qe->later == &ima_measurements) ? NULL : qe;
+}
+
+static void ima_measurements_stop(struct seq_file *m, void *v)
+{
+}
+
+void ima_putc(struct seq_file *m, void *data, int datalen)
+{
+ while (datalen--)
+ seq_putc(m, *(char *)data++);
+}
+
+/* print format:
+ * 32bit-le=pcr#
+ * char[20]=template digest
+ * 32bit-le=template name size
+ * char[n]=template name
+ * [eventdata length]
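+ *       (the eventdata length is omitted for the original 'ima' template)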
+ * eventdata[n]=template specific data
+ */
+int ima_measurements_show(struct seq_file *m, void *v)
+{
+ /* the list never shrinks, so we don't need a lock here */
+ struct ima_queue_entry *qe = v;
+ struct ima_template_entry *e;
+ char *template_name;
+ u32 pcr, namelen, template_data_len; /* temporary fields */
+ bool is_ima_template = false;
+ int i;
+
+ /* get entry */
+ e = qe->entry;
+ if (e == NULL)
+ return -1;
+
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
+ /*
+ * 1st: PCRIndex
+	 * The PCR index defaults to the config option, unless set in the
+	 * policy; it is emitted in little-endian format when the canonical
+	 * format is requested.
+ */
+ pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr);
+ ima_putc(m, &pcr, sizeof(e->pcr));
+
+ /* 2nd: template digest */
+ ima_putc(m, e->digest, TPM_DIGEST_SIZE);
+
+ /* 3rd: template name size */
+ namelen = !ima_canonical_fmt ? strlen(template_name) :
+ cpu_to_le32(strlen(template_name));
+ ima_putc(m, &namelen, sizeof(namelen));
+
+ /* 4th: template name */
+ ima_putc(m, template_name, strlen(template_name));
+
+ /* 5th: template length (except for 'ima' template) */
+ if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
+ is_ima_template = true;
+
+ if (!is_ima_template) {
+ template_data_len = !ima_canonical_fmt ? e->template_data_len :
+ cpu_to_le32(e->template_data_len);
+ ima_putc(m, &template_data_len, sizeof(e->template_data_len));
+ }
+
+ /* 6th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ enum ima_show_type show = IMA_SHOW_BINARY;
+ struct ima_template_field *field = e->template_desc->fields[i];
+
+ if (is_ima_template && strcmp(field->field_id, "d") == 0)
+ show = IMA_SHOW_BINARY_NO_FIELD_LEN;
+ if (is_ima_template && strcmp(field->field_id, "n") == 0)
+ show = IMA_SHOW_BINARY_OLD_STRING_FMT;
+ field->field_show(m, show, &e->template_data[i]);
+ }
+ return 0;
+}
+
+static const struct seq_operations ima_measurments_seqops = {
+ .start = ima_measurements_start,
+ .next = ima_measurements_next,
+ .stop = ima_measurements_stop,
+ .show = ima_measurements_show
+};
+
+static int ima_measurements_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &ima_measurments_seqops);
+}
+
+static const struct file_operations ima_measurements_ops = {
+ .open = ima_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
+{
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ seq_printf(m, "%02x", *(digest + i));
+}
+
+/* print in ascii */
+static int ima_ascii_measurements_show(struct seq_file *m, void *v)
+{
+ /* the list never shrinks, so we don't need a lock here */
+ struct ima_queue_entry *qe = v;
+ struct ima_template_entry *e;
+ char *template_name;
+ int i;
+
+ /* get entry */
+ e = qe->entry;
+ if (e == NULL)
+ return -1;
+
+ template_name = (e->template_desc->name[0] != '\0') ?
+ e->template_desc->name : e->template_desc->fmt;
+
+ /* 1st: PCR used (config option) */
+ seq_printf(m, "%2d ", e->pcr);
+
+ /* 2nd: SHA1 template hash */
+ ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
+
+	/* 3rd: template name */
+ seq_printf(m, " %s", template_name);
+
+ /* 4th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ seq_puts(m, " ");
+ if (e->template_data[i].len == 0)
+ continue;
+
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+ &e->template_data[i]);
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static const struct seq_operations ima_ascii_measurements_seqops = {
+ .start = ima_measurements_start,
+ .next = ima_measurements_next,
+ .stop = ima_measurements_stop,
+ .show = ima_ascii_measurements_show
+};
+
+static int ima_ascii_measurements_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &ima_ascii_measurements_seqops);
+}
+
+static const struct file_operations ima_ascii_measurements_ops = {
+ .open = ima_ascii_measurements_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
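+/*
+ * ima_read_policy - load policy rules from the file at @path
+ *
+ * Returns the length of the supplied pathname on success, so that the
+ * caller's write() appears fully consumed, or a negative errno on failure.
+ */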
+static ssize_t ima_read_policy(char *path)
+{
+ void *data;
+ char *datap;
+ loff_t size;
+ int rc, pathlen = strlen(path);
+
+ char *p;
+
+ /* remove \n */
+ datap = path;
+ strsep(&datap, "\n");
+
+ rc = kernel_read_file_from_path(path, &data, &size, 0, READING_POLICY);
+ if (rc < 0) {
+ pr_err("Unable to open file: %s (%d)", path, rc);
+ return rc;
+ }
+
+ datap = data;
+ while (size > 0 && (p = strsep(&datap, "\n"))) {
+ pr_debug("rule: %s\n", p);
+ rc = ima_parse_add_rule(p);
+ if (rc < 0)
+ break;
+ size -= rc;
+ }
+
+ vfree(data);
+ if (rc < 0)
+ return rc;
+ else if (size)
+ return -EINVAL;
+ else
+ return pathlen;
+}
+
+static ssize_t ima_write_policy(struct file *file, const char __user *buf,
+ size_t datalen, loff_t *ppos)
+{
+ char *data;
+ ssize_t result;
+
+ if (datalen >= PAGE_SIZE)
+ datalen = PAGE_SIZE - 1;
+
+ /* No partial writes. */
+ result = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ data = memdup_user_nul(buf, datalen);
+ if (IS_ERR(data)) {
+ result = PTR_ERR(data);
+ goto out;
+ }
+
+ result = mutex_lock_interruptible(&ima_write_mutex);
+ if (result < 0)
+ goto out_free;
+
+ if (data[0] == '/') {
+ result = ima_read_policy(data);
+ } else if (ima_appraise & IMA_APPRAISE_POLICY) {
+ pr_err("signed policy file (specified as an absolute pathname) required\n");
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+ "policy_update", "signed policy required",
+ 1, 0);
+ result = -EACCES;
+ } else {
+ result = ima_parse_add_rule(data);
+ }
+ mutex_unlock(&ima_write_mutex);
+out_free:
+ kfree(data);
+out:
+ if (result < 0)
+ valid_policy = 0;
+
+ return result;
+}
+
+static struct dentry *ima_dir;
+static struct dentry *ima_symlink;
+static struct dentry *binary_runtime_measurements;
+static struct dentry *ascii_runtime_measurements;
+static struct dentry *runtime_measurements_count;
+static struct dentry *violations;
+static struct dentry *ima_policy;
+
+enum ima_fs_flags {
+ IMA_FS_BUSY,
+};
+
+static unsigned long ima_fs_flags;
+
+#ifdef CONFIG_IMA_READ_POLICY
+static const struct seq_operations ima_policy_seqops = {
+ .start = ima_policy_start,
+ .next = ima_policy_next,
+ .stop = ima_policy_stop,
+ .show = ima_policy_show,
+};
+#endif
+
+/*
+ * ima_open_policy: sequentialize access to the policy file
+ */
+static int ima_open_policy(struct inode *inode, struct file *filp)
+{
+ if (!(filp->f_flags & O_WRONLY)) {
+#ifndef CONFIG_IMA_READ_POLICY
+ return -EACCES;
+#else
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ return -EACCES;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return seq_open(filp, &ima_policy_seqops);
+#endif
+ }
+ if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags))
+ return -EBUSY;
+ return 0;
+}
+
+/*
+ * ima_release_policy - start using the new measure policy rules.
+ *
+ * Initially, ima_rules points to the default policy rules. Switch
+ * to the new policy rules and remove the securityfs policy file,
+ * assuming the policy is valid.
+ */
+static int ima_release_policy(struct inode *inode, struct file *file)
+{
+ const char *cause = valid_policy ? "completed" : "failed";
+
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return seq_release(inode, file);
+
+ if (valid_policy && ima_check_policy() < 0) {
+ cause = "failed";
+ valid_policy = 0;
+ }
+
+ pr_info("policy update %s\n", cause);
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL,
+ "policy_update", cause, !valid_policy, 0);
+
+ if (!valid_policy) {
+ ima_delete_rules();
+ valid_policy = 1;
+ clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+ return 0;
+ }
+
+ ima_update_policy();
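+	/*
+	 * Without CONFIG_IMA_WRITE_POLICY or CONFIG_IMA_READ_POLICY, the
+	 * policy can only be loaded once, so remove the securityfs policy
+	 * file.  With CONFIG_IMA_WRITE_POLICY the file remains writable
+	 * for further updates; with only CONFIG_IMA_READ_POLICY it becomes
+	 * read-only.
+	 */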
+#if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY)
+ securityfs_remove(ima_policy);
+ ima_policy = NULL;
+#elif defined(CONFIG_IMA_WRITE_POLICY)
+ clear_bit(IMA_FS_BUSY, &ima_fs_flags);
+#elif defined(CONFIG_IMA_READ_POLICY)
+ inode->i_mode &= ~S_IWUSR;
+#endif
+ return 0;
+}
+
+static const struct file_operations ima_measure_policy_ops = {
+ .open = ima_open_policy,
+ .write = ima_write_policy,
+ .read = seq_read,
+ .release = ima_release_policy,
+ .llseek = generic_file_llseek,
+};
+
+int __init ima_fs_init(void)
+{
+ ima_dir = securityfs_create_dir("ima", integrity_dir);
+ if (IS_ERR(ima_dir))
+ return -1;
+
+ ima_symlink = securityfs_create_symlink("ima", NULL, "integrity/ima",
+ NULL);
+ if (IS_ERR(ima_symlink))
+ goto out;
+
+ binary_runtime_measurements =
+ securityfs_create_file("binary_runtime_measurements",
+ S_IRUSR | S_IRGRP, ima_dir, NULL,
+ &ima_measurements_ops);
+ if (IS_ERR(binary_runtime_measurements))
+ goto out;
+
+ ascii_runtime_measurements =
+ securityfs_create_file("ascii_runtime_measurements",
+ S_IRUSR | S_IRGRP, ima_dir, NULL,
+ &ima_ascii_measurements_ops);
+ if (IS_ERR(ascii_runtime_measurements))
+ goto out;
+
+ runtime_measurements_count =
+ securityfs_create_file("runtime_measurements_count",
+ S_IRUSR | S_IRGRP, ima_dir, NULL,
+ &ima_measurements_count_ops);
+ if (IS_ERR(runtime_measurements_count))
+ goto out;
+
+ violations =
+ securityfs_create_file("violations", S_IRUSR | S_IRGRP,
+ ima_dir, NULL, &ima_htable_violations_ops);
+ if (IS_ERR(violations))
+ goto out;
+
+ ima_policy = securityfs_create_file("policy", POLICY_FILE_FLAGS,
+ ima_dir, NULL,
+ &ima_measure_policy_ops);
+ if (IS_ERR(ima_policy))
+ goto out;
+
+ return 0;
+out:
+ securityfs_remove(ima_policy);
+ securityfs_remove(violations);
+ securityfs_remove(runtime_measurements_count);
+ securityfs_remove(ascii_runtime_measurements);
+ securityfs_remove(binary_runtime_measurements);
+ securityfs_remove(ima_symlink);
+ securityfs_remove(ima_dir);
+ return -1;
+}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
new file mode 100644
index 000000000..a2bc4cb44
--- /dev/null
+++ b/security/integrity/ima/ima_init.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_init.c
+ * initialization and cleanup functions
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include "ima.h"
+
+/* name for boot aggregate entry */
+const char boot_aggregate_name[] = "boot_aggregate";
+struct tpm_chip *ima_tpm_chip;
+
+/* Add the boot aggregate to the IMA measurement list and extend
+ * the PCR register.
+ *
+ * Calculate the boot aggregate, a SHA1 hash over TPM PCRs 0-7, if a
+ * TPM chip exists; otherwise the aggregate is all zeroes.
+ * Add the boot aggregate measurement to the measurement
+ * list and extend the PCR register.
+ *
+ * If a tpm chip does not exist, indicate the core root of trust is
+ * not hardware based by invalidating the aggregate PCR value.
+ * (The aggregate PCR value is invalidated by adding one value to
+ * the measurement list and extending the aggregate PCR value with
+ * a different value.) Violations add a zero entry to the measurement
+ * list and extend the aggregate PCR value with ff...ff's.
+ */
+static int __init ima_add_boot_aggregate(void)
+{
+ static const char op[] = "add_boot_aggregate";
+ const char *audit_cause = "ENOMEM";
+ struct ima_template_entry *entry;
+ struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
+ struct ima_event_data event_data = {iint, NULL, boot_aggregate_name,
+ NULL, 0, NULL};
+ int result = -ENOMEM;
+ int violation = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
+
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = &hash.hdr;
+ iint->ima_hash->algo = HASH_ALGO_SHA1;
+ iint->ima_hash->length = SHA1_DIGEST_SIZE;
+
+ if (ima_tpm_chip) {
+ result = ima_calc_boot_aggregate(&hash.hdr);
+ if (result < 0) {
+ audit_cause = "hashing_error";
+ goto err_out;
+ }
+ }
+
+ result = ima_alloc_init_template(&event_data, &entry);
+ if (result < 0) {
+ audit_cause = "alloc_entry";
+ goto err_out;
+ }
+
+ result = ima_store_template(entry, violation, NULL,
+ boot_aggregate_name,
+ CONFIG_IMA_MEASURE_PCR_IDX);
+ if (result < 0) {
+ ima_free_template_entry(entry);
+ audit_cause = "store_entry";
+ goto err_out;
+ }
+ return 0;
+err_out:
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
+ audit_cause, result, 0);
+ return result;
+}
+
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void)
+{
+ int unset_flags = ima_policy_flag & IMA_APPRAISE;
+
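+	/*
+	 * Temporarily clear the appraisal flags while loading the
+	 * certificate, so that reading the certificate file itself is not
+	 * subject to appraisal, then restore them afterwards.
+	 */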
+ ima_policy_flag &= ~unset_flags;
+ integrity_load_x509(INTEGRITY_KEYRING_IMA, CONFIG_IMA_X509_PATH);
+ ima_policy_flag |= unset_flags;
+}
+#endif
+
+int __init ima_init(void)
+{
+ int rc;
+
+ ima_tpm_chip = tpm_default_chip();
+ if (!ima_tpm_chip)
+ pr_info("No TPM chip found, activating TPM-bypass!\n");
+
+ rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA);
+ if (rc)
+ return rc;
+
+ rc = ima_init_crypto();
+ if (rc)
+ return rc;
+ rc = ima_init_template();
+ if (rc != 0)
+ return rc;
+
+ ima_load_kexec_buffer();
+
+ rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
+ if (rc != 0)
+ return rc;
+
+ ima_init_policy();
+
+ return ima_fs_init();
+}
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
new file mode 100644
index 000000000..6a10d4d8b
--- /dev/null
+++ b/security/integrity/ima/ima_kexec.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2016 IBM Corporation
+ *
+ * Authors:
+ * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
+ * Mimi Zohar <zohar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/kexec.h>
+#include "ima.h"
+
+#ifdef CONFIG_IMA_KEXEC
+static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
+ unsigned long segment_size)
+{
+ struct ima_queue_entry *qe;
+ struct seq_file file;
+ struct ima_kexec_hdr khdr;
+ int ret = 0;
+
+ /* segment size can't change between kexec load and execute */
+ file.buf = vmalloc(segment_size);
+ if (!file.buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ file.size = segment_size;
+ file.read_pos = 0;
+ file.count = sizeof(khdr); /* reserved space */
+
+ memset(&khdr, 0, sizeof(khdr));
+ khdr.version = 1;
+ list_for_each_entry_rcu(qe, &ima_measurements, later) {
+ if (file.count < file.size) {
+ khdr.count++;
+ ima_measurements_show(&file, qe);
+ } else {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto out;
+
+ /*
+ * fill in reserved space with some buffer details
+	 * (e.g. version, buffer size, number of measurements)
+ */
+ khdr.buffer_size = file.count;
+ if (ima_canonical_fmt) {
+ khdr.version = cpu_to_le16(khdr.version);
+ khdr.count = cpu_to_le64(khdr.count);
+ khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
+ }
+ memcpy(file.buf, &khdr, sizeof(khdr));
+
+ print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE,
+ 16, 1, file.buf,
+ file.count < 100 ? file.count : 100, true);
+
+ *buffer_size = file.count;
+ *buffer = file.buf;
+out:
+ if (ret == -EINVAL)
+ vfree(file.buf);
+ return ret;
+}
+
+/*
+ * Called during kexec_file_load so that IMA can add a segment to the kexec
+ * image for the measurement list for the next kernel.
+ *
+ * This function assumes that kexec_mutex is held.
+ */
+void ima_add_kexec_buffer(struct kimage *image)
+{
+ struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
+ .buf_min = 0, .buf_max = ULONG_MAX,
+ .top_down = true };
+ unsigned long binary_runtime_size;
+
+ /* use more understandable variable names than defined in kbuf */
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size;
+ size_t kexec_segment_size;
+ int ret;
+
+ /*
+ * Reserve an extra half page of memory for additional measurements
+ * added during the kexec load.
+ */
+ binary_runtime_size = ima_get_binary_runtime_size();
+ if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
+ kexec_segment_size = ULONG_MAX;
+ else
+ kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
+ PAGE_SIZE / 2, PAGE_SIZE);
+ if ((kexec_segment_size == ULONG_MAX) ||
+ ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) {
+ pr_err("Binary measurement list too large.\n");
+ return;
+ }
+
+ ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
+ kexec_segment_size);
+ if (!kexec_buffer) {
+ pr_err("Not enough memory for the kexec measurement buffer.\n");
+ return;
+ }
+
+ kbuf.buffer = kexec_buffer;
+ kbuf.bufsz = kexec_buffer_size;
+ kbuf.memsz = kexec_segment_size;
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ vfree(kexec_buffer);
+ return;
+ }
+
+ ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
+ return;
+ }
+
+ image->ima_buffer = kexec_buffer;
+
+ pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
+}
+#endif /* CONFIG_IMA_KEXEC */
+
+/*
+ * Restore the measurement list from the previous kernel.
+ */
+void ima_load_kexec_buffer(void)
+{
+ void *kexec_buffer = NULL;
+ size_t kexec_buffer_size = 0;
+ int rc;
+
+ rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
+ switch (rc) {
+ case 0:
+ rc = ima_restore_measurement_list(kexec_buffer_size,
+ kexec_buffer);
+ if (rc != 0)
+ pr_err("Failed to restore the measurement list: %d\n",
+ rc);
+
+ ima_free_kexec_buffer();
+ break;
+ case -ENOTSUPP:
+ pr_debug("Restoring the measurement list not supported\n");
+ break;
+ case -ENOENT:
+ pr_debug("No measurement list to restore\n");
+ break;
+ default:
+ pr_debug("Error restoring the measurement list: %d\n", rc);
+ }
+}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
new file mode 100644
index 000000000..2d31921fb
--- /dev/null
+++ b/security/integrity/ima/ima_main.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Serge Hallyn <serue@us.ibm.com>
+ * Kylene Hall <kylene@us.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_main.c
+ * implements the IMA hooks: ima_bprm_check, ima_file_mmap,
+ * and ima_file_check.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/binfmts.h>
+#include <linux/mount.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/ima.h>
+#include <linux/iversion.h>
+#include <linux/fs.h>
+
+#include "ima.h"
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise = IMA_APPRAISE_ENFORCE;
+#else
+int ima_appraise;
+#endif
+
+int ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+
+static int __init hash_setup(char *str)
+{
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i;
+
+ if (hash_setup_done)
+ return 1;
+
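+	/*
+	 * The original 'ima' template has a fixed SHA1-sized digest field,
+	 * so only the sha1 and md5 hash algorithms fit.
+	 */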
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (strncmp(str, "sha1", 4) == 0)
+ ima_hash_algo = HASH_ALGO_SHA1;
+ else if (strncmp(str, "md5", 3) == 0)
+ ima_hash_algo = HASH_ALGO_MD5;
+ else
+ return 1;
+ goto out;
+ }
+
+ i = match_string(hash_algo_name, HASH_ALGO__LAST, str);
+ if (i < 0)
+ return 1;
+
+ ima_hash_algo = i;
+out:
+ hash_setup_done = 1;
+ return 1;
+}
+__setup("ima_hash=", hash_setup);
+
+/*
+ * ima_rdwr_violation_check
+ *
+ * Only invalidate the PCR for measured files:
+ * - Opening a file for write when already open for read,
+ * results in a time of measure, time of use (ToMToU) error.
+ * - Opening a file for read when already open for write,
+ * could result in a file measurement error.
+ *
+ */
+static void ima_rdwr_violation_check(struct file *file,
+ struct integrity_iint_cache *iint,
+ int must_measure,
+ char **pathbuf,
+ const char **pathname,
+ char *filename)
+{
+ struct inode *inode = file_inode(file);
+ fmode_t mode = file->f_mode;
+ bool send_tomtou = false, send_writers = false;
+
+ if (mode & FMODE_WRITE) {
+ if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+ if (!iint)
+ iint = integrity_iint_find(inode);
+ /* IMA_MEASURE is set from reader side */
+ if (iint && test_bit(IMA_MUST_MEASURE,
+ &iint->atomic_flags))
+ send_tomtou = true;
+ }
+ } else {
+ if (must_measure)
+ set_bit(IMA_MUST_MEASURE, &iint->atomic_flags);
+ if ((atomic_read(&inode->i_writecount) > 0) && must_measure)
+ send_writers = true;
+ }
+
+ if (!send_tomtou && !send_writers)
+ return;
+
+ *pathname = ima_d_path(&file->f_path, pathbuf, filename);
+
+ if (send_tomtou)
+ ima_add_violation(file, *pathname, iint,
+ "invalid_pcr", "ToMToU");
+ if (send_writers)
+ ima_add_violation(file, *pathname, iint,
+ "invalid_pcr", "open_writers");
+}
+
+static void ima_check_last_writer(struct integrity_iint_cache *iint,
+ struct inode *inode, struct file *file)
+{
+ fmode_t mode = file->f_mode;
+ bool update;
+
+ if (!(mode & FMODE_WRITE))
+ return;
+
+ mutex_lock(&iint->mutex);
+ if (atomic_read(&inode->i_writecount) == 1) {
+ update = test_and_clear_bit(IMA_UPDATE_XATTR,
+ &iint->atomic_flags);
+ if (!IS_I_VERSION(inode) ||
+ !inode_eq_iversion(inode, iint->version) ||
+ (iint->flags & IMA_NEW_FILE)) {
+ iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE);
+ iint->measured_pcrs = 0;
+ if (update)
+ ima_update_xattr(iint, file);
+ }
+ }
+ mutex_unlock(&iint->mutex);
+}
+
+/**
+ * ima_file_free - called on __fput()
+ * @file: pointer to file structure being freed
+ *
+ * Flag files that changed, based on i_version
+ */
+void ima_file_free(struct file *file)
+{
+ struct inode *inode = file_inode(file);
+ struct integrity_iint_cache *iint;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return;
+
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return;
+
+ ima_check_last_writer(iint, inode, file);
+}
+
+static int process_measurement(struct file *file, const struct cred *cred,
+ u32 secid, char *buf, loff_t size, int mask,
+ enum ima_hooks func)
+{
+ struct inode *inode = file_inode(file);
+ struct integrity_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc;
+ char *pathbuf = NULL;
+ char filename[NAME_MAX];
+ const char *pathname = NULL;
+ int rc = 0, action, must_appraise = 0;
+ int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ struct evm_ima_xattr_data *xattr_value = NULL;
+ int xattr_len = 0;
+ bool violation_check;
+ enum hash_algo hash_algo;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return 0;
+
+ /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
+ * bitmask based on the appraise/audit/measurement policy.
+ * Included is the appraise submask.
+ */
+ action = ima_get_action(inode, cred, secid, mask, func, &pcr);
+ violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) &&
+ (ima_policy_flag & IMA_MEASURE));
+ if (!action && !violation_check)
+ return 0;
+
+ must_appraise = action & IMA_APPRAISE;
+
+ /* Is the appraise rule hook specific? */
+ if (action & IMA_FILE_APPRAISE)
+ func = FILE_CHECK;
+
+ inode_lock(inode);
+
+ if (action) {
+ iint = integrity_inode_get(inode);
+ if (!iint)
+ rc = -ENOMEM;
+ }
+
+ if (!rc && violation_check)
+ ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
+ &pathbuf, &pathname, filename);
+
+ inode_unlock(inode);
+
+ if (rc)
+ goto out;
+ if (!action)
+ goto out;
+
+ mutex_lock(&iint->mutex);
+
+ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+ /* reset appraisal flags if ima_inode_post_setattr was called */
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_ACTION_FLAGS);
+
+ /*
+	 * Re-evaluate the file if either the xattr has changed or the
+ * kernel has no way of detecting file change on the filesystem.
+ * (Limited to privileged mounted filesystems.)
+ */
+ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags) ||
+ ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) &&
+ !(inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) &&
+ !(action & IMA_FAIL_UNVERIFIABLE_SIGS))) {
+ iint->flags &= ~IMA_DONE_MASK;
+ iint->measured_pcrs = 0;
+ }
+
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+ */
+ iint->flags |= action;
+ action &= IMA_DO_MASK;
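+	/*
+	 * Each "done" flag is the corresponding "do" flag shifted left by
+	 * one, so shifting the done bits right clears the actions already
+	 * completed.  IMA_MEASURED is excluded, as re-measurement is
+	 * decided per PCR below.
+	 */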
+ action &= ~((iint->flags & (IMA_DONE_MASK ^ IMA_MEASURED)) >> 1);
+
+ /* If target pcr is already measured, unset IMA_MEASURE action */
+ if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr)))
+ action ^= IMA_MEASURE;
+
+ /* HASH sets the digital signature and update flags, nothing else */
+ if ((action & IMA_HASH) &&
+ !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) {
+ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+ if ((xattr_value && xattr_len > 2) &&
+ (xattr_value->type == EVM_IMA_XATTR_DIGSIG))
+ set_bit(IMA_DIGSIG, &iint->atomic_flags);
+ iint->flags |= IMA_HASHED;
+ action ^= IMA_HASH;
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+
+ /* Nothing to do, just return existing appraised status */
+ if (!action) {
+ if (must_appraise)
+ rc = ima_get_cache_status(iint, func);
+ goto out_locked;
+ }
+
+ template_desc = ima_template_desc_current();
+ if ((action & IMA_APPRAISE_SUBMASK) ||
+ strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0)
+ /* read 'security.ima' */
+ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value);
+
+ hash_algo = ima_get_hash_algo(xattr_value, xattr_len);
+
+ rc = ima_collect_measurement(iint, file, buf, size, hash_algo);
+ if (rc != 0 && rc != -EBADF && rc != -EINVAL)
+ goto out_locked;
+
+ if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */
+ pathname = ima_d_path(&file->f_path, &pathbuf, filename);
+
+ if (action & IMA_MEASURE)
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len, pcr);
+ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) {
+ inode_lock(inode);
+ rc = ima_appraise_measurement(func, iint, file, pathname,
+ xattr_value, xattr_len);
+ inode_unlock(inode);
+ }
+ if (action & IMA_AUDIT)
+ ima_audit_measurement(iint, pathname);
+
+ if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO))
+ rc = 0;
+out_locked:
+ if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) &&
+ !(iint->flags & IMA_NEW_FILE))
+ rc = -EACCES;
+ mutex_unlock(&iint->mutex);
+ kfree(xattr_value);
+out:
+ if (pathbuf)
+ __putname(pathbuf);
+ if (must_appraise) {
+ if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES;
+ if (file->f_mode & FMODE_WRITE)
+ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags);
+ }
+ return 0;
+}
+
+/**
+ * ima_file_mmap - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured (May be NULL)
+ * @prot: contains the protection that will be applied by the kernel.
+ *
+ * Measure files being mmapped executable based on the ima_must_measure()
+ * policy decision.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_file_mmap(struct file *file, unsigned long prot)
+{
+ u32 secid;
+
+ if (file && (prot & PROT_EXEC)) {
+ security_task_getsecid(current, &secid);
+ return process_measurement(file, current_cred(), secid, NULL,
+ 0, MAY_EXEC, MMAP_CHECK);
+ }
+
+ return 0;
+}
+
+/**
+ * ima_bprm_check - based on policy, collect/store measurement.
+ * @bprm: contains the linux_binprm structure
+ *
+ * The OS protects against an executable file, already open for write,
+ * from being executed in deny_write_access() and an executable file,
+ * already open for execute, from being modified in get_write_access().
+ * So we can be certain that what we verify and measure here is actually
+ * what is being executed.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_bprm_check(struct linux_binprm *bprm)
+{
+ int ret;
+ u32 secid;
+
+ security_task_getsecid(current, &secid);
+ ret = process_measurement(bprm->file, current_cred(), secid, NULL, 0,
+ MAY_EXEC, BPRM_CHECK);
+ if (ret)
+ return ret;
+
+ security_cred_getsecid(bprm->cred, &secid);
+ return process_measurement(bprm->file, bprm->cred, secid, NULL, 0,
+ MAY_EXEC, CREDS_CHECK);
+}
+
+/**
+ * ima_file_check - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured
+ * @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND
+ *
+ * Measure files based on the ima_must_measure() policy decision.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_file_check(struct file *file, int mask)
+{
+ u32 secid;
+
+ security_task_getsecid(current, &secid);
+ return process_measurement(file, current_cred(), secid, NULL, 0,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC |
+ MAY_APPEND), FILE_CHECK);
+}
+EXPORT_SYMBOL_GPL(ima_file_check);
+
+/**
+ * ima_post_path_mknod - mark as a new inode
+ * @dentry: newly created dentry
+ *
+ * Mark files created via the mknodat syscall as new, so that the
+ * file data can be written later.
+ */
+void ima_post_path_mknod(struct dentry *dentry)
+{
+ struct integrity_iint_cache *iint;
+ struct inode *inode = dentry->d_inode;
+ int must_appraise;
+
+ must_appraise = ima_must_appraise(inode, MAY_ACCESS, FILE_CHECK);
+ if (!must_appraise)
+ return;
+
+ iint = integrity_inode_get(inode);
+ if (iint)
+ iint->flags |= IMA_NEW_FILE;
+}
+
+/**
+ * ima_read_file - pre-measure/appraise hook decision based on policy
+ * @file: pointer to the file to be measured/appraised/audited
+ * @read_id: caller identifier
+ *
+ * Permit reading a file based on policy. The policy rules are written
+ * in terms of the policy identifier. Appraising the integrity of
+ * a file requires a file descriptor.
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
+{
+ /*
+ * READING_FIRMWARE_PREALLOC_BUFFER
+ *
+ * Do devices using pre-allocated memory run the risk of the
+ * firmware being accessible to the device prior to the completion
+ * of IMA's signature verification any more than when using two
+ * buffers?
+ */
+ return 0;
+}
+
+static int read_idmap[READING_MAX_ID] = {
+ [READING_FIRMWARE] = FIRMWARE_CHECK,
+ [READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
+ [READING_MODULE] = MODULE_CHECK,
+ [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
+ [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
+ [READING_POLICY] = POLICY_CHECK
+};
+
+/**
+ * ima_post_read_file - in memory collect/appraise/audit measurement
+ * @file: pointer to the file to be measured/appraised/audited
+ * @buf: pointer to in memory file contents
+ * @size: size of in memory file contents
+ * @read_id: caller identifier
+ *
+ * Measure/appraise/audit in memory file based on policy. Policy rules
+ * are written in terms of a policy identifier.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_post_read_file(struct file *file, void *buf, loff_t size,
+ enum kernel_read_file_id read_id)
+{
+ enum ima_hooks func;
+ u32 secid;
+
+ if (!file && read_id == READING_FIRMWARE) {
+ if ((ima_appraise & IMA_APPRAISE_FIRMWARE) &&
+ (ima_appraise & IMA_APPRAISE_ENFORCE)) {
+ pr_err("Prevent firmware loading_store.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ return 0;
+ }
+
+ /* permit signed certs */
+ if (!file && read_id == READING_X509_CERTIFICATE)
+ return 0;
+
+ if (!file || !buf || size == 0) { /* should never happen */
+ if (ima_appraise & IMA_APPRAISE_ENFORCE)
+ return -EACCES;
+ return 0;
+ }
+
+ func = read_idmap[read_id] ?: FILE_CHECK;
+ security_task_getsecid(current, &secid);
+ return process_measurement(file, current_cred(), secid, buf, size,
+ MAY_READ, func);
+}
+
+/**
+ * ima_load_data - appraise decision based on policy
+ * @id: kernel load data caller identifier
+ *
+ * Callers of this LSM hook cannot measure, appraise, or audit the
+ * data provided by userspace. Enforce policy rules requiring a file
+ * signature (e.g. kexec'ed kernel image).
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+int ima_load_data(enum kernel_load_data_id id)
+{
+ bool sig_enforce;
+
+ if ((ima_appraise & IMA_APPRAISE_ENFORCE) != IMA_APPRAISE_ENFORCE)
+ return 0;
+
+ switch (id) {
+ case LOADING_KEXEC_IMAGE:
+ if (ima_appraise & IMA_APPRAISE_KEXEC) {
+ pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_FIRMWARE:
+ if (ima_appraise & IMA_APPRAISE_FIRMWARE) {
+ pr_err("Prevent firmware sysfs fallback loading.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_MODULE:
+ sig_enforce = is_module_sig_enforced();
+
+ if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES)) {
+ pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int __init init_ima(void)
+{
+ int error;
+
+ ima_init_template_list();
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
+ error = ima_init();
+
+ if (error && strcmp(hash_algo_name[ima_hash_algo],
+ CONFIG_IMA_DEFAULT_HASH) != 0) {
+ pr_info("Allocating %s failed, going to use default hash algorithm %s\n",
+ hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH);
+ hash_setup_done = 0;
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
+ error = ima_init();
+ }
+
+ if (!error)
+ ima_update_policy_flag();
+
+ return error;
+}
+
+late_initcall(init_ima); /* Start IMA after the TPM is available */
+
+MODULE_DESCRIPTION("Integrity Measurement Architecture");
+MODULE_LICENSE("GPL");
diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
new file mode 100644
index 000000000..daad75ee7
--- /dev/null
+++ b/security/integrity/ima/ima_mok.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Juniper Networks, Inc.
+ *
+ * Author:
+ * Petko Manolov <petko.manolov@konsulko.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <keys/system_keyring.h>
+
+
+struct key *ima_blacklist_keyring;
+
+/*
+ * Allocate the IMA blacklist keyring
+ */
+static __init int ima_mok_init(void)
+{
+ struct key_restriction *restriction;
+
+ pr_notice("Allocating IMA blacklist keyring.\n");
+
+ restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
+ if (!restriction)
+ panic("Can't allocate IMA blacklist restriction.");
+
+ restriction->check = restrict_link_by_builtin_trusted;
+
+ ima_blacklist_keyring = keyring_alloc(".ima_blacklist",
+ KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_WRITE | KEY_USR_SEARCH,
+ KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_SET_KEEP,
+ restriction, NULL);
+
+ if (IS_ERR(ima_blacklist_keyring))
+ panic("Can't allocate IMA blacklist keyring.");
+ return 0;
+}
+device_initcall(ima_mok_init);
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
new file mode 100644
index 000000000..2d5a3daa0
--- /dev/null
+++ b/security/integrity/ima/ima_policy.c
@@ -0,0 +1,1256 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * ima_policy.c
+ * - initialize default measure policy rules
+ *
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/security.h>
+#include <linux/magic.h>
+#include <linux/parser.h>
+#include <linux/slab.h>
+#include <linux/rculist.h>
+#include <linux/genhd.h>
+#include <linux/seq_file.h>
+
+#include "ima.h"
+
+/* flags definitions */
+#define IMA_FUNC 0x0001
+#define IMA_MASK 0x0002
+#define IMA_FSMAGIC 0x0004
+#define IMA_UID 0x0008
+#define IMA_FOWNER 0x0010
+#define IMA_FSUUID 0x0020
+#define IMA_INMASK 0x0040
+#define IMA_EUID 0x0080
+#define IMA_PCR 0x0100
+#define IMA_FSNAME 0x0200
+
+#define UNKNOWN 0
+#define MEASURE 0x0001 /* same as IMA_MEASURE */
+#define DONT_MEASURE 0x0002
+#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
+#define DONT_APPRAISE 0x0008
+#define AUDIT 0x0040
+#define HASH 0x0100
+#define DONT_HASH 0x0200
+
+#define INVALID_PCR(a) (((a) < 0) || \
+ (a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8))
+
+int ima_policy_flag;
+static int temp_ima_appraise;
+static int build_ima_appraise __ro_after_init;
+
+#define MAX_LSM_RULES 6
+enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
+ LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
+};
+
+enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
+
+struct ima_rule_entry {
+ struct list_head list;
+ int action;
+ unsigned int flags;
+ enum ima_hooks func;
+ int mask;
+ unsigned long fsmagic;
+ uuid_t fsuuid;
+ kuid_t uid;
+ kuid_t fowner;
+ bool (*uid_op)(kuid_t, kuid_t); /* Handlers for operators */
+ bool (*fowner_op)(kuid_t, kuid_t); /* uid_eq(), uid_gt(), uid_lt() */
+ int pcr;
+ struct {
+ void *rule; /* LSM file metadata specific */
+ void *args_p; /* audit value */
+ int type; /* audit type */
+ } lsm[MAX_LSM_RULES];
+ char *fsname;
+};
+
+/*
+ * Without LSM specific knowledge, the default policy can only be
+ * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner
+ */
+
+/*
+ * The minimum rule set to allow for full TCB coverage. Measures all files
+ * opened or mmapped for exec and everything read by root. Dangerous because
+ * normal users can easily run the machine out of memory simply by building
+ * and running executables.
+ */
+static struct ima_rule_entry dont_measure_rules[] __ro_after_init = {
+ {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
+ .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = CGROUP2_SUPER_MAGIC,
+ .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC}
+};
+
+static struct ima_rule_entry original_measurement_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_MASK | IMA_UID},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_measurement_rules[] __ro_after_init = {
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+ .flags = IMA_FUNC | IMA_MASK},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
+ .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq,
+ .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+ {.action = MEASURE, .func = POLICY_CHECK, .flags = IMA_FUNC},
+};
+
+static struct ima_rule_entry default_appraise_rules[] __ro_after_init = {
+ {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP2_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+#ifdef CONFIG_IMA_WRITE_POLICY
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &uid_eq,
+ .flags = IMA_FOWNER},
+#else
+ /* force signature */
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &uid_eq,
+ .flags = IMA_FOWNER | IMA_DIGSIG_REQUIRED},
+#endif
+};
+
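+/*
+ * Appraise rules selected at build time.  The appraisal flags they
+ * imply are collected in build_ima_appraise, which is fixed after init
+ * and always OR'ed into ima_appraise by ima_update_policy_flag().
+ */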
+static struct ima_rule_entry build_appraise_rules[] __ro_after_init = {
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS
+ {.action = APPRAISE, .func = MODULE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS
+ {.action = APPRAISE, .func = FIRMWARE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS
+ {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+#ifdef CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+#endif
+};
+
+static struct ima_rule_entry secure_boot_rules[] __ro_after_init = {
+ {.action = APPRAISE, .func = MODULE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = FIRMWARE_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+ {.action = APPRAISE, .func = POLICY_CHECK,
+ .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED},
+};
+
+static LIST_HEAD(ima_default_rules);
+static LIST_HEAD(ima_policy_rules);
+static LIST_HEAD(ima_temp_rules);
+static struct list_head *ima_rules = &ima_default_rules;
+
+static int ima_policy __initdata;
+
+static int __init default_measure_policy_setup(char *str)
+{
+ if (ima_policy)
+ return 1;
+
+ ima_policy = ORIGINAL_TCB;
+ return 1;
+}
+__setup("ima_tcb", default_measure_policy_setup);
+
+static bool ima_use_appraise_tcb __initdata;
+static bool ima_use_secure_boot __initdata;
+static bool ima_fail_unverifiable_sigs __ro_after_init;
+static int __init policy_setup(char *str)
+{
+ char *p;
+
+ while ((p = strsep(&str, " |\n")) != NULL) {
+ if (*p == ' ')
+ continue;
+ if ((strcmp(p, "tcb") == 0) && !ima_policy)
+ ima_policy = DEFAULT_TCB;
+ else if (strcmp(p, "appraise_tcb") == 0)
+ ima_use_appraise_tcb = true;
+ else if (strcmp(p, "secure_boot") == 0)
+ ima_use_secure_boot = true;
+ else if (strcmp(p, "fail_securely") == 0)
+ ima_fail_unverifiable_sigs = true;
+ }
+
+ return 1;
+}
+__setup("ima_policy=", policy_setup);
+
+static int __init default_appraise_policy_setup(char *str)
+{
+ ima_use_appraise_tcb = true;
+ return 1;
+}
+__setup("ima_appraise_tcb", default_appraise_policy_setup);
+
+/*
+ * The LSM policy can be reloaded, leaving the IMA LSM based rules referring
+ * to the old, stale LSM policy. Update the IMA LSM based rules to reflect
+ * the reloaded LSM policy. We assume the rules still exist, and BUG_ON()
+ * they don't.
+ */
+static void ima_lsm_update_rules(void)
+{
+ struct ima_rule_entry *entry;
+ int result;
+ int i;
+
+ list_for_each_entry(entry, &ima_policy_rules, list) {
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (!entry->lsm[i].rule)
+ continue;
+ result = security_filter_rule_init(entry->lsm[i].type,
+ Audit_equal,
+ entry->lsm[i].args_p,
+ &entry->lsm[i].rule);
+ BUG_ON(!entry->lsm[i].rule);
+ }
+ }
+}
+
+/**
+ * ima_match_rules - determine whether an inode matches the measure rule.
+ * @rule: a pointer to a rule
+ * @inode: a pointer to an inode
+ * @cred: a pointer to a credentials structure for user validation
+ * @secid: the secid of the task to be validated
+ * @func: LIM hook identifier
+ * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ *
+ * Returns true on rule match, false on failure.
+ */
+static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
+ const struct cred *cred, u32 secid,
+ enum ima_hooks func, int mask)
+{
+ int i;
+
+ if ((rule->flags & IMA_FUNC) &&
+ (rule->func != func && func != POST_SETATTR))
+ return false;
+ if ((rule->flags & IMA_MASK) &&
+ (rule->mask != mask && func != POST_SETATTR))
+ return false;
+ if ((rule->flags & IMA_INMASK) &&
+ (!(rule->mask & mask) && func != POST_SETATTR))
+ return false;
+ if ((rule->flags & IMA_FSMAGIC)
+ && rule->fsmagic != inode->i_sb->s_magic)
+ return false;
+ if ((rule->flags & IMA_FSNAME)
+ && strcmp(rule->fsname, inode->i_sb->s_type->name))
+ return false;
+ if ((rule->flags & IMA_FSUUID) &&
+ !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid))
+ return false;
+ if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+ if (rule->flags & IMA_EUID) {
+ if (has_capability_noaudit(current, CAP_SETUID)) {
+ if (!rule->uid_op(cred->euid, rule->uid)
+ && !rule->uid_op(cred->suid, rule->uid)
+ && !rule->uid_op(cred->uid, rule->uid))
+ return false;
+ } else if (!rule->uid_op(cred->euid, rule->uid))
+ return false;
+ }
+
+ if ((rule->flags & IMA_FOWNER) &&
+ !rule->fowner_op(inode->i_uid, rule->fowner))
+ return false;
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ int rc = 0;
+ u32 osid;
+ int retried = 0;
+
+ if (!rule->lsm[i].rule)
+ continue;
+retry:
+ switch (i) {
+ case LSM_OBJ_USER:
+ case LSM_OBJ_ROLE:
+ case LSM_OBJ_TYPE:
+ security_inode_getsecid(inode, &osid);
+ rc = security_filter_rule_match(osid,
+ rule->lsm[i].type,
+ Audit_equal,
+ rule->lsm[i].rule,
+ NULL);
+ break;
+ case LSM_SUBJ_USER:
+ case LSM_SUBJ_ROLE:
+ case LSM_SUBJ_TYPE:
+ rc = security_filter_rule_match(secid,
+ rule->lsm[i].type,
+ Audit_equal,
+ rule->lsm[i].rule,
+ NULL);
+ default:
+ break;
+ }
+ if ((rc < 0) && (!retried)) {
+ retried = 1;
+ ima_lsm_update_rules();
+ goto retry;
+ }
+ if (!rc)
+ return false;
+ }
+ return true;
+}
+
+/*
+ * In addition to knowing that we need to appraise the file in general,
+ * we need to differentiate between calling hooks, for hook specific rules.
+ */
+static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func)
+{
+ if (!(rule->flags & IMA_FUNC))
+ return IMA_FILE_APPRAISE;
+
+ switch (func) {
+ case MMAP_CHECK:
+ return IMA_MMAP_APPRAISE;
+ case BPRM_CHECK:
+ return IMA_BPRM_APPRAISE;
+ case CREDS_CHECK:
+ return IMA_CREDS_APPRAISE;
+ case FILE_CHECK:
+ case POST_SETATTR:
+ return IMA_FILE_APPRAISE;
+ case MODULE_CHECK ... MAX_CHECK - 1:
+ default:
+ return IMA_READ_APPRAISE;
+ }
+}
+
+/**
+ * ima_match_policy - decision based on LSM and other conditions
+ * @inode: pointer to an inode for which the policy decision is being made
+ * @cred: pointer to a credentials structure for which the policy decision is
+ * being made
+ * @secid: LSM secid of the task to be validated
+ * @func: IMA hook identifier
+ * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+ * @flags: IMA actions to consider (e.g. IMA_MEASURE | IMA_APPRAISE)
+ * @pcr: set the pcr to extend
+ *
+ * Measure decision based on func/mask/fsmagic and LSM(subj/obj/type)
+ * conditions.
+ *
+ * Since the IMA policy may be updated multiple times, we need to lock the
+ * list when walking it. Reads are many orders of magnitude more numerous
+ * than writes, so ima_match_policy() is a classic RCU candidate.
+ */
+int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid,
+ enum ima_hooks func, int mask, int flags, int *pcr)
+{
+ struct ima_rule_entry *entry;
+ int action = 0, actmask = flags | (flags << 1);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, ima_rules, list) {
+
+ if (!(entry->action & actmask))
+ continue;
+
+ if (!ima_match_rules(entry, inode, cred, secid, func, mask))
+ continue;
+
+ action |= entry->flags & IMA_ACTION_FLAGS;
+
+ action |= entry->action & IMA_DO_MASK;
+ if (entry->action & IMA_APPRAISE) {
+ action |= get_subaction(entry, func);
+ action &= ~IMA_HASH;
+ if (ima_fail_unverifiable_sigs)
+ action |= IMA_FAIL_UNVERIFIABLE_SIGS;
+ }
+
+ if (entry->action & IMA_DO_MASK)
+ actmask &= ~(entry->action | entry->action << 1);
+ else
+ actmask &= ~(entry->action | entry->action >> 1);
+
+ if ((pcr) && (entry->flags & IMA_PCR))
+ *pcr = entry->pcr;
+
+ if (!actmask)
+ break;
+ }
+ rcu_read_unlock();
+
+ return action;
+}
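+
+/*
+ * A sketch of the actmask bookkeeping in ima_match_policy() above, assuming
+ * each DONT_* action is encoded as the corresponding *_DONE bit (one bit to
+ * the left of its DO bit): with flags == IMA_MEASURE, actmask starts as
+ * IMA_MEASURE | IMA_MEASURED; a matching "measure" or "dont_measure" rule
+ * clears both bits, so later measure rules are skipped and the walk can
+ * stop early once actmask is empty.
+ */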
+
+/*
+ * Initialize the ima_policy_flag variable based on the currently
+ * loaded policy. Based on this flag, the decision to short-circuit
+ * out of a function, or not to call the function in the first place,
+ * can be made earlier.
+ */
+void ima_update_policy_flag(void)
+{
+ struct ima_rule_entry *entry;
+
+ list_for_each_entry(entry, ima_rules, list) {
+ if (entry->action & IMA_DO_MASK)
+ ima_policy_flag |= entry->action;
+ }
+
+ ima_appraise |= (build_ima_appraise | temp_ima_appraise);
+ if (!ima_appraise)
+ ima_policy_flag &= ~IMA_APPRAISE;
+}
+
+static int ima_appraise_flag(enum ima_hooks func)
+{
+ if (func == MODULE_CHECK)
+ return IMA_APPRAISE_MODULES;
+ else if (func == FIRMWARE_CHECK)
+ return IMA_APPRAISE_FIRMWARE;
+ else if (func == POLICY_CHECK)
+ return IMA_APPRAISE_POLICY;
+ else if (func == KEXEC_KERNEL_CHECK)
+ return IMA_APPRAISE_KEXEC;
+ return 0;
+}
+
+/**
+ * ima_init_policy - initialize the default measure rules.
+ *
+ * ima_rules points to either the ima_default_rules or the
+ * new ima_policy_rules.
+ */
+void __init ima_init_policy(void)
+{
+ int i, measure_entries, appraise_entries, secure_boot_entries;
+
+ /* if !ima_policy set entries = 0 so we load NO default rules */
+ measure_entries = ima_policy ? ARRAY_SIZE(dont_measure_rules) : 0;
+ appraise_entries = ima_use_appraise_tcb ?
+ ARRAY_SIZE(default_appraise_rules) : 0;
+ secure_boot_entries = ima_use_secure_boot ?
+ ARRAY_SIZE(secure_boot_rules) : 0;
+
+ for (i = 0; i < measure_entries; i++)
+ list_add_tail(&dont_measure_rules[i].list, &ima_default_rules);
+
+ switch (ima_policy) {
+ case ORIGINAL_TCB:
+ for (i = 0; i < ARRAY_SIZE(original_measurement_rules); i++)
+ list_add_tail(&original_measurement_rules[i].list,
+ &ima_default_rules);
+ break;
+ case DEFAULT_TCB:
+ for (i = 0; i < ARRAY_SIZE(default_measurement_rules); i++)
+ list_add_tail(&default_measurement_rules[i].list,
+ &ima_default_rules);
+ default:
+ break;
+ }
+
+ /*
+ * Insert the builtin "secure_boot" policy rules requiring file
+ * signatures, prior to any other appraise rules.
+ */
+ for (i = 0; i < secure_boot_entries; i++) {
+ list_add_tail(&secure_boot_rules[i].list, &ima_default_rules);
+ temp_ima_appraise |=
+ ima_appraise_flag(secure_boot_rules[i].func);
+ }
+
+ /*
+ * Insert the build time appraise rules requiring file signatures
+ * for both the initial and custom policies, prior to other appraise
+ * rules.
+ */
+ for (i = 0; i < ARRAY_SIZE(build_appraise_rules); i++) {
+ struct ima_rule_entry *entry;
+
+ if (!secure_boot_entries)
+ list_add_tail(&build_appraise_rules[i].list,
+ &ima_default_rules);
+
+ entry = kmemdup(&build_appraise_rules[i], sizeof(*entry),
+ GFP_KERNEL);
+ if (entry)
+ list_add_tail(&entry->list, &ima_policy_rules);
+ build_ima_appraise |=
+ ima_appraise_flag(build_appraise_rules[i].func);
+ }
+
+ for (i = 0; i < appraise_entries; i++) {
+ list_add_tail(&default_appraise_rules[i].list,
+ &ima_default_rules);
+ if (default_appraise_rules[i].func == POLICY_CHECK)
+ temp_ima_appraise |= IMA_APPRAISE_POLICY;
+ }
+
+ ima_update_policy_flag();
+}
+
+/* Make sure we have a valid policy, at least containing some rules. */
+int ima_check_policy(void)
+{
+ if (list_empty(&ima_temp_rules))
+ return -EINVAL;
+ return 0;
+}
+
+/**
+ * ima_update_policy - update default_rules with new measure rules
+ *
+ * Called on file .release to update the default rules with a completely new
+ * policy. What we do here is splice ima_temp_rules onto ima_policy_rules, so
+ * the new rules are appended to the active policy. The policy may be updated
+ * multiple times and this is the RCU updater.
+ *
+ * Policy rules are never deleted, so ima_policy_flag gets zeroed only once,
+ * when we switch from the default policy to the user-defined one.
+ */
+void ima_update_policy(void)
+{
+ struct list_head *policy = &ima_policy_rules;
+
+ list_splice_tail_init_rcu(&ima_temp_rules, policy, synchronize_rcu);
+
+ if (ima_rules != policy) {
+ ima_policy_flag = 0;
+ ima_rules = policy;
+ }
+ ima_update_policy_flag();
+}
+
+enum {
+ Opt_err = -1,
+ Opt_measure = 1, Opt_dont_measure,
+ Opt_appraise, Opt_dont_appraise,
+ Opt_audit, Opt_hash, Opt_dont_hash,
+ Opt_obj_user, Opt_obj_role, Opt_obj_type,
+ Opt_subj_user, Opt_subj_role, Opt_subj_type,
+ Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname,
+ Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq,
+ Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
+ Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
+ Opt_appraise_type, Opt_permit_directio,
+ Opt_pcr
+};
+
+static match_table_t policy_tokens = {
+ {Opt_measure, "measure"},
+ {Opt_dont_measure, "dont_measure"},
+ {Opt_appraise, "appraise"},
+ {Opt_dont_appraise, "dont_appraise"},
+ {Opt_audit, "audit"},
+ {Opt_hash, "hash"},
+ {Opt_dont_hash, "dont_hash"},
+ {Opt_obj_user, "obj_user=%s"},
+ {Opt_obj_role, "obj_role=%s"},
+ {Opt_obj_type, "obj_type=%s"},
+ {Opt_subj_user, "subj_user=%s"},
+ {Opt_subj_role, "subj_role=%s"},
+ {Opt_subj_type, "subj_type=%s"},
+ {Opt_func, "func=%s"},
+ {Opt_mask, "mask=%s"},
+ {Opt_fsmagic, "fsmagic=%s"},
+ {Opt_fsname, "fsname=%s"},
+ {Opt_fsuuid, "fsuuid=%s"},
+ {Opt_uid_eq, "uid=%s"},
+ {Opt_euid_eq, "euid=%s"},
+ {Opt_fowner_eq, "fowner=%s"},
+ {Opt_uid_gt, "uid>%s"},
+ {Opt_euid_gt, "euid>%s"},
+ {Opt_fowner_gt, "fowner>%s"},
+ {Opt_uid_lt, "uid<%s"},
+ {Opt_euid_lt, "euid<%s"},
+ {Opt_fowner_lt, "fowner<%s"},
+ {Opt_appraise_type, "appraise_type=%s"},
+ {Opt_permit_directio, "permit_directio"},
+ {Opt_pcr, "pcr=%s"},
+ {Opt_err, NULL}
+};
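+
+/*
+ * For illustration, a policy rule combines exactly one action token with any
+ * number of condition tokens from the table above, e.g. (hypothetical rules,
+ * not the built-in defaults):
+ *
+ *	measure func=BPRM_CHECK mask=MAY_EXEC pcr=11
+ *	dont_measure fsmagic=0x9fa0
+ *	appraise func=FILE_CHECK fowner=0 appraise_type=imasig
+ *
+ * ima_parse_rule() below rejects rules with more than one action or with a
+ * repeated condition.
+ */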
+
+static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ substring_t *args, int lsm_rule, int audit_type)
+{
+ int result;
+
+ if (entry->lsm[lsm_rule].rule)
+ return -EINVAL;
+
+ entry->lsm[lsm_rule].args_p = match_strdup(args);
+ if (!entry->lsm[lsm_rule].args_p)
+ return -ENOMEM;
+
+ entry->lsm[lsm_rule].type = audit_type;
+ result = security_filter_rule_init(entry->lsm[lsm_rule].type,
+ Audit_equal,
+ entry->lsm[lsm_rule].args_p,
+ &entry->lsm[lsm_rule].rule);
+ if (!entry->lsm[lsm_rule].rule) {
+ kfree(entry->lsm[lsm_rule].args_p);
+ return -EINVAL;
+ }
+
+ return result;
+}
+
+static void ima_log_string_op(struct audit_buffer *ab, char *key, char *value,
+ bool (*rule_operator)(kuid_t, kuid_t))
+{
+ if (!ab)
+ return;
+
+ if (rule_operator == &uid_gt)
+ audit_log_format(ab, "%s>", key);
+ else if (rule_operator == &uid_lt)
+ audit_log_format(ab, "%s<", key);
+ else
+ audit_log_format(ab, "%s=", key);
+ audit_log_format(ab, "%s ", value);
+}
+static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
+{
+ ima_log_string_op(ab, key, value, NULL);
+}
+
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+{
+ struct audit_buffer *ab;
+ char *from;
+ char *p;
+ bool uid_token;
+ int result = 0;
+
+ ab = integrity_audit_log_start(audit_context(), GFP_KERNEL,
+ AUDIT_INTEGRITY_POLICY_RULE);
+
+ entry->uid = INVALID_UID;
+ entry->fowner = INVALID_UID;
+ entry->uid_op = &uid_eq;
+ entry->fowner_op = &uid_eq;
+ entry->action = UNKNOWN;
+ while ((p = strsep(&rule, " \t")) != NULL) {
+ substring_t args[MAX_OPT_ARGS];
+ int token;
+ unsigned long lnum;
+
+ if (result < 0)
+ break;
+ if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
+ continue;
+ token = match_token(p, policy_tokens, args);
+ switch (token) {
+ case Opt_measure:
+ ima_log_string(ab, "action", "measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = MEASURE;
+ break;
+ case Opt_dont_measure:
+ ima_log_string(ab, "action", "dont_measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_MEASURE;
+ break;
+ case Opt_appraise:
+ ima_log_string(ab, "action", "appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = APPRAISE;
+ break;
+ case Opt_dont_appraise:
+ ima_log_string(ab, "action", "dont_appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_APPRAISE;
+ break;
+ case Opt_audit:
+ ima_log_string(ab, "action", "audit");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = AUDIT;
+ break;
+ case Opt_hash:
+ ima_log_string(ab, "action", "hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = HASH;
+ break;
+ case Opt_dont_hash:
+ ima_log_string(ab, "action", "dont_hash");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_HASH;
+ break;
+ case Opt_func:
+ ima_log_string(ab, "func", args[0].from);
+
+ if (entry->func)
+ result = -EINVAL;
+
+ if (strcmp(args[0].from, "FILE_CHECK") == 0)
+ entry->func = FILE_CHECK;
+ /* PATH_CHECK is for backwards compat */
+ else if (strcmp(args[0].from, "PATH_CHECK") == 0)
+ entry->func = FILE_CHECK;
+ else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
+ entry->func = MODULE_CHECK;
+ else if (strcmp(args[0].from, "FIRMWARE_CHECK") == 0)
+ entry->func = FIRMWARE_CHECK;
+ else if ((strcmp(args[0].from, "FILE_MMAP") == 0)
+ || (strcmp(args[0].from, "MMAP_CHECK") == 0))
+ entry->func = MMAP_CHECK;
+ else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
+ entry->func = BPRM_CHECK;
+ else if (strcmp(args[0].from, "CREDS_CHECK") == 0)
+ entry->func = CREDS_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_KERNEL_CHECK") ==
+ 0)
+ entry->func = KEXEC_KERNEL_CHECK;
+ else if (strcmp(args[0].from, "KEXEC_INITRAMFS_CHECK")
+ == 0)
+ entry->func = KEXEC_INITRAMFS_CHECK;
+ else if (strcmp(args[0].from, "POLICY_CHECK") == 0)
+ entry->func = POLICY_CHECK;
+ else
+ result = -EINVAL;
+ if (!result)
+ entry->flags |= IMA_FUNC;
+ break;
+ case Opt_mask:
+ ima_log_string(ab, "mask", args[0].from);
+
+ if (entry->mask)
+ result = -EINVAL;
+
+ from = args[0].from;
+ if (*from == '^')
+ from++;
+
+ if ((strcmp(from, "MAY_EXEC")) == 0)
+ entry->mask = MAY_EXEC;
+ else if (strcmp(from, "MAY_WRITE") == 0)
+ entry->mask = MAY_WRITE;
+ else if (strcmp(from, "MAY_READ") == 0)
+ entry->mask = MAY_READ;
+ else if (strcmp(from, "MAY_APPEND") == 0)
+ entry->mask = MAY_APPEND;
+ else
+ result = -EINVAL;
+ if (!result)
+ entry->flags |= (*args[0].from == '^')
+ ? IMA_INMASK : IMA_MASK;
+ break;
+ case Opt_fsmagic:
+ ima_log_string(ab, "fsmagic", args[0].from);
+
+ if (entry->fsmagic) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 16, &entry->fsmagic);
+ if (!result)
+ entry->flags |= IMA_FSMAGIC;
+ break;
+ case Opt_fsname:
+ ima_log_string(ab, "fsname", args[0].from);
+
+ entry->fsname = kstrdup(args[0].from, GFP_KERNEL);
+ if (!entry->fsname) {
+ result = -ENOMEM;
+ break;
+ }
+ result = 0;
+ entry->flags |= IMA_FSNAME;
+ break;
+ case Opt_fsuuid:
+ ima_log_string(ab, "fsuuid", args[0].from);
+
+ if (!uuid_is_null(&entry->fsuuid)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = uuid_parse(args[0].from, &entry->fsuuid);
+ if (!result)
+ entry->flags |= IMA_FSUUID;
+ break;
+ case Opt_uid_gt:
+ case Opt_euid_gt:
+ entry->uid_op = &uid_gt;
+ case Opt_uid_lt:
+ case Opt_euid_lt:
+ if ((token == Opt_uid_lt) || (token == Opt_euid_lt))
+ entry->uid_op = &uid_lt;
+ case Opt_uid_eq:
+ case Opt_euid_eq:
+ uid_token = (token == Opt_uid_eq) ||
+ (token == Opt_uid_gt) ||
+ (token == Opt_uid_lt);
+
+ ima_log_string_op(ab, uid_token ? "uid" : "euid",
+ args[0].from, entry->uid_op);
+
+ if (uid_valid(entry->uid)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->uid = make_kuid(current_user_ns(),
+ (uid_t) lnum);
+ if (!uid_valid(entry->uid) ||
+ (uid_t)lnum != lnum)
+ result = -EINVAL;
+ else
+ entry->flags |= uid_token
+ ? IMA_UID : IMA_EUID;
+ }
+ break;
+ case Opt_fowner_gt:
+ entry->fowner_op = &uid_gt;
+ case Opt_fowner_lt:
+ if (token == Opt_fowner_lt)
+ entry->fowner_op = &uid_lt;
+ case Opt_fowner_eq:
+ ima_log_string_op(ab, "fowner", args[0].from,
+ entry->fowner_op);
+
+ if (uid_valid(entry->fowner)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum);
+ if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_FOWNER;
+ }
+ break;
+ case Opt_obj_user:
+ ima_log_string(ab, "obj_user", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_OBJ_USER,
+ AUDIT_OBJ_USER);
+ break;
+ case Opt_obj_role:
+ ima_log_string(ab, "obj_role", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_OBJ_ROLE,
+ AUDIT_OBJ_ROLE);
+ break;
+ case Opt_obj_type:
+ ima_log_string(ab, "obj_type", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_OBJ_TYPE,
+ AUDIT_OBJ_TYPE);
+ break;
+ case Opt_subj_user:
+ ima_log_string(ab, "subj_user", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_SUBJ_USER,
+ AUDIT_SUBJ_USER);
+ break;
+ case Opt_subj_role:
+ ima_log_string(ab, "subj_role", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_SUBJ_ROLE,
+ AUDIT_SUBJ_ROLE);
+ break;
+ case Opt_subj_type:
+ ima_log_string(ab, "subj_type", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
+ LSM_SUBJ_TYPE,
+ AUDIT_SUBJ_TYPE);
+ break;
+ case Opt_appraise_type:
+ if (entry->action != APPRAISE) {
+ result = -EINVAL;
+ break;
+ }
+
+ ima_log_string(ab, "appraise_type", args[0].from);
+ if ((strcmp(args[0].from, "imasig")) == 0)
+ entry->flags |= IMA_DIGSIG_REQUIRED;
+ else
+ result = -EINVAL;
+ break;
+ case Opt_permit_directio:
+ entry->flags |= IMA_PERMIT_DIRECTIO;
+ break;
+ case Opt_pcr:
+ if (entry->action != MEASURE) {
+ result = -EINVAL;
+ break;
+ }
+ ima_log_string(ab, "pcr", args[0].from);
+
+ result = kstrtoint(args[0].from, 10, &entry->pcr);
+ if (result || INVALID_PCR(entry->pcr))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_PCR;
+
+ break;
+ case Opt_err:
+ ima_log_string(ab, "UNKNOWN", p);
+ result = -EINVAL;
+ break;
+ }
+ }
+ if (!result && (entry->action == UNKNOWN))
+ result = -EINVAL;
+ else if (entry->action == APPRAISE)
+ temp_ima_appraise |= ima_appraise_flag(entry->func);
+
+ audit_log_format(ab, "res=%d", !result);
+ audit_log_end(ab);
+ return result;
+}
+
+/**
+ * ima_parse_add_rule - add a rule to ima_policy_rules
+ * @rule: ima measurement policy rule
+ *
+ * Avoid locking by allowing just one writer at a time in ima_write_policy().
+ * Returns the length of the rule parsed, or an error code on failure.
+ */
+ssize_t ima_parse_add_rule(char *rule)
+{
+ static const char op[] = "update_policy";
+ char *p;
+ struct ima_rule_entry *entry;
+ ssize_t result, len;
+ int audit_info = 0;
+
+ p = strsep(&rule, "\n");
+ len = strlen(p) + 1;
+ p += strspn(p, " \t");
+
+ if (*p == '#' || *p == '\0')
+ return len;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
+ NULL, op, "-ENOMEM", -ENOMEM, audit_info);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&entry->list);
+
+ result = ima_parse_rule(p, entry);
+ if (result) {
+ kfree(entry);
+ integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
+ NULL, op, "invalid-policy", result,
+ audit_info);
+ return result;
+ }
+
+ list_add_tail(&entry->list, &ima_temp_rules);
+
+ return len;
+}
+
+/**
+ * ima_delete_rules() - called to clean up an invalid in-flight policy.
+ * We don't need locking as we operate on the temp list, which is
+ * different from the active one. There is also only one user of
+ * ima_delete_rules() at a time.
+ */
+void ima_delete_rules(void)
+{
+ struct ima_rule_entry *entry, *tmp;
+ int i;
+
+ temp_ima_appraise = 0;
+ list_for_each_entry_safe(entry, tmp, &ima_temp_rules, list) {
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ kfree(entry->lsm[i].args_p);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+#ifdef CONFIG_IMA_READ_POLICY
+enum {
+ mask_exec = 0, mask_write, mask_read, mask_append
+};
+
+static const char *const mask_tokens[] = {
+ "^MAY_EXEC",
+ "^MAY_WRITE",
+ "^MAY_READ",
+ "^MAY_APPEND"
+};
+
+#define __ima_hook_stringify(str) (#str),
+
+static const char *const func_tokens[] = {
+ __ima_hooks(__ima_hook_stringify)
+};
+
+void *ima_policy_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t l = *pos;
+ struct ima_rule_entry *entry;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, ima_rules, list) {
+ if (!l--) {
+ rcu_read_unlock();
+ return entry;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct ima_rule_entry *entry = v;
+
+ rcu_read_lock();
+ entry = list_entry_rcu(entry->list.next, struct ima_rule_entry, list);
+ rcu_read_unlock();
+ (*pos)++;
+
+ return (&entry->list == ima_rules) ? NULL : entry;
+}
+
+void ima_policy_stop(struct seq_file *m, void *v)
+{
+}
+
+#define pt(token) policy_tokens[token + Opt_err].pattern
+#define mt(token) mask_tokens[token]
+
+/*
+ * policy_func_show - display the ima_hooks policy rule
+ */
+static void policy_func_show(struct seq_file *m, enum ima_hooks func)
+{
+ if (func > 0 && func < MAX_CHECK)
+ seq_printf(m, "func=%s ", func_tokens[func]);
+ else
+ seq_printf(m, "func=%d ", func);
+}
+
+int ima_policy_show(struct seq_file *m, void *v)
+{
+ struct ima_rule_entry *entry = v;
+ int i;
+ char tbuf[64] = {0,};
+ int offset = 0;
+
+ rcu_read_lock();
+
+ if (entry->action & MEASURE)
+ seq_puts(m, pt(Opt_measure));
+ if (entry->action & DONT_MEASURE)
+ seq_puts(m, pt(Opt_dont_measure));
+ if (entry->action & APPRAISE)
+ seq_puts(m, pt(Opt_appraise));
+ if (entry->action & DONT_APPRAISE)
+ seq_puts(m, pt(Opt_dont_appraise));
+ if (entry->action & AUDIT)
+ seq_puts(m, pt(Opt_audit));
+ if (entry->action & HASH)
+ seq_puts(m, pt(Opt_hash));
+ if (entry->action & DONT_HASH)
+ seq_puts(m, pt(Opt_dont_hash));
+
+ seq_puts(m, " ");
+
+ if (entry->flags & IMA_FUNC)
+ policy_func_show(m, entry->func);
+
+ if ((entry->flags & IMA_MASK) || (entry->flags & IMA_INMASK)) {
+ if (entry->flags & IMA_MASK)
+ offset = 1;
+ if (entry->mask & MAY_EXEC)
+ seq_printf(m, pt(Opt_mask), mt(mask_exec) + offset);
+ if (entry->mask & MAY_WRITE)
+ seq_printf(m, pt(Opt_mask), mt(mask_write) + offset);
+ if (entry->mask & MAY_READ)
+ seq_printf(m, pt(Opt_mask), mt(mask_read) + offset);
+ if (entry->mask & MAY_APPEND)
+ seq_printf(m, pt(Opt_mask), mt(mask_append) + offset);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSMAGIC) {
+ snprintf(tbuf, sizeof(tbuf), "0x%lx", entry->fsmagic);
+ seq_printf(m, pt(Opt_fsmagic), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSNAME) {
+ snprintf(tbuf, sizeof(tbuf), "%s", entry->fsname);
+ seq_printf(m, pt(Opt_fsname), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_PCR) {
+ snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr);
+ seq_printf(m, pt(Opt_pcr), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FSUUID) {
+ seq_printf(m, "fsuuid=%pU", &entry->fsuuid);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_UID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
+ if (entry->uid_op == &uid_gt)
+ seq_printf(m, pt(Opt_uid_gt), tbuf);
+ else if (entry->uid_op == &uid_lt)
+ seq_printf(m, pt(Opt_uid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_uid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_EUID) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid));
+ if (entry->uid_op == &uid_gt)
+ seq_printf(m, pt(Opt_euid_gt), tbuf);
+ else if (entry->uid_op == &uid_lt)
+ seq_printf(m, pt(Opt_euid_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_euid_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ if (entry->flags & IMA_FOWNER) {
+ snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->fowner));
+ if (entry->fowner_op == &uid_gt)
+ seq_printf(m, pt(Opt_fowner_gt), tbuf);
+ else if (entry->fowner_op == &uid_lt)
+ seq_printf(m, pt(Opt_fowner_lt), tbuf);
+ else
+ seq_printf(m, pt(Opt_fowner_eq), tbuf);
+ seq_puts(m, " ");
+ }
+
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (entry->lsm[i].rule) {
+ switch (i) {
+ case LSM_OBJ_USER:
+ seq_printf(m, pt(Opt_obj_user),
+ (char *)entry->lsm[i].args_p);
+ break;
+ case LSM_OBJ_ROLE:
+ seq_printf(m, pt(Opt_obj_role),
+ (char *)entry->lsm[i].args_p);
+ break;
+ case LSM_OBJ_TYPE:
+ seq_printf(m, pt(Opt_obj_type),
+ (char *)entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_USER:
+ seq_printf(m, pt(Opt_subj_user),
+ (char *)entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_ROLE:
+ seq_printf(m, pt(Opt_subj_role),
+ (char *)entry->lsm[i].args_p);
+ break;
+ case LSM_SUBJ_TYPE:
+ seq_printf(m, pt(Opt_subj_type),
+ (char *)entry->lsm[i].args_p);
+ break;
+ }
+ }
+ }
+ if (entry->flags & IMA_DIGSIG_REQUIRED)
+ seq_puts(m, "appraise_type=imasig ");
+ if (entry->flags & IMA_PERMIT_DIRECTIO)
+ seq_puts(m, "permit_directio ");
+ rcu_read_unlock();
+ seq_puts(m, "\n");
+ return 0;
+}
+#endif /* CONFIG_IMA_READ_POLICY */
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
new file mode 100644
index 000000000..b186819bd
--- /dev/null
+++ b/security/integrity/ima/ima_queue.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2005,2006,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Serge Hallyn <serue@us.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_queue.c
+ * Implements a queue that stores template measurements and
+ * maintains an aggregate over the stored measurements
+ * in the pre-configured TPM PCR (if available).
+ * The measurement list is append-only. No entry is
+ * ever removed or changed during the boot cycle.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include "ima.h"
+
+#define AUDIT_CAUSE_LEN_MAX 32
+
+LIST_HEAD(ima_measurements); /* list of all measurements */
+#ifdef CONFIG_IMA_KEXEC
+static unsigned long binary_runtime_size;
+#else
+static unsigned long binary_runtime_size = ULONG_MAX;
+#endif
+
+/* key: inode (before secure-hashing a file) */
+struct ima_h_table ima_htable = {
+ .len = ATOMIC_LONG_INIT(0),
+ .violations = ATOMIC_LONG_INIT(0),
+ .queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
+};
+
+/* mutex protects atomicity of extending the measurement list
+ * and extending the TPM PCR aggregate. Since tpm_pcr_extend() can take
+ * a long time (and the TPM driver uses a mutex), we can't use a spinlock.
+ */
+static DEFINE_MUTEX(ima_extend_list_mutex);
+
+/* look up the digest value in the hash table, and return the entry */
+static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
+ int pcr)
+{
+ struct ima_queue_entry *qe, *ret = NULL;
+ unsigned int key;
+ int rc;
+
+ key = ima_hash_key(digest_value);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
+ rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
+ if ((rc == 0) && (qe->entry->pcr == pcr)) {
+ ret = qe;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Calculate the memory required for serializing a single
+ * binary_runtime_measurement list entry, which contains a
+ * couple of variable-length fields (e.g. template name and data).
+ */
+static int get_binary_runtime_size(struct ima_template_entry *entry)
+{
+ int size = 0;
+
+ size += sizeof(u32); /* pcr */
+ size += sizeof(entry->digest);
+ size += sizeof(int); /* template name size field */
+ size += strlen(entry->template_desc->name);
+ size += sizeof(entry->template_data_len);
+ size += entry->template_data_len;
+ return size;
+}
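+
+/*
+ * Worked example, assuming TPM_DIGEST_SIZE is 20 and 4-byte length fields:
+ * an "ima-ng" entry (6-character template name) carrying 48 bytes of
+ * template data serializes to 4 + 20 + 4 + 6 + 4 + 48 = 86 bytes.
+ */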
+
+/* ima_add_template_entry helper function:
+ * - Add a template entry to the measurement list and, for all entries
+ *   except those carried across kexec, to the hash table.
+ *
+ * (Called with ima_extend_list_mutex held.)
+ */
+static int ima_add_digest_entry(struct ima_template_entry *entry,
+ bool update_htable)
+{
+ struct ima_queue_entry *qe;
+ unsigned int key;
+
+ qe = kmalloc(sizeof(*qe), GFP_KERNEL);
+ if (qe == NULL) {
+ pr_err("OUT OF MEMORY ERROR creating queue entry\n");
+ return -ENOMEM;
+ }
+ qe->entry = entry;
+
+ INIT_LIST_HEAD(&qe->later);
+ list_add_tail_rcu(&qe->later, &ima_measurements);
+
+ atomic_long_inc(&ima_htable.len);
+ if (update_htable) {
+ key = ima_hash_key(entry->digest);
+ hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
+ }
+
+ if (binary_runtime_size != ULONG_MAX) {
+ int size;
+
+ size = get_binary_runtime_size(entry);
+ binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
+ binary_runtime_size + size : ULONG_MAX;
+ }
+ return 0;
+}
+
+/*
+ * Return the amount of memory required for serializing the
+ * entire binary_runtime_measurement list, including the ima_kexec_hdr
+ * structure.
+ */
+unsigned long ima_get_binary_runtime_size(void)
+{
+ if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
+ return ULONG_MAX;
+ else
+ return binary_runtime_size + sizeof(struct ima_kexec_hdr);
+};
+
+static int ima_pcr_extend(const u8 *hash, int pcr)
+{
+ int result = 0;
+
+ if (!ima_tpm_chip)
+ return result;
+
+ result = tpm_pcr_extend(ima_tpm_chip, pcr, hash);
+ if (result != 0)
+ pr_err("Error Communicating to TPM chip, result: %d\n", result);
+ return result;
+}
+
+/*
+ * Add template entry to the measurement list and hash table, and
+ * extend the pcr.
+ *
+ * On systems which support carrying the IMA measurement list across
+ * kexec, maintain the total memory size required for serializing the
+ * binary_runtime_measurements.
+ */
+int ima_add_template_entry(struct ima_template_entry *entry, int violation,
+ const char *op, struct inode *inode,
+ const unsigned char *filename)
+{
+ u8 digest[TPM_DIGEST_SIZE];
+ const char *audit_cause = "hash_added";
+ char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
+ int audit_info = 1;
+ int result = 0, tpmresult = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ if (!violation) {
+ memcpy(digest, entry->digest, sizeof(digest));
+ if (ima_lookup_digest_entry(digest, entry->pcr)) {
+ audit_cause = "hash_exists";
+ result = -EEXIST;
+ goto out;
+ }
+ }
+
+ result = ima_add_digest_entry(entry, 1);
+ if (result < 0) {
+ audit_cause = "ENOMEM";
+ audit_info = 0;
+ goto out;
+ }
+
+ if (violation) /* invalidate pcr */
+ memset(digest, 0xff, sizeof(digest));
+
+ tpmresult = ima_pcr_extend(digest, entry->pcr);
+ if (tpmresult != 0) {
+ snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
+ tpmresult);
+ audit_cause = tpm_audit_cause;
+ audit_info = 0;
+ }
+out:
+ mutex_unlock(&ima_extend_list_mutex);
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
+ op, audit_cause, result, audit_info);
+ return result;
+}
+
+int ima_restore_measurement_entry(struct ima_template_entry *entry)
+{
+ int result = 0;
+
+ mutex_lock(&ima_extend_list_mutex);
+ result = ima_add_digest_entry(entry, 0);
+ mutex_unlock(&ima_extend_list_mutex);
+ return result;
+}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 000000000..4dfdccce4
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template.c
+ * Helpers to manage template descriptors.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/rculist.h>
+#include "ima.h"
+#include "ima_template_lib.h"
+
+enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME,
+ HDR_TEMPLATE_DATA, HDR__LAST };
+
+static struct ima_template_desc builtin_templates[] = {
+ {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+ {.name = "ima-ng", .fmt = "d-ng|n-ng"},
+ {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+ {.name = "", .fmt = ""}, /* placeholder for a custom format */
+};
+
+static LIST_HEAD(defined_templates);
+static DEFINE_SPINLOCK(template_list);
+static int template_setup_done;
+
+static struct ima_template_field supported_fields[] = {
+ {.field_id = "d", .field_init = ima_eventdigest_init,
+ .field_show = ima_show_template_digest},
+ {.field_id = "n", .field_init = ima_eventname_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "n-ng", .field_init = ima_eventname_ng_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "sig", .field_init = ima_eventsig_init,
+ .field_show = ima_show_template_sig},
+};
+#define MAX_TEMPLATE_NAME_LEN 15
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *lookup_template_desc(const char *name);
+static int template_desc_init_fields(const char *template_fmt,
+ struct ima_template_field ***fields,
+ int *num_fields);
+
+static int __init ima_template_setup(char *str)
+{
+ struct ima_template_desc *template_desc;
+ int template_len = strlen(str);
+
+ if (template_setup_done)
+ return 1;
+
+ if (!ima_template)
+ ima_init_template_list();
+
+ /*
+ * Verify that a template with the supplied name exists.
+ * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+ */
+ template_desc = lookup_template_desc(str);
+ if (!template_desc) {
+ pr_err("template %s not found, using %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
+ return 1;
+ }
+
+ /*
+ * Verify whether the current hash algorithm is supported
+ * by the 'ima' template.
+ */
+ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+ ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+ pr_err("template does not support hash alg\n");
+ return 1;
+ }
+
+ ima_template = template_desc;
+ template_setup_done = 1;
+ return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static int __init ima_template_fmt_setup(char *str)
+{
+ int num_templates = ARRAY_SIZE(builtin_templates);
+
+ if (template_setup_done)
+ return 1;
+
+ if (template_desc_init_fields(str, NULL, NULL) < 0) {
+ pr_err("format string '%s' not valid, using template %s\n",
+ str, CONFIG_IMA_DEFAULT_TEMPLATE);
+ return 1;
+ }
+
+ builtin_templates[num_templates - 1].fmt = str;
+ ima_template = builtin_templates + num_templates - 1;
+ template_setup_done = 1;
+
+ return 1;
+}
+__setup("ima_template_fmt=", ima_template_fmt_setup);
+
+static struct ima_template_desc *lookup_template_desc(const char *name)
+{
+ struct ima_template_desc *template_desc;
+ int found = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(template_desc, &defined_templates, list) {
+ if ((strcmp(template_desc->name, name) == 0) ||
+ (strcmp(template_desc->fmt, name) == 0)) {
+ found = 1;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found ? template_desc : NULL;
+}
+
+static struct ima_template_field *lookup_template_field(const char *field_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+ if (strncmp(supported_fields[i].field_id, field_id,
+ IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+ return &supported_fields[i];
+ return NULL;
+}
+
+static int template_fmt_size(const char *template_fmt)
+{
+ char c;
+ int template_fmt_len = strlen(template_fmt);
+ int i = 0, j = 0;
+
+ while (i < template_fmt_len) {
+ c = template_fmt[i];
+ if (c == '|')
+ j++;
+ i++;
+ }
+
+ return j + 1;
+}
+
+static int template_desc_init_fields(const char *template_fmt,
+ struct ima_template_field ***fields,
+ int *num_fields)
+{
+ const char *template_fmt_ptr;
+ struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
+ int template_num_fields;
+ int i, len;
+
+ if (num_fields && *num_fields > 0) /* already initialized? */
+ return 0;
+
+ template_num_fields = template_fmt_size(template_fmt);
+
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
+ pr_err("format string '%s' contains too many fields\n",
+ template_fmt);
+ return -EINVAL;
+ }
+
+ for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields;
+ i++, template_fmt_ptr += len + 1) {
+ char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1];
+
+ len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr;
+ if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) {
+ pr_err("Invalid field with length %d\n", len);
+ return -EINVAL;
+ }
+
+ memcpy(tmp_field_id, template_fmt_ptr, len);
+ tmp_field_id[len] = '\0';
+ found_fields[i] = lookup_template_field(tmp_field_id);
+ if (!found_fields[i]) {
+ pr_err("field '%s' not found\n", tmp_field_id);
+ return -ENOENT;
+ }
+ }
+
+ if (fields && num_fields) {
+ *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL);
+ if (*fields == NULL)
+ return -ENOMEM;
+
+ memcpy(*fields, found_fields, i * sizeof(*fields));
+ *num_fields = i;
+ }
+
+ return 0;
+}
+
+void ima_init_template_list(void)
+{
+ int i;
+
+ if (!list_empty(&defined_templates))
+ return;
+
+ spin_lock(&template_list);
+ for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
+ list_add_tail_rcu(&builtin_templates[i].list,
+ &defined_templates);
+ }
+ spin_unlock(&template_list);
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+ if (!ima_template) {
+ ima_init_template_list();
+ ima_template =
+ lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ }
+ return ima_template;
+}
+
+int __init ima_init_template(void)
+{
+ struct ima_template_desc *template = ima_template_desc_current();
+ int result;
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ pr_err("template %s init failed, result: %d\n",
+ (strlen(template->name) ?
+ template->name : template->fmt), result);
+
+ return result;
+}
+
+static struct ima_template_desc *restore_template_fmt(char *template_name)
+{
+ struct ima_template_desc *template_desc = NULL;
+ int ret;
+
+ ret = template_desc_init_fields(template_name, NULL, NULL);
+ if (ret < 0) {
+ pr_err("attempting to initialize the template \"%s\" failed\n",
+ template_name);
+ goto out;
+ }
+
+ template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
+ if (!template_desc)
+ goto out;
+
+ template_desc->name = "";
+ template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
+ if (!template_desc->fmt)
+ goto out;
+
+ spin_lock(&template_list);
+ list_add_tail_rcu(&template_desc->list, &defined_templates);
+ spin_unlock(&template_list);
+out:
+ return template_desc;
+}
+
+static int ima_restore_template_data(struct ima_template_desc *template_desc,
+ void *template_data,
+ int template_data_size,
+ struct ima_template_entry **entry)
+{
+ int ret = 0;
+ int i;
+
+ *entry = kzalloc(sizeof(**entry) +
+ template_desc->num_fields * sizeof(struct ima_field_data),
+ GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ ret = ima_parse_buf(template_data, template_data + template_data_size,
+ NULL, template_desc->num_fields,
+ (*entry)->template_data, NULL, NULL,
+ ENFORCE_FIELDS | ENFORCE_BUFEND, "template data");
+ if (ret < 0) {
+ kfree(*entry);
+ return ret;
+ }
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_field_data *field_data = &(*entry)->template_data[i];
+ u8 *data = field_data->data;
+
+ (*entry)->template_data[i].data =
+ kzalloc(field_data->len + 1, GFP_KERNEL);
+ if (!(*entry)->template_data[i].data) {
+ ret = -ENOMEM;
+ break;
+ }
+ memcpy((*entry)->template_data[i].data, data, field_data->len);
+ (*entry)->template_data_len += sizeof(field_data->len);
+ (*entry)->template_data_len += field_data->len;
+ }
+
+ if (ret < 0) {
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ }
+
+ return ret;
+}
+
+/* Restore the serialized binary measurement list without extending PCRs. */
+int ima_restore_measurement_list(loff_t size, void *buf)
+{
+ char template_name[MAX_TEMPLATE_NAME_LEN];
+
+ struct ima_kexec_hdr *khdr = buf;
+ struct ima_field_data hdr[HDR__LAST] = {
+ [HDR_PCR] = {.len = sizeof(u32)},
+ [HDR_DIGEST] = {.len = TPM_DIGEST_SIZE},
+ };
+
+ void *bufp = buf + sizeof(*khdr);
+ void *bufendp;
+ struct ima_template_entry *entry;
+ struct ima_template_desc *template_desc;
+ DECLARE_BITMAP(hdr_mask, HDR__LAST);
+ unsigned long count = 0;
+ int ret = 0;
+
+ if (!buf || size < sizeof(*khdr))
+ return 0;
+
+ if (ima_canonical_fmt) {
+ khdr->version = le16_to_cpu(khdr->version);
+ khdr->count = le64_to_cpu(khdr->count);
+ khdr->buffer_size = le64_to_cpu(khdr->buffer_size);
+ }
+
+ if (khdr->version != 1) {
+ pr_err("attempting to restore a incompatible measurement list");
+ return -EINVAL;
+ }
+
+ if (khdr->count > ULONG_MAX - 1) {
+ pr_err("attempting to restore too many measurements");
+ return -EINVAL;
+ }
+
+ bitmap_zero(hdr_mask, HDR__LAST);
+ bitmap_set(hdr_mask, HDR_PCR, 1);
+ bitmap_set(hdr_mask, HDR_DIGEST, 1);
+
+ /*
+ * ima kexec buffer prefix: version, buffer size, count
+ * v1 format: pcr, digest, template-name-len, template-name,
+ * template-data-size, template-data
+ */
+ bufendp = buf + khdr->buffer_size;
+ while ((bufp < bufendp) && (count++ < khdr->count)) {
+ int enforce_mask = ENFORCE_FIELDS;
+
+ enforce_mask |= (count == khdr->count) ? ENFORCE_BUFEND : 0;
+ ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL,
+ hdr_mask, enforce_mask, "entry header");
+ if (ret < 0)
+ break;
+
+ if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) {
+ pr_err("attempting to restore a template name that is too long\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* template name is not null terminated */
+ memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data,
+ hdr[HDR_TEMPLATE_NAME].len);
+ template_name[hdr[HDR_TEMPLATE_NAME].len] = 0;
+
+ if (strcmp(template_name, "ima") == 0) {
+ pr_err("attempting to restore an unsupported template \"%s\" failed\n",
+ template_name);
+ ret = -EINVAL;
+ break;
+ }
+
+ template_desc = lookup_template_desc(template_name);
+ if (!template_desc) {
+ template_desc = restore_template_fmt(template_name);
+ if (!template_desc)
+ break;
+ }
+
+ /*
+ * Only the running system's template format is initialized
+ * on boot. As needed, initialize the other template formats.
+ */
+ ret = template_desc_init_fields(template_desc->fmt,
+ &(template_desc->fields),
+ &(template_desc->num_fields));
+ if (ret < 0) {
+ pr_err("attempting to restore the template fmt \"%s\" failed\n",
+ template_desc->fmt);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = ima_restore_template_data(template_desc,
+ hdr[HDR_TEMPLATE_DATA].data,
+ hdr[HDR_TEMPLATE_DATA].len,
+ &entry);
+ if (ret < 0)
+ break;
+
+ memcpy(entry->digest, hdr[HDR_DIGEST].data,
+ hdr[HDR_DIGEST].len);
+ entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
+ le32_to_cpu(*(hdr[HDR_PCR].data));
+ ret = ima_restore_measurement_entry(entry);
+ if (ret < 0)
+ break;
+
+ }
+ return ret;
+}
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 000000000..48c5a1be8
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.c
+ * Library of supported template fields.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "ima_template_lib.h"
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+ if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+ return true;
+
+ return false;
+}
+
+enum data_formats {
+ DATA_FMT_DIGEST = 0,
+ DATA_FMT_DIGEST_WITH_ALGO,
+ DATA_FMT_STRING,
+ DATA_FMT_HEX
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf, *buf_ptr;
+ u32 buflen = datalen;
+
+ if (datafmt == DATA_FMT_STRING)
+ buflen = datalen + 1;
+
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data, datalen);
+
+ /*
+ * Replace all space characters with underscores in event names and
+ * strings. This avoids filenames with spaces, or that end with the
+ * suffix ' (deleted)', being split into multiple template fields when
+ * a measurement list is parsed (the space is the delimiter character
+ * for measurement lists in ASCII format).
+ */
+ if (datafmt == DATA_FMT_STRING) {
+ for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+ if (*buf_ptr == ' ')
+ *buf_ptr = '_';
+ }
+
+ field_data->data = buf;
+ field_data->len = buflen;
+ return 0;
+}
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf_ptr = field_data->data;
+ u32 buflen = field_data->len;
+
+ switch (datafmt) {
+ case DATA_FMT_DIGEST_WITH_ALGO:
+ buf_ptr = strnchr(field_data->data, buflen, ':');
+ if (buf_ptr != field_data->data)
+ seq_printf(m, "%s", field_data->data);
+
+ /* skip ':' and '\0' */
+ buf_ptr += 2;
+ buflen -= buf_ptr - field_data->data;
+ case DATA_FMT_DIGEST:
+ case DATA_FMT_HEX:
+ if (!buflen)
+ break;
+ ima_print_digest(m, buf_ptr, buflen);
+ break;
+ case DATA_FMT_STRING:
+ seq_printf(m, "%s", buf_ptr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
+ strlen(field_data->data) : field_data->len;
+
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
+ u32 field_len = !ima_canonical_fmt ? len : cpu_to_le32(len);
+
+ ima_putc(m, &field_len, sizeof(field_len));
+ }
+
+ if (!len)
+ return;
+
+ ima_putc(m, field_data->data, len);
+}
+
+static void ima_show_template_field_data(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ switch (show) {
+ case IMA_SHOW_ASCII:
+ ima_show_template_data_ascii(m, show, datafmt, field_data);
+ break;
+ case IMA_SHOW_BINARY:
+ case IMA_SHOW_BINARY_NO_FIELD_LEN:
+ case IMA_SHOW_BINARY_OLD_STRING_FMT:
+ ima_show_template_data_binary(m, show, datafmt, field_data);
+ break;
+ default:
+ break;
+ }
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+ field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+/**
+ * ima_parse_buf() - Parses lengths and data from an input buffer
+ * @bufstartp: Buffer start address.
+ * @bufendp: Buffer end address.
+ * @bufcurp: Pointer to remaining (non-parsed) data.
+ * @maxfields: Length of fields array.
+ * @fields: Array containing lengths and pointers of parsed data.
+ * @curfields: Number of array items containing parsed data.
+ * @len_mask: Bitmap (if bit is set, data length should not be parsed).
+ * @enforce_mask: Check if curfields == maxfields and/or bufcurp == bufendp.
+ * @bufname: String identifier of the input buffer.
+ *
+ * Return: 0 on success, -EINVAL on error.
+ */
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+ int maxfields, struct ima_field_data *fields, int *curfields,
+ unsigned long *len_mask, int enforce_mask, char *bufname)
+{
+ void *bufp = bufstartp;
+ int i;
+
+ for (i = 0; i < maxfields; i++) {
+ if (len_mask == NULL || !test_bit(i, len_mask)) {
+ if (bufp > (bufendp - sizeof(u32)))
+ break;
+
+ fields[i].len = *(u32 *)bufp;
+ if (ima_canonical_fmt)
+ fields[i].len = le32_to_cpu(fields[i].len);
+
+ bufp += sizeof(u32);
+ }
+
+ if (bufp > (bufendp - fields[i].len))
+ break;
+
+ fields[i].data = bufp;
+ bufp += fields[i].len;
+ }
+
+ if ((enforce_mask & ENFORCE_FIELDS) && i != maxfields) {
+ pr_err("%s: nr of fields mismatch: expected: %d, current: %d\n",
+ bufname, maxfields, i);
+ return -EINVAL;
+ }
+
+ if ((enforce_mask & ENFORCE_BUFEND) && bufp != bufendp) {
+ pr_err("%s: buf end mismatch: expected: %p, current: %p\n",
+ bufname, bufendp, bufp);
+ return -EINVAL;
+ }
+
+ if (curfields)
+ *curfields = i;
+
+ if (bufcurp)
+ *bufcurp = bufp;
+
+ return 0;
+}
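+
+/*
+ * For illustration, with a NULL len_mask each field is parsed as a 4-byte
+ * length followed by that many bytes of data, so a two-field buffer is
+ * laid out as | len0 | data0 ... | len1 | data1 ... |. Lengths are
+ * converted from little-endian when ima_canonical_fmt is set.
+ */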
+
+static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+ struct ima_field_data *field_data)
+{
+ /*
+ * digest formats:
+ * - DATA_FMT_DIGEST: digest
+ * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest,
+ * where <hash algo> is provided if the hash algorithm is not
+ * SHA1 or MD5
+ */
+ u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 };
+ enum data_formats fmt = DATA_FMT_DIGEST;
+ u32 offset = 0;
+
+ if (hash_algo < HASH_ALGO__LAST) {
+ fmt = DATA_FMT_DIGEST_WITH_ALGO;
+ offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, "%s",
+ hash_algo_name[hash_algo]);
+ buffer[offset] = ':';
+ offset += 2;
+ }
+
+ if (digest)
+ memcpy(buffer + offset, digest, digestsize);
+ else
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by
+ * IMA_DIGEST_SIZE.
+ */
+ offset += IMA_DIGEST_SIZE;
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
+}
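+
+/*
+ * For illustration: when hash_algo is HASH_ALGO__LAST the field holds the
+ * raw digest bytes (DATA_FMT_DIGEST); otherwise the digest is prefixed with
+ * the algorithm name, e.g. "sha256" ':' '\0' followed by the digest bytes
+ * (DATA_FMT_DIGEST_WITH_ALGO).
+ */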
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+ u8 *cur_digest = NULL;
+ u32 cur_digestsize = 0;
+ struct inode *inode;
+ int result;
+
+ memset(&hash, 0, sizeof(hash));
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ if (ima_template_hash_algo_allowed(event_data->iint->ima_hash->algo)) {
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+ goto out;
+ }
+
+ if ((const char *)event_data->filename == boot_aggregate_name) {
+ if (ima_tpm_chip) {
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_boot_aggregate(&hash.hdr);
+
+ /* algo can change depending on available PCR banks */
+ if (!result && hash.hdr.algo != HASH_ALGO_SHA1)
+ result = -EINVAL;
+
+ if (result < 0)
+ memset(&hash, 0, sizeof(hash));
+ }
+
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash_digest_size[HASH_ALGO_SHA1];
+ goto out;
+ }
+
+ if (!event_data->file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
+ inode = file_inode(event_data->file);
+ hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+ ima_hash_algo : HASH_ALGO_SHA1;
+ result = ima_calc_file_hash(event_data->file, &hash.hdr);
+ if (result) {
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ event_data->filename, "collect_data",
+ "failed", result, 0);
+ return result;
+ }
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash.hdr.length;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ HASH_ALGO__LAST, field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = HASH_ALGO_SHA1;
+ u32 cur_digestsize = 0;
+
+ if (event_data->violation) /* recording a violation. */
+ goto out;
+
+ cur_digest = event_data->iint->ima_hash->digest;
+ cur_digestsize = event_data->iint->ima_hash->length;
+
+ hash_algo = event_data->iint->ima_hash->algo;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ hash_algo, field_data);
+}
+
+static int ima_eventname_init_common(struct ima_event_data *event_data,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ const char *cur_filename = NULL;
+ u32 cur_filename_len = 0;
+
+ BUG_ON(event_data->filename == NULL && event_data->file == NULL);
+
+ if (event_data->filename) {
+ cur_filename = event_data->filename;
+ cur_filename_len = strlen(event_data->filename);
+
+ if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+ goto out;
+ }
+
+ if (event_data->file) {
+ cur_filename = event_data->file->f_path.dentry->d_name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+ * Truncate the filename if it is too long and
+ * the file descriptor is not available.
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+ return ima_write_template_field_data(cur_filename, cur_filename_len,
+ DATA_FMT_STRING, field_data);
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(event_data, field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(event_data, field_data, false);
+}
+
+/*
+ * ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data)
+{
+ struct evm_ima_xattr_data *xattr_value = event_data->xattr_value;
+
+ if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
+ return 0;
+
+ return ima_write_template_field_data(xattr_value, event_data->xattr_len,
+ DATA_FMT_HEX, field_data);
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 000000000..6a3d8b831
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.h
+ * Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+#define ENFORCE_FIELDS 0x00000001
+#define ENFORCE_BUFEND 0x00000002
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp,
+ int maxfields, struct ima_field_data *fields, int *curfields,
+ unsigned long *len_mask, int enforce_mask, char *bufname);
+int ima_eventdigest_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventname_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+int ima_eventsig_init(struct ima_event_data *event_data,
+ struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
new file mode 100644
index 000000000..e60473b13
--- /dev/null
+++ b/security/integrity/integrity.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2009-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/integrity.h>
+#include <crypto/sha.h>
+#include <linux/key.h>
+#include <linux/audit.h>
+
+/* iint action cache flags */
+#define IMA_MEASURE 0x00000001
+#define IMA_MEASURED 0x00000002
+#define IMA_APPRAISE 0x00000004
+#define IMA_APPRAISED 0x00000008
+/*#define IMA_COLLECT 0x00000010 do not use this flag */
+#define IMA_COLLECTED 0x00000020
+#define IMA_AUDIT 0x00000040
+#define IMA_AUDITED 0x00000080
+#define IMA_HASH 0x00000100
+#define IMA_HASHED 0x00000200
+
+/* iint cache flags */
+#define IMA_ACTION_FLAGS 0xff000000
+#define IMA_DIGSIG_REQUIRED 0x01000000
+#define IMA_PERMIT_DIRECTIO 0x02000000
+#define IMA_NEW_FILE 0x04000000
+#define EVM_IMMUTABLE_DIGSIG 0x08000000
+#define IMA_FAIL_UNVERIFIABLE_SIGS 0x10000000
+
+#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_HASH | IMA_APPRAISE_SUBMASK)
+#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+ IMA_HASHED | IMA_COLLECTED | \
+ IMA_APPRAISED_SUBMASK)
+
+/* iint subaction appraise cache flags */
+#define IMA_FILE_APPRAISE 0x00001000
+#define IMA_FILE_APPRAISED 0x00002000
+#define IMA_MMAP_APPRAISE 0x00004000
+#define IMA_MMAP_APPRAISED 0x00008000
+#define IMA_BPRM_APPRAISE 0x00010000
+#define IMA_BPRM_APPRAISED 0x00020000
+#define IMA_READ_APPRAISE 0x00040000
+#define IMA_READ_APPRAISED 0x00080000
+#define IMA_CREDS_APPRAISE 0x00100000
+#define IMA_CREDS_APPRAISED 0x00200000
+#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
+ IMA_BPRM_APPRAISE | IMA_READ_APPRAISE | \
+ IMA_CREDS_APPRAISE)
+#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
+ IMA_BPRM_APPRAISED | IMA_READ_APPRAISED | \
+ IMA_CREDS_APPRAISED)
+
+/* iint cache atomic_flags */
+#define IMA_CHANGE_XATTR 0
+#define IMA_UPDATE_XATTR 1
+#define IMA_CHANGE_ATTR 2
+#define IMA_DIGSIG 3
+#define IMA_MUST_MEASURE 4
+
+enum evm_ima_xattr_type {
+ IMA_XATTR_DIGEST = 0x01,
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
+ EVM_XATTR_PORTABLE_DIGSIG,
+ IMA_XATTR_LAST
+};
+
+struct evm_ima_xattr_data {
+ u8 type;
+ u8 digest[SHA1_DIGEST_SIZE];
+} __packed;
+
+#define IMA_MAX_DIGEST_SIZE 64
+
+struct ima_digest_data {
+ u8 algo;
+ u8 length;
+ union {
+ struct {
+ u8 unused;
+ u8 type;
+ } sha1;
+ struct {
+ u8 type;
+ u8 algo;
+ } ng;
+ u8 data[2];
+ } xattr;
+ u8 digest[0];
+} __packed;
+
+/*
+ * signature format v2 - for using with asymmetric keys
+ */
+struct signature_v2_hdr {
+ uint8_t type; /* xattr type */
+ uint8_t version; /* signature format version */
+ uint8_t hash_algo; /* Digest algorithm [enum hash_algo] */
+ __be32 keyid; /* IMA key identifier - not X509/PGP specific */
+ __be16 sig_size; /* signature size */
+ uint8_t sig[0]; /* signature payload */
+} __packed;
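+
+/*
+ * Illustrative sketch, not part of the original file: keyid and sig_size
+ * are stored big-endian, so a consumer of the header reads them through
+ * the byte-order helpers, for example:
+ *
+ *	const struct signature_v2_hdr *hdr = (const void *)sig;
+ *	u16 sig_size = be16_to_cpu(hdr->sig_size);
+ *	u32 keyid = be32_to_cpu(hdr->keyid);
+ */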
+
+/* integrity data associated with an inode */
+struct integrity_iint_cache {
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct mutex mutex; /* protects: version, flags, digest */
+ struct inode *inode; /* back pointer to inode in question */
+ u64 version; /* track inode changes */
+ unsigned long flags;
+ unsigned long measured_pcrs;
+ unsigned long atomic_flags;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+ enum integrity_status ima_read_status:4;
+ enum integrity_status ima_creds_status:4;
+ enum integrity_status evm_status:4;
+ struct ima_digest_data *ima_hash;
+};
+
+/* rbtree calls to lookup, insert, delete
+ * integrity data associated with an inode.
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
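+
+/*
+ * Illustrative sketch, not part of the original file: a typical lookup
+ * takes the per-inode mutex before inspecting the cached state, e.g.:
+ *
+ *	struct integrity_iint_cache *iint = integrity_iint_find(inode);
+ *
+ *	if (iint) {
+ *		mutex_lock(&iint->mutex);
+ *		... inspect iint->flags, iint->ima_hash ...
+ *		mutex_unlock(&iint->mutex);
+ *	}
+ */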
+
+int integrity_kernel_read(struct file *file, loff_t offset,
+ void *addr, unsigned long count);
+
+#define INTEGRITY_KEYRING_EVM 0
+#define INTEGRITY_KEYRING_IMA 1
+#define INTEGRITY_KEYRING_MODULE 2
+#define INTEGRITY_KEYRING_MAX 3
+
+extern struct dentry *integrity_dir;
+
+#ifdef CONFIG_INTEGRITY_SIGNATURE
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen);
+
+int __init integrity_init_keyring(const unsigned int id);
+int __init integrity_load_x509(const unsigned int id, const char *path);
+#else
+
+static inline int integrity_digsig_verify(const unsigned int id,
+ const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int integrity_init_keyring(const unsigned int id)
+{
+ return 0;
+}
+#endif /* CONFIG_INTEGRITY_SIGNATURE */
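+
+/*
+ * Illustrative sketch, not part of the original file: verification is
+ * performed against one of the INTEGRITY_KEYRING_* indices above, for
+ * example:
+ *
+ *	rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, sig, siglen,
+ *				     digest, digestlen);
+ */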
+
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen);
+#else
+static inline int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_IMA_LOAD_X509
+void __init ima_load_x509(void);
+#else
+static inline void ima_load_x509(void)
+{
+}
+#endif
+
+#ifdef CONFIG_EVM_LOAD_X509
+void __init evm_load_x509(void);
+#else
+static inline void evm_load_x509(void)
+{
+}
+#endif
+
+#ifdef CONFIG_INTEGRITY_AUDIT
+/* declarations */
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int info);
+
+static inline struct audit_buffer *
+integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
+{
+ return audit_log_start(ctx, gfp_mask, type);
+}
+
+#else
+static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname,
+ const char *op, const char *cause,
+ int result, int info)
+{
+}
+
+static inline struct audit_buffer *
+integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
+{
+ return NULL;
+}
+
+#endif
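+
+/*
+ * Illustrative sketch, not part of the original file: a caller reporting
+ * an appraisal failure might emit (the values shown are hypothetical):
+ *
+ *	integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ *			    "appraise_data", "invalid-hash", rc, 0);
+ */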
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c
new file mode 100644
index 000000000..d03fbdfc9
--- /dev/null
+++ b/security/integrity/integrity_audit.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: integrity_audit.c
+ * Audit calls for the integrity subsystem
+ */
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/audit.h>
+#include "integrity.h"
+
+static int integrity_audit_info;
+
+/* integrity_audit_setup - enable informational auditing messages */
+static int __init integrity_audit_setup(char *str)
+{
+ unsigned long audit;
+
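+	/* kstrtoul() returns 0 on success; on a parse error keep the default */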
+ if (!kstrtoul(str, 0, &audit))
+ integrity_audit_info = audit ? 1 : 0;
+ return 1;
+}
+__setup("integrity_audit=", integrity_audit_setup);
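+
+/*
+ * Illustrative note, not part of the original file: booting with
+ * "integrity_audit=1" on the kernel command line enables the
+ * informational messages that integrity_audit_msg() otherwise skips.
+ */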
+
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int audit_info)
+{
+ struct audit_buffer *ab;
+ char name[TASK_COMM_LEN];
+
+ if (!integrity_audit_info && audit_info == 1) /* Skip info messages */
+ return;
+
+ ab = audit_log_start(audit_context(), GFP_KERNEL, audit_msgno);
+ if (!ab)
+ return;
+ audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
+ task_pid_nr(current),
+ from_kuid(&init_user_ns, current_cred()->uid),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current));
+ audit_log_task_context(ab);
+ audit_log_format(ab, " op=%s cause=%s comm=", op, cause);
+ audit_log_untrustedstring(ab, get_task_comm(name, current));
+ if (fname) {
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, fname);
+ }
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
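+	/* result == 0 means success, so log res=1; any nonzero result logs res=0 */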
+ audit_log_format(ab, " res=%d", !result);
+ audit_log_end(ab);
+}