Diffstat (limited to 'security/integrity/ima')
-rw-r--r--  security/integrity/ima/Kconfig             |  288
-rw-r--r--  security/integrity/ima/Makefile            |   13
-rw-r--r--  security/integrity/ima/ima.h               |  325
-rw-r--r--  security/integrity/ima/ima_api.c           |  377
-rw-r--r--  security/integrity/ima/ima_appraise.c      |  466
-rw-r--r--  security/integrity/ima/ima_crypto.c        |  698
-rw-r--r--  security/integrity/ima/ima_fs.c            |  508
-rw-r--r--  security/integrity/ima/ima_init.c          |  135
-rw-r--r--  security/integrity/ima/ima_kexec.c         |  173
-rw-r--r--  security/integrity/ima/ima_main.c          |  565
-rw-r--r--  security/integrity/ima/ima_mok.c           |   54
-rw-r--r--  security/integrity/ima/ima_policy.c        | 1256
-rw-r--r--  security/integrity/ima/ima_queue.c         |  214
-rw-r--r--  security/integrity/ima/ima_template.c      |  439
-rw-r--r--  security/integrity/ima/ima_template_lib.c  |  408
-rw-r--r--  security/integrity/ima/ima_template_lib.h  |   45
16 files changed, 5964 insertions(+), 0 deletions(-)
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig new file mode 100644 index 000000000..5095b2e8f --- /dev/null +++ b/security/integrity/ima/Kconfig @@ -0,0 +1,288 @@ +# IBM Integrity Measurement Architecture +# +config IMA + bool "Integrity Measurement Architecture(IMA)" + select SECURITYFS + select CRYPTO + select CRYPTO_HMAC + select CRYPTO_SHA1 + select CRYPTO_HASH_INFO + select TCG_TPM if HAS_IOMEM && !UML + select TCG_TIS if TCG_TPM && X86 + select TCG_CRB if TCG_TPM && ACPI + select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES + select INTEGRITY_AUDIT if AUDIT + help + The Trusted Computing Group(TCG) runtime Integrity + Measurement Architecture(IMA) maintains a list of hash + values of executables and other sensitive system files, + as they are read or executed. If an attacker manages + to change the contents of an important system file + being measured, we can tell. + + If your system has a TPM chip, then IMA also maintains + an aggregate integrity value over this list inside the + TPM hardware, so that the TPM can prove to a third party + whether or not critical system files have been modified. + Read <http://www.usenix.org/events/sec04/tech/sailer.html> + to learn more about IMA. + If unsure, say N. + +config IMA_KEXEC + bool "Enable carrying the IMA measurement list across a soft boot" + depends on IMA && TCG_TPM && HAVE_IMA_KEXEC + default n + help + TPM PCRs are only reset on a hard reboot. In order to validate + a TPM's quote after a soft boot, the IMA measurement list of the + running kernel must be saved and restored on boot. + + Depending on the IMA policy, the measurement list can grow to + be very large. + +config IMA_MEASURE_PCR_IDX + int + depends on IMA + range 8 14 + default 10 + help + IMA_MEASURE_PCR_IDX determines the TPM PCR register index + that IMA uses to maintain the integrity aggregate of the + measurement list. If unsure, use the default 10. + +config IMA_LSM_RULES + bool + depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK) + default y + help + Disabling this option will disregard LSM based policy rules. + +choice + prompt "Default template" + default IMA_NG_TEMPLATE + depends on IMA + help + Select the default IMA measurement template. + + The original 'ima' measurement list template contains a + hash, defined as 20 bytes, and a null terminated pathname, + limited to 255 characters. The 'ima-ng' measurement list + template permits both larger hash digests and longer + pathnames. + + config IMA_TEMPLATE + bool "ima" + config IMA_NG_TEMPLATE + bool "ima-ng (default)" + config IMA_SIG_TEMPLATE + bool "ima-sig" +endchoice + +config IMA_DEFAULT_TEMPLATE + string + depends on IMA + default "ima" if IMA_TEMPLATE + default "ima-ng" if IMA_NG_TEMPLATE + default "ima-sig" if IMA_SIG_TEMPLATE + +choice + prompt "Default integrity hash algorithm" + default IMA_DEFAULT_HASH_SHA1 + depends on IMA + help + Select the default hash algorithm used for the measurement + list, integrity appraisal and audit log. The compiled default + hash algorithm can be overwritten using the kernel command + line 'ima_hash=' option. 
+ + config IMA_DEFAULT_HASH_SHA1 + bool "SHA1 (default)" + depends on CRYPTO_SHA1=y + + config IMA_DEFAULT_HASH_SHA256 + bool "SHA256" + depends on CRYPTO_SHA256=y && !IMA_TEMPLATE + + config IMA_DEFAULT_HASH_SHA512 + bool "SHA512" + depends on CRYPTO_SHA512=y && !IMA_TEMPLATE + + config IMA_DEFAULT_HASH_WP512 + bool "WP512" + depends on CRYPTO_WP512=y && !IMA_TEMPLATE +endchoice + +config IMA_DEFAULT_HASH + string + depends on IMA + default "sha1" if IMA_DEFAULT_HASH_SHA1 + default "sha256" if IMA_DEFAULT_HASH_SHA256 + default "sha512" if IMA_DEFAULT_HASH_SHA512 + default "wp512" if IMA_DEFAULT_HASH_WP512 + +config IMA_WRITE_POLICY + bool "Enable multiple writes to the IMA policy" + depends on IMA + default n + help + IMA policy can now be updated multiple times. The new rules get + appended to the original policy. Have in mind that the rules are + scanned in FIFO order so be careful when you design and add new ones. + + If unsure, say N. + +config IMA_READ_POLICY + bool "Enable reading back the current IMA policy" + depends on IMA + default y if IMA_WRITE_POLICY + default n if !IMA_WRITE_POLICY + help + It is often useful to be able to read back the IMA policy. It is + even more important after introducing CONFIG_IMA_WRITE_POLICY. + This option allows the root user to see the current policy rules. + +config IMA_APPRAISE + bool "Appraise integrity measurements" + depends on IMA + default n + help + This option enables local measurement integrity appraisal. + It requires the system to be labeled with a security extended + attribute containing the file hash measurement. To protect + the security extended attributes from offline attack, enable + and configure EVM. + + For more information on integrity appraisal refer to: + <http://linux-ima.sourceforge.net> + If unsure, say N. + +config IMA_APPRAISE_BUILD_POLICY + bool "IMA build time configured policy rules" + depends on IMA_APPRAISE && INTEGRITY_ASYMMETRIC_KEYS + default n + help + This option defines an IMA appraisal policy at build time, which + is enforced at run time without having to specify a builtin + policy name on the boot command line. The build time appraisal + policy rules persist after loading a custom policy. + + Depending on the rules configured, this policy may require kernel + modules, firmware, the kexec kernel image, and/or the IMA policy + to be signed. Unsigned files might prevent the system from + booting or applications from working properly. + +config IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS + bool "Appraise firmware signatures" + depends on IMA_APPRAISE_BUILD_POLICY + default n + help + This option defines a policy requiring all firmware to be signed, + including the regulatory.db. If both this option and + CFG80211_REQUIRE_SIGNED_REGDB are enabled, then both signature + verification methods are necessary. + +config IMA_APPRAISE_REQUIRE_KEXEC_SIGS + bool "Appraise kexec kernel image signatures" + depends on IMA_APPRAISE_BUILD_POLICY + default n + help + Enabling this rule will require all kexec'ed kernel images to + be signed and verified by a public key on the trusted IMA + keyring. + + Kernel image signatures can not be verified by the original + kexec_load syscall. Enabling this rule will prevent its + usage. + +config IMA_APPRAISE_REQUIRE_MODULE_SIGS + bool "Appraise kernel modules signatures" + depends on IMA_APPRAISE_BUILD_POLICY + default n + help + Enabling this rule will require all kernel modules to be signed + and verified by a public key on the trusted IMA keyring. 
+ + Kernel module signatures can only be verified by IMA-appraisal, + via the finit_module syscall. Enabling this rule will prevent + the usage of the init_module syscall. + +config IMA_APPRAISE_REQUIRE_POLICY_SIGS + bool "Appraise IMA policy signature" + depends on IMA_APPRAISE_BUILD_POLICY + default n + help + Enabling this rule will require the IMA policy to be signed and + and verified by a key on the trusted IMA keyring. + +config IMA_APPRAISE_BOOTPARAM + bool "ima_appraise boot parameter" + depends on IMA_APPRAISE + default y + help + This option enables the different "ima_appraise=" modes + (eg. fix, log) from the boot command line. + +config IMA_TRUSTED_KEYRING + bool "Require all keys on the .ima keyring be signed (deprecated)" + depends on IMA_APPRAISE && SYSTEM_TRUSTED_KEYRING + depends on INTEGRITY_ASYMMETRIC_KEYS + select INTEGRITY_TRUSTED_KEYRING + default y + help + This option requires that all keys added to the .ima + keyring be signed by a key on the system trusted keyring. + + This option is deprecated in favor of INTEGRITY_TRUSTED_KEYRING + +config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY + bool "Permit keys validly signed by a built-in or secondary CA cert (EXPERIMENTAL)" + depends on SYSTEM_TRUSTED_KEYRING + depends on SECONDARY_TRUSTED_KEYRING + depends on INTEGRITY_ASYMMETRIC_KEYS + select INTEGRITY_TRUSTED_KEYRING + default n + help + Keys may be added to the IMA or IMA blacklist keyrings, if the + key is validly signed by a CA cert in the system built-in or + secondary trusted keyrings. + + Intermediate keys between those the kernel has compiled in and the + IMA keys to be added may be added to the system secondary keyring, + provided they are validly signed by a key already resident in the + built-in or secondary trusted keyrings. + +config IMA_BLACKLIST_KEYRING + bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)" + depends on SYSTEM_TRUSTED_KEYRING + depends on IMA_TRUSTED_KEYRING + default n + help + This option creates an IMA blacklist keyring, which contains all + revoked IMA keys. It is consulted before any other keyring. If + the search is successful the requested operation is rejected and + an error is returned to the caller. + +config IMA_LOAD_X509 + bool "Load X509 certificate onto the '.ima' trusted keyring" + depends on IMA_TRUSTED_KEYRING + default n + help + File signature verification is based on the public keys + loaded on the .ima trusted keyring. These public keys are + X509 certificates signed by a trusted key on the + .system keyring. This option enables X509 certificate + loading from the kernel onto the '.ima' trusted keyring. + +config IMA_X509_PATH + string "IMA X509 certificate path" + depends on IMA_LOAD_X509 + default "/etc/keys/x509_ima.der" + help + This option defines IMA X509 certificate path. + +config IMA_APPRAISE_SIGNED_INIT + bool "Require signed user-space initialization" + depends on IMA_LOAD_X509 + default n + help + This option requires user-space init to be signed. diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile new file mode 100644 index 000000000..d921dc4f9 --- /dev/null +++ b/security/integrity/ima/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for building Trusted Computing Group's(TCG) runtime Integrity +# Measurement Architecture(IMA). 
+# + +obj-$(CONFIG_IMA) += ima.o + +ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \ + ima_policy.o ima_template.o ima_template_lib.o +ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o +ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o +obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h new file mode 100644 index 000000000..d12b07eb3 --- /dev/null +++ b/security/integrity/ima/ima.h @@ -0,0 +1,325 @@ +/* + * Copyright (C) 2005,2006,2007,2008 IBM Corporation + * + * Authors: + * Reiner Sailer <sailer@watson.ibm.com> + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima.h + * internal Integrity Measurement Architecture (IMA) definitions + */ + +#ifndef __LINUX_IMA_H +#define __LINUX_IMA_H + +#include <linux/types.h> +#include <linux/crypto.h> +#include <linux/fs.h> +#include <linux/security.h> +#include <linux/hash.h> +#include <linux/tpm.h> +#include <linux/audit.h> +#include <crypto/hash_info.h> + +#include "../integrity.h" + +#ifdef CONFIG_HAVE_IMA_KEXEC +#include <asm/ima.h> +#endif + +enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN, + IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII }; +enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; + +/* digest size for IMA, fits SHA1 or MD5 */ +#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE +#define IMA_EVENT_NAME_LEN_MAX 255 + +#define IMA_HASH_BITS 10 +#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) + +#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 +#define IMA_TEMPLATE_NUM_FIELDS_MAX 15 + +#define IMA_TEMPLATE_IMA_NAME "ima" +#define IMA_TEMPLATE_IMA_FMT "d|n" + +/* current content of the policy */ +extern int ima_policy_flag; + +/* set during initialization */ +extern int ima_hash_algo; +extern int ima_appraise; +extern struct tpm_chip *ima_tpm_chip; +extern const char boot_aggregate_name[]; + +/* IMA event related data */ +struct ima_event_data { + struct integrity_iint_cache *iint; + struct file *file; + const unsigned char *filename; + struct evm_ima_xattr_data *xattr_value; + int xattr_len; + const char *violation; +}; + +/* IMA template field data definition */ +struct ima_field_data { + u8 *data; + u32 len; +}; + +/* IMA template field definition */ +struct ima_template_field { + const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN]; + int (*field_init)(struct ima_event_data *event_data, + struct ima_field_data *field_data); + void (*field_show)(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +}; + +/* IMA template descriptor definition */ +struct ima_template_desc { + struct list_head list; + char *name; + char *fmt; + int num_fields; + struct ima_template_field **fields; +}; + +struct ima_template_entry { + int pcr; + u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ + struct ima_template_desc *template_desc; /* template descriptor */ + u32 template_data_len; + struct ima_field_data template_data[0]; /* template related data */ +}; + +struct ima_queue_entry { + struct hlist_node hnext; /* place in hash collision list */ + struct list_head later; /* place in ima_measurements list */ + struct ima_template_entry *entry; +}; +extern struct list_head ima_measurements; /* list of all measurements */ + +/* Some details preceding the binary serialized measurement list */ +struct ima_kexec_hdr { + u16 
version; + u16 _reserved0; + u32 _reserved1; + u64 buffer_size; + u64 count; +}; + +#ifdef CONFIG_HAVE_IMA_KEXEC +void ima_load_kexec_buffer(void); +#else +static inline void ima_load_kexec_buffer(void) {} +#endif /* CONFIG_HAVE_IMA_KEXEC */ + +/* + * The default binary_runtime_measurements list format is defined as the + * platform native format. The canonical format is defined as little-endian. + */ +extern bool ima_canonical_fmt; + +/* Internal IMA function definitions */ +int ima_init(void); +int ima_fs_init(void); +int ima_add_template_entry(struct ima_template_entry *entry, int violation, + const char *op, struct inode *inode, + const unsigned char *filename); +int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash); +int ima_calc_buffer_hash(const void *buf, loff_t len, + struct ima_digest_data *hash); +int ima_calc_field_array_hash(struct ima_field_data *field_data, + struct ima_template_desc *desc, int num_fields, + struct ima_digest_data *hash); +int ima_calc_boot_aggregate(struct ima_digest_data *hash); +void ima_add_violation(struct file *file, const unsigned char *filename, + struct integrity_iint_cache *iint, + const char *op, const char *cause); +int ima_init_crypto(void); +void ima_putc(struct seq_file *m, void *data, int datalen); +void ima_print_digest(struct seq_file *m, u8 *digest, u32 size); +struct ima_template_desc *ima_template_desc_current(void); +int ima_restore_measurement_entry(struct ima_template_entry *entry); +int ima_restore_measurement_list(loff_t bufsize, void *buf); +int ima_measurements_show(struct seq_file *m, void *v); +unsigned long ima_get_binary_runtime_size(void); +int ima_init_template(void); +void ima_init_template_list(void); + +/* + * used to protect h_table and sha_table + */ +extern spinlock_t ima_queue_lock; + +struct ima_h_table { + atomic_long_t len; /* number of stored measurements in the list */ + atomic_long_t violations; + struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE]; +}; +extern struct ima_h_table ima_htable; + +static inline unsigned int ima_hash_key(u8 *digest) +{ + /* there is no point in taking a hash of part of a digest */ + return (digest[0] | digest[1] << 8) % IMA_MEASURE_HTABLE_SIZE; +} + +#define __ima_hooks(hook) \ + hook(NONE) \ + hook(FILE_CHECK) \ + hook(MMAP_CHECK) \ + hook(BPRM_CHECK) \ + hook(CREDS_CHECK) \ + hook(POST_SETATTR) \ + hook(MODULE_CHECK) \ + hook(FIRMWARE_CHECK) \ + hook(KEXEC_KERNEL_CHECK) \ + hook(KEXEC_INITRAMFS_CHECK) \ + hook(POLICY_CHECK) \ + hook(MAX_CHECK) +#define __ima_hook_enumify(ENUM) ENUM, + +enum ima_hooks { + __ima_hooks(__ima_hook_enumify) +}; + +/* LIM API function definitions */ +int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid, + int mask, enum ima_hooks func, int *pcr); +int ima_must_measure(struct inode *inode, int mask, enum ima_hooks func); +int ima_collect_measurement(struct integrity_iint_cache *iint, + struct file *file, void *buf, loff_t size, + enum hash_algo algo); +void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, int pcr); +void ima_audit_measurement(struct integrity_iint_cache *iint, + const unsigned char *filename); +int ima_alloc_init_template(struct ima_event_data *event_data, + struct ima_template_entry **entry); +int ima_store_template(struct ima_template_entry *entry, int violation, + struct inode *inode, + const unsigned char *filename, int pcr); +void ima_free_template_entry(struct ima_template_entry 
*entry); +const char *ima_d_path(const struct path *path, char **pathbuf, char *filename); + +/* IMA policy related functions */ +int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid, + enum ima_hooks func, int mask, int flags, int *pcr); +void ima_init_policy(void); +void ima_update_policy(void); +void ima_update_policy_flag(void); +ssize_t ima_parse_add_rule(char *); +void ima_delete_rules(void); +int ima_check_policy(void); +void *ima_policy_start(struct seq_file *m, loff_t *pos); +void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos); +void ima_policy_stop(struct seq_file *m, void *v); +int ima_policy_show(struct seq_file *m, void *v); + +/* Appraise integrity measurements */ +#define IMA_APPRAISE_ENFORCE 0x01 +#define IMA_APPRAISE_FIX 0x02 +#define IMA_APPRAISE_LOG 0x04 +#define IMA_APPRAISE_MODULES 0x08 +#define IMA_APPRAISE_FIRMWARE 0x10 +#define IMA_APPRAISE_POLICY 0x20 +#define IMA_APPRAISE_KEXEC 0x40 + +#ifdef CONFIG_IMA_APPRAISE +int ima_appraise_measurement(enum ima_hooks func, + struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len); +int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func); +void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file); +enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, + enum ima_hooks func); +enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, + int xattr_len); +int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value); + +#else +static inline int ima_appraise_measurement(enum ima_hooks func, + struct integrity_iint_cache *iint, + struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) +{ + return INTEGRITY_UNKNOWN; +} + +static inline int ima_must_appraise(struct inode *inode, int mask, + enum ima_hooks func) +{ + return 0; +} + +static inline void ima_update_xattr(struct integrity_iint_cache *iint, + struct file *file) +{ +} + +static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache + *iint, + enum ima_hooks func) +{ + return INTEGRITY_UNKNOWN; +} + +static inline enum hash_algo +ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len) +{ + return ima_hash_algo; +} + +static inline int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value) +{ + return 0; +} + +#endif /* CONFIG_IMA_APPRAISE */ + +/* LSM based policy rules require audit */ +#ifdef CONFIG_IMA_LSM_RULES + +#define security_filter_rule_init security_audit_rule_init +#define security_filter_rule_match security_audit_rule_match + +#else + +static inline int security_filter_rule_init(u32 field, u32 op, char *rulestr, + void **lsmrule) +{ + return -EINVAL; +} + +static inline int security_filter_rule_match(u32 secid, u32 field, u32 op, + void *lsmrule, + struct audit_context *actx) +{ + return -EINVAL; +} +#endif /* CONFIG_IMA_LSM_RULES */ + +#ifdef CONFIG_IMA_READ_POLICY +#define POLICY_FILE_FLAGS (S_IWUSR | S_IRUSR) +#else +#define POLICY_FILE_FLAGS S_IWUSR +#endif /* CONFIG_IMA_READ_POLICY */ + +#endif /* __LINUX_IMA_H */ diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c new file mode 100644 index 000000000..a02c5acfd --- /dev/null +++ b/security/integrity/ima/ima_api.c @@ -0,0 +1,377 @@ +/* + * Copyright (C) 2008 IBM Corporation + * + * Author: Mimi Zohar <zohar@us.ibm.com> + * + * This program 
is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_api.c + * Implements must_appraise_or_measure, collect_measurement, + * appraise_measurement, store_measurement and store_template. + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/xattr.h> +#include <linux/evm.h> +#include <linux/iversion.h> + +#include "ima.h" + +/* + * ima_free_template_entry - free an existing template entry + */ +void ima_free_template_entry(struct ima_template_entry *entry) +{ + int i; + + for (i = 0; i < entry->template_desc->num_fields; i++) + kfree(entry->template_data[i].data); + + kfree(entry); +} + +/* + * ima_alloc_init_template - create and initialize a new template entry + */ +int ima_alloc_init_template(struct ima_event_data *event_data, + struct ima_template_entry **entry) +{ + struct ima_template_desc *template_desc = ima_template_desc_current(); + int i, result = 0; + + *entry = kzalloc(sizeof(**entry) + template_desc->num_fields * + sizeof(struct ima_field_data), GFP_NOFS); + if (!*entry) + return -ENOMEM; + + (*entry)->template_desc = template_desc; + for (i = 0; i < template_desc->num_fields; i++) { + struct ima_template_field *field = template_desc->fields[i]; + u32 len; + + result = field->field_init(event_data, + &((*entry)->template_data[i])); + if (result != 0) + goto out; + + len = (*entry)->template_data[i].len; + (*entry)->template_data_len += sizeof(len); + (*entry)->template_data_len += len; + } + return 0; +out: + ima_free_template_entry(*entry); + *entry = NULL; + return result; +} + +/* + * ima_store_template - store ima template measurements + * + * Calculate the hash of a template entry, add the template entry + * to an ordered list of measurement entries maintained inside the kernel, + * and also update the aggregate integrity value (maintained inside the + * configured TPM PCR) over the hashes of the current list of measurement + * entries. + * + * Applications retrieve the current kernel-held measurement list through + * the securityfs entries in /sys/kernel/security/ima. The signed aggregate + * TPM PCR (called quote) can be retrieved using a TPM user space library + * and is used to validate the measurement list. + * + * Returns 0 on success, error code otherwise + */ +int ima_store_template(struct ima_template_entry *entry, + int violation, struct inode *inode, + const unsigned char *filename, int pcr) +{ + static const char op[] = "add_template_measure"; + static const char audit_cause[] = "hashing_error"; + char *template_name = entry->template_desc->name; + int result; + struct { + struct ima_digest_data hdr; + char digest[TPM_DIGEST_SIZE]; + } hash; + + if (!violation) { + int num_fields = entry->template_desc->num_fields; + + /* this function uses default algo */ + hash.hdr.algo = HASH_ALGO_SHA1; + result = ima_calc_field_array_hash(&entry->template_data[0], + entry->template_desc, + num_fields, &hash.hdr); + if (result < 0) { + integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, + template_name, op, + audit_cause, result, 0); + return result; + } + memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); + } + entry->pcr = pcr; + result = ima_add_template_entry(entry, violation, op, inode, filename); + return result; +} + +/* + * ima_add_violation - add violation to measurement list. 
+ * + * Violations are flagged in the measurement list with zero hash values. + * By extending the PCR with 0xFF's instead of with zeroes, the PCR + * value is invalidated. + */ +void ima_add_violation(struct file *file, const unsigned char *filename, + struct integrity_iint_cache *iint, + const char *op, const char *cause) +{ + struct ima_template_entry *entry; + struct inode *inode = file_inode(file); + struct ima_event_data event_data = {iint, file, filename, NULL, 0, + cause}; + int violation = 1; + int result; + + /* can overflow, only indicator */ + atomic_long_inc(&ima_htable.violations); + + result = ima_alloc_init_template(&event_data, &entry); + if (result < 0) { + result = -ENOMEM; + goto err_out; + } + result = ima_store_template(entry, violation, inode, + filename, CONFIG_IMA_MEASURE_PCR_IDX); + if (result < 0) + ima_free_template_entry(entry); +err_out: + integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, + op, cause, result, 0); +} + +/** + * ima_get_action - appraise & measure decision based on policy. + * @inode: pointer to inode to measure + * @cred: pointer to credentials structure to validate + * @secid: secid of the task being validated + * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXEC, + * MAY_APPEND) + * @func: caller identifier + * @pcr: pointer filled in if matched measure policy sets pcr= + * + * The policy is defined in terms of keypairs: + * subj=, obj=, type=, func=, mask=, fsmagic= + * subj,obj, and type: are LSM specific. + * func: FILE_CHECK | BPRM_CHECK | CREDS_CHECK | MMAP_CHECK | MODULE_CHECK + * mask: contains the permission mask + * fsmagic: hex value + * + * Returns IMA_MEASURE, IMA_APPRAISE mask. + * + */ +int ima_get_action(struct inode *inode, const struct cred *cred, u32 secid, + int mask, enum ima_hooks func, int *pcr) +{ + int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE | IMA_HASH; + + flags &= ima_policy_flag; + + return ima_match_policy(inode, cred, secid, func, mask, flags, pcr); +} + +/* + * ima_collect_measurement - collect file measurement + * + * Calculate the file hash, if it doesn't already exist, + * storing the measurement and i_version in the iint. + * + * Must be called with iint->mutex held. + * + * Return 0 on success, error code otherwise + */ +int ima_collect_measurement(struct integrity_iint_cache *iint, + struct file *file, void *buf, loff_t size, + enum hash_algo algo) +{ + const char *audit_cause = "failed"; + struct inode *inode = file_inode(file); + const char *filename = file->f_path.dentry->d_name.name; + int result = 0; + int length; + void *tmpbuf; + u64 i_version; + struct { + struct ima_digest_data hdr; + char digest[IMA_MAX_DIGEST_SIZE]; + } hash; + + if (iint->flags & IMA_COLLECTED) + goto out; + + /* + * Dectecting file change is based on i_version. On filesystems + * which do not support i_version, support is limited to an initial + * measurement/appraisal/audit. 
+ */ + i_version = inode_query_iversion(inode); + hash.hdr.algo = algo; + + /* Initialize hash digest to 0's in case of failure */ + memset(&hash.digest, 0, sizeof(hash.digest)); + + if (buf) + result = ima_calc_buffer_hash(buf, size, &hash.hdr); + else + result = ima_calc_file_hash(file, &hash.hdr); + + if (result && result != -EBADF && result != -EINVAL) + goto out; + + length = sizeof(hash.hdr) + hash.hdr.length; + tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS); + if (!tmpbuf) { + result = -ENOMEM; + goto out; + } + + iint->ima_hash = tmpbuf; + memcpy(iint->ima_hash, &hash, length); + iint->version = i_version; + + /* Possibly temporary failure due to type of read (eg. O_DIRECT) */ + if (!result) + iint->flags |= IMA_COLLECTED; +out: + if (result) { + if (file->f_flags & O_DIRECT) + audit_cause = "failed(directio)"; + + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, + filename, "collect_data", audit_cause, + result, 0); + } + return result; +} + +/* + * ima_store_measurement - store file measurement + * + * Create an "ima" template and then store the template by calling + * ima_store_template. + * + * We only get here if the inode has not already been measured, + * but the measurement could already exist: + * - multiple copies of the same file on either the same or + * different filesystems. + * - the inode was previously flushed as well as the iint info, + * containing the hashing info. + * + * Must be called with iint->mutex held. + */ +void ima_store_measurement(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, int pcr) +{ + static const char op[] = "add_template_measure"; + static const char audit_cause[] = "ENOMEM"; + int result = -ENOMEM; + struct inode *inode = file_inode(file); + struct ima_template_entry *entry; + struct ima_event_data event_data = {iint, file, filename, xattr_value, + xattr_len, NULL}; + int violation = 0; + + if (iint->measured_pcrs & (0x1 << pcr)) + return; + + result = ima_alloc_init_template(&event_data, &entry); + if (result < 0) { + integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, + op, audit_cause, result, 0); + return; + } + + result = ima_store_template(entry, violation, inode, filename, pcr); + if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) { + iint->flags |= IMA_MEASURED; + iint->measured_pcrs |= (0x1 << pcr); + } + if (result < 0) + ima_free_template_entry(entry); +} + +void ima_audit_measurement(struct integrity_iint_cache *iint, + const unsigned char *filename) +{ + struct audit_buffer *ab; + char *hash; + const char *algo_name = hash_algo_name[iint->ima_hash->algo]; + int i; + + if (iint->flags & IMA_AUDITED) + return; + + hash = kzalloc((iint->ima_hash->length * 2) + 1, GFP_KERNEL); + if (!hash) + return; + + for (i = 0; i < iint->ima_hash->length; i++) + hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]); + hash[i * 2] = '\0'; + + ab = audit_log_start(audit_context(), GFP_KERNEL, + AUDIT_INTEGRITY_RULE); + if (!ab) + goto out; + + audit_log_format(ab, "file="); + audit_log_untrustedstring(ab, filename); + audit_log_format(ab, " hash=\"%s:%s\"", algo_name, hash); + + audit_log_task_info(ab, current); + audit_log_end(ab); + + iint->flags |= IMA_AUDITED; +out: + kfree(hash); + return; +} + +/* + * ima_d_path - return a pointer to the full pathname + * + * Attempt to return a pointer to the full pathname for use in the + * IMA measurement list, IMA audit records, and auditing logs. 
+ * + * On failure, return a pointer to a copy of the filename, not dname. + * Returning a pointer to dname, could result in using the pointer + * after the memory has been freed. + */ +const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf) +{ + char *pathname = NULL; + + *pathbuf = __getname(); + if (*pathbuf) { + pathname = d_absolute_path(path, *pathbuf, PATH_MAX); + if (IS_ERR(pathname)) { + __putname(*pathbuf); + *pathbuf = NULL; + pathname = NULL; + } + } + + if (!pathname) { + strlcpy(namebuf, path->dentry->d_name.name, NAME_MAX); + pathname = namebuf; + } + + return pathname; +} diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c new file mode 100644 index 000000000..deec1804a --- /dev/null +++ b/security/integrity/ima/ima_appraise.c @@ -0,0 +1,466 @@ +/* + * Copyright (C) 2011 IBM Corporation + * + * Author: + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ +#include <linux/module.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/xattr.h> +#include <linux/magic.h> +#include <linux/ima.h> +#include <linux/evm.h> + +#include "ima.h" + +static int __init default_appraise_setup(char *str) +{ +#ifdef CONFIG_IMA_APPRAISE_BOOTPARAM + if (strncmp(str, "off", 3) == 0) + ima_appraise = 0; + else if (strncmp(str, "log", 3) == 0) + ima_appraise = IMA_APPRAISE_LOG; + else if (strncmp(str, "fix", 3) == 0) + ima_appraise = IMA_APPRAISE_FIX; +#endif + return 1; +} + +__setup("ima_appraise=", default_appraise_setup); + +/* + * is_ima_appraise_enabled - return appraise status + * + * Only return enabled, if not in ima_appraise="fix" or "log" modes. + */ +bool is_ima_appraise_enabled(void) +{ + return ima_appraise & IMA_APPRAISE_ENFORCE; +} + +/* + * ima_must_appraise - set appraise flag + * + * Return 1 to appraise or hash + */ +int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func) +{ + u32 secid; + + if (!ima_appraise) + return 0; + + security_task_getsecid(current, &secid); + return ima_match_policy(inode, current_cred(), secid, func, mask, + IMA_APPRAISE | IMA_HASH, NULL); +} + +static int ima_fix_xattr(struct dentry *dentry, + struct integrity_iint_cache *iint) +{ + int rc, offset; + u8 algo = iint->ima_hash->algo; + + if (algo <= HASH_ALGO_SHA1) { + offset = 1; + iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST; + } else { + offset = 0; + iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG; + iint->ima_hash->xattr.ng.algo = algo; + } + rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA, + &iint->ima_hash->xattr.data[offset], + (sizeof(iint->ima_hash->xattr) - offset) + + iint->ima_hash->length, 0); + return rc; +} + +/* Return specific func appraised cached result */ +enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, + enum ima_hooks func) +{ + switch (func) { + case MMAP_CHECK: + return iint->ima_mmap_status; + case BPRM_CHECK: + return iint->ima_bprm_status; + case CREDS_CHECK: + return iint->ima_creds_status; + case FILE_CHECK: + case POST_SETATTR: + return iint->ima_file_status; + case MODULE_CHECK ... 
MAX_CHECK - 1: + default: + return iint->ima_read_status; + } +} + +static void ima_set_cache_status(struct integrity_iint_cache *iint, + enum ima_hooks func, + enum integrity_status status) +{ + switch (func) { + case MMAP_CHECK: + iint->ima_mmap_status = status; + break; + case BPRM_CHECK: + iint->ima_bprm_status = status; + break; + case CREDS_CHECK: + iint->ima_creds_status = status; + case FILE_CHECK: + case POST_SETATTR: + iint->ima_file_status = status; + break; + case MODULE_CHECK ... MAX_CHECK - 1: + default: + iint->ima_read_status = status; + break; + } +} + +static void ima_cache_flags(struct integrity_iint_cache *iint, + enum ima_hooks func) +{ + switch (func) { + case MMAP_CHECK: + iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED); + break; + case BPRM_CHECK: + iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED); + break; + case CREDS_CHECK: + iint->flags |= (IMA_CREDS_APPRAISED | IMA_APPRAISED); + break; + case FILE_CHECK: + case POST_SETATTR: + iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED); + break; + case MODULE_CHECK ... MAX_CHECK - 1: + default: + iint->flags |= (IMA_READ_APPRAISED | IMA_APPRAISED); + break; + } +} + +enum hash_algo ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, + int xattr_len) +{ + struct signature_v2_hdr *sig; + enum hash_algo ret; + + if (!xattr_value || xattr_len < 2) + /* return default hash algo */ + return ima_hash_algo; + + switch (xattr_value->type) { + case EVM_IMA_XATTR_DIGSIG: + sig = (typeof(sig))xattr_value; + if (sig->version != 2 || xattr_len <= sizeof(*sig)) + return ima_hash_algo; + return sig->hash_algo; + break; + case IMA_XATTR_DIGEST_NG: + ret = xattr_value->digest[0]; + if (ret < HASH_ALGO__LAST) + return ret; + break; + case IMA_XATTR_DIGEST: + /* this is for backward compatibility */ + if (xattr_len == 21) { + unsigned int zero = 0; + if (!memcmp(&xattr_value->digest[16], &zero, 4)) + return HASH_ALGO_MD5; + else + return HASH_ALGO_SHA1; + } else if (xattr_len == 17) + return HASH_ALGO_MD5; + break; + } + + /* return default hash algo */ + return ima_hash_algo; +} + +int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value) +{ + ssize_t ret; + + ret = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value, + 0, GFP_NOFS); + if (ret == -EOPNOTSUPP) + ret = 0; + return ret; +} + +/* + * ima_appraise_measurement - appraise file measurement + * + * Call evm_verifyxattr() to verify the integrity of 'security.ima'. + * Assuming success, compare the xattr hash with the collected measurement. + * + * Return 0 on success, error code otherwise + */ +int ima_appraise_measurement(enum ima_hooks func, + struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) +{ + static const char op[] = "appraise_data"; + const char *cause = "unknown"; + struct dentry *dentry = file_dentry(file); + struct inode *inode = d_backing_inode(dentry); + enum integrity_status status = INTEGRITY_UNKNOWN; + int rc = xattr_len, hash_start = 0; + + if (!(inode->i_opflags & IOP_XATTR)) + return INTEGRITY_UNKNOWN; + + if (rc <= 0) { + if (rc && rc != -ENODATA) + goto out; + + cause = iint->flags & IMA_DIGSIG_REQUIRED ? 
+ "IMA-signature-required" : "missing-hash"; + status = INTEGRITY_NOLABEL; + if (file->f_mode & FMODE_CREATED) + iint->flags |= IMA_NEW_FILE; + if ((iint->flags & IMA_NEW_FILE) && + (!(iint->flags & IMA_DIGSIG_REQUIRED) || + (inode->i_size == 0))) + status = INTEGRITY_PASS; + goto out; + } + + status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint); + switch (status) { + case INTEGRITY_PASS: + case INTEGRITY_PASS_IMMUTABLE: + case INTEGRITY_UNKNOWN: + break; + case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */ + case INTEGRITY_NOLABEL: /* No security.evm xattr. */ + cause = "missing-HMAC"; + goto out; + case INTEGRITY_FAIL: /* Invalid HMAC/signature. */ + cause = "invalid-HMAC"; + goto out; + default: + WARN_ONCE(true, "Unexpected integrity status %d\n", status); + } + + switch (xattr_value->type) { + case IMA_XATTR_DIGEST_NG: + /* first byte contains algorithm id */ + hash_start = 1; + /* fall through */ + case IMA_XATTR_DIGEST: + if (iint->flags & IMA_DIGSIG_REQUIRED) { + cause = "IMA-signature-required"; + status = INTEGRITY_FAIL; + break; + } + clear_bit(IMA_DIGSIG, &iint->atomic_flags); + if (xattr_len - sizeof(xattr_value->type) - hash_start >= + iint->ima_hash->length) + /* xattr length may be longer. md5 hash in previous + version occupied 20 bytes in xattr, instead of 16 + */ + rc = memcmp(&xattr_value->digest[hash_start], + iint->ima_hash->digest, + iint->ima_hash->length); + else + rc = -EINVAL; + if (rc) { + cause = "invalid-hash"; + status = INTEGRITY_FAIL; + break; + } + status = INTEGRITY_PASS; + break; + case EVM_IMA_XATTR_DIGSIG: + set_bit(IMA_DIGSIG, &iint->atomic_flags); + rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, + (const char *)xattr_value, rc, + iint->ima_hash->digest, + iint->ima_hash->length); + if (rc == -EOPNOTSUPP) { + status = INTEGRITY_UNKNOWN; + } else if (rc) { + cause = "invalid-signature"; + status = INTEGRITY_FAIL; + } else { + status = INTEGRITY_PASS; + } + break; + default: + status = INTEGRITY_UNKNOWN; + cause = "unknown-ima-data"; + break; + } + +out: + /* + * File signatures on some filesystems can not be properly verified. + * When such filesystems are mounted by an untrusted mounter or on a + * system not willing to accept such a risk, fail the file signature + * verification. + */ + if ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) && + ((inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) || + (iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) { + status = INTEGRITY_FAIL; + cause = "unverifiable-signature"; + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, + op, cause, rc, 0); + } else if (status != INTEGRITY_PASS) { + /* Fix mode, but don't replace file signatures. */ + if ((ima_appraise & IMA_APPRAISE_FIX) && + (!xattr_value || + xattr_value->type != EVM_IMA_XATTR_DIGSIG)) { + if (!ima_fix_xattr(dentry, iint)) + status = INTEGRITY_PASS; + } + + /* Permit new files with file signatures, but without data. 
*/ + if (inode->i_size == 0 && iint->flags & IMA_NEW_FILE && + xattr_value && xattr_value->type == EVM_IMA_XATTR_DIGSIG) { + status = INTEGRITY_PASS; + } + + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, + op, cause, rc, 0); + } else { + ima_cache_flags(iint, func); + } + + ima_set_cache_status(iint, func, status); + return status; +} + +/* + * ima_update_xattr - update 'security.ima' hash value + */ +void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) +{ + struct dentry *dentry = file_dentry(file); + int rc = 0; + + /* do not collect and update hash for digital signatures */ + if (test_bit(IMA_DIGSIG, &iint->atomic_flags)) + return; + + if ((iint->ima_file_status != INTEGRITY_PASS) && + !(iint->flags & IMA_HASH)) + return; + + rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo); + if (rc < 0) + return; + + inode_lock(file_inode(file)); + ima_fix_xattr(dentry, iint); + inode_unlock(file_inode(file)); +} + +/** + * ima_inode_post_setattr - reflect file metadata changes + * @dentry: pointer to the affected dentry + * + * Changes to a dentry's metadata might result in needing to appraise. + * + * This function is called from notify_change(), which expects the caller + * to lock the inode's i_mutex. + */ +void ima_inode_post_setattr(struct dentry *dentry) +{ + struct inode *inode = d_backing_inode(dentry); + struct integrity_iint_cache *iint; + int action; + + if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode) + || !(inode->i_opflags & IOP_XATTR)) + return; + + action = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR); + if (!action) + __vfs_removexattr(dentry, XATTR_NAME_IMA); + iint = integrity_iint_find(inode); + if (iint) { + set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags); + if (!action) + clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); + } +} + +/* + * ima_protect_xattr - protect 'security.ima' + * + * Ensure that not just anyone can modify or remove 'security.ima'. 
+ */ +static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +{ + if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return 1; + } + return 0; +} + +static void ima_reset_appraise_flags(struct inode *inode, int digsig) +{ + struct integrity_iint_cache *iint; + + if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)) + return; + + iint = integrity_iint_find(inode); + if (!iint) + return; + iint->measured_pcrs = 0; + set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags); + if (digsig) + set_bit(IMA_DIGSIG, &iint->atomic_flags); + else + clear_bit(IMA_DIGSIG, &iint->atomic_flags); +} + +int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +{ + const struct evm_ima_xattr_data *xvalue = xattr_value; + int result; + + result = ima_protect_xattr(dentry, xattr_name, xattr_value, + xattr_value_len); + if (result == 1) { + if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST)) + return -EINVAL; + ima_reset_appraise_flags(d_backing_inode(dentry), + xvalue->type == EVM_IMA_XATTR_DIGSIG); + result = 0; + } + return result; +} + +int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name) +{ + int result; + + result = ima_protect_xattr(dentry, xattr_name, NULL, 0); + if (result == 1) { + ima_reset_appraise_flags(d_backing_inode(dentry), 0); + result = 0; + } + return result; +} diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c new file mode 100644 index 000000000..f0b244989 --- /dev/null +++ b/security/integrity/ima/ima_crypto.c @@ -0,0 +1,698 @@ +/* + * Copyright (C) 2005,2006,2007,2008 IBM Corporation + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + * File: ima_crypto.c + * Calculates md5/sha1 file hash, template hash, boot-aggreate hash + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/ratelimit.h> +#include <linux/file.h> +#include <linux/crypto.h> +#include <linux/scatterlist.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <crypto/hash.h> + +#include "ima.h" + +/* minimum file size for ahash use */ +static unsigned long ima_ahash_minsize; +module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644); +MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use"); + +/* default is 0 - 1 page. 
*/ +static int ima_maxorder; +static unsigned int ima_bufsize = PAGE_SIZE; + +static int param_set_bufsize(const char *val, const struct kernel_param *kp) +{ + unsigned long long size; + int order; + + size = memparse(val, NULL); + order = get_order(size); + if (order >= MAX_ORDER) + return -EINVAL; + ima_maxorder = order; + ima_bufsize = PAGE_SIZE << order; + return 0; +} + +static const struct kernel_param_ops param_ops_bufsize = { + .set = param_set_bufsize, + .get = param_get_uint, +}; +#define param_check_bufsize(name, p) __param_check(name, p, unsigned int) + +module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644); +MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size"); + +static struct crypto_shash *ima_shash_tfm; +static struct crypto_ahash *ima_ahash_tfm; + +int __init ima_init_crypto(void) +{ + long rc; + + ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0); + if (IS_ERR(ima_shash_tfm)) { + rc = PTR_ERR(ima_shash_tfm); + pr_err("Can not allocate %s (reason: %ld)\n", + hash_algo_name[ima_hash_algo], rc); + return rc; + } + pr_info("Allocated hash algorithm: %s\n", + hash_algo_name[ima_hash_algo]); + return 0; +} + +static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo) +{ + struct crypto_shash *tfm = ima_shash_tfm; + int rc; + + if (algo < 0 || algo >= HASH_ALGO__LAST) + algo = ima_hash_algo; + + if (algo != ima_hash_algo) { + tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0); + if (IS_ERR(tfm)) { + rc = PTR_ERR(tfm); + pr_err("Can not allocate %s (reason: %d)\n", + hash_algo_name[algo], rc); + } + } + return tfm; +} + +static void ima_free_tfm(struct crypto_shash *tfm) +{ + if (tfm != ima_shash_tfm) + crypto_free_shash(tfm); +} + +/** + * ima_alloc_pages() - Allocate contiguous pages. + * @max_size: Maximum amount of memory to allocate. + * @allocated_size: Returned size of actual allocation. + * @last_warn: Should the min_size allocation warn or not. + * + * Tries to do opportunistic allocation for memory first trying to allocate + * max_size amount of memory and then splitting that until zero order is + * reached. Allocation is tried without generating allocation warnings unless + * last_warn is set. Last_warn set affects only last allocation of zero order. + * + * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL) + * + * Return pointer to allocated memory, or NULL on failure. + */ +static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size, + int last_warn) +{ + void *ptr; + int order = ima_maxorder; + gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY; + + if (order) + order = min(get_order(max_size), order); + + for (; order; order--) { + ptr = (void *)__get_free_pages(gfp_mask, order); + if (ptr) { + *allocated_size = PAGE_SIZE << order; + return ptr; + } + } + + /* order is zero - one page */ + + gfp_mask = GFP_KERNEL; + + if (!last_warn) + gfp_mask |= __GFP_NOWARN; + + ptr = (void *)__get_free_pages(gfp_mask, 0); + if (ptr) { + *allocated_size = PAGE_SIZE; + return ptr; + } + + *allocated_size = 0; + return NULL; +} + +/** + * ima_free_pages() - Free pages allocated by ima_alloc_pages(). + * @ptr: Pointer to allocated pages. + * @size: Size of allocated buffer. 
+ */ +static void ima_free_pages(void *ptr, size_t size) +{ + if (!ptr) + return; + free_pages((unsigned long)ptr, get_order(size)); +} + +static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo) +{ + struct crypto_ahash *tfm = ima_ahash_tfm; + int rc; + + if (algo < 0 || algo >= HASH_ALGO__LAST) + algo = ima_hash_algo; + + if (algo != ima_hash_algo || !tfm) { + tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0); + if (!IS_ERR(tfm)) { + if (algo == ima_hash_algo) + ima_ahash_tfm = tfm; + } else { + rc = PTR_ERR(tfm); + pr_err("Can not allocate %s (reason: %d)\n", + hash_algo_name[algo], rc); + } + } + return tfm; +} + +static void ima_free_atfm(struct crypto_ahash *tfm) +{ + if (tfm != ima_ahash_tfm) + crypto_free_ahash(tfm); +} + +static inline int ahash_wait(int err, struct crypto_wait *wait) +{ + + err = crypto_wait_req(err, wait); + + if (err) + pr_crit_ratelimited("ahash calculation failed: err: %d\n", err); + + return err; +} + +static int ima_calc_file_hash_atfm(struct file *file, + struct ima_digest_data *hash, + struct crypto_ahash *tfm) +{ + loff_t i_size, offset; + char *rbuf[2] = { NULL, }; + int rc, rbuf_len, active = 0, ahash_rc = 0; + struct ahash_request *req; + struct scatterlist sg[1]; + struct crypto_wait wait; + size_t rbuf_size[2]; + + hash->length = crypto_ahash_digestsize(tfm); + + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) + return -ENOMEM; + + crypto_init_wait(&wait); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &wait); + + rc = ahash_wait(crypto_ahash_init(req), &wait); + if (rc) + goto out1; + + i_size = i_size_read(file_inode(file)); + + if (i_size == 0) + goto out2; + + /* + * Try to allocate maximum size of memory. + * Fail if even a single page cannot be allocated. + */ + rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1); + if (!rbuf[0]) { + rc = -ENOMEM; + goto out1; + } + + /* Only allocate one buffer if that is enough. */ + if (i_size > rbuf_size[0]) { + /* + * Try to allocate secondary buffer. If that fails fallback to + * using single buffering. Use previous memory allocation size + * as baseline for possible allocation size. + */ + rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0], + &rbuf_size[1], 0); + } + + for (offset = 0; offset < i_size; offset += rbuf_len) { + if (!rbuf[1] && offset) { + /* Not using two buffers, and it is not the first + * read/request, wait for the completion of the + * previous ahash_update() request. + */ + rc = ahash_wait(ahash_rc, &wait); + if (rc) + goto out3; + } + /* read buffer */ + rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]); + rc = integrity_kernel_read(file, offset, rbuf[active], + rbuf_len); + if (rc != rbuf_len) { + if (rc >= 0) + rc = -EINVAL; + /* + * Forward current rc, do not overwrite with return value + * from ahash_wait() + */ + ahash_wait(ahash_rc, &wait); + goto out3; + } + + if (rbuf[1] && offset) { + /* Using two buffers, and it is not the first + * read/request, wait for the completion of the + * previous ahash_update() request. 
+ */ + rc = ahash_wait(ahash_rc, &wait); + if (rc) + goto out3; + } + + sg_init_one(&sg[0], rbuf[active], rbuf_len); + ahash_request_set_crypt(req, sg, NULL, rbuf_len); + + ahash_rc = crypto_ahash_update(req); + + if (rbuf[1]) + active = !active; /* swap buffers, if we use two */ + } + /* wait for the last update request to complete */ + rc = ahash_wait(ahash_rc, &wait); +out3: + ima_free_pages(rbuf[0], rbuf_size[0]); + ima_free_pages(rbuf[1], rbuf_size[1]); +out2: + if (!rc) { + ahash_request_set_crypt(req, NULL, hash->digest, 0); + rc = ahash_wait(crypto_ahash_final(req), &wait); + } +out1: + ahash_request_free(req); + return rc; +} + +static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash) +{ + struct crypto_ahash *tfm; + int rc; + + tfm = ima_alloc_atfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = ima_calc_file_hash_atfm(file, hash, tfm); + + ima_free_atfm(tfm); + + return rc; +} + +static int ima_calc_file_hash_tfm(struct file *file, + struct ima_digest_data *hash, + struct crypto_shash *tfm) +{ + loff_t i_size, offset = 0; + char *rbuf; + int rc; + SHASH_DESC_ON_STACK(shash, tfm); + + shash->tfm = tfm; + shash->flags = 0; + + hash->length = crypto_shash_digestsize(tfm); + + rc = crypto_shash_init(shash); + if (rc != 0) + return rc; + + i_size = i_size_read(file_inode(file)); + + if (i_size == 0) + goto out; + + rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!rbuf) + return -ENOMEM; + + while (offset < i_size) { + int rbuf_len; + + rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE); + if (rbuf_len < 0) { + rc = rbuf_len; + break; + } + if (rbuf_len == 0) + break; + offset += rbuf_len; + + rc = crypto_shash_update(shash, rbuf, rbuf_len); + if (rc) + break; + } + kfree(rbuf); +out: + if (!rc) + rc = crypto_shash_final(shash, hash->digest); + return rc; +} + +static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = ima_calc_file_hash_tfm(file, hash, tfm); + + ima_free_tfm(tfm); + + return rc; +} + +/* + * ima_calc_file_hash - calculate file hash + * + * Asynchronous hash (ahash) allows using HW acceleration for calculating + * a hash. ahash performance varies for different data sizes on different + * crypto accelerators. shash performance might be better for smaller files. + * The 'ima.ahash_minsize' module parameter allows specifying the best + * minimum file size for using ahash on the system. + * + * If the ima.ahash_minsize parameter is not specified, this function uses + * shash for the hash calculation. If ahash fails, it falls back to using + * shash. + */ +int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) +{ + loff_t i_size; + int rc; + struct file *f = file; + bool new_file_instance = false; + + /* + * For consistency, fail file's opened with the O_DIRECT flag on + * filesystems mounted with/without DAX option. 
+ */ + if (file->f_flags & O_DIRECT) { + hash->length = hash_digest_size[ima_hash_algo]; + hash->algo = ima_hash_algo; + return -EINVAL; + } + + /* Open a new file instance in O_RDONLY if we cannot read */ + if (!(file->f_mode & FMODE_READ)) { + int flags = file->f_flags & ~(O_WRONLY | O_APPEND | + O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL); + flags |= O_RDONLY; + f = dentry_open(&file->f_path, flags, file->f_cred); + if (IS_ERR(f)) + return PTR_ERR(f); + + new_file_instance = true; + } + + i_size = i_size_read(file_inode(f)); + + if (ima_ahash_minsize && i_size >= ima_ahash_minsize) { + rc = ima_calc_file_ahash(f, hash); + if (!rc) + goto out; + } + + rc = ima_calc_file_shash(f, hash); +out: + if (new_file_instance) + fput(f); + return rc; +} + +/* + * Calculate the hash of template data + */ +static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, + struct ima_template_desc *td, + int num_fields, + struct ima_digest_data *hash, + struct crypto_shash *tfm) +{ + SHASH_DESC_ON_STACK(shash, tfm); + int rc, i; + + shash->tfm = tfm; + shash->flags = 0; + + hash->length = crypto_shash_digestsize(tfm); + + rc = crypto_shash_init(shash); + if (rc != 0) + return rc; + + for (i = 0; i < num_fields; i++) { + u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 }; + u8 *data_to_hash = field_data[i].data; + u32 datalen = field_data[i].len; + u32 datalen_to_hash = + !ima_canonical_fmt ? datalen : cpu_to_le32(datalen); + + if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) { + rc = crypto_shash_update(shash, + (const u8 *) &datalen_to_hash, + sizeof(datalen_to_hash)); + if (rc) + break; + } else if (strcmp(td->fields[i]->field_id, "n") == 0) { + memcpy(buffer, data_to_hash, datalen); + data_to_hash = buffer; + datalen = IMA_EVENT_NAME_LEN_MAX + 1; + } + rc = crypto_shash_update(shash, data_to_hash, datalen); + if (rc) + break; + } + + if (!rc) + rc = crypto_shash_final(shash, hash->digest); + + return rc; +} + +int ima_calc_field_array_hash(struct ima_field_data *field_data, + struct ima_template_desc *desc, int num_fields, + struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields, + hash, tfm); + + ima_free_tfm(tfm); + + return rc; +} + +static int calc_buffer_ahash_atfm(const void *buf, loff_t len, + struct ima_digest_data *hash, + struct crypto_ahash *tfm) +{ + struct ahash_request *req; + struct scatterlist sg; + struct crypto_wait wait; + int rc, ahash_rc = 0; + + hash->length = crypto_ahash_digestsize(tfm); + + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) + return -ENOMEM; + + crypto_init_wait(&wait); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | + CRYPTO_TFM_REQ_MAY_SLEEP, + crypto_req_done, &wait); + + rc = ahash_wait(crypto_ahash_init(req), &wait); + if (rc) + goto out; + + sg_init_one(&sg, buf, len); + ahash_request_set_crypt(req, &sg, NULL, len); + + ahash_rc = crypto_ahash_update(req); + + /* wait for the update request to complete */ + rc = ahash_wait(ahash_rc, &wait); + if (!rc) { + ahash_request_set_crypt(req, NULL, hash->digest, 0); + rc = ahash_wait(crypto_ahash_final(req), &wait); + } +out: + ahash_request_free(req); + return rc; +} + +static int calc_buffer_ahash(const void *buf, loff_t len, + struct ima_digest_data *hash) +{ + struct crypto_ahash *tfm; + int rc; + + tfm = ima_alloc_atfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = calc_buffer_ahash_atfm(buf, len, hash, 
tfm); + + ima_free_atfm(tfm); + + return rc; +} + +static int calc_buffer_shash_tfm(const void *buf, loff_t size, + struct ima_digest_data *hash, + struct crypto_shash *tfm) +{ + SHASH_DESC_ON_STACK(shash, tfm); + unsigned int len; + int rc; + + shash->tfm = tfm; + shash->flags = 0; + + hash->length = crypto_shash_digestsize(tfm); + + rc = crypto_shash_init(shash); + if (rc != 0) + return rc; + + while (size) { + len = size < PAGE_SIZE ? size : PAGE_SIZE; + rc = crypto_shash_update(shash, buf, len); + if (rc) + break; + buf += len; + size -= len; + } + + if (!rc) + rc = crypto_shash_final(shash, hash->digest); + return rc; +} + +static int calc_buffer_shash(const void *buf, loff_t len, + struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = calc_buffer_shash_tfm(buf, len, hash, tfm); + + ima_free_tfm(tfm); + return rc; +} + +int ima_calc_buffer_hash(const void *buf, loff_t len, + struct ima_digest_data *hash) +{ + int rc; + + if (ima_ahash_minsize && len >= ima_ahash_minsize) { + rc = calc_buffer_ahash(buf, len, hash); + if (!rc) + return 0; + } + + return calc_buffer_shash(buf, len, hash); +} + +static void ima_pcrread(int idx, u8 *pcr) +{ + if (!ima_tpm_chip) + return; + + if (tpm_pcr_read(ima_tpm_chip, idx, pcr) != 0) + pr_err("Error Communicating to TPM chip\n"); +} + +/* + * Calculate the boot aggregate hash + */ +static int ima_calc_boot_aggregate_tfm(char *digest, + struct crypto_shash *tfm) +{ + u8 pcr_i[TPM_DIGEST_SIZE]; + int rc, i; + SHASH_DESC_ON_STACK(shash, tfm); + + shash->tfm = tfm; + shash->flags = 0; + + rc = crypto_shash_init(shash); + if (rc != 0) + return rc; + + /* cumulative sha1 over tpm registers 0-7 */ + for (i = TPM_PCR0; i < TPM_PCR8; i++) { + ima_pcrread(i, pcr_i); + /* now accumulate with current aggregate */ + rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE); + if (rc != 0) + return rc; + } + if (!rc) + crypto_shash_final(shash, digest); + return rc; +} + +int ima_calc_boot_aggregate(struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + hash->length = crypto_shash_digestsize(tfm); + rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm); + + ima_free_tfm(tfm); + + return rc; +} diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c new file mode 100644 index 000000000..38bd565b9 --- /dev/null +++ b/security/integrity/ima/ima_fs.c @@ -0,0 +1,508 @@ +/* + * Copyright (C) 2005,2006,2007,2008 IBM Corporation + * + * Authors: + * Kylene Hall <kjhall@us.ibm.com> + * Reiner Sailer <sailer@us.ibm.com> + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + * File: ima_fs.c + * implemenents security file system for reporting + * current measurement list and IMA statistics + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/fcntl.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/seq_file.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/parser.h> +#include <linux/vmalloc.h> + +#include "ima.h" + +static DEFINE_MUTEX(ima_write_mutex); + +bool ima_canonical_fmt; +static int __init default_canonical_fmt_setup(char *str) +{ +#ifdef __BIG_ENDIAN + ima_canonical_fmt = true; +#endif + return 1; +} +__setup("ima_canonical_fmt", default_canonical_fmt_setup); + +static int valid_policy = 1; + +static ssize_t ima_show_htable_value(char __user *buf, size_t count, + loff_t *ppos, atomic_long_t *val) +{ + char tmpbuf[32]; /* greater than largest 'long' string value */ + ssize_t len; + + len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val)); + return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); +} + +static ssize_t ima_show_htable_violations(struct file *filp, + char __user *buf, + size_t count, loff_t *ppos) +{ + return ima_show_htable_value(buf, count, ppos, &ima_htable.violations); +} + +static const struct file_operations ima_htable_violations_ops = { + .read = ima_show_htable_violations, + .llseek = generic_file_llseek, +}; + +static ssize_t ima_show_measurements_count(struct file *filp, + char __user *buf, + size_t count, loff_t *ppos) +{ + return ima_show_htable_value(buf, count, ppos, &ima_htable.len); + +} + +static const struct file_operations ima_measurements_count_ops = { + .read = ima_show_measurements_count, + .llseek = generic_file_llseek, +}; + +/* returns pointer to hlist_node */ +static void *ima_measurements_start(struct seq_file *m, loff_t *pos) +{ + loff_t l = *pos; + struct ima_queue_entry *qe; + + /* we need a lock since pos could point beyond last element */ + rcu_read_lock(); + list_for_each_entry_rcu(qe, &ima_measurements, later) { + if (!l--) { + rcu_read_unlock(); + return qe; + } + } + rcu_read_unlock(); + return NULL; +} + +static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct ima_queue_entry *qe = v; + + /* lock protects when reading beyond last element + * against concurrent list-extension + */ + rcu_read_lock(); + qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later); + rcu_read_unlock(); + (*pos)++; + + return (&qe->later == &ima_measurements) ? NULL : qe; +} + +static void ima_measurements_stop(struct seq_file *m, void *v) +{ +} + +void ima_putc(struct seq_file *m, void *data, int datalen) +{ + while (datalen--) + seq_putc(m, *(char *)data++); +} + +/* print format: + * 32bit-le=pcr# + * char[20]=template digest + * 32bit-le=template name size + * char[n]=template name + * [eventdata length] + * eventdata[n]=template specific data + */ +int ima_measurements_show(struct seq_file *m, void *v) +{ + /* the list never shrinks, so we don't need a lock here */ + struct ima_queue_entry *qe = v; + struct ima_template_entry *e; + char *template_name; + u32 pcr, namelen, template_data_len; /* temporary fields */ + bool is_ima_template = false; + int i; + + /* get entry */ + e = qe->entry; + if (e == NULL) + return -1; + + template_name = (e->template_desc->name[0] != '\0') ? + e->template_desc->name : e->template_desc->fmt; + + /* + * 1st: PCRIndex + * PCR used defaults to the same (config option) in + * little-endian format, unless set in policy + */ + pcr = !ima_canonical_fmt ? 
e->pcr : cpu_to_le32(e->pcr); + ima_putc(m, &pcr, sizeof(e->pcr)); + + /* 2nd: template digest */ + ima_putc(m, e->digest, TPM_DIGEST_SIZE); + + /* 3rd: template name size */ + namelen = !ima_canonical_fmt ? strlen(template_name) : + cpu_to_le32(strlen(template_name)); + ima_putc(m, &namelen, sizeof(namelen)); + + /* 4th: template name */ + ima_putc(m, template_name, strlen(template_name)); + + /* 5th: template length (except for 'ima' template) */ + if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0) + is_ima_template = true; + + if (!is_ima_template) { + template_data_len = !ima_canonical_fmt ? e->template_data_len : + cpu_to_le32(e->template_data_len); + ima_putc(m, &template_data_len, sizeof(e->template_data_len)); + } + + /* 6th: template specific data */ + for (i = 0; i < e->template_desc->num_fields; i++) { + enum ima_show_type show = IMA_SHOW_BINARY; + struct ima_template_field *field = e->template_desc->fields[i]; + + if (is_ima_template && strcmp(field->field_id, "d") == 0) + show = IMA_SHOW_BINARY_NO_FIELD_LEN; + if (is_ima_template && strcmp(field->field_id, "n") == 0) + show = IMA_SHOW_BINARY_OLD_STRING_FMT; + field->field_show(m, show, &e->template_data[i]); + } + return 0; +} + +static const struct seq_operations ima_measurments_seqops = { + .start = ima_measurements_start, + .next = ima_measurements_next, + .stop = ima_measurements_stop, + .show = ima_measurements_show +}; + +static int ima_measurements_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &ima_measurments_seqops); +} + +static const struct file_operations ima_measurements_ops = { + .open = ima_measurements_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +void ima_print_digest(struct seq_file *m, u8 *digest, u32 size) +{ + u32 i; + + for (i = 0; i < size; i++) + seq_printf(m, "%02x", *(digest + i)); +} + +/* print in ascii */ +static int ima_ascii_measurements_show(struct seq_file *m, void *v) +{ + /* the list never shrinks, so we don't need a lock here */ + struct ima_queue_entry *qe = v; + struct ima_template_entry *e; + char *template_name; + int i; + + /* get entry */ + e = qe->entry; + if (e == NULL) + return -1; + + template_name = (e->template_desc->name[0] != '\0') ? 
+ e->template_desc->name : e->template_desc->fmt; + + /* 1st: PCR used (config option) */ + seq_printf(m, "%2d ", e->pcr); + + /* 2nd: SHA1 template hash */ + ima_print_digest(m, e->digest, TPM_DIGEST_SIZE); + + /* 3th: template name */ + seq_printf(m, " %s", template_name); + + /* 4th: template specific data */ + for (i = 0; i < e->template_desc->num_fields; i++) { + seq_puts(m, " "); + if (e->template_data[i].len == 0) + continue; + + e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII, + &e->template_data[i]); + } + seq_puts(m, "\n"); + return 0; +} + +static const struct seq_operations ima_ascii_measurements_seqops = { + .start = ima_measurements_start, + .next = ima_measurements_next, + .stop = ima_measurements_stop, + .show = ima_ascii_measurements_show +}; + +static int ima_ascii_measurements_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &ima_ascii_measurements_seqops); +} + +static const struct file_operations ima_ascii_measurements_ops = { + .open = ima_ascii_measurements_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static ssize_t ima_read_policy(char *path) +{ + void *data; + char *datap; + loff_t size; + int rc, pathlen = strlen(path); + + char *p; + + /* remove \n */ + datap = path; + strsep(&datap, "\n"); + + rc = kernel_read_file_from_path(path, &data, &size, 0, READING_POLICY); + if (rc < 0) { + pr_err("Unable to open file: %s (%d)", path, rc); + return rc; + } + + datap = data; + while (size > 0 && (p = strsep(&datap, "\n"))) { + pr_debug("rule: %s\n", p); + rc = ima_parse_add_rule(p); + if (rc < 0) + break; + size -= rc; + } + + vfree(data); + if (rc < 0) + return rc; + else if (size) + return -EINVAL; + else + return pathlen; +} + +static ssize_t ima_write_policy(struct file *file, const char __user *buf, + size_t datalen, loff_t *ppos) +{ + char *data; + ssize_t result; + + if (datalen >= PAGE_SIZE) + datalen = PAGE_SIZE - 1; + + /* No partial writes. 
*/ + result = -EINVAL; + if (*ppos != 0) + goto out; + + data = memdup_user_nul(buf, datalen); + if (IS_ERR(data)) { + result = PTR_ERR(data); + goto out; + } + + result = mutex_lock_interruptible(&ima_write_mutex); + if (result < 0) + goto out_free; + + if (data[0] == '/') { + result = ima_read_policy(data); + } else if (ima_appraise & IMA_APPRAISE_POLICY) { + pr_err("signed policy file (specified as an absolute pathname) required\n"); + integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, + "policy_update", "signed policy required", + 1, 0); + result = -EACCES; + } else { + result = ima_parse_add_rule(data); + } + mutex_unlock(&ima_write_mutex); +out_free: + kfree(data); +out: + if (result < 0) + valid_policy = 0; + + return result; +} + +static struct dentry *ima_dir; +static struct dentry *ima_symlink; +static struct dentry *binary_runtime_measurements; +static struct dentry *ascii_runtime_measurements; +static struct dentry *runtime_measurements_count; +static struct dentry *violations; +static struct dentry *ima_policy; + +enum ima_fs_flags { + IMA_FS_BUSY, +}; + +static unsigned long ima_fs_flags; + +#ifdef CONFIG_IMA_READ_POLICY +static const struct seq_operations ima_policy_seqops = { + .start = ima_policy_start, + .next = ima_policy_next, + .stop = ima_policy_stop, + .show = ima_policy_show, +}; +#endif + +/* + * ima_open_policy: sequentialize access to the policy file + */ +static int ima_open_policy(struct inode *inode, struct file *filp) +{ + if (!(filp->f_flags & O_WRONLY)) { +#ifndef CONFIG_IMA_READ_POLICY + return -EACCES; +#else + if ((filp->f_flags & O_ACCMODE) != O_RDONLY) + return -EACCES; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return seq_open(filp, &ima_policy_seqops); +#endif + } + if (test_and_set_bit(IMA_FS_BUSY, &ima_fs_flags)) + return -EBUSY; + return 0; +} + +/* + * ima_release_policy - start using the new measure policy rules. + * + * Initially, ima_measure points to the default policy rules, now + * point to the new policy rules, and remove the securityfs policy file, + * assuming a valid policy. + */ +static int ima_release_policy(struct inode *inode, struct file *file) +{ + const char *cause = valid_policy ? 
"completed" : "failed"; + + if ((file->f_flags & O_ACCMODE) == O_RDONLY) + return seq_release(inode, file); + + if (valid_policy && ima_check_policy() < 0) { + cause = "failed"; + valid_policy = 0; + } + + pr_info("policy update %s\n", cause); + integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, + "policy_update", cause, !valid_policy, 0); + + if (!valid_policy) { + ima_delete_rules(); + valid_policy = 1; + clear_bit(IMA_FS_BUSY, &ima_fs_flags); + return 0; + } + + ima_update_policy(); +#if !defined(CONFIG_IMA_WRITE_POLICY) && !defined(CONFIG_IMA_READ_POLICY) + securityfs_remove(ima_policy); + ima_policy = NULL; +#elif defined(CONFIG_IMA_WRITE_POLICY) + clear_bit(IMA_FS_BUSY, &ima_fs_flags); +#elif defined(CONFIG_IMA_READ_POLICY) + inode->i_mode &= ~S_IWUSR; +#endif + return 0; +} + +static const struct file_operations ima_measure_policy_ops = { + .open = ima_open_policy, + .write = ima_write_policy, + .read = seq_read, + .release = ima_release_policy, + .llseek = generic_file_llseek, +}; + +int __init ima_fs_init(void) +{ + ima_dir = securityfs_create_dir("ima", integrity_dir); + if (IS_ERR(ima_dir)) + return -1; + + ima_symlink = securityfs_create_symlink("ima", NULL, "integrity/ima", + NULL); + if (IS_ERR(ima_symlink)) + goto out; + + binary_runtime_measurements = + securityfs_create_file("binary_runtime_measurements", + S_IRUSR | S_IRGRP, ima_dir, NULL, + &ima_measurements_ops); + if (IS_ERR(binary_runtime_measurements)) + goto out; + + ascii_runtime_measurements = + securityfs_create_file("ascii_runtime_measurements", + S_IRUSR | S_IRGRP, ima_dir, NULL, + &ima_ascii_measurements_ops); + if (IS_ERR(ascii_runtime_measurements)) + goto out; + + runtime_measurements_count = + securityfs_create_file("runtime_measurements_count", + S_IRUSR | S_IRGRP, ima_dir, NULL, + &ima_measurements_count_ops); + if (IS_ERR(runtime_measurements_count)) + goto out; + + violations = + securityfs_create_file("violations", S_IRUSR | S_IRGRP, + ima_dir, NULL, &ima_htable_violations_ops); + if (IS_ERR(violations)) + goto out; + + ima_policy = securityfs_create_file("policy", POLICY_FILE_FLAGS, + ima_dir, NULL, + &ima_measure_policy_ops); + if (IS_ERR(ima_policy)) + goto out; + + return 0; +out: + securityfs_remove(ima_policy); + securityfs_remove(violations); + securityfs_remove(runtime_measurements_count); + securityfs_remove(ascii_runtime_measurements); + securityfs_remove(binary_runtime_measurements); + securityfs_remove(ima_symlink); + securityfs_remove(ima_dir); + return -1; +} diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c new file mode 100644 index 000000000..a2bc4cb44 --- /dev/null +++ b/security/integrity/ima/ima_init.c @@ -0,0 +1,135 @@ +/* + * Copyright (C) 2005,2006,2007,2008 IBM Corporation + * + * Authors: + * Reiner Sailer <sailer@watson.ibm.com> + * Leendert van Doorn <leendert@watson.ibm.com> + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ * + * File: ima_init.c + * initialization and cleanup functions + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/err.h> + +#include "ima.h" + +/* name for boot aggregate entry */ +const char boot_aggregate_name[] = "boot_aggregate"; +struct tpm_chip *ima_tpm_chip; + +/* Add the boot aggregate to the IMA measurement list and extend + * the PCR register. + * + * Calculate the boot aggregate, a SHA1 over tpm registers 0-7, + * assuming a TPM chip exists, and zeroes if the TPM chip does not + * exist. Add the boot aggregate measurement to the measurement + * list and extend the PCR register. + * + * If a tpm chip does not exist, indicate the core root of trust is + * not hardware based by invalidating the aggregate PCR value. + * (The aggregate PCR value is invalidated by adding one value to + * the measurement list and extending the aggregate PCR value with + * a different value.) Violations add a zero entry to the measurement + * list and extend the aggregate PCR value with ff...ff's. + */ +static int __init ima_add_boot_aggregate(void) +{ + static const char op[] = "add_boot_aggregate"; + const char *audit_cause = "ENOMEM"; + struct ima_template_entry *entry; + struct integrity_iint_cache tmp_iint, *iint = &tmp_iint; + struct ima_event_data event_data = {iint, NULL, boot_aggregate_name, + NULL, 0, NULL}; + int result = -ENOMEM; + int violation = 0; + struct { + struct ima_digest_data hdr; + char digest[TPM_DIGEST_SIZE]; + } hash; + + memset(iint, 0, sizeof(*iint)); + memset(&hash, 0, sizeof(hash)); + iint->ima_hash = &hash.hdr; + iint->ima_hash->algo = HASH_ALGO_SHA1; + iint->ima_hash->length = SHA1_DIGEST_SIZE; + + if (ima_tpm_chip) { + result = ima_calc_boot_aggregate(&hash.hdr); + if (result < 0) { + audit_cause = "hashing_error"; + goto err_out; + } + } + + result = ima_alloc_init_template(&event_data, &entry); + if (result < 0) { + audit_cause = "alloc_entry"; + goto err_out; + } + + result = ima_store_template(entry, violation, NULL, + boot_aggregate_name, + CONFIG_IMA_MEASURE_PCR_IDX); + if (result < 0) { + ima_free_template_entry(entry); + audit_cause = "store_entry"; + goto err_out; + } + return 0; +err_out: + integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op, + audit_cause, result, 0); + return result; +} + +#ifdef CONFIG_IMA_LOAD_X509 +void __init ima_load_x509(void) +{ + int unset_flags = ima_policy_flag & IMA_APPRAISE; + + ima_policy_flag &= ~unset_flags; + integrity_load_x509(INTEGRITY_KEYRING_IMA, CONFIG_IMA_X509_PATH); + ima_policy_flag |= unset_flags; +} +#endif + +int __init ima_init(void) +{ + int rc; + + ima_tpm_chip = tpm_default_chip(); + if (!ima_tpm_chip) + pr_info("No TPM chip found, activating TPM-bypass!\n"); + + rc = integrity_init_keyring(INTEGRITY_KEYRING_IMA); + if (rc) + return rc; + + rc = ima_init_crypto(); + if (rc) + return rc; + rc = ima_init_template(); + if (rc != 0) + return rc; + + ima_load_kexec_buffer(); + + rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */ + if (rc != 0) + return rc; + + ima_init_policy(); + + return ima_fs_init(); +} diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c new file mode 100644 index 000000000..6a10d4d8b --- /dev/null +++ b/security/integrity/ima/ima_kexec.c @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2016 IBM Corporation + * + * Authors: + * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com> + * Mimi Zohar <zohar@linux.vnet.ibm.com> + * + * 
This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/seq_file.h> +#include <linux/vmalloc.h> +#include <linux/kexec.h> +#include "ima.h" + +#ifdef CONFIG_IMA_KEXEC +static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer, + unsigned long segment_size) +{ + struct ima_queue_entry *qe; + struct seq_file file; + struct ima_kexec_hdr khdr; + int ret = 0; + + /* segment size can't change between kexec load and execute */ + file.buf = vmalloc(segment_size); + if (!file.buf) { + ret = -ENOMEM; + goto out; + } + + file.size = segment_size; + file.read_pos = 0; + file.count = sizeof(khdr); /* reserved space */ + + memset(&khdr, 0, sizeof(khdr)); + khdr.version = 1; + list_for_each_entry_rcu(qe, &ima_measurements, later) { + if (file.count < file.size) { + khdr.count++; + ima_measurements_show(&file, qe); + } else { + ret = -EINVAL; + break; + } + } + + if (ret < 0) + goto out; + + /* + * fill in reserved space with some buffer details + * (eg. version, buffer size, number of measurements) + */ + khdr.buffer_size = file.count; + if (ima_canonical_fmt) { + khdr.version = cpu_to_le16(khdr.version); + khdr.count = cpu_to_le64(khdr.count); + khdr.buffer_size = cpu_to_le64(khdr.buffer_size); + } + memcpy(file.buf, &khdr, sizeof(khdr)); + + print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE, + 16, 1, file.buf, + file.count < 100 ? file.count : 100, true); + + *buffer_size = file.count; + *buffer = file.buf; +out: + if (ret == -EINVAL) + vfree(file.buf); + return ret; +} + +/* + * Called during kexec_file_load so that IMA can add a segment to the kexec + * image for the measurement list for the next kernel. + * + * This function assumes that kexec_mutex is held. + */ +void ima_add_kexec_buffer(struct kimage *image) +{ + struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE, + .buf_min = 0, .buf_max = ULONG_MAX, + .top_down = true }; + unsigned long binary_runtime_size; + + /* use more understandable variable names than defined in kbuf */ + void *kexec_buffer = NULL; + size_t kexec_buffer_size; + size_t kexec_segment_size; + int ret; + + /* + * Reserve an extra half page of memory for additional measurements + * added during the kexec load. 
+ */ + binary_runtime_size = ima_get_binary_runtime_size(); + if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE) + kexec_segment_size = ULONG_MAX; + else + kexec_segment_size = ALIGN(ima_get_binary_runtime_size() + + PAGE_SIZE / 2, PAGE_SIZE); + if ((kexec_segment_size == ULONG_MAX) || + ((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) { + pr_err("Binary measurement list too large.\n"); + return; + } + + ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer, + kexec_segment_size); + if (!kexec_buffer) { + pr_err("Not enough memory for the kexec measurement buffer.\n"); + return; + } + + kbuf.buffer = kexec_buffer; + kbuf.bufsz = kexec_buffer_size; + kbuf.memsz = kexec_segment_size; + ret = kexec_add_buffer(&kbuf); + if (ret) { + pr_err("Error passing over kexec measurement buffer.\n"); + vfree(kexec_buffer); + return; + } + + ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size); + if (ret) { + pr_err("Error passing over kexec measurement buffer.\n"); + return; + } + + image->ima_buffer = kexec_buffer; + + pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n", + kbuf.mem); +} +#endif /* IMA_KEXEC */ + +/* + * Restore the measurement list from the previous kernel. + */ +void ima_load_kexec_buffer(void) +{ + void *kexec_buffer = NULL; + size_t kexec_buffer_size = 0; + int rc; + + rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size); + switch (rc) { + case 0: + rc = ima_restore_measurement_list(kexec_buffer_size, + kexec_buffer); + if (rc != 0) + pr_err("Failed to restore the measurement list: %d\n", + rc); + + ima_free_kexec_buffer(); + break; + case -ENOTSUPP: + pr_debug("Restoring the measurement list not supported\n"); + break; + case -ENOENT: + pr_debug("No measurement list to restore\n"); + break; + default: + pr_debug("Error restoring the measurement list: %d\n", rc); + } +} diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c new file mode 100644 index 000000000..2d31921fb --- /dev/null +++ b/security/integrity/ima/ima_main.c @@ -0,0 +1,565 @@ +/* + * Copyright (C) 2005,2006,2007,2008 IBM Corporation + * + * Authors: + * Reiner Sailer <sailer@watson.ibm.com> + * Serge Hallyn <serue@us.ibm.com> + * Kylene Hall <kylene@us.ibm.com> + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_main.c + * implements the IMA hooks: ima_bprm_check, ima_file_mmap, + * and ima_file_check. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/file.h> +#include <linux/binfmts.h> +#include <linux/mount.h> +#include <linux/mman.h> +#include <linux/slab.h> +#include <linux/xattr.h> +#include <linux/ima.h> +#include <linux/iversion.h> +#include <linux/fs.h> + +#include "ima.h" + +#ifdef CONFIG_IMA_APPRAISE +int ima_appraise = IMA_APPRAISE_ENFORCE; +#else +int ima_appraise; +#endif + +int ima_hash_algo = HASH_ALGO_SHA1; +static int hash_setup_done; + +static int __init hash_setup(char *str) +{ + struct ima_template_desc *template_desc = ima_template_desc_current(); + int i; + + if (hash_setup_done) + return 1; + + if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { + if (strncmp(str, "sha1", 4) == 0) + ima_hash_algo = HASH_ALGO_SHA1; + else if (strncmp(str, "md5", 3) == 0) + ima_hash_algo = HASH_ALGO_MD5; + else + return 1; + goto out; + } + + i = match_string(hash_algo_name, HASH_ALGO__LAST, str); + if (i < 0) + return 1; + + ima_hash_algo = i; +out: + hash_setup_done = 1; + return 1; +} +__setup("ima_hash=", hash_setup); + +/* + * ima_rdwr_violation_check + * + * Only invalidate the PCR for measured files: + * - Opening a file for write when already open for read, + * results in a time of measure, time of use (ToMToU) error. + * - Opening a file for read when already open for write, + * could result in a file measurement error. + * + */ +static void ima_rdwr_violation_check(struct file *file, + struct integrity_iint_cache *iint, + int must_measure, + char **pathbuf, + const char **pathname, + char *filename) +{ + struct inode *inode = file_inode(file); + fmode_t mode = file->f_mode; + bool send_tomtou = false, send_writers = false; + + if (mode & FMODE_WRITE) { + if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { + if (!iint) + iint = integrity_iint_find(inode); + /* IMA_MEASURE is set from reader side */ + if (iint && test_bit(IMA_MUST_MEASURE, + &iint->atomic_flags)) + send_tomtou = true; + } + } else { + if (must_measure) + set_bit(IMA_MUST_MEASURE, &iint->atomic_flags); + if ((atomic_read(&inode->i_writecount) > 0) && must_measure) + send_writers = true; + } + + if (!send_tomtou && !send_writers) + return; + + *pathname = ima_d_path(&file->f_path, pathbuf, filename); + + if (send_tomtou) + ima_add_violation(file, *pathname, iint, + "invalid_pcr", "ToMToU"); + if (send_writers) + ima_add_violation(file, *pathname, iint, + "invalid_pcr", "open_writers"); +} + +static void ima_check_last_writer(struct integrity_iint_cache *iint, + struct inode *inode, struct file *file) +{ + fmode_t mode = file->f_mode; + bool update; + + if (!(mode & FMODE_WRITE)) + return; + + mutex_lock(&iint->mutex); + if (atomic_read(&inode->i_writecount) == 1) { + update = test_and_clear_bit(IMA_UPDATE_XATTR, + &iint->atomic_flags); + if (!IS_I_VERSION(inode) || + !inode_eq_iversion(inode, iint->version) || + (iint->flags & IMA_NEW_FILE)) { + iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); + iint->measured_pcrs = 0; + if (update) + ima_update_xattr(iint, file); + } + } + mutex_unlock(&iint->mutex); +} + +/** + * ima_file_free - called on __fput() + * @file: pointer to file structure being freed + * + * Flag files that changed, based on i_version + */ +void ima_file_free(struct file *file) +{ + struct inode *inode = file_inode(file); + struct integrity_iint_cache *iint; + + if (!ima_policy_flag || !S_ISREG(inode->i_mode)) + return; + + iint = integrity_iint_find(inode); + if (!iint) + return; + + ima_check_last_writer(iint, inode, file); +} + 
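For context, a minimal userspace sketch (not part of this patch, purely illustrative) of how the measurement list that these hooks populate, and that ima_fs.c above exports through securityfs, can be read back; it assumes securityfs is mounted at the conventional /sys/kernel/security location:

#include <stdio.h>

int main(void)
{
        /* Assumed mount point: securityfs at /sys/kernel/security. */
        const char *path =
                "/sys/kernel/security/ima/ascii_runtime_measurements";
        FILE *fp = fopen(path, "r");
        unsigned long entries = 0;
        int c;

        if (!fp) {
                perror(path);
                return 1;
        }
        /* One record per line; count newlines to count measurements. */
        while ((c = fgetc(fp)) != EOF)
                if (c == '\n')
                        entries++;
        fclose(fp);
        printf("%lu measurement entries\n", entries);
        return 0;
}

Each record emitted by ima_ascii_measurements_show() carries the PCR index, the SHA1 template digest, the template name and the template fields, so a remote verifier can, in principle, replay the template digests into a software PCR and compare the result against a TPM quote.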
+static int process_measurement(struct file *file, const struct cred *cred,
+ u32 secid, char *buf, loff_t size, int mask,
+ enum ima_hooks func)
+{
+ struct inode *inode = file_inode(file);
+ struct integrity_iint_cache *iint = NULL;
+ struct ima_template_desc *template_desc;
+ char *pathbuf = NULL;
+ char filename[NAME_MAX];
+ const char *pathname = NULL;
+ int rc = 0, action, must_appraise = 0;
+ int pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ struct evm_ima_xattr_data *xattr_value = NULL;
+ int xattr_len = 0;
+ bool violation_check;
+ enum hash_algo hash_algo;
+
+ if (!ima_policy_flag || !S_ISREG(inode->i_mode))
+ return 0;
+
+ /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
+ * bitmask based on the appraise/audit/measurement policy.
+ * Included is the appraise submask.
+ */
+ action = ima_get_action(inode, cred, secid, mask, func, &pcr);
+ violation_check = ((func == FILE_CHECK || func == MMAP_CHECK) &&
+ (ima_policy_flag & IMA_MEASURE));
+ if (!action && !violation_check)
+ return 0;
+
+ must_appraise = action & IMA_APPRAISE;
+
+ /* Is the appraise rule hook specific? */
+ if (action & IMA_FILE_APPRAISE)
+ func = FILE_CHECK;
+
+ inode_lock(inode);
+
+ if (action) {
+ iint = integrity_inode_get(inode);
+ if (!iint)
+ rc = -ENOMEM;
+ }
+
+ if (!rc && violation_check)
+ ima_rdwr_violation_check(file, iint, action & IMA_MEASURE,
+ &pathbuf, &pathname, filename);
+
+ inode_unlock(inode);
+
+ if (rc)
+ goto out;
+ if (!action)
+ goto out;
+
+ mutex_lock(&iint->mutex);
+
+ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags))
+ /* reset appraisal flags if ima_inode_post_setattr was called */
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_ACTION_FLAGS);
+
+ /*
+ * Re-evaluate the file if either the xattr has changed or the
+ * kernel has no way of detecting file change on the filesystem.
+ * (Limited to privileged mounted filesystems.)
+ */ + if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags) || + ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) && + !(inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) && + !(action & IMA_FAIL_UNVERIFIABLE_SIGS))) { + iint->flags &= ~IMA_DONE_MASK; + iint->measured_pcrs = 0; + } + + /* Determine if already appraised/measured based on bitmask + * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED, + * IMA_AUDIT, IMA_AUDITED) + */ + iint->flags |= action; + action &= IMA_DO_MASK; + action &= ~((iint->flags & (IMA_DONE_MASK ^ IMA_MEASURED)) >> 1); + + /* If target pcr is already measured, unset IMA_MEASURE action */ + if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr))) + action ^= IMA_MEASURE; + + /* HASH sets the digital signature and update flags, nothing else */ + if ((action & IMA_HASH) && + !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) { + xattr_len = ima_read_xattr(file_dentry(file), &xattr_value); + if ((xattr_value && xattr_len > 2) && + (xattr_value->type == EVM_IMA_XATTR_DIGSIG)) + set_bit(IMA_DIGSIG, &iint->atomic_flags); + iint->flags |= IMA_HASHED; + action ^= IMA_HASH; + set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); + } + + /* Nothing to do, just return existing appraised status */ + if (!action) { + if (must_appraise) + rc = ima_get_cache_status(iint, func); + goto out_locked; + } + + template_desc = ima_template_desc_current(); + if ((action & IMA_APPRAISE_SUBMASK) || + strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) + /* read 'security.ima' */ + xattr_len = ima_read_xattr(file_dentry(file), &xattr_value); + + hash_algo = ima_get_hash_algo(xattr_value, xattr_len); + + rc = ima_collect_measurement(iint, file, buf, size, hash_algo); + if (rc != 0 && rc != -EBADF && rc != -EINVAL) + goto out_locked; + + if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */ + pathname = ima_d_path(&file->f_path, &pathbuf, filename); + + if (action & IMA_MEASURE) + ima_store_measurement(iint, file, pathname, + xattr_value, xattr_len, pcr); + if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) { + inode_lock(inode); + rc = ima_appraise_measurement(func, iint, file, pathname, + xattr_value, xattr_len); + inode_unlock(inode); + } + if (action & IMA_AUDIT) + ima_audit_measurement(iint, pathname); + + if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO)) + rc = 0; +out_locked: + if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) && + !(iint->flags & IMA_NEW_FILE)) + rc = -EACCES; + mutex_unlock(&iint->mutex); + kfree(xattr_value); +out: + if (pathbuf) + __putname(pathbuf); + if (must_appraise) { + if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE)) + return -EACCES; + if (file->f_mode & FMODE_WRITE) + set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); + } + return 0; +} + +/** + * ima_file_mmap - based on policy, collect/store measurement. + * @file: pointer to the file to be measured (May be NULL) + * @prot: contains the protection that will be applied by the kernel. + * + * Measure files being mmapped executable based on the ima_must_measure() + * policy decision. + * + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. 
+ */ +int ima_file_mmap(struct file *file, unsigned long prot) +{ + u32 secid; + + if (file && (prot & PROT_EXEC)) { + security_task_getsecid(current, &secid); + return process_measurement(file, current_cred(), secid, NULL, + 0, MAY_EXEC, MMAP_CHECK); + } + + return 0; +} + +/** + * ima_bprm_check - based on policy, collect/store measurement. + * @bprm: contains the linux_binprm structure + * + * The OS protects against an executable file, already open for write, + * from being executed in deny_write_access() and an executable file, + * already open for execute, from being modified in get_write_access(). + * So we can be certain that what we verify and measure here is actually + * what is being executed. + * + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. + */ +int ima_bprm_check(struct linux_binprm *bprm) +{ + int ret; + u32 secid; + + security_task_getsecid(current, &secid); + ret = process_measurement(bprm->file, current_cred(), secid, NULL, 0, + MAY_EXEC, BPRM_CHECK); + if (ret) + return ret; + + security_cred_getsecid(bprm->cred, &secid); + return process_measurement(bprm->file, bprm->cred, secid, NULL, 0, + MAY_EXEC, CREDS_CHECK); +} + +/** + * ima_path_check - based on policy, collect/store measurement. + * @file: pointer to the file to be measured + * @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND + * + * Measure files based on the ima_must_measure() policy decision. + * + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. + */ +int ima_file_check(struct file *file, int mask) +{ + u32 secid; + + security_task_getsecid(current, &secid); + return process_measurement(file, current_cred(), secid, NULL, 0, + mask & (MAY_READ | MAY_WRITE | MAY_EXEC | + MAY_APPEND), FILE_CHECK); +} +EXPORT_SYMBOL_GPL(ima_file_check); + +/** + * ima_post_path_mknod - mark as a new inode + * @dentry: newly created dentry + * + * Mark files created via the mknodat syscall as new, so that the + * file data can be written later. + */ +void ima_post_path_mknod(struct dentry *dentry) +{ + struct integrity_iint_cache *iint; + struct inode *inode = dentry->d_inode; + int must_appraise; + + must_appraise = ima_must_appraise(inode, MAY_ACCESS, FILE_CHECK); + if (!must_appraise) + return; + + iint = integrity_inode_get(inode); + if (iint) + iint->flags |= IMA_NEW_FILE; +} + +/** + * ima_read_file - pre-measure/appraise hook decision based on policy + * @file: pointer to the file to be measured/appraised/audit + * @read_id: caller identifier + * + * Permit reading a file based on policy. The policy rules are written + * in terms of the policy identifier. Appraising the integrity of + * a file requires a file descriptor. + * + * For permission return 0, otherwise return -EACCES. + */ +int ima_read_file(struct file *file, enum kernel_read_file_id read_id) +{ + /* + * READING_FIRMWARE_PREALLOC_BUFFER + * + * Do devices using pre-allocated memory run the risk of the + * firmware being accessible to the device prior to the completion + * of IMA's signature verification any more than when using two + * buffers? 
+ */
+ return 0;
+}
+
+static int read_idmap[READING_MAX_ID] = {
+ [READING_FIRMWARE] = FIRMWARE_CHECK,
+ [READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
+ [READING_MODULE] = MODULE_CHECK,
+ [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK,
+ [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK,
+ [READING_POLICY] = POLICY_CHECK
+};
+
+/**
+ * ima_post_read_file - in memory collect/appraise/audit measurement
+ * @file: pointer to the file to be measured/appraised/audited
+ * @buf: pointer to in memory file contents
+ * @size: size of in memory file contents
+ * @read_id: caller identifier
+ *
+ * Measure/appraise/audit an in-memory file based on policy. Policy rules
+ * are written in terms of a policy identifier.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_post_read_file(struct file *file, void *buf, loff_t size,
+ enum kernel_read_file_id read_id)
+{
+ enum ima_hooks func;
+ u32 secid;
+
+ if (!file && read_id == READING_FIRMWARE) {
+ if ((ima_appraise & IMA_APPRAISE_FIRMWARE) &&
+ (ima_appraise & IMA_APPRAISE_ENFORCE)) {
+ pr_err("Prevent firmware loading_store.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ return 0;
+ }
+
+ /* permit signed certs */
+ if (!file && read_id == READING_X509_CERTIFICATE)
+ return 0;
+
+ if (!file || !buf || size == 0) { /* should never happen */
+ if (ima_appraise & IMA_APPRAISE_ENFORCE)
+ return -EACCES;
+ return 0;
+ }
+
+ func = read_idmap[read_id] ?: FILE_CHECK;
+ security_task_getsecid(current, &secid);
+ return process_measurement(file, current_cred(), secid, buf, size,
+ MAY_READ, func);
+}
+
+/**
+ * ima_load_data - appraise decision based on policy
+ * @id: kernel load data caller identifier
+ *
+ * Callers of this LSM hook cannot measure, appraise, or audit the
+ * data provided by userspace. Enforce policy rules requiring a file
+ * signature (e.g. kexec'ed kernel image).
+ *
+ * For permission return 0, otherwise return -EACCES.
+ */
+int ima_load_data(enum kernel_load_data_id id)
+{
+ bool sig_enforce;
+
+ if ((ima_appraise & IMA_APPRAISE_ENFORCE) != IMA_APPRAISE_ENFORCE)
+ return 0;
+
+ switch (id) {
+ case LOADING_KEXEC_IMAGE:
+ if (ima_appraise & IMA_APPRAISE_KEXEC) {
+ pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_FIRMWARE:
+ if (ima_appraise & IMA_APPRAISE_FIRMWARE) {
+ pr_err("Prevent firmware sysfs fallback loading.\n");
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+ }
+ break;
+ case LOADING_MODULE:
+ sig_enforce = is_module_sig_enforced();
+
+ if (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES)) {
+ pr_err("impossible to appraise a module without a file descriptor.
sig_enforce kernel parameter might help\n"); + return -EACCES; /* INTEGRITY_UNKNOWN */ + } + default: + break; + } + return 0; +} + +static int __init init_ima(void) +{ + int error; + + ima_init_template_list(); + hash_setup(CONFIG_IMA_DEFAULT_HASH); + error = ima_init(); + + if (error && strcmp(hash_algo_name[ima_hash_algo], + CONFIG_IMA_DEFAULT_HASH) != 0) { + pr_info("Allocating %s failed, going to use default hash algorithm %s\n", + hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH); + hash_setup_done = 0; + hash_setup(CONFIG_IMA_DEFAULT_HASH); + error = ima_init(); + } + + if (!error) + ima_update_policy_flag(); + + return error; +} + +late_initcall(init_ima); /* Start IMA after the TPM is available */ + +MODULE_DESCRIPTION("Integrity Measurement Architecture"); +MODULE_LICENSE("GPL"); diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c new file mode 100644 index 000000000..daad75ee7 --- /dev/null +++ b/security/integrity/ima/ima_mok.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 Juniper Networks, Inc. + * + * Author: + * Petko Manolov <petko.manolov@konsulko.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/cred.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <keys/system_keyring.h> + + +struct key *ima_blacklist_keyring; + +/* + * Allocate the IMA blacklist keyring + */ +static __init int ima_mok_init(void) +{ + struct key_restriction *restriction; + + pr_notice("Allocating IMA blacklist keyring.\n"); + + restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL); + if (!restriction) + panic("Can't allocate IMA blacklist restriction."); + + restriction->check = restrict_link_by_builtin_trusted; + + ima_blacklist_keyring = keyring_alloc(".ima_blacklist", + KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), + (KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ | + KEY_USR_WRITE | KEY_USR_SEARCH, + KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_SET_KEEP, + restriction, NULL); + + if (IS_ERR(ima_blacklist_keyring)) + panic("Can't allocate IMA blacklist keyring."); + return 0; +} +device_initcall(ima_mok_init); diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c new file mode 100644 index 000000000..2d5a3daa0 --- /dev/null +++ b/security/integrity/ima/ima_policy.c @@ -0,0 +1,1256 @@ +/* + * Copyright (C) 2008 IBM Corporation + * Author: Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ * + * ima_policy.c + * - initialize default measure policy rules + * + */ +#include <linux/module.h> +#include <linux/list.h> +#include <linux/fs.h> +#include <linux/security.h> +#include <linux/magic.h> +#include <linux/parser.h> +#include <linux/slab.h> +#include <linux/rculist.h> +#include <linux/genhd.h> +#include <linux/seq_file.h> + +#include "ima.h" + +/* flags definitions */ +#define IMA_FUNC 0x0001 +#define IMA_MASK 0x0002 +#define IMA_FSMAGIC 0x0004 +#define IMA_UID 0x0008 +#define IMA_FOWNER 0x0010 +#define IMA_FSUUID 0x0020 +#define IMA_INMASK 0x0040 +#define IMA_EUID 0x0080 +#define IMA_PCR 0x0100 +#define IMA_FSNAME 0x0200 + +#define UNKNOWN 0 +#define MEASURE 0x0001 /* same as IMA_MEASURE */ +#define DONT_MEASURE 0x0002 +#define APPRAISE 0x0004 /* same as IMA_APPRAISE */ +#define DONT_APPRAISE 0x0008 +#define AUDIT 0x0040 +#define HASH 0x0100 +#define DONT_HASH 0x0200 + +#define INVALID_PCR(a) (((a) < 0) || \ + (a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8)) + +int ima_policy_flag; +static int temp_ima_appraise; +static int build_ima_appraise __ro_after_init; + +#define MAX_LSM_RULES 6 +enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE, + LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE +}; + +enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB }; + +struct ima_rule_entry { + struct list_head list; + int action; + unsigned int flags; + enum ima_hooks func; + int mask; + unsigned long fsmagic; + uuid_t fsuuid; + kuid_t uid; + kuid_t fowner; + bool (*uid_op)(kuid_t, kuid_t); /* Handlers for operators */ + bool (*fowner_op)(kuid_t, kuid_t); /* uid_eq(), uid_gt(), uid_lt() */ + int pcr; + struct { + void *rule; /* LSM file metadata specific */ + void *args_p; /* audit value */ + int type; /* audit type */ + } lsm[MAX_LSM_RULES]; + char *fsname; +}; + +/* + * Without LSM specific knowledge, the default policy can only be + * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner + */ + +/* + * The minimum rule set to allow for full TCB coverage. Measures all files + * opened or mmap for exec and everything read by root. Dangerous because + * normal users can easily run the machine out of memory simply building + * and running executables. 
+ */ +static struct ima_rule_entry dont_measure_rules[] __ro_after_init = { + {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC, + .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = CGROUP2_SUPER_MAGIC, + .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC} +}; + +static struct ima_rule_entry original_measurement_rules[] __ro_after_init = { + {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC, + .flags = IMA_FUNC | IMA_MASK}, + {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC, + .flags = IMA_FUNC | IMA_MASK}, + {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, + .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq, + .flags = IMA_FUNC | IMA_MASK | IMA_UID}, + {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC}, + {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC}, +}; + +static struct ima_rule_entry default_measurement_rules[] __ro_after_init = { + {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC, + .flags = IMA_FUNC | IMA_MASK}, + {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC, + .flags = IMA_FUNC | IMA_MASK}, + {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, + .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq, + .flags = IMA_FUNC | IMA_INMASK | IMA_EUID}, + {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, + .uid = GLOBAL_ROOT_UID, .uid_op = &uid_eq, + .flags = IMA_FUNC | IMA_INMASK | IMA_UID}, + {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC}, + {.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC}, + {.action = MEASURE, .func = POLICY_CHECK, .flags = IMA_FUNC}, +}; + +static struct ima_rule_entry default_appraise_rules[] __ro_after_init = { + {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = SMACK_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = CGROUP2_SUPER_MAGIC, .flags = IMA_FSMAGIC}, +#ifdef CONFIG_IMA_WRITE_POLICY + {.action = APPRAISE, .func = POLICY_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, +#endif +#ifndef 
CONFIG_IMA_APPRAISE_SIGNED_INIT + {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &uid_eq, + .flags = IMA_FOWNER}, +#else + /* force signature */ + {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .fowner_op = &uid_eq, + .flags = IMA_FOWNER | IMA_DIGSIG_REQUIRED}, +#endif +}; + +static struct ima_rule_entry build_appraise_rules[] __ro_after_init = { +#ifdef CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS + {.action = APPRAISE, .func = MODULE_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, +#endif +#ifdef CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS + {.action = APPRAISE, .func = FIRMWARE_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, +#endif +#ifdef CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS + {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, +#endif +#ifdef CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS + {.action = APPRAISE, .func = POLICY_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, +#endif +}; + +static struct ima_rule_entry secure_boot_rules[] __ro_after_init = { + {.action = APPRAISE, .func = MODULE_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, + {.action = APPRAISE, .func = FIRMWARE_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, + {.action = APPRAISE, .func = KEXEC_KERNEL_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, + {.action = APPRAISE, .func = POLICY_CHECK, + .flags = IMA_FUNC | IMA_DIGSIG_REQUIRED}, +}; + +static LIST_HEAD(ima_default_rules); +static LIST_HEAD(ima_policy_rules); +static LIST_HEAD(ima_temp_rules); +static struct list_head *ima_rules = &ima_default_rules; + +static int ima_policy __initdata; + +static int __init default_measure_policy_setup(char *str) +{ + if (ima_policy) + return 1; + + ima_policy = ORIGINAL_TCB; + return 1; +} +__setup("ima_tcb", default_measure_policy_setup); + +static bool ima_use_appraise_tcb __initdata; +static bool ima_use_secure_boot __initdata; +static bool ima_fail_unverifiable_sigs __ro_after_init; +static int __init policy_setup(char *str) +{ + char *p; + + while ((p = strsep(&str, " |\n")) != NULL) { + if (*p == ' ') + continue; + if ((strcmp(p, "tcb") == 0) && !ima_policy) + ima_policy = DEFAULT_TCB; + else if (strcmp(p, "appraise_tcb") == 0) + ima_use_appraise_tcb = true; + else if (strcmp(p, "secure_boot") == 0) + ima_use_secure_boot = true; + else if (strcmp(p, "fail_securely") == 0) + ima_fail_unverifiable_sigs = true; + } + + return 1; +} +__setup("ima_policy=", policy_setup); + +static int __init default_appraise_policy_setup(char *str) +{ + ima_use_appraise_tcb = true; + return 1; +} +__setup("ima_appraise_tcb", default_appraise_policy_setup); + +/* + * The LSM policy can be reloaded, leaving the IMA LSM based rules referring + * to the old, stale LSM policy. Update the IMA LSM based rules to reflect + * the reloaded LSM policy. We assume the rules still exist; and BUG_ON() if + * they don't. + */ +static void ima_lsm_update_rules(void) +{ + struct ima_rule_entry *entry; + int result; + int i; + + list_for_each_entry(entry, &ima_policy_rules, list) { + for (i = 0; i < MAX_LSM_RULES; i++) { + if (!entry->lsm[i].rule) + continue; + result = security_filter_rule_init(entry->lsm[i].type, + Audit_equal, + entry->lsm[i].args_p, + &entry->lsm[i].rule); + BUG_ON(!entry->lsm[i].rule); + } + } +} + +/** + * ima_match_rules - determine whether an inode matches the measure rule. 
+ * @rule: a pointer to a rule + * @inode: a pointer to an inode + * @cred: a pointer to a credentials structure for user validation + * @secid: the secid of the task to be validated + * @func: LIM hook identifier + * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC) + * + * Returns true on rule match, false on failure. + */ +static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode, + const struct cred *cred, u32 secid, + enum ima_hooks func, int mask) +{ + int i; + + if ((rule->flags & IMA_FUNC) && + (rule->func != func && func != POST_SETATTR)) + return false; + if ((rule->flags & IMA_MASK) && + (rule->mask != mask && func != POST_SETATTR)) + return false; + if ((rule->flags & IMA_INMASK) && + (!(rule->mask & mask) && func != POST_SETATTR)) + return false; + if ((rule->flags & IMA_FSMAGIC) + && rule->fsmagic != inode->i_sb->s_magic) + return false; + if ((rule->flags & IMA_FSNAME) + && strcmp(rule->fsname, inode->i_sb->s_type->name)) + return false; + if ((rule->flags & IMA_FSUUID) && + !uuid_equal(&rule->fsuuid, &inode->i_sb->s_uuid)) + return false; + if ((rule->flags & IMA_UID) && !rule->uid_op(cred->uid, rule->uid)) + return false; + if (rule->flags & IMA_EUID) { + if (has_capability_noaudit(current, CAP_SETUID)) { + if (!rule->uid_op(cred->euid, rule->uid) + && !rule->uid_op(cred->suid, rule->uid) + && !rule->uid_op(cred->uid, rule->uid)) + return false; + } else if (!rule->uid_op(cred->euid, rule->uid)) + return false; + } + + if ((rule->flags & IMA_FOWNER) && + !rule->fowner_op(inode->i_uid, rule->fowner)) + return false; + for (i = 0; i < MAX_LSM_RULES; i++) { + int rc = 0; + u32 osid; + int retried = 0; + + if (!rule->lsm[i].rule) + continue; +retry: + switch (i) { + case LSM_OBJ_USER: + case LSM_OBJ_ROLE: + case LSM_OBJ_TYPE: + security_inode_getsecid(inode, &osid); + rc = security_filter_rule_match(osid, + rule->lsm[i].type, + Audit_equal, + rule->lsm[i].rule, + NULL); + break; + case LSM_SUBJ_USER: + case LSM_SUBJ_ROLE: + case LSM_SUBJ_TYPE: + rc = security_filter_rule_match(secid, + rule->lsm[i].type, + Audit_equal, + rule->lsm[i].rule, + NULL); + default: + break; + } + if ((rc < 0) && (!retried)) { + retried = 1; + ima_lsm_update_rules(); + goto retry; + } + if (!rc) + return false; + } + return true; +} + +/* + * In addition to knowing that we need to appraise the file in general, + * we need to differentiate between calling hooks, for hook specific rules. + */ +static int get_subaction(struct ima_rule_entry *rule, enum ima_hooks func) +{ + if (!(rule->flags & IMA_FUNC)) + return IMA_FILE_APPRAISE; + + switch (func) { + case MMAP_CHECK: + return IMA_MMAP_APPRAISE; + case BPRM_CHECK: + return IMA_BPRM_APPRAISE; + case CREDS_CHECK: + return IMA_CREDS_APPRAISE; + case FILE_CHECK: + case POST_SETATTR: + return IMA_FILE_APPRAISE; + case MODULE_CHECK ... MAX_CHECK - 1: + default: + return IMA_READ_APPRAISE; + } +} + +/** + * ima_match_policy - decision based on LSM and other conditions + * @inode: pointer to an inode for which the policy decision is being made + * @cred: pointer to a credentials structure for which the policy decision is + * being made + * @secid: LSM secid of the task to be validated + * @func: IMA hook identifier + * @mask: requested action (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC) + * @pcr: set the pcr to extend + * + * Measure decision based on func/mask/fsmagic and LSM(subj/obj/type) + * conditions. + * + * Since the IMA policy may be updated multiple times we need to lock the + * list when walking it. 
Reads are many orders of magnitude more numerous + * than writes so ima_match_policy() is classical RCU candidate. + */ +int ima_match_policy(struct inode *inode, const struct cred *cred, u32 secid, + enum ima_hooks func, int mask, int flags, int *pcr) +{ + struct ima_rule_entry *entry; + int action = 0, actmask = flags | (flags << 1); + + rcu_read_lock(); + list_for_each_entry_rcu(entry, ima_rules, list) { + + if (!(entry->action & actmask)) + continue; + + if (!ima_match_rules(entry, inode, cred, secid, func, mask)) + continue; + + action |= entry->flags & IMA_ACTION_FLAGS; + + action |= entry->action & IMA_DO_MASK; + if (entry->action & IMA_APPRAISE) { + action |= get_subaction(entry, func); + action &= ~IMA_HASH; + if (ima_fail_unverifiable_sigs) + action |= IMA_FAIL_UNVERIFIABLE_SIGS; + } + + if (entry->action & IMA_DO_MASK) + actmask &= ~(entry->action | entry->action << 1); + else + actmask &= ~(entry->action | entry->action >> 1); + + if ((pcr) && (entry->flags & IMA_PCR)) + *pcr = entry->pcr; + + if (!actmask) + break; + } + rcu_read_unlock(); + + return action; +} + +/* + * Initialize the ima_policy_flag variable based on the currently + * loaded policy. Based on this flag, the decision to short circuit + * out of a function or not call the function in the first place + * can be made earlier. + */ +void ima_update_policy_flag(void) +{ + struct ima_rule_entry *entry; + + list_for_each_entry(entry, ima_rules, list) { + if (entry->action & IMA_DO_MASK) + ima_policy_flag |= entry->action; + } + + ima_appraise |= (build_ima_appraise | temp_ima_appraise); + if (!ima_appraise) + ima_policy_flag &= ~IMA_APPRAISE; +} + +static int ima_appraise_flag(enum ima_hooks func) +{ + if (func == MODULE_CHECK) + return IMA_APPRAISE_MODULES; + else if (func == FIRMWARE_CHECK) + return IMA_APPRAISE_FIRMWARE; + else if (func == POLICY_CHECK) + return IMA_APPRAISE_POLICY; + else if (func == KEXEC_KERNEL_CHECK) + return IMA_APPRAISE_KEXEC; + return 0; +} + +/** + * ima_init_policy - initialize the default measure rules. + * + * ima_rules points to either the ima_default_rules or the + * the new ima_policy_rules. + */ +void __init ima_init_policy(void) +{ + int i, measure_entries, appraise_entries, secure_boot_entries; + + /* if !ima_policy set entries = 0 so we load NO default rules */ + measure_entries = ima_policy ? ARRAY_SIZE(dont_measure_rules) : 0; + appraise_entries = ima_use_appraise_tcb ? + ARRAY_SIZE(default_appraise_rules) : 0; + secure_boot_entries = ima_use_secure_boot ? + ARRAY_SIZE(secure_boot_rules) : 0; + + for (i = 0; i < measure_entries; i++) + list_add_tail(&dont_measure_rules[i].list, &ima_default_rules); + + switch (ima_policy) { + case ORIGINAL_TCB: + for (i = 0; i < ARRAY_SIZE(original_measurement_rules); i++) + list_add_tail(&original_measurement_rules[i].list, + &ima_default_rules); + break; + case DEFAULT_TCB: + for (i = 0; i < ARRAY_SIZE(default_measurement_rules); i++) + list_add_tail(&default_measurement_rules[i].list, + &ima_default_rules); + default: + break; + } + + /* + * Insert the builtin "secure_boot" policy rules requiring file + * signatures, prior to any other appraise rules. + */ + for (i = 0; i < secure_boot_entries; i++) { + list_add_tail(&secure_boot_rules[i].list, &ima_default_rules); + temp_ima_appraise |= + ima_appraise_flag(secure_boot_rules[i].func); + } + + /* + * Insert the build time appraise rules requiring file signatures + * for both the initial and custom policies, prior to other appraise + * rules. 
+ */ + for (i = 0; i < ARRAY_SIZE(build_appraise_rules); i++) { + struct ima_rule_entry *entry; + + if (!secure_boot_entries) + list_add_tail(&build_appraise_rules[i].list, + &ima_default_rules); + + entry = kmemdup(&build_appraise_rules[i], sizeof(*entry), + GFP_KERNEL); + if (entry) + list_add_tail(&entry->list, &ima_policy_rules); + build_ima_appraise |= + ima_appraise_flag(build_appraise_rules[i].func); + } + + for (i = 0; i < appraise_entries; i++) { + list_add_tail(&default_appraise_rules[i].list, + &ima_default_rules); + if (default_appraise_rules[i].func == POLICY_CHECK) + temp_ima_appraise |= IMA_APPRAISE_POLICY; + } + + ima_update_policy_flag(); +} + +/* Make sure we have a valid policy, at least containing some rules. */ +int ima_check_policy(void) +{ + if (list_empty(&ima_temp_rules)) + return -EINVAL; + return 0; +} + +/** + * ima_update_policy - update default_rules with new measure rules + * + * Called on file .release to update the default rules with a complete new + * policy. What we do here is to splice ima_policy_rules and ima_temp_rules so + * they make a queue. The policy may be updated multiple times and this is the + * RCU updater. + * + * Policy rules are never deleted so ima_policy_flag gets zeroed only once when + * we switch from the default policy to user defined. + */ +void ima_update_policy(void) +{ + struct list_head *policy = &ima_policy_rules; + + list_splice_tail_init_rcu(&ima_temp_rules, policy, synchronize_rcu); + + if (ima_rules != policy) { + ima_policy_flag = 0; + ima_rules = policy; + } + ima_update_policy_flag(); +} + +enum { + Opt_err = -1, + Opt_measure = 1, Opt_dont_measure, + Opt_appraise, Opt_dont_appraise, + Opt_audit, Opt_hash, Opt_dont_hash, + Opt_obj_user, Opt_obj_role, Opt_obj_type, + Opt_subj_user, Opt_subj_role, Opt_subj_type, + Opt_func, Opt_mask, Opt_fsmagic, Opt_fsname, + Opt_fsuuid, Opt_uid_eq, Opt_euid_eq, Opt_fowner_eq, + Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt, + Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt, + Opt_appraise_type, Opt_permit_directio, + Opt_pcr +}; + +static match_table_t policy_tokens = { + {Opt_measure, "measure"}, + {Opt_dont_measure, "dont_measure"}, + {Opt_appraise, "appraise"}, + {Opt_dont_appraise, "dont_appraise"}, + {Opt_audit, "audit"}, + {Opt_hash, "hash"}, + {Opt_dont_hash, "dont_hash"}, + {Opt_obj_user, "obj_user=%s"}, + {Opt_obj_role, "obj_role=%s"}, + {Opt_obj_type, "obj_type=%s"}, + {Opt_subj_user, "subj_user=%s"}, + {Opt_subj_role, "subj_role=%s"}, + {Opt_subj_type, "subj_type=%s"}, + {Opt_func, "func=%s"}, + {Opt_mask, "mask=%s"}, + {Opt_fsmagic, "fsmagic=%s"}, + {Opt_fsname, "fsname=%s"}, + {Opt_fsuuid, "fsuuid=%s"}, + {Opt_uid_eq, "uid=%s"}, + {Opt_euid_eq, "euid=%s"}, + {Opt_fowner_eq, "fowner=%s"}, + {Opt_uid_gt, "uid>%s"}, + {Opt_euid_gt, "euid>%s"}, + {Opt_fowner_gt, "fowner>%s"}, + {Opt_uid_lt, "uid<%s"}, + {Opt_euid_lt, "euid<%s"}, + {Opt_fowner_lt, "fowner<%s"}, + {Opt_appraise_type, "appraise_type=%s"}, + {Opt_permit_directio, "permit_directio"}, + {Opt_pcr, "pcr=%s"}, + {Opt_err, NULL} +}; + +static int ima_lsm_rule_init(struct ima_rule_entry *entry, + substring_t *args, int lsm_rule, int audit_type) +{ + int result; + + if (entry->lsm[lsm_rule].rule) + return -EINVAL; + + entry->lsm[lsm_rule].args_p = match_strdup(args); + if (!entry->lsm[lsm_rule].args_p) + return -ENOMEM; + + entry->lsm[lsm_rule].type = audit_type; + result = security_filter_rule_init(entry->lsm[lsm_rule].type, + Audit_equal, + entry->lsm[lsm_rule].args_p, + &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) { + 
kfree(entry->lsm[lsm_rule].args_p); + return -EINVAL; + } + + return result; +} + +static void ima_log_string_op(struct audit_buffer *ab, char *key, char *value, + bool (*rule_operator)(kuid_t, kuid_t)) +{ + if (!ab) + return; + + if (rule_operator == &uid_gt) + audit_log_format(ab, "%s>", key); + else if (rule_operator == &uid_lt) + audit_log_format(ab, "%s<", key); + else + audit_log_format(ab, "%s=", key); + audit_log_format(ab, "%s ", value); +} +static void ima_log_string(struct audit_buffer *ab, char *key, char *value) +{ + ima_log_string_op(ab, key, value, NULL); +} + +static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) +{ + struct audit_buffer *ab; + char *from; + char *p; + bool uid_token; + int result = 0; + + ab = integrity_audit_log_start(audit_context(), GFP_KERNEL, + AUDIT_INTEGRITY_POLICY_RULE); + + entry->uid = INVALID_UID; + entry->fowner = INVALID_UID; + entry->uid_op = &uid_eq; + entry->fowner_op = &uid_eq; + entry->action = UNKNOWN; + while ((p = strsep(&rule, " \t")) != NULL) { + substring_t args[MAX_OPT_ARGS]; + int token; + unsigned long lnum; + + if (result < 0) + break; + if ((*p == '\0') || (*p == ' ') || (*p == '\t')) + continue; + token = match_token(p, policy_tokens, args); + switch (token) { + case Opt_measure: + ima_log_string(ab, "action", "measure"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = MEASURE; + break; + case Opt_dont_measure: + ima_log_string(ab, "action", "dont_measure"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = DONT_MEASURE; + break; + case Opt_appraise: + ima_log_string(ab, "action", "appraise"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = APPRAISE; + break; + case Opt_dont_appraise: + ima_log_string(ab, "action", "dont_appraise"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = DONT_APPRAISE; + break; + case Opt_audit: + ima_log_string(ab, "action", "audit"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = AUDIT; + break; + case Opt_hash: + ima_log_string(ab, "action", "hash"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = HASH; + break; + case Opt_dont_hash: + ima_log_string(ab, "action", "dont_hash"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = DONT_HASH; + break; + case Opt_func: + ima_log_string(ab, "func", args[0].from); + + if (entry->func) + result = -EINVAL; + + if (strcmp(args[0].from, "FILE_CHECK") == 0) + entry->func = FILE_CHECK; + /* PATH_CHECK is for backwards compat */ + else if (strcmp(args[0].from, "PATH_CHECK") == 0) + entry->func = FILE_CHECK; + else if (strcmp(args[0].from, "MODULE_CHECK") == 0) + entry->func = MODULE_CHECK; + else if (strcmp(args[0].from, "FIRMWARE_CHECK") == 0) + entry->func = FIRMWARE_CHECK; + else if ((strcmp(args[0].from, "FILE_MMAP") == 0) + || (strcmp(args[0].from, "MMAP_CHECK") == 0)) + entry->func = MMAP_CHECK; + else if (strcmp(args[0].from, "BPRM_CHECK") == 0) + entry->func = BPRM_CHECK; + else if (strcmp(args[0].from, "CREDS_CHECK") == 0) + entry->func = CREDS_CHECK; + else if (strcmp(args[0].from, "KEXEC_KERNEL_CHECK") == + 0) + entry->func = KEXEC_KERNEL_CHECK; + else if (strcmp(args[0].from, "KEXEC_INITRAMFS_CHECK") + == 0) + entry->func = KEXEC_INITRAMFS_CHECK; + else if (strcmp(args[0].from, "POLICY_CHECK") == 0) + entry->func = POLICY_CHECK; + else + result = -EINVAL; + if (!result) + entry->flags |= IMA_FUNC; + break; + case Opt_mask: + ima_log_string(ab, "mask", 
args[0].from); + + if (entry->mask) + result = -EINVAL; + + from = args[0].from; + if (*from == '^') + from++; + + if ((strcmp(from, "MAY_EXEC")) == 0) + entry->mask = MAY_EXEC; + else if (strcmp(from, "MAY_WRITE") == 0) + entry->mask = MAY_WRITE; + else if (strcmp(from, "MAY_READ") == 0) + entry->mask = MAY_READ; + else if (strcmp(from, "MAY_APPEND") == 0) + entry->mask = MAY_APPEND; + else + result = -EINVAL; + if (!result) + entry->flags |= (*args[0].from == '^') + ? IMA_INMASK : IMA_MASK; + break; + case Opt_fsmagic: + ima_log_string(ab, "fsmagic", args[0].from); + + if (entry->fsmagic) { + result = -EINVAL; + break; + } + + result = kstrtoul(args[0].from, 16, &entry->fsmagic); + if (!result) + entry->flags |= IMA_FSMAGIC; + break; + case Opt_fsname: + ima_log_string(ab, "fsname", args[0].from); + + entry->fsname = kstrdup(args[0].from, GFP_KERNEL); + if (!entry->fsname) { + result = -ENOMEM; + break; + } + result = 0; + entry->flags |= IMA_FSNAME; + break; + case Opt_fsuuid: + ima_log_string(ab, "fsuuid", args[0].from); + + if (!uuid_is_null(&entry->fsuuid)) { + result = -EINVAL; + break; + } + + result = uuid_parse(args[0].from, &entry->fsuuid); + if (!result) + entry->flags |= IMA_FSUUID; + break; + case Opt_uid_gt: + case Opt_euid_gt: + entry->uid_op = &uid_gt; + case Opt_uid_lt: + case Opt_euid_lt: + if ((token == Opt_uid_lt) || (token == Opt_euid_lt)) + entry->uid_op = &uid_lt; + case Opt_uid_eq: + case Opt_euid_eq: + uid_token = (token == Opt_uid_eq) || + (token == Opt_uid_gt) || + (token == Opt_uid_lt); + + ima_log_string_op(ab, uid_token ? "uid" : "euid", + args[0].from, entry->uid_op); + + if (uid_valid(entry->uid)) { + result = -EINVAL; + break; + } + + result = kstrtoul(args[0].from, 10, &lnum); + if (!result) { + entry->uid = make_kuid(current_user_ns(), + (uid_t) lnum); + if (!uid_valid(entry->uid) || + (uid_t)lnum != lnum) + result = -EINVAL; + else + entry->flags |= uid_token + ? 
IMA_UID : IMA_EUID; + } + break; + case Opt_fowner_gt: + entry->fowner_op = &uid_gt; + case Opt_fowner_lt: + if (token == Opt_fowner_lt) + entry->fowner_op = &uid_lt; + case Opt_fowner_eq: + ima_log_string_op(ab, "fowner", args[0].from, + entry->fowner_op); + + if (uid_valid(entry->fowner)) { + result = -EINVAL; + break; + } + + result = kstrtoul(args[0].from, 10, &lnum); + if (!result) { + entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum); + if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum)) + result = -EINVAL; + else + entry->flags |= IMA_FOWNER; + } + break; + case Opt_obj_user: + ima_log_string(ab, "obj_user", args[0].from); + result = ima_lsm_rule_init(entry, args, + LSM_OBJ_USER, + AUDIT_OBJ_USER); + break; + case Opt_obj_role: + ima_log_string(ab, "obj_role", args[0].from); + result = ima_lsm_rule_init(entry, args, + LSM_OBJ_ROLE, + AUDIT_OBJ_ROLE); + break; + case Opt_obj_type: + ima_log_string(ab, "obj_type", args[0].from); + result = ima_lsm_rule_init(entry, args, + LSM_OBJ_TYPE, + AUDIT_OBJ_TYPE); + break; + case Opt_subj_user: + ima_log_string(ab, "subj_user", args[0].from); + result = ima_lsm_rule_init(entry, args, + LSM_SUBJ_USER, + AUDIT_SUBJ_USER); + break; + case Opt_subj_role: + ima_log_string(ab, "subj_role", args[0].from); + result = ima_lsm_rule_init(entry, args, + LSM_SUBJ_ROLE, + AUDIT_SUBJ_ROLE); + break; + case Opt_subj_type: + ima_log_string(ab, "subj_type", args[0].from); + result = ima_lsm_rule_init(entry, args, + LSM_SUBJ_TYPE, + AUDIT_SUBJ_TYPE); + break; + case Opt_appraise_type: + if (entry->action != APPRAISE) { + result = -EINVAL; + break; + } + + ima_log_string(ab, "appraise_type", args[0].from); + if ((strcmp(args[0].from, "imasig")) == 0) + entry->flags |= IMA_DIGSIG_REQUIRED; + else + result = -EINVAL; + break; + case Opt_permit_directio: + entry->flags |= IMA_PERMIT_DIRECTIO; + break; + case Opt_pcr: + if (entry->action != MEASURE) { + result = -EINVAL; + break; + } + ima_log_string(ab, "pcr", args[0].from); + + result = kstrtoint(args[0].from, 10, &entry->pcr); + if (result || INVALID_PCR(entry->pcr)) + result = -EINVAL; + else + entry->flags |= IMA_PCR; + + break; + case Opt_err: + ima_log_string(ab, "UNKNOWN", p); + result = -EINVAL; + break; + } + } + if (!result && (entry->action == UNKNOWN)) + result = -EINVAL; + else if (entry->action == APPRAISE) + temp_ima_appraise |= ima_appraise_flag(entry->func); + + audit_log_format(ab, "res=%d", !result); + audit_log_end(ab); + return result; +} + +/** + * ima_parse_add_rule - add a rule to ima_policy_rules + * @rule - ima measurement policy rule + * + * Avoid locking by allowing just one writer at a time in ima_write_policy() + * Returns the length of the rule parsed, an error code on failure + */ +ssize_t ima_parse_add_rule(char *rule) +{ + static const char op[] = "update_policy"; + char *p; + struct ima_rule_entry *entry; + ssize_t result, len; + int audit_info = 0; + + p = strsep(&rule, "\n"); + len = strlen(p) + 1; + p += strspn(p, " \t"); + + if (*p == '#' || *p == '\0') + return len; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, + NULL, op, "-ENOMEM", -ENOMEM, audit_info); + return -ENOMEM; + } + + INIT_LIST_HEAD(&entry->list); + + result = ima_parse_rule(p, entry); + if (result) { + kfree(entry); + integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, + NULL, op, "invalid-policy", result, + audit_info); + return result; + } + + list_add_tail(&entry->list, &ima_temp_rules); + + return len; +} + +/** + * 
ima_delete_rules() called to cleanup invalid in-flight policy. + * We don't need locking as we operate on the temp list, which is + * different from the active one. There is also only one user of + * ima_delete_rules() at a time. + */ +void ima_delete_rules(void) +{ + struct ima_rule_entry *entry, *tmp; + int i; + + temp_ima_appraise = 0; + list_for_each_entry_safe(entry, tmp, &ima_temp_rules, list) { + for (i = 0; i < MAX_LSM_RULES; i++) + kfree(entry->lsm[i].args_p); + + list_del(&entry->list); + kfree(entry); + } +} + +#ifdef CONFIG_IMA_READ_POLICY +enum { + mask_exec = 0, mask_write, mask_read, mask_append +}; + +static const char *const mask_tokens[] = { + "^MAY_EXEC", + "^MAY_WRITE", + "^MAY_READ", + "^MAY_APPEND" +}; + +#define __ima_hook_stringify(str) (#str), + +static const char *const func_tokens[] = { + __ima_hooks(__ima_hook_stringify) +}; + +void *ima_policy_start(struct seq_file *m, loff_t *pos) +{ + loff_t l = *pos; + struct ima_rule_entry *entry; + + rcu_read_lock(); + list_for_each_entry_rcu(entry, ima_rules, list) { + if (!l--) { + rcu_read_unlock(); + return entry; + } + } + rcu_read_unlock(); + return NULL; +} + +void *ima_policy_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct ima_rule_entry *entry = v; + + rcu_read_lock(); + entry = list_entry_rcu(entry->list.next, struct ima_rule_entry, list); + rcu_read_unlock(); + (*pos)++; + + return (&entry->list == ima_rules) ? NULL : entry; +} + +void ima_policy_stop(struct seq_file *m, void *v) +{ +} + +#define pt(token) policy_tokens[token + Opt_err].pattern +#define mt(token) mask_tokens[token] + +/* + * policy_func_show - display the ima_hooks policy rule + */ +static void policy_func_show(struct seq_file *m, enum ima_hooks func) +{ + if (func > 0 && func < MAX_CHECK) + seq_printf(m, "func=%s ", func_tokens[func]); + else + seq_printf(m, "func=%d ", func); +} + +int ima_policy_show(struct seq_file *m, void *v) +{ + struct ima_rule_entry *entry = v; + int i; + char tbuf[64] = {0,}; + int offset = 0; + + rcu_read_lock(); + + if (entry->action & MEASURE) + seq_puts(m, pt(Opt_measure)); + if (entry->action & DONT_MEASURE) + seq_puts(m, pt(Opt_dont_measure)); + if (entry->action & APPRAISE) + seq_puts(m, pt(Opt_appraise)); + if (entry->action & DONT_APPRAISE) + seq_puts(m, pt(Opt_dont_appraise)); + if (entry->action & AUDIT) + seq_puts(m, pt(Opt_audit)); + if (entry->action & HASH) + seq_puts(m, pt(Opt_hash)); + if (entry->action & DONT_HASH) + seq_puts(m, pt(Opt_dont_hash)); + + seq_puts(m, " "); + + if (entry->flags & IMA_FUNC) + policy_func_show(m, entry->func); + + if ((entry->flags & IMA_MASK) || (entry->flags & IMA_INMASK)) { + if (entry->flags & IMA_MASK) + offset = 1; + if (entry->mask & MAY_EXEC) + seq_printf(m, pt(Opt_mask), mt(mask_exec) + offset); + if (entry->mask & MAY_WRITE) + seq_printf(m, pt(Opt_mask), mt(mask_write) + offset); + if (entry->mask & MAY_READ) + seq_printf(m, pt(Opt_mask), mt(mask_read) + offset); + if (entry->mask & MAY_APPEND) + seq_printf(m, pt(Opt_mask), mt(mask_append) + offset); + seq_puts(m, " "); + } + + if (entry->flags & IMA_FSMAGIC) { + snprintf(tbuf, sizeof(tbuf), "0x%lx", entry->fsmagic); + seq_printf(m, pt(Opt_fsmagic), tbuf); + seq_puts(m, " "); + } + + if (entry->flags & IMA_FSNAME) { + snprintf(tbuf, sizeof(tbuf), "%s", entry->fsname); + seq_printf(m, pt(Opt_fsname), tbuf); + seq_puts(m, " "); + } + + if (entry->flags & IMA_PCR) { + snprintf(tbuf, sizeof(tbuf), "%d", entry->pcr); + seq_printf(m, pt(Opt_pcr), tbuf); + seq_puts(m, " "); + } + + if (entry->flags & 
IMA_FSUUID) { + seq_printf(m, "fsuuid=%pU", &entry->fsuuid); + seq_puts(m, " "); + } + + if (entry->flags & IMA_UID) { + snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid)); + if (entry->uid_op == &uid_gt) + seq_printf(m, pt(Opt_uid_gt), tbuf); + else if (entry->uid_op == &uid_lt) + seq_printf(m, pt(Opt_uid_lt), tbuf); + else + seq_printf(m, pt(Opt_uid_eq), tbuf); + seq_puts(m, " "); + } + + if (entry->flags & IMA_EUID) { + snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->uid)); + if (entry->uid_op == &uid_gt) + seq_printf(m, pt(Opt_euid_gt), tbuf); + else if (entry->uid_op == &uid_lt) + seq_printf(m, pt(Opt_euid_lt), tbuf); + else + seq_printf(m, pt(Opt_euid_eq), tbuf); + seq_puts(m, " "); + } + + if (entry->flags & IMA_FOWNER) { + snprintf(tbuf, sizeof(tbuf), "%d", __kuid_val(entry->fowner)); + if (entry->fowner_op == &uid_gt) + seq_printf(m, pt(Opt_fowner_gt), tbuf); + else if (entry->fowner_op == &uid_lt) + seq_printf(m, pt(Opt_fowner_lt), tbuf); + else + seq_printf(m, pt(Opt_fowner_eq), tbuf); + seq_puts(m, " "); + } + + for (i = 0; i < MAX_LSM_RULES; i++) { + if (entry->lsm[i].rule) { + switch (i) { + case LSM_OBJ_USER: + seq_printf(m, pt(Opt_obj_user), + (char *)entry->lsm[i].args_p); + break; + case LSM_OBJ_ROLE: + seq_printf(m, pt(Opt_obj_role), + (char *)entry->lsm[i].args_p); + break; + case LSM_OBJ_TYPE: + seq_printf(m, pt(Opt_obj_type), + (char *)entry->lsm[i].args_p); + break; + case LSM_SUBJ_USER: + seq_printf(m, pt(Opt_subj_user), + (char *)entry->lsm[i].args_p); + break; + case LSM_SUBJ_ROLE: + seq_printf(m, pt(Opt_subj_role), + (char *)entry->lsm[i].args_p); + break; + case LSM_SUBJ_TYPE: + seq_printf(m, pt(Opt_subj_type), + (char *)entry->lsm[i].args_p); + break; + } + } + } + if (entry->flags & IMA_DIGSIG_REQUIRED) + seq_puts(m, "appraise_type=imasig "); + if (entry->flags & IMA_PERMIT_DIRECTIO) + seq_puts(m, "permit_directio "); + rcu_read_unlock(); + seq_puts(m, "\n"); + return 0; +} +#endif /* CONFIG_IMA_READ_POLICY */ diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c new file mode 100644 index 000000000..b186819bd --- /dev/null +++ b/security/integrity/ima/ima_queue.c @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2005,2006,2007,2008 IBM Corporation + * + * Authors: + * Serge Hallyn <serue@us.ibm.com> + * Reiner Sailer <sailer@watson.ibm.com> + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_queue.c + * Implements queues that store template measurements and + * maintains aggregate over the stored measurements + * in the pre-configured TPM PCR (if available). + * The measurement list is append-only. No entry is + * ever removed or changed during the boot-cycle. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/rculist.h> +#include <linux/slab.h> +#include "ima.h" + +#define AUDIT_CAUSE_LEN_MAX 32 + +LIST_HEAD(ima_measurements); /* list of all measurements */ +#ifdef CONFIG_IMA_KEXEC +static unsigned long binary_runtime_size; +#else +static unsigned long binary_runtime_size = ULONG_MAX; +#endif + +/* key: inode (before secure-hashing a file) */ +struct ima_h_table ima_htable = { + .len = ATOMIC_LONG_INIT(0), + .violations = ATOMIC_LONG_INIT(0), + .queue[0 ... 
IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT +}; + +/* mutex protects atomicity of extending measurement list + * and extending the TPM PCR aggregate. Since tpm_extend can take + * long (and the tpm driver uses a mutex), we can't use the spinlock. + */ +static DEFINE_MUTEX(ima_extend_list_mutex); + +/* lookup up the digest value in the hash table, and return the entry */ +static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value, + int pcr) +{ + struct ima_queue_entry *qe, *ret = NULL; + unsigned int key; + int rc; + + key = ima_hash_key(digest_value); + rcu_read_lock(); + hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { + rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE); + if ((rc == 0) && (qe->entry->pcr == pcr)) { + ret = qe; + break; + } + } + rcu_read_unlock(); + return ret; +} + +/* + * Calculate the memory required for serializing a single + * binary_runtime_measurement list entry, which contains a + * couple of variable length fields (e.g template name and data). + */ +static int get_binary_runtime_size(struct ima_template_entry *entry) +{ + int size = 0; + + size += sizeof(u32); /* pcr */ + size += sizeof(entry->digest); + size += sizeof(int); /* template name size field */ + size += strlen(entry->template_desc->name); + size += sizeof(entry->template_data_len); + size += entry->template_data_len; + return size; +} + +/* ima_add_template_entry helper function: + * - Add template entry to the measurement list and hash table, for + * all entries except those carried across kexec. + * + * (Called with ima_extend_list_mutex held.) + */ +static int ima_add_digest_entry(struct ima_template_entry *entry, + bool update_htable) +{ + struct ima_queue_entry *qe; + unsigned int key; + + qe = kmalloc(sizeof(*qe), GFP_KERNEL); + if (qe == NULL) { + pr_err("OUT OF MEMORY ERROR creating queue entry\n"); + return -ENOMEM; + } + qe->entry = entry; + + INIT_LIST_HEAD(&qe->later); + list_add_tail_rcu(&qe->later, &ima_measurements); + + atomic_long_inc(&ima_htable.len); + if (update_htable) { + key = ima_hash_key(entry->digest); + hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); + } + + if (binary_runtime_size != ULONG_MAX) { + int size; + + size = get_binary_runtime_size(entry); + binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ? + binary_runtime_size + size : ULONG_MAX; + } + return 0; +} + +/* + * Return the amount of memory required for serializing the + * entire binary_runtime_measurement list, including the ima_kexec_hdr + * structure. + */ +unsigned long ima_get_binary_runtime_size(void) +{ + if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr))) + return ULONG_MAX; + else + return binary_runtime_size + sizeof(struct ima_kexec_hdr); +}; + +static int ima_pcr_extend(const u8 *hash, int pcr) +{ + int result = 0; + + if (!ima_tpm_chip) + return result; + + result = tpm_pcr_extend(ima_tpm_chip, pcr, hash); + if (result != 0) + pr_err("Error Communicating to TPM chip, result: %d\n", result); + return result; +} + +/* + * Add template entry to the measurement list and hash table, and + * extend the pcr. + * + * On systems which support carrying the IMA measurement list across + * kexec, maintain the total memory size required for serializing the + * binary_runtime_measurements. 
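 *
 * Editor's note -- illustrative only, not part of the original patch:
 * the per-entry serialized layout that binary_runtime_size accounts for
 * mirrors get_binary_runtime_size() above, roughly
 *
 *	u32 pcr | u8 digest[TPM_DIGEST_SIZE] |
 *	u32 template-name-len | template-name |
 *	u32 template-data-len | template-data
 *
 * so the running total is simply the sum of these fields for every
 * measurement added while CONFIG_IMA_KEXEC is enabled (otherwise the
 * size stays pinned at ULONG_MAX and no accounting is done).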
+ */ +int ima_add_template_entry(struct ima_template_entry *entry, int violation, + const char *op, struct inode *inode, + const unsigned char *filename) +{ + u8 digest[TPM_DIGEST_SIZE]; + const char *audit_cause = "hash_added"; + char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX]; + int audit_info = 1; + int result = 0, tpmresult = 0; + + mutex_lock(&ima_extend_list_mutex); + if (!violation) { + memcpy(digest, entry->digest, sizeof(digest)); + if (ima_lookup_digest_entry(digest, entry->pcr)) { + audit_cause = "hash_exists"; + result = -EEXIST; + goto out; + } + } + + result = ima_add_digest_entry(entry, 1); + if (result < 0) { + audit_cause = "ENOMEM"; + audit_info = 0; + goto out; + } + + if (violation) /* invalidate pcr */ + memset(digest, 0xff, sizeof(digest)); + + tpmresult = ima_pcr_extend(digest, entry->pcr); + if (tpmresult != 0) { + snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)", + tpmresult); + audit_cause = tpm_audit_cause; + audit_info = 0; + } +out: + mutex_unlock(&ima_extend_list_mutex); + integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, + op, audit_cause, result, audit_info); + return result; +} + +int ima_restore_measurement_entry(struct ima_template_entry *entry) +{ + int result = 0; + + mutex_lock(&ima_extend_list_mutex); + result = ima_add_digest_entry(entry, 0); + mutex_unlock(&ima_extend_list_mutex); + return result; +} diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c new file mode 100644 index 000000000..4dfdccce4 --- /dev/null +++ b/security/integrity/ima/ima_template.c @@ -0,0 +1,439 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template.c + * Helpers to manage template descriptors. 
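 *
 * Editor's note -- usage illustration, not part of the original patch:
 * the active template can be chosen on the kernel command line, e.g.
 *
 *	ima_template=ima-sig
 *	ima_template_fmt=d-ng|n-ng|sig
 *
 * handled by ima_template_setup() and ima_template_fmt_setup() below;
 * a custom format string is stored in the last (placeholder) slot of
 * builtin_templates[].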
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/rculist.h> +#include "ima.h" +#include "ima_template_lib.h" + +enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME, + HDR_TEMPLATE_DATA, HDR__LAST }; + +static struct ima_template_desc builtin_templates[] = { + {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT}, + {.name = "ima-ng", .fmt = "d-ng|n-ng"}, + {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"}, + {.name = "", .fmt = ""}, /* placeholder for a custom format */ +}; + +static LIST_HEAD(defined_templates); +static DEFINE_SPINLOCK(template_list); +static int template_setup_done; + +static struct ima_template_field supported_fields[] = { + {.field_id = "d", .field_init = ima_eventdigest_init, + .field_show = ima_show_template_digest}, + {.field_id = "n", .field_init = ima_eventname_init, + .field_show = ima_show_template_string}, + {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init, + .field_show = ima_show_template_digest_ng}, + {.field_id = "n-ng", .field_init = ima_eventname_ng_init, + .field_show = ima_show_template_string}, + {.field_id = "sig", .field_init = ima_eventsig_init, + .field_show = ima_show_template_sig}, +}; +#define MAX_TEMPLATE_NAME_LEN 15 + +static struct ima_template_desc *ima_template; +static struct ima_template_desc *lookup_template_desc(const char *name); +static int template_desc_init_fields(const char *template_fmt, + struct ima_template_field ***fields, + int *num_fields); + +static int __init ima_template_setup(char *str) +{ + struct ima_template_desc *template_desc; + int template_len = strlen(str); + + if (template_setup_done) + return 1; + + if (!ima_template) + ima_init_template_list(); + + /* + * Verify that a template with the supplied name exists. + * If not, use CONFIG_IMA_DEFAULT_TEMPLATE. + */ + template_desc = lookup_template_desc(str); + if (!template_desc) { + pr_err("template %s not found, using %s\n", + str, CONFIG_IMA_DEFAULT_TEMPLATE); + return 1; + } + + /* + * Verify whether the current hash algorithm is supported + * by the 'ima' template. + */ + if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 && + ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) { + pr_err("template does not support hash alg\n"); + return 1; + } + + ima_template = template_desc; + template_setup_done = 1; + return 1; +} +__setup("ima_template=", ima_template_setup); + +static int __init ima_template_fmt_setup(char *str) +{ + int num_templates = ARRAY_SIZE(builtin_templates); + + if (template_setup_done) + return 1; + + if (template_desc_init_fields(str, NULL, NULL) < 0) { + pr_err("format string '%s' not valid, using template %s\n", + str, CONFIG_IMA_DEFAULT_TEMPLATE); + return 1; + } + + builtin_templates[num_templates - 1].fmt = str; + ima_template = builtin_templates + num_templates - 1; + template_setup_done = 1; + + return 1; +} +__setup("ima_template_fmt=", ima_template_fmt_setup); + +static struct ima_template_desc *lookup_template_desc(const char *name) +{ + struct ima_template_desc *template_desc; + int found = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(template_desc, &defined_templates, list) { + if ((strcmp(template_desc->name, name) == 0) || + (strcmp(template_desc->fmt, name) == 0)) { + found = 1; + break; + } + } + rcu_read_unlock(); + return found ? 
template_desc : NULL; +} + +static struct ima_template_field *lookup_template_field(const char *field_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(supported_fields); i++) + if (strncmp(supported_fields[i].field_id, field_id, + IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0) + return &supported_fields[i]; + return NULL; +} + +static int template_fmt_size(const char *template_fmt) +{ + char c; + int template_fmt_len = strlen(template_fmt); + int i = 0, j = 0; + + while (i < template_fmt_len) { + c = template_fmt[i]; + if (c == '|') + j++; + i++; + } + + return j + 1; +} + +static int template_desc_init_fields(const char *template_fmt, + struct ima_template_field ***fields, + int *num_fields) +{ + const char *template_fmt_ptr; + struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX]; + int template_num_fields; + int i, len; + + if (num_fields && *num_fields > 0) /* already initialized? */ + return 0; + + template_num_fields = template_fmt_size(template_fmt); + + if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) { + pr_err("format string '%s' contains too many fields\n", + template_fmt); + return -EINVAL; + } + + for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields; + i++, template_fmt_ptr += len + 1) { + char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1]; + + len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr; + if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) { + pr_err("Invalid field with length %d\n", len); + return -EINVAL; + } + + memcpy(tmp_field_id, template_fmt_ptr, len); + tmp_field_id[len] = '\0'; + found_fields[i] = lookup_template_field(tmp_field_id); + if (!found_fields[i]) { + pr_err("field '%s' not found\n", tmp_field_id); + return -ENOENT; + } + } + + if (fields && num_fields) { + *fields = kmalloc_array(i, sizeof(*fields), GFP_KERNEL); + if (*fields == NULL) + return -ENOMEM; + + memcpy(*fields, found_fields, i * sizeof(*fields)); + *num_fields = i; + } + + return 0; +} + +void ima_init_template_list(void) +{ + int i; + + if (!list_empty(&defined_templates)) + return; + + spin_lock(&template_list); + for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) { + list_add_tail_rcu(&builtin_templates[i].list, + &defined_templates); + } + spin_unlock(&template_list); +} + +struct ima_template_desc *ima_template_desc_current(void) +{ + if (!ima_template) { + ima_init_template_list(); + ima_template = + lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE); + } + return ima_template; +} + +int __init ima_init_template(void) +{ + struct ima_template_desc *template = ima_template_desc_current(); + int result; + + result = template_desc_init_fields(template->fmt, + &(template->fields), + &(template->num_fields)); + if (result < 0) + pr_err("template %s init failed, result: %d\n", + (strlen(template->name) ? 
+ template->name : template->fmt), result); + + return result; +} + +static struct ima_template_desc *restore_template_fmt(char *template_name) +{ + struct ima_template_desc *template_desc = NULL; + int ret; + + ret = template_desc_init_fields(template_name, NULL, NULL); + if (ret < 0) { + pr_err("attempting to initialize the template \"%s\" failed\n", + template_name); + goto out; + } + + template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL); + if (!template_desc) + goto out; + + template_desc->name = ""; + template_desc->fmt = kstrdup(template_name, GFP_KERNEL); + if (!template_desc->fmt) + goto out; + + spin_lock(&template_list); + list_add_tail_rcu(&template_desc->list, &defined_templates); + spin_unlock(&template_list); +out: + return template_desc; +} + +static int ima_restore_template_data(struct ima_template_desc *template_desc, + void *template_data, + int template_data_size, + struct ima_template_entry **entry) +{ + int ret = 0; + int i; + + *entry = kzalloc(sizeof(**entry) + + template_desc->num_fields * sizeof(struct ima_field_data), + GFP_NOFS); + if (!*entry) + return -ENOMEM; + + ret = ima_parse_buf(template_data, template_data + template_data_size, + NULL, template_desc->num_fields, + (*entry)->template_data, NULL, NULL, + ENFORCE_FIELDS | ENFORCE_BUFEND, "template data"); + if (ret < 0) { + kfree(*entry); + return ret; + } + + (*entry)->template_desc = template_desc; + for (i = 0; i < template_desc->num_fields; i++) { + struct ima_field_data *field_data = &(*entry)->template_data[i]; + u8 *data = field_data->data; + + (*entry)->template_data[i].data = + kzalloc(field_data->len + 1, GFP_KERNEL); + if (!(*entry)->template_data[i].data) { + ret = -ENOMEM; + break; + } + memcpy((*entry)->template_data[i].data, data, field_data->len); + (*entry)->template_data_len += sizeof(field_data->len); + (*entry)->template_data_len += field_data->len; + } + + if (ret < 0) { + ima_free_template_entry(*entry); + *entry = NULL; + } + + return ret; +} + +/* Restore the serialized binary measurement list without extending PCRs. */ +int ima_restore_measurement_list(loff_t size, void *buf) +{ + char template_name[MAX_TEMPLATE_NAME_LEN]; + + struct ima_kexec_hdr *khdr = buf; + struct ima_field_data hdr[HDR__LAST] = { + [HDR_PCR] = {.len = sizeof(u32)}, + [HDR_DIGEST] = {.len = TPM_DIGEST_SIZE}, + }; + + void *bufp = buf + sizeof(*khdr); + void *bufendp; + struct ima_template_entry *entry; + struct ima_template_desc *template_desc; + DECLARE_BITMAP(hdr_mask, HDR__LAST); + unsigned long count = 0; + int ret = 0; + + if (!buf || size < sizeof(*khdr)) + return 0; + + if (ima_canonical_fmt) { + khdr->version = le16_to_cpu(khdr->version); + khdr->count = le64_to_cpu(khdr->count); + khdr->buffer_size = le64_to_cpu(khdr->buffer_size); + } + + if (khdr->version != 1) { + pr_err("attempting to restore a incompatible measurement list"); + return -EINVAL; + } + + if (khdr->count > ULONG_MAX - 1) { + pr_err("attempting to restore too many measurements"); + return -EINVAL; + } + + bitmap_zero(hdr_mask, HDR__LAST); + bitmap_set(hdr_mask, HDR_PCR, 1); + bitmap_set(hdr_mask, HDR_DIGEST, 1); + + /* + * ima kexec buffer prefix: version, buffer size, count + * v1 format: pcr, digest, template-name-len, template-name, + * template-data-size, template-data + */ + bufendp = buf + khdr->buffer_size; + while ((bufp < bufendp) && (count++ < khdr->count)) { + int enforce_mask = ENFORCE_FIELDS; + + enforce_mask |= (count == khdr->count) ? 
ENFORCE_BUFEND : 0; + ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL, + hdr_mask, enforce_mask, "entry header"); + if (ret < 0) + break; + + if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) { + pr_err("attempting to restore a template name that is too long\n"); + ret = -EINVAL; + break; + } + + /* template name is not null terminated */ + memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data, + hdr[HDR_TEMPLATE_NAME].len); + template_name[hdr[HDR_TEMPLATE_NAME].len] = 0; + + if (strcmp(template_name, "ima") == 0) { + pr_err("attempting to restore an unsupported template \"%s\" failed\n", + template_name); + ret = -EINVAL; + break; + } + + template_desc = lookup_template_desc(template_name); + if (!template_desc) { + template_desc = restore_template_fmt(template_name); + if (!template_desc) + break; + } + + /* + * Only the running system's template format is initialized + * on boot. As needed, initialize the other template formats. + */ + ret = template_desc_init_fields(template_desc->fmt, + &(template_desc->fields), + &(template_desc->num_fields)); + if (ret < 0) { + pr_err("attempting to restore the template fmt \"%s\" failed\n", + template_desc->fmt); + ret = -EINVAL; + break; + } + + ret = ima_restore_template_data(template_desc, + hdr[HDR_TEMPLATE_DATA].data, + hdr[HDR_TEMPLATE_DATA].len, + &entry); + if (ret < 0) + break; + + memcpy(entry->digest, hdr[HDR_DIGEST].data, + hdr[HDR_DIGEST].len); + entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) : + le32_to_cpu(*(hdr[HDR_PCR].data)); + ret = ima_restore_measurement_entry(entry); + if (ret < 0) + break; + + } + return ret; +} diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c new file mode 100644 index 000000000..48c5a1be8 --- /dev/null +++ b/security/integrity/ima/ima_template_lib.c @@ -0,0 +1,408 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template_lib.c + * Library of supported template fields. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "ima_template_lib.h" + +static bool ima_template_hash_algo_allowed(u8 algo) +{ + if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5) + return true; + + return false; +} + +enum data_formats { + DATA_FMT_DIGEST = 0, + DATA_FMT_DIGEST_WITH_ALGO, + DATA_FMT_STRING, + DATA_FMT_HEX +}; + +static int ima_write_template_field_data(const void *data, const u32 datalen, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u8 *buf, *buf_ptr; + u32 buflen = datalen; + + if (datafmt == DATA_FMT_STRING) + buflen = datalen + 1; + + buf = kzalloc(buflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, data, datalen); + + /* + * Replace all space characters with underscore for event names and + * strings. This avoid that, during the parsing of a measurements list, + * filenames with spaces or that end with the suffix ' (deleted)' are + * split into multiple template fields (the space is the delimitator + * character for measurements lists in ASCII format). 
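 *
 * Editor's note -- example, not part of the original patch: a filename
 * such as "my file (deleted)" is therefore recorded as
 * "my_file_(deleted)" in the ASCII measurement list, keeping each
 * template field a single space-delimited token.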
+ */ + if (datafmt == DATA_FMT_STRING) { + for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++) + if (*buf_ptr == ' ') + *buf_ptr = '_'; + } + + field_data->data = buf; + field_data->len = buflen; + return 0; +} + +static void ima_show_template_data_ascii(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u8 *buf_ptr = field_data->data; + u32 buflen = field_data->len; + + switch (datafmt) { + case DATA_FMT_DIGEST_WITH_ALGO: + buf_ptr = strnchr(field_data->data, buflen, ':'); + if (buf_ptr != field_data->data) + seq_printf(m, "%s", field_data->data); + + /* skip ':' and '\0' */ + buf_ptr += 2; + buflen -= buf_ptr - field_data->data; + case DATA_FMT_DIGEST: + case DATA_FMT_HEX: + if (!buflen) + break; + ima_print_digest(m, buf_ptr, buflen); + break; + case DATA_FMT_STRING: + seq_printf(m, "%s", buf_ptr); + break; + default: + break; + } +} + +static void ima_show_template_data_binary(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ? + strlen(field_data->data) : field_data->len; + + if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) { + u32 field_len = !ima_canonical_fmt ? len : cpu_to_le32(len); + + ima_putc(m, &field_len, sizeof(field_len)); + } + + if (!len) + return; + + ima_putc(m, field_data->data, len); +} + +static void ima_show_template_field_data(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + switch (show) { + case IMA_SHOW_ASCII: + ima_show_template_data_ascii(m, show, datafmt, field_data); + break; + case IMA_SHOW_BINARY: + case IMA_SHOW_BINARY_NO_FIELD_LEN: + case IMA_SHOW_BINARY_OLD_STRING_FMT: + ima_show_template_data_binary(m, show, datafmt, field_data); + break; + default: + break; + } +} + +void ima_show_template_digest(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data); +} + +void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO, + field_data); +} + +void ima_show_template_string(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data); +} + +void ima_show_template_sig(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data); +} + +/** + * ima_parse_buf() - Parses lengths and data from an input buffer + * @bufstartp: Buffer start address. + * @bufendp: Buffer end address. + * @bufcurp: Pointer to remaining (non-parsed) data. + * @maxfields: Length of fields array. + * @fields: Array containing lengths and pointers of parsed data. + * @curfields: Number of array items containing parsed data. + * @len_mask: Bitmap (if bit is set, data length should not be parsed). + * @enforce_mask: Check if curfields == maxfields and/or bufcurp == bufendp. + * @bufname: String identifier of the input buffer. + * + * Return: 0 on success, -EINVAL on error. 
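 *
 * Editor's note -- illustrative, not part of the original patch: for a
 * field whose bit in @len_mask is clear, the buffer is expected to hold
 *
 *	u32 length | length bytes of data
 *
 * whereas a set bit means the length is taken from the preset
 * fields[i].len instead of the buffer; this is how the fixed-size
 * pcr/digest header fields are handled when restoring a kexec
 * measurement list.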
+ */ +int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp, + int maxfields, struct ima_field_data *fields, int *curfields, + unsigned long *len_mask, int enforce_mask, char *bufname) +{ + void *bufp = bufstartp; + int i; + + for (i = 0; i < maxfields; i++) { + if (len_mask == NULL || !test_bit(i, len_mask)) { + if (bufp > (bufendp - sizeof(u32))) + break; + + fields[i].len = *(u32 *)bufp; + if (ima_canonical_fmt) + fields[i].len = le32_to_cpu(fields[i].len); + + bufp += sizeof(u32); + } + + if (bufp > (bufendp - fields[i].len)) + break; + + fields[i].data = bufp; + bufp += fields[i].len; + } + + if ((enforce_mask & ENFORCE_FIELDS) && i != maxfields) { + pr_err("%s: nr of fields mismatch: expected: %d, current: %d\n", + bufname, maxfields, i); + return -EINVAL; + } + + if ((enforce_mask & ENFORCE_BUFEND) && bufp != bufendp) { + pr_err("%s: buf end mismatch: expected: %p, current: %p\n", + bufname, bufendp, bufp); + return -EINVAL; + } + + if (curfields) + *curfields = i; + + if (bufcurp) + *bufcurp = bufp; + + return 0; +} + +static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo, + struct ima_field_data *field_data) +{ + /* + * digest formats: + * - DATA_FMT_DIGEST: digest + * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest, + * where <hash algo> is provided if the hash algoritm is not + * SHA1 or MD5 + */ + u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 }; + enum data_formats fmt = DATA_FMT_DIGEST; + u32 offset = 0; + + if (hash_algo < HASH_ALGO__LAST) { + fmt = DATA_FMT_DIGEST_WITH_ALGO; + offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, "%s", + hash_algo_name[hash_algo]); + buffer[offset] = ':'; + offset += 2; + } + + if (digest) + memcpy(buffer + offset, digest, digestsize); + else + /* + * If digest is NULL, the event being recorded is a violation. + * Make room for the digest by increasing the offset of + * IMA_DIGEST_SIZE. + */ + offset += IMA_DIGEST_SIZE; + + return ima_write_template_field_data(buffer, offset + digestsize, + fmt, field_data); +} + +/* + * This function writes the digest of an event (with size limit). + */ +int ima_eventdigest_init(struct ima_event_data *event_data, + struct ima_field_data *field_data) +{ + struct { + struct ima_digest_data hdr; + char digest[IMA_MAX_DIGEST_SIZE]; + } hash; + u8 *cur_digest = NULL; + u32 cur_digestsize = 0; + struct inode *inode; + int result; + + memset(&hash, 0, sizeof(hash)); + + if (event_data->violation) /* recording a violation. */ + goto out; + + if (ima_template_hash_algo_allowed(event_data->iint->ima_hash->algo)) { + cur_digest = event_data->iint->ima_hash->digest; + cur_digestsize = event_data->iint->ima_hash->length; + goto out; + } + + if ((const char *)event_data->filename == boot_aggregate_name) { + if (ima_tpm_chip) { + hash.hdr.algo = HASH_ALGO_SHA1; + result = ima_calc_boot_aggregate(&hash.hdr); + + /* algo can change depending on available PCR banks */ + if (!result && hash.hdr.algo != HASH_ALGO_SHA1) + result = -EINVAL; + + if (result < 0) + memset(&hash, 0, sizeof(hash)); + } + + cur_digest = hash.hdr.digest; + cur_digestsize = hash_digest_size[HASH_ALGO_SHA1]; + goto out; + } + + if (!event_data->file) /* missing info to re-calculate the digest */ + return -EINVAL; + + inode = file_inode(event_data->file); + hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ? 
+ ima_hash_algo : HASH_ALGO_SHA1; + result = ima_calc_file_hash(event_data->file, &hash.hdr); + if (result) { + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, + event_data->filename, "collect_data", + "failed", result, 0); + return result; + } + cur_digest = hash.hdr.digest; + cur_digestsize = hash.hdr.length; +out: + return ima_eventdigest_init_common(cur_digest, cur_digestsize, + HASH_ALGO__LAST, field_data); +} + +/* + * This function writes the digest of an event (without size limit). + */ +int ima_eventdigest_ng_init(struct ima_event_data *event_data, + struct ima_field_data *field_data) +{ + u8 *cur_digest = NULL, hash_algo = HASH_ALGO_SHA1; + u32 cur_digestsize = 0; + + if (event_data->violation) /* recording a violation. */ + goto out; + + cur_digest = event_data->iint->ima_hash->digest; + cur_digestsize = event_data->iint->ima_hash->length; + + hash_algo = event_data->iint->ima_hash->algo; +out: + return ima_eventdigest_init_common(cur_digest, cur_digestsize, + hash_algo, field_data); +} + +static int ima_eventname_init_common(struct ima_event_data *event_data, + struct ima_field_data *field_data, + bool size_limit) +{ + const char *cur_filename = NULL; + u32 cur_filename_len = 0; + + BUG_ON(event_data->filename == NULL && event_data->file == NULL); + + if (event_data->filename) { + cur_filename = event_data->filename; + cur_filename_len = strlen(event_data->filename); + + if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX) + goto out; + } + + if (event_data->file) { + cur_filename = event_data->file->f_path.dentry->d_name.name; + cur_filename_len = strlen(cur_filename); + } else + /* + * Truncate filename if the latter is too long and + * the file descriptor is not available. + */ + cur_filename_len = IMA_EVENT_NAME_LEN_MAX; +out: + return ima_write_template_field_data(cur_filename, cur_filename_len, + DATA_FMT_STRING, field_data); +} + +/* + * This function writes the name of an event (with size limit). + */ +int ima_eventname_init(struct ima_event_data *event_data, + struct ima_field_data *field_data) +{ + return ima_eventname_init_common(event_data, field_data, true); +} + +/* + * This function writes the name of an event (without size limit). + */ +int ima_eventname_ng_init(struct ima_event_data *event_data, + struct ima_field_data *field_data) +{ + return ima_eventname_init_common(event_data, field_data, false); +} + +/* + * ima_eventsig_init - include the file signature as part of the template data + */ +int ima_eventsig_init(struct ima_event_data *event_data, + struct ima_field_data *field_data) +{ + struct evm_ima_xattr_data *xattr_value = event_data->xattr_value; + + if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG)) + return 0; + + return ima_write_template_field_data(xattr_value, event_data->xattr_len, + DATA_FMT_HEX, field_data); +} diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h new file mode 100644 index 000000000..6a3d8b831 --- /dev/null +++ b/security/integrity/ima/ima_template_lib.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template_lib.h + * Header for the library of supported template fields. 
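 *
 * Editor's note -- summary, not part of the original patch: each
 * template field id pairs an *_init() routine (used when a measurement
 * entry is created) with a *_show() routine (used when the list is
 * exported); the id-to-function mapping lives in supported_fields[] in
 * ima_template.c, e.g. "d-ng" uses ima_eventdigest_ng_init and
 * ima_show_template_digest_ng.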
+ */ +#ifndef __LINUX_IMA_TEMPLATE_LIB_H +#define __LINUX_IMA_TEMPLATE_LIB_H + +#include <linux/seq_file.h> +#include "ima.h" + +#define ENFORCE_FIELDS 0x00000001 +#define ENFORCE_BUFEND 0x00000002 + +void ima_show_template_digest(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_string(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_sig(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +int ima_parse_buf(void *bufstartp, void *bufendp, void **bufcurp, + int maxfields, struct ima_field_data *fields, int *curfields, + unsigned long *len_mask, int enforce_mask, char *bufname); +int ima_eventdigest_init(struct ima_event_data *event_data, + struct ima_field_data *field_data); +int ima_eventname_init(struct ima_event_data *event_data, + struct ima_field_data *field_data); +int ima_eventdigest_ng_init(struct ima_event_data *event_data, + struct ima_field_data *field_data); +int ima_eventname_ng_init(struct ima_event_data *event_data, + struct ima_field_data *field_data); +int ima_eventsig_init(struct ima_event_data *event_data, + struct ima_field_data *field_data); +#endif /* __LINUX_IMA_TEMPLATE_LIB_H */ |